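"""Generate Kandinsky 2.2 samples for the Parti Prompts dataset and push the
results (images plus prompt metadata) to the Hugging Face Hub."""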
import PIL
import torch
from datasets import Dataset, Features
from datasets import Image as ImageFeature
from datasets import Value, load_dataset
from diffusers import DiffusionPipeline


def main():
    print("Loading dataset...")
    parti_prompts = load_dataset("nateraw/parti-prompts", split="train")

    print("Loading pipeline...")
    # Kandinsky 2.2 is a two-stage pipeline: a prior maps the text prompt to
    # image embeddings, and a decoder turns those embeddings into an image.
    pipe_prior = DiffusionPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
    )
    pipe_prior.to("cuda")
    pipe_prior.set_progress_bar_config(disable=True)

    t2i_pipe = DiffusionPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
    )
    t2i_pipe.to("cuda")
    t2i_pipe.set_progress_bar_config(disable=True)

    # Fix the seed so the generations are reproducible.
    seed = 0
    generator = torch.Generator("cuda").manual_seed(seed)
    ckpt_id = (
        "kandinsky-community/" + "kandinsky-2-2-prior" + "_" + "kandinsky-2-2-decoder"
    )

    print("Running inference...")
    main_dict = {}
    for i in range(len(parti_prompts)):
        sample = parti_prompts[i]
        prompt = sample["Prompt"]

        # Stage 1: text prompt -> image embeddings.
        image_embeds, negative_image_embeds = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=100,
            guidance_scale=7.5,
        ).to_tuple()

        # Stage 2: image embeddings -> image.
        image = t2i_pipe(
            image_embeds=image_embeds,
            negative_image_embeds=negative_image_embeds,
            generator=generator,
            num_inference_steps=100,
            guidance_scale=7.5,
        ).images[0]

        # Downsize to 256x256 before saving.
        image = image.resize((256, 256), resample=PIL.Image.Resampling.LANCZOS)
        img_path = f"kandinsky_22_{i}.png"
        image.save(img_path)

        main_dict.update(
            {
                prompt: {
                    "img_path": img_path,
                    "Category": sample["Category"],
                    "Challenge": sample["Challenge"],
                    "Note": sample["Note"],
                    "model_name": ckpt_id,
                    "seed": seed,
                }
            }
        )

    def generation_fn():
        for prompt in main_dict:
            prompt_entry = main_dict[prompt]
            yield {
                "Prompt": prompt,
                "Category": prompt_entry["Category"],
                "Challenge": prompt_entry["Challenge"],
                "Note": prompt_entry["Note"],
                "images": {"path": prompt_entry["img_path"]},
                "model_name": prompt_entry["model_name"],
                "seed": prompt_entry["seed"],
            }

    print("Preparing HF dataset...")
    ds = Dataset.from_generator(
        generation_fn,
        features=Features(
            Prompt=Value("string"),
            Category=Value("string"),
            Challenge=Value("string"),
            Note=Value("string"),
            images=ImageFeature(),
            model_name=Value("string"),
            seed=Value("int64"),
        ),
    )
    ds_id = "diffusers-parti-prompts/kandinsky-2-2"
    ds.push_to_hub(ds_id)


if __name__ == "__main__":
    main()