File size: 1,760 Bytes
78e8159 0421e9d 78e8159 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 |
import os
import torch
from diffusers import ControlNetModel, UniPCMultistepScheduler, StableDiffusionControlNetPipeline
from diffusers.utils import load_image, make_image_grid
from diffusers.utils.torch_utils import randn_tensor
from controlnet_aux import OpenposeDetector
controlnet = ControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose",
torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5",
custom_pipeline="hyoungwoncho/sd_perturbed_attention_guidance_controlnet",
controlnet=controlnet,
torch_dtype=torch.float16
)
device="cuda"
pipe = pipe.to(device)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
original_image = load_image(
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/person.png"
)
openpose_image = openpose(original_image)
prompts=""
base_dir = "./results/openpose/"
if not os.path.exists(base_dir):
os.makedirs(base_dir)
latent_input = randn_tensor(shape=(1,4,64,64),generator=None, device=device, dtype=torch.float16)
output_baseline = pipe(
prompts,
image=openpose_image,
num_inference_steps=50,
guidance_scale=0.0,
pag_scale=0.0,
pag_applied_layers_index=["m0"],
latents=latent_input
).images[0]
output_pag = pipe(
prompts,
image=openpose_image,
num_inference_steps=50,
guidance_scale=0.0,
pag_scale=4.0,
pag_applied_layers_index=["m0"],
latents=latent_input
).images[0]
grid_image = make_image_grid([original_image, openpose_image, output_baseline, output_pag], rows=1, cols=4)
grid_image.save(base_dir + "sample.png") |