# Hugging Face Space UI residue from scraping (originally: "Spaces: Running Running").
"""
One-time environment setup (run in a notebook/shell before launching this app):

!git clone https://huggingface.co/spaces/radames/SPIGA-face-alignment-headpose-estimator
!cp -r SPIGA-face-alignment-headpose-estimator/SPIGA .
!pip install -r SPIGA/requirements.txt
!pip install datasets
!huggingface-cli login
"""
# Project-local helpers; provides single_pred_features (SPIGA face parsing)
# and (per its usage below) `os`. Kept first, as in the original, so any
# names it star-exports are shadowed by the explicit imports that follow.
from pred_color import *
import gradio as gr
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDPMScheduler,
    StableDiffusionControlNetPipeline,
    UNet2DConditionModel,
    UniPCMultistepScheduler,
)
import torch
from diffusers.utils import load_image
from PIL import Image

# ControlNet checkpoint conditioned on SPIGA face-segmentation maps,
# trained with Chinese captions.
controlnet_model_name_or_path = "svjack/ControlNet-Face-Zh"
controlnet = ControlNetModel.from_pretrained(controlnet_model_name_or_path)

# Chinese Stable Diffusion base model (Taiyi) driven by the ControlNet above.
base_model_path = "IDEA-CCNL/Taiyi-Stable-Diffusion-1B-Chinese-v0.1"
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    base_model_path,
    controlnet=controlnet,
    # torch_dtype=torch.float16,  # enable on GPU to halve memory use
)

# Swap in a faster scheduler for the diffusion process.
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

# Move the pipeline to GPU when one is available; otherwise run on CPU.
# (pipe.enable_model_cpu_offload() is an alternative for low-VRAM GPUs.)
if torch.cuda.is_available():
    pipe = pipe.to("cuda")

# Example shown in the Gradio UI: (conditioning image path, Chinese prompt).
example_sample = [
    ["Protector_Cromwell_style.png", "戴帽子穿灰色衣服的男子"],
]
def pred_func(image, prompt):
    """Generate a face image from a reference photo and a Chinese prompt.

    Runs SPIGA face parsing on ``image`` to obtain a segmentation map,
    then uses it as the ControlNet conditioning image for the pipeline.

    Parameters
    ----------
    image :
        Input face photo as delivered by the Gradio ``image`` component
        (numpy array) or a file path / PIL image.
    prompt : str
        Chinese text prompt describing the desired face.

    Returns
    -------
    tuple
        ``(conditioning_image, generated_image)``; ``(None, None)`` when
        face parsing does not return a feature dict.
    """
    out = single_pred_features(image)
    # single_pred_features returns a dict (with key "spiga_seg") on success;
    # anything else is treated as a parsing failure. Return an explicit pair
    # so both Gradio outputs receive a value (the original fell through and
    # returned a single implicit None).
    if not isinstance(out, dict):
        return None, None
    control_image = out["spiga_seg"]
    # NOTE(review): the original also normalized/resized `image` to a
    # 512x512 RGB PIL image here, but immediately overwrote it with the
    # pipeline output — that dead work is removed; only the segmentation
    # map is passed to the pipeline.
    # Fixed seed so the same (image, prompt) pair reproduces the same output.
    generator = torch.manual_seed(0)
    generated = pipe(
        prompt,
        num_inference_steps=50,
        generator=generator,
        image=control_image,
    ).images[0]
    return control_image, generated
# Build and launch the demo UI. Bound to `demo` rather than `gr` — the
# original rebound the name `gr`, shadowing the gradio module import.
demo = gr.Interface(
    fn=pred_func,
    inputs=["image", "text"],
    outputs=[
        gr.Image(label="output").style(height=512),
        gr.Image(label="output").style(height=512),
    ],
    examples=example_sample if example_sample else None,
)
demo.launch(share=False)
if __name__ == "__main__":
    # Development scratchpad kept for reference as an inert string literal:
    # manual pipeline runs with sample prompts, plus the upload of the
    # trained ControlNet weights to the Hugging Face Hub. Never executed.
    '''
    control_image = load_image("./conditioning_image_1.png")
    prompt = "戴眼镜的中年男子"

    # generate image
    generator = torch.manual_seed(0)
    image = pipe(
        prompt, num_inference_steps=50, generator=generator, image=control_image
    ).images[0]
    image

    control_image = load_image("./conditioning_image_1.png")
    prompt = "穿蓝色衣服的秃头男子"

    # generate image
    generator = torch.manual_seed(0)
    image = pipe(
        prompt, num_inference_steps=50, generator=generator, image=control_image
    ).images[0]
    image

    control_image = load_image("./conditioning_image_2.png")
    prompt = "金色头发的美丽女子"

    # generate image
    generator = torch.manual_seed(0)
    image = pipe(
        prompt, num_inference_steps=50, generator=generator, image=control_image
    ).images[0]
    image

    control_image = load_image("./conditioning_image_2.png")
    prompt = "绿色运动衫的男子"

    # generate image
    generator = torch.manual_seed(0)
    image = pipe(
        prompt, num_inference_steps=50, generator=generator, image=control_image
    ).images[0]
    image

    from huggingface_hub import HfApi
    hf_api = HfApi()

    hf_api.upload_file(
        path_or_fileobj = "TSD_save_only/diffusion_pytorch_model.bin",
        path_in_repo = "diffusion_pytorch_model.bin",
        repo_id = "svjack/ControlNet-Face-Zh",
        repo_type = "model",
    )

    hf_api.upload_file(
        path_or_fileobj = "TSD_save_only/config.json",
        path_in_repo = "config.json",
        repo_id = "svjack/ControlNet-Face-Zh",
        repo_type = "model",
    )
    '''
    pass