Create app.py
app.py ADDED
@@ -0,0 +1,91 @@
import gradio as gr
import torch
from PIL import Image
from diffusers import (
    T2IAdapter,
    StableDiffusionXLAdapterPipeline,
    AutoencoderKL,
    EulerAncestralDiscreteScheduler,
)
from controlnet_aux import PidiNetDetector

# Global variable to store the pipeline
pipe = None

# Lazily load the SDXL + T2I-Adapter pipeline on first use.
def load_pipe():
    global pipe
    if pipe is None:
        model_id = "stabilityai/stable-diffusion-xl-base-1.0"
        adapter = T2IAdapter.from_pretrained(
            "Adapter/t2iadapter",
            subfolder="sketch_sdxl_1.0",
            torch_dtype=torch.float16,
            adapter_type="full_adapter_xl",
        )
        euler_a = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
        vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)

        pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
            model_id,
            adapter=adapter,
            vae=vae,
            scheduler=euler_a,
            torch_dtype=torch.float16,
            variant="fp16",
        ).to("cuda")
        pipe.enable_xformers_memory_efficient_attention()

# Turn the uploaded image into the grayscale sketch map the adapter expects.
def preprocess_image(uploaded_file):
    if uploaded_file is None:
        return None, "Please upload an image."
    img_upload = Image.open(uploaded_file)
    preprocessor = PidiNetDetector.from_pretrained("lllyasviel/Annotators").to("cuda")
    img_preprocessed = preprocessor(
        img_upload,
        detect_resolution=1024,
        image_resolution=1024,
        apply_filter=True,
    ).convert("L")
    return img_preprocessed, ""

# Preprocess the sketch and run the diffusion pipeline for the Gradio UI.
def generate(prompt, uploaded_file, prompt_addition, negative_prompt, num_images, num_steps, guidance_scale, adapter_conditioning_scale, adapter_conditioning_factor):
    global pipe
    load_pipe()  # Ensure the model is loaded
    img_preprocessed, error_message = preprocess_image(uploaded_file)
    if error_message:
        # Surface the problem in the UI instead of returning a non-image value to the gallery.
        raise gr.Error(error_message)
    params = {
        "image": img_preprocessed,
        "num_inference_steps": num_steps,
        "prompt": f"{prompt}, {prompt_addition}" if prompt_addition.strip() else prompt,
        "negative_prompt": negative_prompt,
        "guidance_scale": guidance_scale,
        "adapter_conditioning_scale": adapter_conditioning_scale / 100,
        "adapter_conditioning_factor": adapter_conditioning_factor / 100,
        "num_images_per_prompt": num_images,
    }
    # The pipeline already returns PIL images, so they can be passed straight to the gallery.
    generated_images = pipe(**params).images
    return generated_images

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Prompt", value="a robot elephant", placeholder="Enter a description for the image you want to generate")
            prompt_addition = gr.Textbox(label="Prompt addition", value="in real world, 4k photo, highly detailed")
            negative_prompt = gr.Textbox(label="Negative prompt", value="disfigured, extra digit, fewer digits, cropped, worst quality, low quality")
            num_images = gr.Slider(minimum=1, maximum=10, value=1, label="Number of images to generate")
            num_steps = gr.Slider(minimum=1, maximum=100, value=20, label="Number of steps")
            guidance_scale = gr.Slider(minimum=6, maximum=10, value=7, label="Guidance scale")
            adapter_conditioning_scale = gr.Slider(minimum=0, maximum=100, value=90, label="Adapter conditioning scale")
            adapter_conditioning_factor = gr.Slider(minimum=0, maximum=100, value=90, label="Adapter conditioning factor")
            uploaded_file = gr.File(label="Upload image", type="filepath")

        with gr.Column():
            output_gallery = gr.Gallery(label="Generated images")
            generate_button = gr.Button("Generate")
            generate_button.click(
                generate,
                inputs=[prompt, uploaded_file, prompt_addition, negative_prompt, num_images, num_steps, guidance_scale, adapter_conditioning_scale, adapter_conditioning_factor],
                outputs=[output_gallery],
            )

demo.launch()
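For reference, the PidiNet preprocessing stage can be exercised on its own before wiring it into the Space. The snippet below is a minimal sketch, not part of the commit: it assumes a CUDA GPU, the controlnet_aux package installed, and a local drawing saved as input.png (the file names are placeholders).

from PIL import Image
from controlnet_aux import PidiNetDetector

# Same detector and settings as preprocess_image() in app.py.
detector = PidiNetDetector.from_pretrained("lllyasviel/Annotators").to("cuda")
sketch = detector(
    Image.open("input.png"),
    detect_resolution=1024,
    image_resolution=1024,
    apply_filter=True,
).convert("L")
sketch.save("sketch_preview.png")  # Grayscale sketch map fed to the T2I-Adapter.

Saving the intermediate sketch makes it easier to tell whether poor results come from the preprocessor or from the diffusion pipeline itself.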