Update app.py

app.py CHANGED
@@ -1,9 +1,10 @@
 import gradio as gr
 import numpy as np
 import torch
-from diffusers import StableDiffusionPipeline
+from diffusers import StableDiffusionPipeline, ControlNetModel, StableDiffusionControlNetPipeline, StableDiffusionControlNetImg2ImgPipeline
 from peft import PeftModel, LoraConfig
 import os
+from PIL import Image

 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
@@ -12,13 +13,15 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model_default = "stable-diffusion-v1-5/stable-diffusion-v1-5"
 torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

+# Initialize ControlNet
+controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch_dtype)
+
 def get_lora_sd_pipeline(
     lora_dir='./lora_man_animestyle',
     base_model_name_or_path=None,
     dtype=torch.float16,
     adapter_name="default"
-
-
+):
     unet_sub_dir = os.path.join(lora_dir, "unet")
     text_encoder_sub_dir = os.path.join(lora_dir, "text_encoder")

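The checkpoint initialized above is the OpenPose ControlNet variant, so the uploaded control image should be a pose skeleton rather than a raw photo. A minimal sketch of producing one with the controlnet_aux package; this is an assumption, since the commit itself leaves control-image preparation to the user:

from controlnet_aux import OpenposeDetector
from PIL import Image

# hypothetical helper dependency, not added by this commit
openpose = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
pose_map = openpose(Image.open("person.jpg"))  # PIL image of the extracted skeleton
pose_map.save("control_image.png")             # upload this as the control image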
@@ -57,6 +60,23 @@ def align_embeddings(prompt_embeds, negative_prompt_embeds):
         torch.nn.functional.pad(negative_prompt_embeds, (0, 0, 0, max_length - negative_prompt_embeds.shape[1]))

 pipe_default = get_lora_sd_pipeline(lora_dir='./lora_man_animestyle', base_model_name_or_path=model_default, dtype=torch_dtype).to(device)
+pipe_controlnet = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
+    model_default,
+    controlnet=controlnet,
+    torch_dtype=torch_dtype
+).to(device)
+
+def preprocess_image(image, target_width, target_height):
+    """
+    Convert an image into the format expected by the model.
+    """
+    if isinstance(image, np.ndarray):
+        image = Image.fromarray(image)
+    image = image.resize((target_width, target_height), Image.LANCZOS)
+    image = np.array(image).astype(np.float32) / 255.0  # Normalize to [0, 1]
+    image = image[None].transpose(0, 3, 1, 2)  # To (batch, channels, height, width)
+    image = torch.from_numpy(image).to(device)
+    return image

 def infer(
     prompt,
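For reference, preprocess_image takes the RGB array that gr.Image delivers and returns a normalized NCHW tensor on the target device. A quick sketch, assuming the module above has been imported as-is:

import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # dummy 640x480 RGB upload
tensor = preprocess_image(frame, 512, 512)
print(tensor.shape, tensor.dtype)  # torch.Size([1, 3, 512, 512]) torch.float32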
@@ -68,57 +88,97 @@ def infer(
     seed=4,
     guidance_scale=7.5,
     lora_scale=0.5,
-
-
-
-    #
+    use_control_net=False,  # Whether to enable ControlNet
+    control_strength=0.5,   # ControlNet conditioning strength
+    source_image=None,      # Source image
+    control_image=None,     # Control image
     progress=gr.Progress(track_tqdm=True)
-
-
+):
     generator = torch.Generator(device).manual_seed(seed)

-    if
-
+    if use_control_net and control_image is not None and source_image is not None:
+        # Preprocess both images
+        source_image = preprocess_image(source_image, width, height)
+        control_image = preprocess_image(control_image, width, height)
+
+        # Attach the LoRA to the ControlNet pipeline if it has not been loaded yet
+        if not hasattr(pipe_controlnet, 'lora_loaded') or not pipe_controlnet.lora_loaded:
+            # Load the LoRA weights for the UNet
+            pipe_controlnet.unet = PeftModel.from_pretrained(
+                pipe_controlnet.unet,
+                './lora_man_animestyle/unet',
+                adapter_name="default"
+            )
+            pipe_controlnet.unet.set_adapter("default")
+
+            # Load the LoRA weights for the text encoder, if they exist
+            text_encoder_lora_path = './lora_man_animestyle/text_encoder'
+            if os.path.exists(text_encoder_lora_path):
+                pipe_controlnet.text_encoder = PeftModel.from_pretrained(
+                    pipe_controlnet.text_encoder,
+                    text_encoder_lora_path,
+                    adapter_name="default"
+                )
+                pipe_controlnet.text_encoder.set_adapter("default")
+
+            # Fuse the LoRA weights into the base model
+            pipe_controlnet.fuse_lora(lora_scale=lora_scale)
+            pipe_controlnet.lora_loaded = True  # Mark the LoRA as loaded
+
+        # Make sure control_strength is a float
+        control_strength = float(control_strength)
+
+        # Use ControlNet together with the LoRA
+        pipe = pipe_controlnet
         prompt_embeds = long_prompt_encoder(prompt, pipe.tokenizer, pipe.text_encoder)
         negative_prompt_embeds = long_prompt_encoder(negative_prompt, pipe.tokenizer, pipe.text_encoder)
         prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
+        image = pipe_controlnet(
+            prompt_embeds=prompt_embeds,
+            negative_prompt_embeds=negative_prompt_embeds,
+            image=source_image,
+            control_image=control_image,
+            width=width,
+            height=height,
+            num_inference_steps=num_inference_steps,
+            guidance_scale=guidance_scale,
+            controlnet_conditioning_scale=control_strength,
+            generator=generator
+        ).images[0]
     else:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        # Standard generation without ControlNet
+        if model != model_default:
+            pipe = StableDiffusionPipeline.from_pretrained(model, torch_dtype=torch_dtype).to(device)
+            prompt_embeds = long_prompt_encoder(prompt, pipe.tokenizer, pipe.text_encoder)
+            negative_prompt_embeds = long_prompt_encoder(negative_prompt, pipe.tokenizer, pipe.text_encoder)
+            prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
+        else:
+            pipe = pipe_default
+            prompt_embeds = long_prompt_encoder(prompt, pipe.tokenizer, pipe.text_encoder)
+            negative_prompt_embeds = long_prompt_encoder(negative_prompt, pipe.tokenizer, pipe.text_encoder)
+            prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
+            pipe.fuse_lora(lora_scale=lora_scale)
+
+        params = {
+            'prompt_embeds': prompt_embeds,
+            'negative_prompt_embeds': negative_prompt_embeds,
+            'guidance_scale': guidance_scale,
+            'num_inference_steps': num_inference_steps,
+            'width': width,
+            'height': height,
+            'generator': generator,
+        }
+
+        image = pipe(**params).images[0]

-    return
+    return image

 examples = [
     "A young man in anime style. The image is characterized by high definition and resolution. Handsome, thoughtful man, attentive eyes. The man is depicted in the foreground, close-up or in the middle. High-quality images of the face, eyes, nose, lips, hands and clothes. The background and background are blurred and indistinct. The play of light and shadow is visible on the face and clothes.",
-    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k.",
-    "An astronaut riding a green horse.",
 ]

 examples_negative = [
     "blurred details, low resolution, poor image of a man's face, poor quality, artifacts, black and white image",
-    "blurry details, low resolution, poorly defined edges",
-    "bad face, bad quality, artifacts, low-res, black and white",
 ]

 css = """
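Both branches above end by fusing the adapter. A minimal sketch of the idea, assuming pipe_default already carries the LoRA attached by get_lora_sd_pipeline: fuse_lora folds the scaled adapter deltas into the base weights once, so the denoising loop itself pays no per-step adapter overhead.

# Sketch only: pipe_default already has the LoRA weights attached, as above.
pipe_default.fuse_lora(lora_scale=0.5)  # bake the LoRA in at half strength
image = pipe_default(prompt="A young man in anime style").images[0]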
@@ -130,15 +190,7 @@ css = """

 available_models = [
     "stable-diffusion-v1-5/stable-diffusion-v1-5",
-    "SG161222/Realistic_Vision_V3.0_VAE",
     "CompVis/stable-diffusion-v1-4",
-    "stabilityai/sdxl-turbo",
-    "runwayml/stable-diffusion-v1-5",
-    "sd-legacy/stable-diffusion-v1-5",
-    "prompthero/openjourney",
-    "stabilityai/stable-diffusion-3-medium-diffusers",
-    "stabilityai/stable-diffusion-3.5-large",
-    "stabilityai/stable-diffusion-3.5-large-turbo",
 ]

 with gr.Blocks(css=css) as demo:
@@ -220,27 +272,14 @@ with gr.Blocks(css=css) as demo:
                 value=512,
             )

-        # Function for working with ControlNet ---------------------------------------------------------------------
-        def process_input_ControlNet(image, use_control_net, control_strength, control_mode):
-            if use_control_net:
-                # Logic for processing with ControlNet
-                result = f"ControlNet is active! Mode: {control_mode}, Strength: {control_strength}"
-            else:
-                # Logic for processing without ControlNet
-                result = "ControlNet is disabled."
-            return result
-
         with gr.Blocks():
             with gr.Row():
-                # Checkbox to enable/disable ControlNet
                 use_control_net = gr.Checkbox(
                     label="Use ControlNet",
                     value=False,
                 )

-                # Additional options for ControlNet
                 with gr.Column(visible=False) as control_net_options:
-                    # Slider to adjust the strength
                     control_strength = gr.Slider(
                         label="Control Strength",
                         minimum=0.0,
@@ -249,112 +288,30 @@ with gr.Blocks(css=css) as demo:
                         step=0.05,
                     )

-                    # Dropdown for selecting the mode
                     control_mode = gr.Dropdown(
                         label="Control Mode",
                         choices=[
-                            "edge_detection",
-                            "canny_edge_detection",
                             "pose_estimation",
-                            "depth_map",
-                            "segmentation_map",
-                            "scribble_sketch",
-                            "normal_map",
-                            "hed_edge_detection",
-                            "openpose",
-                            "mlsd_line_detection",
-                            "scribble_diffusion",
-                            "semantic_segmentation",
-                            "style_transfer",
-                            "colorization",
-                            "custom_map"
                         ],
                         value="pose_estimation",
                     )

-
+                    source_image = gr.Image(label="Upload Source Image")
+
                     control_image = gr.Image(label="Upload Control Image")

-                    # Button to run ControlNet
-                    run_button = gr.Button("Run")
-
-                    # Text box for the output
-                    output = gr.Textbox(label="Output")
-
-                # Logic for showing/hiding the additional options
                 use_control_net.change(
                     fn=lambda x: gr.update(visible=x),
                     inputs=use_control_net,
                     outputs=control_net_options,
                 )

-                # Bind the Run button to the ControlNet handler
-                run_button.click(
-                    fn=process_input_ControlNet,
-                    inputs=[control_image, use_control_net, control_strength, control_mode],
-                    outputs=output,
-                )
-
-                # Function for working with the IP-adapter ----------------------------------------------------------------------------
-                def process_input_IP_adapter(image, use_ip_adapter, ip_adapter_scale, ip_adapter_image):
-                    if use_ip_adapter:
-                        # Logic for processing with the IP-adapter
-                        result = f"IP-adapter is active! Scale: {ip_adapter_scale}"
-                    else:
-                        # Logic for processing without the IP-adapter
-                        result = "IP-adapter is disabled."
-                    return result
-
-                # Build the interface
-                with gr.Blocks():
-                    with gr.Row():
-                        # Checkbox to enable/disable the IP-adapter
-                        use_ip_adapter = gr.Checkbox(
-                            label="Use IP-adapter",
-                            value=False,
-                        )
-
-                        # Additional options for the IP-adapter
                        with gr.Column(visible=False) as ip_adapter_options:
-                            # Slider to adjust the scale
-                            ip_adapter_scale = gr.Slider(
-                                label="IP-adapter Scale",
-                                minimum=0.0,
-                                maximum=1.0,
-                                value=0.5,
-                                step=0.05,
-                            )
-
-                            # Upload widget for images
-                            ip_adapter_image = gr.Image(label="Upload IP-adapter Image")
-
-                            # Button to start processing
-                            run_button = gr.Button("Run")
-
-                            # Text box for the output
-                            output = gr.Textbox(label="Output")
-
-                        # Logic for showing/hiding the additional options
-                        use_ip_adapter.change(
-                            fn=lambda x: gr.Row.update(visible=x),
-                            inputs=use_ip_adapter,
-                            outputs=ip_adapter_options,
-                        )
-
-                        # Bind the Run button to the IP-adapter handler
-                        run_button.click(
-                            fn=process_input_IP_adapter,
-                            inputs=[ip_adapter_image, use_ip_adapter, ip_adapter_scale, ip_adapter_image],
-                            outputs=output,
-                        )
-
     gr.Examples(examples=examples, inputs=[prompt])
     gr.Examples(examples=examples_negative, inputs=[negative_prompt])

     run_button = gr.Button("Run", scale=1, variant="primary")
     result = gr.Image(label="Result", show_label=False)

-
     gr.on(
         triggers=[run_button.click, prompt.submit],
         fn=infer,
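The checkbox callback above toggles the options column. A self-contained sketch of the same show/hide pattern, as a hypothetical standalone demo (gr.update is the generic update helper in current Gradio):

import gradio as gr

with gr.Blocks() as toggle_demo:
    show = gr.Checkbox(label="Show options", value=False)
    with gr.Column(visible=False) as options:
        strength = gr.Slider(label="Strength", minimum=0.0, maximum=1.0, value=0.5)
    # returning gr.update(visible=...) from the handler shows or hides the column
    show.change(fn=lambda x: gr.update(visible=x), inputs=show, outputs=options)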
@@ -368,10 +325,10 @@ with gr.Blocks(css=css) as demo:
             seed,
             guidance_scale,
             lora_scale,
-            #
-
-            #
-            #
+            use_control_net,   # Add the ControlNet checkbox
+            control_strength,  # Add the strength control
+            source_image,      # Add the source image
+            control_image,     # Add the control image
         ],
         outputs=[result],
     )
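As a closing reference, gr.on binds one handler to several triggers at once, which is how the Run button and Enter in the prompt box both reach infer. A minimal hypothetical sketch of the pattern in isolation:

import gradio as gr

with gr.Blocks() as mini_demo:
    prompt = gr.Textbox(label="Prompt")
    run = gr.Button("Run")
    echo = gr.Textbox(label="Echo")
    # the same handler fires on both the button click and textbox submit
    gr.on(triggers=[run.click, prompt.submit], fn=lambda p: p.upper(), inputs=[prompt], outputs=[echo])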