Update app.py
app.py CHANGED
@@ -70,6 +70,7 @@ def infer(
     lora_scale=0.5,
     progress=gr.Progress(track_tqdm=True)
 ):
+    print(f"Received lora_scale: {lora_scale}")  # Log to verify the received lora_scale value
     generator = torch.Generator(device).manual_seed(seed)
 
     if model != model_default:
@@ -82,7 +83,13 @@ def infer(
     prompt_embeds = long_prompt_encoder(prompt, pipe.tokenizer, pipe.text_encoder)
     negative_prompt_embeds = long_prompt_encoder(negative_prompt, pipe.tokenizer, pipe.text_encoder)
     prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
-
+
+    # Log parameters before and after applying LoRA
+    before_params = list(pipe.unet.parameters())
+    print(f"Applying LoRA with scale: {lora_scale}")
+    pipe.fuse_lora(lora_scale=lora_scale)
+    after_params = list(pipe.unet.parameters())
+    print(f"Parameters changed: {before_params != after_params}")
 
     params = {
         'prompt_embeds': prompt_embeds,
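A note on the added check: `before_params = list(pipe.unet.parameters())` stores references to the very same Parameter objects that `after_params` later collects, and Python's list comparison short-circuits on object identity, so `before_params != after_params` prints False even when fuse_lora has rewritten the weights in place. A minimal sketch of a value-based check, assuming the same `pipe` and `lora_scale` as in the diff and that torch is already imported in app.py; note the snapshot clones every UNet parameter, so it briefly doubles the UNet's memory footprint:

    # Snapshot weights by value (clone), not by reference, before fusing the LoRA.
    before = [p.detach().clone() for p in pipe.unet.parameters()]
    print(f"Applying LoRA with scale: {lora_scale}")
    pipe.fuse_lora(lora_scale=lora_scale)
    # Compare tensor contents; True if at least one weight actually changed.
    changed = any(not torch.equal(b, p.detach())
                  for b, p in zip(before, pipe.unet.parameters()))
    print(f"Parameters changed: {changed}")

A lighter alternative is to compare a cheap fingerprint taken before and after fusion, e.g. sum(p.detach().abs().sum() for p in pipe.unet.parameters()), instead of cloning the full parameter list.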