Update app.py
app.py
CHANGED
@@ -5,6 +5,13 @@ from diffusers import StableDiffusionPipeline
 from peft import PeftModel, LoraConfig
 import os
 
+MAX_SEED = np.iinfo(np.int32).max
+MAX_IMAGE_SIZE = 1024
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+model_id_default = "stable-diffusion-v1-5/stable-diffusion-v1-5"
+torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
+
 def get_lora_sd_pipeline(
     ckpt_dir='./lora_man_animestyle',
     base_model_name_or_path=None,
@@ -52,16 +59,8 @@ def align_embeddings(prompt_embeds, negative_prompt_embeds):
     return torch.nn.functional.pad(prompt_embeds, (0, 0, 0, max_length - prompt_embeds.shape[1])), \
            torch.nn.functional.pad(negative_prompt_embeds, (0, 0, 0, max_length - negative_prompt_embeds.shape[1]))
 
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-model_id_default = "stable-diffusion-v1-5/stable-diffusion-v1-5"
-torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
-
 pipe_default = get_lora_sd_pipeline(ckpt_dir='./lora_man_animestyle', base_model_name_or_path=model_id_default, dtype=torch_dtype).to(device)
 
-
-MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE = 1024
-
 def infer(
     prompt,
     negative_prompt,
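The commit hoists the module-level configuration (MAX_SEED, MAX_IMAGE_SIZE, device, model_id_default, torch_dtype) from below align_embeddings to the top of app.py, so the constants are defined once, right after the imports and before any function that reads them. The body of get_lora_sd_pipeline is not part of this diff; purely for orientation, here is a minimal sketch of what such a loader could look like, assuming it attaches PEFT LoRA adapters from ckpt_dir to the base pipeline's UNet and text encoder (the 'unet' and 'text_encoder' subdirectory names are assumptions, not taken from the diff):

    # Hypothetical sketch of get_lora_sd_pipeline; the real body is not shown
    # in this diff, so the loading logic below is an assumption.
    import os
    import torch
    from diffusers import StableDiffusionPipeline
    from peft import PeftModel

    def get_lora_sd_pipeline(ckpt_dir='./lora_man_animestyle',
                             base_model_name_or_path=None,
                             dtype=torch.float16):
        # Load the base Stable Diffusion pipeline in the requested precision.
        pipe = StableDiffusionPipeline.from_pretrained(base_model_name_or_path,
                                                       torch_dtype=dtype)
        # Attach LoRA adapter weights to the UNet, if the checkpoint has them.
        unet_dir = os.path.join(ckpt_dir, 'unet')
        if os.path.exists(unet_dir):
            pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_dir)
        # Attach LoRA adapter weights to the text encoder as well, if present.
        te_dir = os.path.join(ckpt_dir, 'text_encoder')
        if os.path.exists(te_dir):
            pipe.text_encoder = PeftModel.from_pretrained(pipe.text_encoder, te_dir)
        return pipe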
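The align_embeddings helper visible in the second hunk right-pads the prompt and negative-prompt embeddings along the sequence dimension so both end up with the same length, which the pipeline needs for classifier-free guidance. A small self-contained check of that padding logic, with made-up shapes (max_length is recomputed here; in app.py it comes from the enclosing function):

    import torch

    # Toy check of the padding in align_embeddings: both tensors are
    # right-padded along the sequence dimension to the longer of the two.
    prompt_embeds = torch.randn(1, 77, 768)            # e.g. one 77-token chunk
    negative_prompt_embeds = torch.randn(1, 154, 768)  # e.g. two chunks

    max_length = max(prompt_embeds.shape[1], negative_prompt_embeds.shape[1])
    padded_p = torch.nn.functional.pad(
        prompt_embeds, (0, 0, 0, max_length - prompt_embeds.shape[1]))
    padded_n = torch.nn.functional.pad(
        negative_prompt_embeds, (0, 0, 0, max_length - negative_prompt_embeds.shape[1]))

    assert padded_p.shape == padded_n.shape == (1, 154, 768)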