Spaces: Running on Zero

Update app.py

app.py CHANGED
@@ -33,9 +33,9 @@ import datetime
 import cyper
 from PIL import Image
 
-from accelerate import Accelerator
+#from accelerate import Accelerator
 
-accelerator = Accelerator(mixed_precision="bf16")
+#accelerator = Accelerator(mixed_precision="bf16")
 
 hftoken = os.getenv("HF_AUTH_TOKEN")
 
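For context, the two commented-out lines are the accelerate-based setup: Accelerator(mixed_precision="bf16") selects the device and enables bf16 mixed precision, and accelerator.device was the target the pipeline was moved to further down the file. A minimal standalone sketch of that pattern (assuming the accelerate package is installed; this is not the Space's full code):

# Sketch of the accelerate pattern this commit comments out.
# Accelerator picks the device (GPU when one is visible) and configures
# bf16 mixed precision for anything it later prepares or autocasts.
from accelerate import Accelerator

accelerator = Accelerator(mixed_precision="bf16")
print(accelerator.device)  # e.g. cuda:0 on a GPU machine, cpu otherwise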
@@ -95,8 +95,8 @@ ll_transformer=SD3Transformer2DModel.from_pretrained("ford442/stable-diffusion-3
 pipe.transformer=ll_transformer.eval()
 pipe.load_lora_weights("ford442/sdxl-vae-bf16", weight_name="LoRA/UltraReal.safetensors")
 
-pipe.to(accelerator.device)
-
+#pipe.to(accelerator.device)
+pipe.to(device=device, dtype=torch.bfloat16)
 
 upscaler_2 = UpscaleWithModel.from_pretrained("Kim2091/ClearRealityV1").to(torch.device('cpu'))
 
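With the Accelerator object gone, the pipeline is placed on the device explicitly. Note that the `device` variable used in the new line is not defined anywhere in this diff, so it presumably comes from earlier in app.py. A minimal sketch of that explicit placement, with an assumed `device` definition and an illustrative checkpoint ID rather than this Space's exact models:

import torch
from diffusers import StableDiffusion3Pipeline

# Assumed for illustration; the real `device` is defined elsewhere in app.py.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Illustrative checkpoint; the Space swaps in its own transformer and LoRA.
pipe = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3.5-large",
    torch_dtype=torch.bfloat16,
)

# A single .to() call moves every sub-module (transformer, text encoders,
# VAE) to the target device in bfloat16, replacing accelerator.device.
pipe.to(device=device, dtype=torch.bfloat16)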
@@ -124,8 +124,7 @@ def infer_60(
 seed = random.randint(0, MAX_SEED)
 generator = torch.Generator(device='cuda').manual_seed(seed)
 print('-- generating image --')
-
-sd_image = pipe(
+sd_image = pipe(
 prompt=prompt,
 prompt_2=prompt,
 prompt_3=prompt,
@@ -138,7 +137,7 @@ def infer_60(
 height=height,
 generator=generator,
 max_sequence_length=512
-
+).images[0]
 print('-- got image --')
 timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
 sd35_path = f"sd35ll_{timestamp}.png"
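The two infer_60 hunks restore the shape sd_image = pipe(...).images[0]: the pipeline call returns an output object whose .images attribute is a list of PIL images, and indexing [0] keeps the single generated frame so it can later be written to sd35_path. A sketch of the patched call in context, with stand-in values for the Gradio inputs and with pipe assumed to be the pipeline configured earlier in app.py (arguments not visible in these hunks are omitted):

import datetime
import random
import torch

MAX_SEED = 2**32 - 1      # stand-in; the real constant is defined elsewhere in app.py
prompt = "test prompt"    # stand-ins for the Gradio-supplied inputs
height = 1024

seed = random.randint(0, MAX_SEED)
generator = torch.Generator(device='cuda').manual_seed(seed)

# `pipe` is the StableDiffusion3Pipeline configured earlier in app.py.
# Closing the call with ").images[0]" makes sd_image a single PIL.Image
# rather than the raw pipeline output object.
sd_image = pipe(
    prompt=prompt,
    prompt_2=prompt,
    prompt_3=prompt,
    height=height,
    generator=generator,
    max_sequence_length=512,
).images[0]

timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
sd35_path = f"sd35ll_{timestamp}.png"
sd_image.save(sd35_path)  # the Space's own save/upload step sits outside these hunks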
@@ -179,8 +178,7 @@ def infer_90(
 seed = random.randint(0, MAX_SEED)
 generator = torch.Generator(device='cuda').manual_seed(seed)
 print('-- generating image --')
-
-sd_image = pipe(
+sd_image = pipe(
 prompt=prompt,
 prompt_2=prompt,
 prompt_3=prompt,
@@ -193,7 +191,7 @@ def infer_90(
 height=height,
 generator=generator,
 max_sequence_length=512
-
+).images[0]
 print('-- got image --')
 timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
 sd35_path = f"sd35ll_{timestamp}.png"
@@ -234,8 +232,7 @@ def infer_110(
 seed = random.randint(0, MAX_SEED)
 generator = torch.Generator(device='cuda').manual_seed(seed)
 print('-- generating image --')
-
-sd_image = pipe(
+sd_image = pipe(
 prompt=prompt,
 prompt_2=prompt,
 prompt_3=prompt,
@@ -248,7 +245,7 @@ def infer_110(
 height=height,
 generator=generator,
 max_sequence_length=512
-
+).images[0]
 print('-- got image --')
 timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
 sd35_path = f"sd35ll_{timestamp}.png"