Update app.py
app.py CHANGED
@@ -20,7 +20,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed):
     pipe = pipe.to(device)
     torch.cuda.empty_cache()
 
-    image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
+    image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, max_sequence_length=512).images[0]
     torch.cuda.empty_cache()
     return image
 
@@ -31,7 +31,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed):
     animagine = animagine.to(device)
     torch.cuda.empty_cache()
 
-    image = animagine(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
+    image = animagine(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, max_sequence_length=512).images[0]
     torch.cuda.empty_cache()
     return image
     if Model == "FXL":
@@ -44,12 +44,12 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed):
     torch.cuda.empty_cache()
 
     #torch.cuda.max_memory_allocated(device=device)
-    int_image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, output_type="latent").images
+    int_image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, max_sequence_length=512, output_type="latent").images
     pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
     pipe.enable_xformers_memory_efficient_attention()
     pipe = pipe.to(device)
     torch.cuda.empty_cache()
-    image = pipe(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=.99).images[0]
+    image = pipe(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=.99, max_sequence_length=512).images[0]
     torch.cuda.empty_cache()
     return image
 
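Every hunk makes the same change: a max_sequence_length=512 keyword is appended to each pipeline call, raising the cap on prompt tokens passed to the text encoder. A minimal standalone sketch of the resulting call pattern, assuming a diffusers pipeline whose __call__ accepts max_sequence_length (true of the Stable Diffusion 3 and Flux pipelines, where it bounds the T5 text-encoder sequence length; the checkpoint name below is only an example, not the one used in this Space):

import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Example checkpoint only; any pipeline whose __call__ takes
# max_sequence_length (e.g. Stable Diffusion 3) fits this pattern.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
)
pipe = pipe.to(device)

image = pipe(
    "a lighthouse at dusk, watercolor",
    negative_prompt="blurry, low quality",
    height=1024,
    width=1024,
    num_inference_steps=28,
    guidance_scale=7.0,
    max_sequence_length=512,  # the keyword this commit adds to every call
).images[0]
image.save("out.png")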