Manjushri committed
Commit 42be2f3 · verified · 1 Parent(s): 21d8cba

Update app.py

Files changed (1)
  1. app.py +1 -11
app.py CHANGED
@@ -33,19 +33,9 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed):
         torch.cuda.empty_cache()
         return image
 
-    if Model == "Club":
-        torch.cuda.max_memory_allocated(device=device)
-        torch.cuda.empty_cache()
-        pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
-        pipe.enable_model_cpu_offload()
-        pipe.vae.enable_slicing()
-        pipe.vae.enable_tiling()
-        image = pipe(prompt=Prompt, guidance_scale=scale, num_images_per_prompt=1, num_inference_steps=steps, width=1024, height=1024,).images[0]
-        return image
-
     return image
 
-gr.Interface(fn=genie, inputs=[gr.Radio(['PhotoReal', 'Animagine XL 4', "Club"], value='PhotoReal', label='Choose Model'),
+gr.Interface(fn=genie, inputs=[gr.Radio(['PhotoReal', 'Animagine XL 4'], value='PhotoReal', label='Choose Model'),
     gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
     gr.Textbox(label='What you Do Not want the AI to generate. 77 Token Limit'),
     gr.Slider(512, 1024, 768, step=128, label='Height'),
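For reference, below is a minimal standalone sketch of the FLUX.1-dev branch this commit removes, assuming the diffusers FluxPipeline API as used in the deleted lines and a CUDA-capable GPU. The prompt, guidance scale, step count, and output filename are placeholders, not values from the Space, and the black-forest-labs/FLUX.1-dev weights are gated on Hugging Face.

```python
# Sketch of the removed "Club" branch as a self-contained script (assumptions noted above).
import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()  # keep weights on CPU, move modules to GPU as needed
pipe.vae.enable_slicing()        # decode latents in slices to reduce peak VRAM
pipe.vae.enable_tiling()         # tile the VAE for large 1024x1024 outputs

image = pipe(
    prompt="a photo of a cat",   # placeholder prompt
    guidance_scale=3.5,          # placeholder; the Space passed its `scale` slider here
    num_inference_steps=28,      # placeholder; the Space passed its `steps` slider here
    num_images_per_prompt=1,
    width=1024,
    height=1024,
).images[0]
image.save("flux_output.png")
```

One detail worth noting: the deleted torch.cuda.max_memory_allocated(device=device) call only reports peak allocation and its return value was discarded, so removing it along with the branch changes no behavior.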