Manjushri committed on
Commit bc8b335 · verified · 1 Parent(s): 42be2f3

Update app.py

Files changed (1)
  1. app.py +15 -22
app.py CHANGED
@@ -3,40 +3,33 @@ import torch
 import numpy as np
 import modin.pandas as pd
 from PIL import Image
-from diffusers import DiffusionPipeline, FluxPipeline #CogView4Pipeline #, StableDiffusion3Pipeline from diffusers import CogView4Pipeline
+from diffusers import FluxPipeline #CogView4Pipeline #, StableDiffusion3Pipeline from diffusers import CogView4Pipeline
 from huggingface_hub import hf_hub_download
 
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
 torch.cuda.max_memory_allocated(device=device)
 torch.cuda.empty_cache()
+pipe = CogView4Pipeline.from_pretrained("THUDM/CogView4-6B", torch_dtype=torch.bfloat16)
+
+# Open it for reduce GPU memory usage
+pipe.enable_model_cpu_offload()
+pipe.vae.enable_slicing()
+pipe.vae.enable_tiling()
 
 def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed):
     generator = np.random.seed(0) if seed == 0 else torch.manual_seed(seed)
 
-    if Model == "PhotoReal":
-        pipe = DiffusionPipeline.from_pretrained("circulus/canvers-real-v3.9.1", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("circulus/canvers-real-v3.9.1")
-        pipe.enable_xformers_memory_efficient_attention()
-        pipe = pipe.to(device)
-        torch.cuda.empty_cache()
-
-        image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
-        torch.cuda.empty_cache()
-        return image
-
-    if Model == "Animagine XL 4":
-        animagine = DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-4.0", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-4.0")
-        animagine.enable_xformers_memory_efficient_attention()
-        animagine = animagine.to(device)
-        torch.cuda.empty_cache()
-
-        image = animagine(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
-        torch.cuda.empty_cache()
-        return image
+    image = pipe(
+        prompt=Prompt, negative_prompt=negative_prompt,
+        guidance_scale=scale,
+        num_images_per_prompt=1,
+        num_inference_steps=steps,
+        width=width,
+        height=height,).images[0]
 
     return image
 
-gr.Interface(fn=genie, inputs=[gr.Radio(['PhotoReal', 'Animagine XL 4'], value='PhotoReal', label='Choose Model'),
-    gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
+gr.Interface(fn=genie, inputs=[gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
     gr.Textbox(label='What you Do Not want the AI to generate. 77 Token Limit'),
     gr.Slider(512, 1024, 768, step=128, label='Height'),
     gr.Slider(512, 1024, 768, step=128, label='Width'),
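
Two things stand out in the new hunk as committed: the updated import still only brings in FluxPipeline (CogView4Pipeline stays commented out) even though the module-level code instantiates CogView4Pipeline, and genie keeps its Model parameter and unused generator even though the model Radio was dropped from the Gradio inputs. Below is a minimal sketch of how those pieces would likely fit together, following the CogView4 usage already shown in the diff; it assumes a diffusers release with CogView4 support and that the gradio import, Interface outputs, and launch call live outside this hunk.

import torch
from diffusers import CogView4Pipeline  # import the class that is actually instantiated below

pipe = CogView4Pipeline.from_pretrained("THUDM/CogView4-6B", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()   # keep weights on CPU, move modules to GPU only when needed
pipe.vae.enable_slicing()         # decode latents in slices to lower peak VRAM
pipe.vae.enable_tiling()          # decode large images tile by tile

def genie(Prompt, negative_prompt, height, width, scale, steps, seed):
    # 'Model' is dropped from the signature since the Radio selector is no longer
    # among the Gradio inputs; the seed is wired through the standard generator argument.
    image = pipe(
        prompt=Prompt,
        negative_prompt=negative_prompt,
        guidance_scale=scale,
        num_images_per_prompt=1,
        num_inference_steps=steps,
        width=width,
        height=height,
        generator=torch.manual_seed(seed),
    ).images[0]
    return image

With enable_model_cpu_offload handling device placement, the explicit .to(device) moves and per-branch torch.cuda.empty_cache() calls from the old PhotoReal/Animagine paths are no longer needed.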