Manjushri committed on
Commit 871bf7f · verified · 1 Parent(s): 9e995ef

Update app.py

Files changed (1)
  1. app.py +24 -4
app.py CHANGED
@@ -5,20 +5,40 @@ import modin.pandas as pd
 from PIL import Image
 from diffusers import StableDiffusion3Pipeline #DiffusionPipeline #, StableDiffusion3Pipeline
 from huggingface_hub import hf_hub_download
+from diffusers import BitsAndBytesConfig, SD3Transformer2DModel

 device = 'cuda' if torch.cuda.is_available() else 'cpu'
 torch.cuda.max_memory_allocated(device=device)
 torch.cuda.empty_cache()

-pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3.5-large-turbo", torch_dtype=torch.bfloat16)
-pipe = pipe.to(device)
-pipe.enable_model_cpu_offload()
+model_id = "stabilityai/stable-diffusion-3.5-large-turbo"

+nf4_config = BitsAndBytesConfig(
+    load_in_4bit=True,
+    bnb_4bit_quant_type="nf4",
+    bnb_4bit_compute_dtype=torch.bfloat16
+)
+model_nf4 = SD3Transformer2DModel.from_pretrained(
+    model_id,
+    subfolder="transformer",
+    quantization_config=nf4_config,
+    torch_dtype=torch.bfloat16
+)
+
+t5_nf4 = T5EncoderModel.from_pretrained("diffusers/t5-nf4", torch_dtype=torch.bfloat16)
+
+pipeline = StableDiffusion3Pipeline.from_pretrained(
+    model_id,
+    transformer=model_nf4,
+    text_encoder_3=t5_nf4,
+    torch_dtype=torch.bfloat16
+)
+pipeline.enable_model_cpu_offload()

 def genie (Prompt, height, width, seed):
     generator = np.random.seed(0) if seed == 0 else torch.manual_seed(seed)

-    image = pipe(Prompt, num_inference_steps=4, height=height, width=width, guidance_scale=0.0,).images[0]
+    image = pipeline(Prompt, num_inference_steps=4, height=height, width=width, guidance_scale=0.0,).images[0]

     return image
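Note: the hunk above calls T5EncoderModel.from_pretrained without adding an import for T5EncoderModel; presumably app.py imports it elsewhere, otherwise it would need `from transformers import T5EncoderModel`. The following is a minimal, self-contained sketch of the NF4-quantized loading path this commit introduces, with that import made explicit. The example prompt, resolution, and output filename are illustrative assumptions, not part of the commit.

# Sketch of the commit's quantized loading path (assumes bitsandbytes is installed
# and a CUDA device is available; the T5EncoderModel import is an assumption).
import torch
from diffusers import BitsAndBytesConfig, SD3Transformer2DModel, StableDiffusion3Pipeline
from transformers import T5EncoderModel

model_id = "stabilityai/stable-diffusion-3.5-large-turbo"

# 4-bit NF4 quantization for the SD3.5 transformer to reduce VRAM use.
nf4_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
model_nf4 = SD3Transformer2DModel.from_pretrained(
    model_id,
    subfolder="transformer",
    quantization_config=nf4_config,
    torch_dtype=torch.bfloat16,
)

# Pre-quantized NF4 T5 text encoder referenced in the commit.
t5_nf4 = T5EncoderModel.from_pretrained("diffusers/t5-nf4", torch_dtype=torch.bfloat16)

pipeline = StableDiffusion3Pipeline.from_pretrained(
    model_id,
    transformer=model_nf4,
    text_encoder_3=t5_nf4,
    torch_dtype=torch.bfloat16,
)
pipeline.enable_model_cpu_offload()  # keep idle submodules on CPU between calls

# Illustrative invocation; the Turbo checkpoint targets few-step, CFG-free sampling.
image = pipeline(
    "a photo of a cat",          # example prompt (assumption)
    num_inference_steps=4,
    guidance_scale=0.0,
    height=1024,
    width=1024,
).images[0]
image.save("out.png")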