app-fast.py (+2 -4)
@@ -50,7 +50,6 @@ text_encoder = AutoModelForCausalLM.from_pretrained(
     output_attentions=True,
     low_cpu_mem_usage=True,
     quantization_config=quant_config,
-    device_map="auto",
     torch_dtype=torch.bfloat16,
 )
 
@@ -73,14 +72,13 @@ scheduler = MODEL_CONFIGS["scheduler"](
 pipe = HiDreamImagePipeline.from_pretrained(
     MODEL_PATH,
     scheduler=scheduler,
+    transformer=transformer,
     tokenizer_4=tokenizer,
     text_encoder_4=text_encoder,
-    device_map="
+    device_map="auto",
     torch_dtype=torch.bfloat16,
 ).to(device)
 
-pipe.transformer = transformer
-
 
 @spaces.GPU(duration=120)
 def generate_image(