app-fast.py CHANGED (+5 -2)
@@ -51,7 +51,7 @@ text_encoder = AutoModelForCausalLM.from_pretrained(
     LLAMA_MODEL_NAME,
     torch_dtype=torch.bfloat16,
     low_cpu_mem_usage=True,
-    device_map=
+    device_map=None,
     output_hidden_states=True,
     output_attentions=True,
     quantization_config=quantization_config,
@@ -84,7 +84,10 @@ pipe.transformer = transformer
 
 @spaces.GPU(duration=120)
 def generate_image(
-    prompt: str,
+    prompt: str,
+    resolution: str,
+    seed: int,
+    progress=gr.Progress(track_tqdm=True),  # noqa: ARG001, B008
 ) -> tuple[PIL.Image.Image, int]:
     if seed == -1:
         seed = torch.randint(0, 1_000_000, (1,)).item()