Update app.py
app.py CHANGED
@@ -3,34 +3,51 @@ import spaces # ZeroGPU helper module
 from transformers import pipeline
 
 # Preload the text-generation model on CPU at startup.
-#
-# We load on CPU (device=-1) so that initialization is done before the GUI is up.
+# We load EleutherAI/gpt-j-6B on CPU (device=-1).
 generator = pipeline("text-generation", model="EleutherAI/gpt-j-6B", device=-1)
 
-@spaces.GPU #
+@spaces.GPU # ZeroGPU will allocate the GPU only during this function call.
 def expand_prompt(prompt, num_variants=5, max_length=100):
     """
-    Given a basic prompt, generate `num_variants` expanded
-
+    Given a basic prompt, generate `num_variants` expanded prompts using GPT-J-6B.
+    This function explicitly tokenizes the input with truncation (strategy 'longest_first'),
+    moves the input to GPU, generates output using the GPU, and then moves the model back to CPU.
     """
-    # Move
+    # Move model to GPU for generation.
     generator.model.to("cuda")
-
-    #
+
+    # Explicitly tokenize the input with truncation.
+    inputs = generator.tokenizer(prompt, return_tensors="pt", truncation=True, max_length=max_length)
+    # Move inputs to GPU.
+    inputs = {k: v.to("cuda") for k, v in inputs.items()}
+
+    # Generate text, explicitly setting pad_token_id to eos_token_id.
+    outputs = generator.model.generate(
+        **inputs,
+        max_length=max_length,
+        num_return_sequences=num_variants,
+        do_sample=True,
+        pad_token_id=generator.tokenizer.eos_token_id
+    )
+
+    # Decode outputs.
+    expanded = [generator.tokenizer.decode(output, skip_special_tokens=True).strip() for output in outputs]
+
+    # Move model back to CPU.
     generator.model.to("cpu")
-
+
     return "\n\n".join(expanded)
 
-# Create a Gradio Interface
 iface = gr.Interface(
     fn=expand_prompt,
     inputs=gr.Textbox(lines=2, placeholder="Enter your basic prompt here...", label="Basic Prompt"),
     outputs=gr.Textbox(lines=10, label="Expanded Prompts"),
     title="Prompt Expansion Generator",
     description=(
-        "Enter a basic prompt
-        "The model (EleutherAI/gpt-j-6B) is preloaded on CPU at startup and
-        "
+        "Enter a basic prompt to receive 5 creative, expanded prompt variants. "
+        "The model (EleutherAI/gpt-j-6B) is preloaded on CPU at startup and moved to GPU (via ZeroGPU) for generation. "
+        "Input is tokenized with truncation enabled. Once generation is complete, the model is moved back to CPU. "
+        "Simply copy the output for use in your downstream image-generation pipeline."
     )
 )
 
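A quick way to sanity-check the new tokenize → generate → decode path added in this commit is to run it locally against a small checkpoint. The sketch below mirrors the committed logic but, purely as an assumption to keep the smoke test cheap, swaps EleutherAI/gpt-j-6B for the tiny `sshleifer/tiny-gpt2` model and stays on CPU, so the `.to("cuda")` / `.to("cpu")` moves and the `@spaces.GPU` decorator are omitted:

```python
# Local smoke test for the new generate/decode path (not part of this commit).
# Assumptions: "sshleifer/tiny-gpt2" stands in for EleutherAI/gpt-j-6B, and
# everything runs on CPU, so the device moves from app.py are skipped.
from transformers import pipeline

generator = pipeline("text-generation", model="sshleifer/tiny-gpt2", device=-1)

prompt, num_variants, max_length = "a cat wearing a spacesuit", 5, 100

# Same explicit tokenization with truncation as in the committed code.
inputs = generator.tokenizer(prompt, return_tensors="pt", truncation=True, max_length=max_length)

outputs = generator.model.generate(
    **inputs,
    max_length=max_length,
    num_return_sequences=num_variants,
    do_sample=True,
    pad_token_id=generator.tokenizer.eos_token_id,
)

expanded = [generator.tokenizer.decode(o, skip_special_tokens=True).strip() for o in outputs]
print("\n\n".join(expanded))  # expect num_variants outputs separated by blank lines
```

Setting `pad_token_id` to `eos_token_id`, as the committed code does, avoids the missing-pad-token warning that GPT-style checkpoints otherwise emit during generation.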