import gradio as gr
from transformers import pipeline

# Load a large text-generation model that runs on the GPU.
# Here we use EleutherAI/gpt-j-6B: https://huggingface.co/EleutherAI/gpt-j-6B
# Setting device=0 tells the pipeline to use GPU 0.
generator = pipeline("text-generation", model="EleutherAI/gpt-j-6B", device=0)
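
# (Optional) GPT-J-6B needs roughly 24 GB of GPU memory in full precision (about
# 12 GB in fp16). If memory is tight, one option is to pass
# torch_dtype=torch.float16 to pipeline() so the weights load in half precision.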

def expand_prompt(prompt, num_variants=5, max_length=100):
    """
    Given a basic prompt, generate `num_variants` expanded prompts using GPT-J-6B.
    """
    outputs = generator(
        prompt,
        max_length=max_length,
        num_return_sequences=num_variants,
        do_sample=True,
    )
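    # Each item in `outputs` is a dict whose "generated_text" field contains the
    # original prompt followed by the sampled continuation.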
    expanded = [out["generated_text"].strip() for out in outputs]
    return "\n\n".join(expanded)

iface = gr.Interface(
    fn=expand_prompt,
    inputs=gr.Textbox(lines=2, placeholder="Enter your basic prompt here...", label="Basic Prompt"),
    outputs=gr.Textbox(lines=10, label="Expanded Prompts"),
    title="Prompt Expansion Generator",
    description=(
        "Enter a basic prompt and receive 5 creative, expanded prompt variants. "
        "This tool leverages the EleutherAI/gpt-j-6B model on an A100 GPU for fast, expressive prompt expansion. "
        "Simply copy the output for use with your downstream image-generation pipeline."
    )
)

if __name__ == "__main__":
    iface.launch()
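
# Optional: iface.launch(share=True) creates a temporary public URL, which can be
# handy when the app runs on a remote GPU machine.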