Update app.py
app.py CHANGED
@@ -7,14 +7,48 @@ import os
# Get the Hugging Face token from the environment variable, or a secret if available.
HF_TOKEN = os.environ.get("HF_TOKEN")

-# Check if HF_TOKEN is set
+# Check if HF_TOKEN is set
if not HF_TOKEN:
    HF_TOKEN_ERROR = "Hugging Face API token (HF_TOKEN) not found. Please set it as an environment variable or Gradio secret."
else:
-    HF_TOKEN_ERROR = None
+    HF_TOKEN_ERROR = None


-client = InferenceClient(token=HF_TOKEN)  # Use token instead of provider and api_key
+client = InferenceClient(token=HF_TOKEN)
+PROMPT_IMPROVER_MODEL = "TheBloke/zephyr-7B-beta-AWQ"  # A good general-purpose text model. AWQ for speed.
+
+def improve_prompt(original_prompt):
+    """Improves the user's prompt using a language model."""
+    if HF_TOKEN_ERROR:
+        raise gr.Error(HF_TOKEN_ERROR)
+
+    try:
+        # Construct a prompt for the language model.
+        system_prompt = "You are a helpful assistant that improves text prompts for image generation models. Make the prompt more descriptive, detailed, and artistic, while keeping the user's original intent."
+        prompt_for_llm = f"""<|system|>
+{system_prompt}</s>
+<|user|>
+Improve this prompt: {original_prompt}
+</s>
+<|assistant|>
+"""
+        improved_prompt = client.text_generation(
+            prompt=prompt_for_llm,
+            model=PROMPT_IMPROVER_MODEL,
+            max_new_tokens=128,  # Limit the length of the improved prompt
+            temperature=0.7,
+            top_p=0.9,
+            repetition_penalty=1.2,  # Encourage diverse output
+            stop_sequences=["</s>"],  # stop at end of sentence
+
+        )
+
+        return improved_prompt.strip()  # Remove leading/trailing whitespace
+
+
+    except Exception as e:
+        print(f"Error improving prompt: {e}")  # Log the error for debugging
+        return original_prompt  # Return the original prompt if there's an error

def generate_image(prompt, progress=gr.Progress()):
    """Generates an image using the InferenceClient and provides progress updates."""
@@ -22,30 +56,31 @@ def generate_image(prompt, progress=gr.Progress()):
    if HF_TOKEN_ERROR:
        raise gr.Error(HF_TOKEN_ERROR)

-    progress(0, desc="
+    progress(0, desc="Improving prompt...")
+    improved_prompt = improve_prompt(prompt)
+
+    progress(0.2, desc="Sending request to Hugging Face...")  # More granular progress
    try:
-
-        image = client.text_to_image(prompt, model="black-forest-labs/FLUX.1-schnell")
+        image = client.text_to_image(improved_prompt, model="black-forest-labs/FLUX.1-schnell")

-        if not isinstance(image, Image.Image):
+        if not isinstance(image, Image.Image):
            raise Exception(f"Expected a PIL Image, but got: {type(image)}")

        progress(0.8, desc="Processing image...")
-        time.sleep(0.5)
+        time.sleep(0.5)
        progress(1.0, desc="Done!")
-        return image
-    except Exception as e:
-
-        if "rate limit" in str(e).lower():  # Check message, case-insensitively.
+        return image, improved_prompt  # Return both image and improved prompt
+    except Exception as e:
+        if "rate limit" in str(e).lower():
            error_message = f"Rate limit exceeded. Please try again later. Error: {e}"
        else:
-            error_message = f"An error occurred: {e}"
+            error_message = f"An error occurred: {e}"
        raise gr.Error(error_message)



-# Gradio Interface (same CSS as before, for consistency)
css = """
+/* ... (Rest of your CSS, unchanged, from the previous response) ... */
.container {
    max-width: 800px;
    margin: auto;
@@ -70,14 +105,23 @@ css = """
.input-section, .output-section {
    margin-bottom: 1.5em;
}
-
+.output-section img {
+    display: block; /* Ensure image takes full width of container */
+    margin: auto; /* Center the image horizontally */
+    max-width: 100%; /* Prevent image overflow */
+    height: auto; /* Maintain aspect ratio */
+    border-radius: 8px; /* Rounded corners for the image */
+    box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1); /* Subtle shadow */
+}

/* Animation for the image appearance - subtle fade-in */
@keyframes fadeIn {
    from { opacity: 0; transform: translateY(20px); }
    to { opacity: 1; transform: translateY(0); }
}
-
+.output-section.animate img {
+    animation: fadeIn 0.8s ease-out;
+}

/* Improve button style */
.submit-button {
@@ -108,6 +152,16 @@ label{
    display: block; /* Each label on its own line */
    margin-bottom: 0.5em; /* Space between label and input */
}
+
+.improved-prompt-display {
+    margin-top: 10px;
+    padding: 8px;
+    border: 1px solid #ccc;
+    border-radius: 4px;
+    background-color: #f9f9f9;
+    font-style: italic;
+    color: #444;
+}
"""


@@ -115,37 +169,35 @@ with gr.Blocks(css=css) as demo:
    gr.Markdown(
        """
        # Xylaria Iris Image Generator
-        Enter a text prompt and
+        Enter a text prompt, and we'll enhance it before generating an image!
        """,
        elem_classes="title"
    )

-
    with gr.Row():
        with gr.Column():
            with gr.Group(elem_classes="input-section"):
-                prompt_input = gr.Textbox(label="Enter your prompt", placeholder="e.g., A
+                prompt_input = gr.Textbox(label="Enter your prompt", placeholder="e.g., A cat", lines=3)
                generate_button = gr.Button("Generate Image", elem_classes="submit-button")
        with gr.Column():
            with gr.Group(elem_classes="output-section") as output_group:
-                image_output = gr.Image(label="Generated Image")
+                image_output = gr.Image(label="Generated Image", show_download_button=False, interactive=False)  # No SVG, not interactive
+                improved_prompt_output = gr.Textbox(label="Improved Prompt", interactive=False, elem_classes="improved-prompt-display")


    def on_generate_click(prompt):
        output_group.elem_classes = ["output-section", "animate"]
-        image = generate_image(prompt)
+        image, improved_prompt = generate_image(prompt)
        output_group.elem_classes = ["output-section"]
-        return image
-
+        return image, improved_prompt

-    generate_button.click(on_generate_click, inputs=prompt_input, outputs=image_output)
-    prompt_input.submit(on_generate_click, inputs=prompt_input, outputs=image_output)
+    generate_button.click(on_generate_click, inputs=prompt_input, outputs=[image_output, improved_prompt_output])
+    prompt_input.submit(on_generate_click, inputs=prompt_input, outputs=[image_output, improved_prompt_output])

    gr.Examples(
-        [["A
-        ["A
-        ["
-        ["A cat wearing a top hat"]],
+        [["A dog"],
+        ["A house on a hill"],
+        ["A spaceship"]],
        inputs=prompt_input
    )

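For anyone reviewing this change who wants to exercise the new two-step flow outside of Gradio, here is a minimal, hypothetical smoke test (not part of the commit). It reuses the HF_TOKEN environment variable and the model IDs from the diff, and it assumes both models are reachable through the serverless Inference API; the Zephyr AWQ repo may not be hosted there, in which case any hosted text-generation model can be swapped in. Note that generate_image now returns (image, improved_prompt), which is why both event handlers list two outputs.

# Hypothetical smoke test (not part of the commit): chains the same two
# InferenceClient calls the Space now performs, text_generation to rewrite
# the prompt and text_to_image to render it.
import os

from huggingface_hub import InferenceClient

client = InferenceClient(token=os.environ["HF_TOKEN"])  # same env var the Space reads

PROMPT_IMPROVER_MODEL = "TheBloke/zephyr-7B-beta-AWQ"  # text model chosen in the commit
IMAGE_MODEL = "black-forest-labs/FLUX.1-schnell"       # image model used by generate_image()

original_prompt = "A cat wearing a top hat"

# Step 1: rewrite the prompt with a Zephyr-style chat template, mirroring the
# f-string built inside improve_prompt().
llm_prompt = (
    "<|system|>\nMake image prompts more descriptive while keeping the user's intent.</s>\n"
    f"<|user|>\nImprove this prompt: {original_prompt}</s>\n"
    "<|assistant|>\n"
)
improved = client.text_generation(
    llm_prompt,
    model=PROMPT_IMPROVER_MODEL,
    max_new_tokens=128,
    temperature=0.7,
).strip()
print("Improved prompt:", improved)

# Step 2: feed the rewritten prompt to the image model, as generate_image() now does.
image = client.text_to_image(improved, model=IMAGE_MODEL)
image.save("smoke_test.png")
print("Saved smoke_test.png,", image.size)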