Update app.py
app.py
CHANGED
@@ -1,6 +1,162 @@
import gradio as gr
from huggingface_hub import InferenceClient
from PIL import Image
import time
import os

# Get the Hugging Face token from the environment variable, or a secret if available.
HF_TOKEN = os.environ.get("HF_TOKEN")

# Check if HF_TOKEN is set; if not, raise a configuration error (handled later)
if not HF_TOKEN:
    HF_TOKEN_ERROR = "Hugging Face API token (HF_TOKEN) not found. Please set it as an environment variable or Gradio secret."
else:
    HF_TOKEN_ERROR = None  # No error if the token is found


client = InferenceClient(token=HF_TOKEN)  # Use token instead of provider and api_key

def generate_image(prompt, progress=gr.Progress()):
    """Generates an image using the InferenceClient and provides progress updates."""

    if HF_TOKEN_ERROR:
        raise gr.Error(HF_TOKEN_ERROR)

    progress(0, desc="Sending request to Hugging Face...")
    try:
        # Use the client.text_to_image method. Assume xylaria-iris is valid here.
        image = client.text_to_image(prompt, model="black-forest-labs/FLUX.1-schnell")

        if not isinstance(image, Image.Image):  # Basic type checking.
            raise Exception(f"Expected a PIL Image, but got: {type(image)}")

        progress(0.8, desc="Processing image...")
        time.sleep(0.5)  # Simulate some processing
        progress(1.0, desc="Done!")
        return image
    except Exception as e:  # Catch all exceptions from the API call
        # Check for rate limit errors (different with InferenceClient). This is a best-effort check.
        if "rate limit" in str(e).lower():  # Check message, case-insensitively.
            error_message = f"Rate limit exceeded. Please try again later. Error: {e}"
        else:
            error_message = f"An error occurred: {e}"  # Generic error message
        raise gr.Error(error_message)


# Gradio Interface (same CSS as before, for consistency)
css = """
.container {
    max-width: 800px;
    margin: auto;
    padding: 20px;
    border: 1px solid #ddd;
    border-radius: 10px;
    box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
}
.title {
    text-align: center;
    font-size: 2.5em;
    margin-bottom: 0.5em;
    color: #333;
    font-family: 'Arial', sans-serif; /* More readable font */
}
.description {
    text-align: center;
    font-size: 1.1em;
    margin-bottom: 1.5em;
    color: #555;
}
.input-section, .output-section {
    margin-bottom: 1.5em;
}
.output-section img {
    display: block; /* Ensure image takes full width of container */
    margin: auto; /* Center the image horizontally */
    max-width: 100%; /* Prevent image overflow */
    height: auto; /* Maintain aspect ratio */
    border-radius: 8px; /* Rounded corners for the image */
    box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1); /* Subtle shadow */
}

/* Animation for the image appearance - subtle fade-in */
@keyframes fadeIn {
    from { opacity: 0; transform: translateY(20px); }
    to { opacity: 1; transform: translateY(0); }
}
.output-section.animate img {
    animation: fadeIn 0.8s ease-out;
}

/* Improve button style */
.submit-button {
    display: block;
    margin: auto;
    padding: 10px 20px;
    font-size: 1.1em;
    color: white;
    background-color: #4CAF50;
    border: none;
    border-radius: 5px;
    cursor: pointer;
    transition: background-color 0.3s ease;
}
.submit-button:hover {
    background-color: #367c39;
}

/* Style the error messages */
.error-message {
    color: red;
    text-align: center;
    margin-top: 1em;
    font-weight: bold;
}
label {
    font-weight: bold; /* Make labels bold */
    display: block; /* Each label on its own line */
    margin-bottom: 0.5em; /* Space between label and input */
}
"""


with gr.Blocks(css=css) as demo:
    gr.Markdown(
        """
        # Xylaria Iris Image Generator
        Enter a text prompt and generate an image using the Xylaria Iris model!
        """,
        elem_classes="title"
    )

    with gr.Row():
        with gr.Column():
            with gr.Group(elem_classes="input-section"):
                prompt_input = gr.Textbox(label="Enter your prompt", placeholder="e.g., A beautiful landscape with a magical tree", lines=3)
                generate_button = gr.Button("Generate Image", elem_classes="submit-button")
        with gr.Column():
            with gr.Group(elem_classes="output-section") as output_group:
                image_output = gr.Image(label="Generated Image")  # Removed width and height

    def on_generate_click(prompt):
        output_group.elem_classes = ["output-section", "animate"]
        image = generate_image(prompt)
        output_group.elem_classes = ["output-section"]
        return image

    generate_button.click(on_generate_click, inputs=prompt_input, outputs=image_output)
    prompt_input.submit(on_generate_click, inputs=prompt_input, outputs=image_output)

    gr.Examples(
        [["A futuristic cityscape at night"],
         ["A mystical forest with glowing mushrooms"],
         ["An astronaut exploring a new planet"],
         ["A cat wearing a top hat"]],
        inputs=prompt_input
    )

if __name__ == "__main__":
    demo.queue().launch()