Update app.py

app.py CHANGED
@@ -3,9 +3,17 @@ from huggingface_hub import InferenceClient
 import os
 import re  # For post-processing fallback

+# --- FastAPI & Pydantic Imports ---
+from fastapi import FastAPI, Request, HTTPException, Depends, Body
+from fastapi.responses import JSONResponse
+from pydantic import BaseModel, Field
+from typing import Literal, Optional
+
 # --- Configuration ---
 API_TOKEN = os.getenv("HF_TOKEN", None)
-MODEL = "HuggingFaceH4/zephyr-7b-beta"
+MODEL = "HuggingFaceH4/zephyr-7b-beta"
+# --- Define the Secret Key for the API ---
+API_SECRET_KEY = "onlyfordearygt"  # Keep this secure in a real application (e.g., env variable)

 # --- Initialize Inference Client ---
 try:
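Note: the hard-coded API_SECRET_KEY added above is fine for a demo, but, as its own inline comment says, a real deployment would read it from an environment variable. A minimal sketch of that alternative (the environment variable name is an assumption, not part of this commit):

    # Hypothetical hardening: read the key from a Space secret instead of hard-coding it
    API_SECRET_KEY = os.getenv("API_SECRET_KEY", "onlyfordearygt")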
@@ -19,9 +27,25 @@ try:
     print("Inference Client initialized successfully.")
 except Exception as e:
     print(f"Error initializing Inference Client: {e}")
-    raise gr.Error(f"Failed to initialize the AI model client for '{MODEL}'. Check model name, network, and HF_TOKEN secret if applicable. Error: {e}")
+    # We still want the Gradio app to potentially load, but maybe show an error
+    # raise gr.Error(f"Failed to initialize the AI model client for '{MODEL}'. Check model name, network, and HF_TOKEN secret if applicable. Error: {e}")
+    client = None  # Set client to None so we can check later
+    print("WARNING: AI Client initialization failed. API/Generation will not work.")
+
+
+# --- Pydantic Model for API Request Body ---
+class GenerateRequest(BaseModel):
+    prompt: str
+    backend_choice: Literal["Static", "Flask", "Node.js"] = "Static"
+    file_structure: Literal["Single File", "Multiple Files"] = "Multiple Files"
+    max_tokens: Optional[int] = Field(default=3072, gt=128, le=4096)  # Add validation
+    temperature: Optional[float] = Field(default=0.7, gt=0.0, le=2.0)
+    top_p: Optional[float] = Field(default=0.9, gt=0.0, le=1.0)
+    secret_key: str  # Required for authentication
+

-# --- Core Code Generation Function ---
+# --- Core Code Generation Function (Mostly Unchanged) ---
+# Note: This function is now also used by the API
 def generate_code(
     prompt: str,
     backend_choice: str,
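For reference, the Field bounds on GenerateRequest mean malformed API calls never reach the route handler: FastAPI answers with a 422 validation error straight from Pydantic. A minimal sketch of the same check in isolation (illustrative values only):

    from pydantic import ValidationError

    ok = GenerateRequest(prompt="A bakery landing page", secret_key="onlyfordearygt")  # defaults fill the rest
    try:
        GenerateRequest(prompt="x", secret_key="onlyfordearygt", max_tokens=9999)  # violates le=4096
    except ValidationError as err:
        print(err)  # reports that max_tokens must be <= 4096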
@@ -33,16 +57,23 @@ def generate_code(
     """
     Generates website code based on user prompt and choices.
     Aims for richer CSS, emphasizes completeness, and strictly outputs ONLY raw code.
-    Yields the code token by token for live updates.
+    Yields the code token by token for live updates (for Gradio UI).
+    The *final* yielded value is the complete, cleaned code (for API).
     """
+    # Check if client initialized properly
+    if client is None:
+        final_error_message = "## Error\n\nAI Model Client not initialized. Generation is unavailable."
+        print(final_error_message)
+        # Yield the error for Gradio, return it for API callers later
+        yield final_error_message
+        return  # Stop execution for this generator
+
     print(f"--- Generating Code ---")
     print(f"Prompt: {prompt[:100]}...")
     print(f"Backend Context: {backend_choice}")
     print(f"File Structure: {file_structure}")
-    # Log the max_tokens value being used for this request
     print(f"Settings: Max Tokens={max_tokens}, Temp={temperature}, Top-P={top_p}")

-    # --- Dynamically Build System Message ---
     if file_structure == "Single File":
         file_structure_instruction = (
             "- **File Structure is 'Single File':** Generate ONLY a single, complete `index.html` file. "
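Since generate_code stays a generator, a non-streaming caller simply drains it and keeps the last value; the /api/generate route added later in this diff consumes it exactly this way. A minimal sketch (argument order follows the Gradio inputs list wired up below):

    final_code = ""
    for chunk in generate_code("A bakery landing page", "Static", "Multiple Files", 3072, 0.7, 0.9):
        final_code = chunk  # the last yield is the complete, cleaned code (or an "## Error" message)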
@@ -58,15 +89,14 @@ def generate_code(
             "- Inside `index.html`, link `style.css` in the `<head>` and include `script.js` before `</body>` if generated."
         )

-    # Assemble the full system message - Emphasizing completeness and NO premature stopping
     system_message = (
         "You are an expert frontend web developer AI. Your primary goal is to generate **complete, visually appealing, modern, and well-styled** frontend code (HTML, CSS, client-side JS) based *only* on the user's description and selected options. "
         "Follow ALL these rules with EXTREME STRICTNESS:\n"
         "1. **STYLE & DETAIL:** Generate rich, detailed code. Use **plenty of CSS** for layout, spacing, typography, colors, and effects. Aim for a high-quality visual result.\n"
-        "2. **COMPLETENESS:** Generate the *entire* requested code structure. Ensure all files/sections are fully generated and properly closed.\n"
+        "2. **COMPLETENESS:** Generate the *entire* requested code structure. Ensure all files/sections are fully generated and properly closed. **DO NOT STOP GENERATING PREMATURELY.** Finish the whole task.\n"
         "3. **RAW CODE ONLY:** Your *entire* response MUST consist *only* of the requested source code. NO extra text, NO explanations, NO apologies, NO introductions, NO summaries, NO comments about the code (except standard code comments), NO MARKDOWN formatting (like ```html), and ***ABSOLUTELY NO CONVERSATIONAL TEXT OR TAGS*** like `<|user|>` or `<|assistant|>`.\n"
         "4. **IMMEDIATE CODE START:** The response MUST begin *directly* with the first character of the code (`<!DOCTYPE html>` or `<!-- index.html -->`).\n"
-        "5. **IMMEDIATE CODE END:** The response MUST end *immediately* after the very last character of the generated code.\n"
+        "5. **IMMEDIATE CODE END:** The response MUST end *immediately* after the very last character of the generated code. DO NOT add *any* text, spaces, or newlines after the code concludes.\n"
         "6. **MANDATORY `index.html`:** Always generate the content for `index.html`.\n"
         f"7. **FILE STRUCTURE ({file_structure}):** Strictly follow ONLY the instructions for the *selected* file structure:\n"
         f"   {file_structure_instruction}\n"
@@ -76,21 +106,21 @@ def generate_code(
         "REMEMBER: Create COMPLETE, visually appealing code. Output ONLY raw code. START immediately with code. FINISH the entire code generation. END immediately with code. NO extra text/tags."
     )

-    # --- Construct the messages for the API ---
     messages = [
         {"role": "system", "content": system_message},
         {"role": "user", "content": f"Generate the complete website frontend code for: {prompt}"}
     ]

-    # --- Stream the response from the API ---
     response_stream = ""
     full_response_for_cleaning = ""
-    token_count = 0
+    token_count = 0
+    last_yielded_value = ""  # Store the last value for the API
+
     try:
         print("Sending request to Hugging Face Inference API...")
         stream = client.chat_completion(
             messages=messages,
-            max_tokens=max_tokens,
+            max_tokens=max_tokens,
             stream=True,
             temperature=temperature,
             top_p=top_p,
@@ -98,52 +128,42 @@ def generate_code(
         for message in stream:
             token = message.choices[0].delta.content
             if isinstance(token, str):
-                token_count += 1
+                token_count += 1
                 response_stream += token
                 full_response_for_cleaning += token
-                #
-                #
-                # print(f"Stream progress: Received ~{token_count} tokens...")
-                yield response_stream  # Yield cumulative response for live update
+                last_yielded_value = response_stream  # Keep updating last value during stream
+                yield response_stream  # Yield cumulative response for live UI update

         print(f"API stream finished. Received ~{token_count} tokens. Raw length: {len(full_response_for_cleaning)}")
-
-        if token_count >= max_tokens - 10:  # Check if close to the limit (allowing for slight variations)
+        if token_count >= max_tokens - 10:
             print(f"WARNING: Generation might have been cut short due to reaching max_tokens limit ({max_tokens}).")
-            # Optionally, append a warning to the output itself, though it violates the "code only" rule
-            # full_response_for_cleaning += "\n\n<!-- WARNING: Output may be incomplete due to max_tokens limit. -->"

-        # --- Post-Processing
+        # --- Post-Processing ---
         cleaned_response = full_response_for_cleaning.strip()
         cleaned_response = re.sub(r"^\s*```[a-z]*\s*\n?", "", cleaned_response)
         cleaned_response = re.sub(r"\n?\s*```\s*$", "", cleaned_response)
         cleaned_response = re.sub(r"<\s*\|?\s*(user|assistant)\s*\|?\s*>", "", cleaned_response, flags=re.IGNORECASE)
-        common_phrases = [
-            "Here is the code:", "Okay, here is the code:", "Here's the code:",
-        ]
-        # Simple check, might need more robust cleaning if issues persist
-        for phrase in common_phrases:
-            # Check start
+        # Remove common phrases only if they are clearly at the start/end and unlikely to be code
+        common_phrases_start = ["Here is the code:", "Okay, here is the code:", "Here's the code:", "Sure, here is the code you requested:"]
+        for phrase in common_phrases_start:
             if cleaned_response.lower().startswith(phrase.lower()):
                 cleaned_response = cleaned_response[len(phrase):].lstrip()
-            # Check end - be careful not to remove parts of valid code
-            # This end check is risky, might remove valid closing comments or similar.
-            # Consider removing if it causes issues.
-            # if cleaned_response.lower().endswith(phrase.lower()):
-            #     cleaned_response = cleaned_response[:-len(phrase)].rstrip()

-        yield cleaned_response
+        last_yielded_value = cleaned_response.strip()  # Final cleaned value
+        yield last_yielded_value  # Yield final cleaned response for Gradio UI

     except Exception as e:
         error_message = f"An error occurred during the API call: {e}"
         print(error_message)
-        yield error_message
+        final_error_message = f"## Error\n\nFailed to generate code.\n**Reason:** {e}"
+        # Yield error for Gradio UI
+        yield final_error_message
+        # Ensure the generator stops, API will handle the exception based on this
+        # For the API, we will raise an exception in the route handler if needed


 # --- Build Gradio Interface using Blocks ---
+# Define this *before* creating the FastAPI app that might mount it
 with gr.Blocks(css=".gradio-container { max-width: 90% !important; }") as demo:
     gr.Markdown("# ✨ Website Code Generator ✨")
     gr.Markdown(
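The three re.sub passes above strip a leading markdown fence, a trailing fence, and any stray chat-role tags. A quick illustration with a made-up raw response:

    raw = "```html\n<!DOCTYPE html><html></html>\n```"
    cleaned = re.sub(r"^\s*```[a-z]*\s*\n?", "", raw.strip())
    cleaned = re.sub(r"\n?\s*```\s*$", "", cleaned)
    # cleaned == "<!DOCTYPE html><html></html>"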
@@ -151,82 +171,115 @@ with gr.Blocks(css=".gradio-container { max-width: 90% !important; }") as demo:
         "The code appears live below.\n"
         "**Important:**\n"
         "1. This generator creates code based *only* on your initial description. To refine, modify your description and generate again.\n"
-        "2. **If the code output stops abruptly**, it likely hit the 'Max New Tokens' limit. **Increase the slider value below** and try again.\n"
+        "2. **If the code output stops abruptly**, it likely hit the 'Max New Tokens' limit. **Increase the slider value below** and try again!\n"
+        "3. An API endpoint is available at `/api/generate` (POST request, requires secret key)."  # Notify about API
     )

     with gr.Row():
         with gr.Column(scale=2):
-            prompt_input = gr.Textbox(
-                label="Website Description",
-                placeholder="e.g., A modern portfolio...",
-                lines=6,
-            )
-            backend_radio = gr.Radio(
-                ["Static", "Flask", "Node.js"], label="Backend Context Hint", value="Static",
-                info="Hint for AI (e.g., {{var}}) - generates ONLY frontend code."
-            )
-            file_structure_radio = gr.Radio(
-                ["Multiple Files", "Single File"], label="Output File Structure", value="Multiple Files",
-                info="Choose 'Single File' (all in index.html) or 'Multiple Files' (separate css/js)."
-            )
+            prompt_input = gr.Textbox(label="Website Description", placeholder="e.g., A modern portfolio...", lines=6,)
+            backend_radio = gr.Radio(["Static", "Flask", "Node.js"], label="Backend Context Hint", value="Static", info="Hint for AI - generates ONLY frontend code.")
+            file_structure_radio = gr.Radio(["Multiple Files", "Single File"], label="Output File Structure", value="Multiple Files", info="Choose 'Single File' or 'Multiple Files'.")
             generate_button = gr.Button("🎨 Generate Stylish Website Code", variant="primary")

         with gr.Column(scale=3):
-            code_output = gr.Code(
-                label="Generated Code (Raw Output - Aiming for Style!)",
-                language="html",
-                lines=30,
-                interactive=False,
-            )
+            code_output = gr.Code(label="Generated Code (Raw Output - Aiming for Style!)", language="html", lines=30, interactive=False,)

     with gr.Accordion("Advanced Generation Settings", open=False):
-        max_tokens_slider = gr.Slider(
-            minimum=512,
-            maximum=4096,
-            value=3072,
-            step=256,  # Larger steps might be practical
-            label="Max New Tokens",
-            info="Max length of generated code. Increase if output is cut off!"  # Updated info
-        )
-        temperature_slider = gr.Slider(
-            minimum=0.1, maximum=1.2, value=0.7, step=0.1, label="Temperature",
-            info="Controls randomness. Lower=focused, Higher=creative."
-        )
-        top_p_slider = gr.Slider(
-            minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-P",
-            info="Alternative randomness control."
-        )
-
-    # --- Connect Inputs/Outputs ---
+        max_tokens_slider = gr.Slider(minimum=512, maximum=4096, value=3072, step=256, label="Max New Tokens", info="Max length. Increase if output is cut off!")
+        temperature_slider = gr.Slider(minimum=0.1, maximum=1.2, value=0.7, step=0.1, label="Temperature", info="Controls randomness.")
+        top_p_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-P", info="Alternative randomness control.")
+
+    # --- Connect Gradio Inputs/Outputs ---
     generate_button.click(
         fn=generate_code,
-        inputs=[
-            prompt_input,
-            backend_radio,
-            file_structure_radio,
-            max_tokens_slider,
-            temperature_slider,
-            top_p_slider,
-        ],
+        inputs=[prompt_input, backend_radio, file_structure_radio, max_tokens_slider, temperature_slider, top_p_slider,],
         outputs=code_output,
     )

-    # --- Examples ---
+    # --- Gradio Examples ---
     gr.Examples(
         examples=[
-            ["A simple counter page...", "Static", "Single File"],
-            ["A responsive product grid...", "Static", "Multiple Files"],
-            ["A personal blog homepage...", "Flask", "Multiple Files"],
-            ["A 'Coming Soon' page...", "Static", "Multiple Files"]
+            ["A simple counter page...", "Static", "Single File"],
+            ["A responsive product grid...", "Static", "Multiple Files"],
+            ["A personal blog homepage...", "Flask", "Multiple Files"],
+            ["A 'Coming Soon' page...", "Static", "Multiple Files"]
         ],
         inputs=[prompt_input, backend_radio, file_structure_radio],
         label="Example Prompts (Aiming for Style)"
     )

+
+# --- Create FastAPI App and Mount Gradio ---
+# This approach uses FastAPI as the main server and mounts Gradio onto it
+app = FastAPI()
+
+# --- Define the API Endpoint ---
+@app.post("/api/generate")
+async def api_generate_code(payload: GenerateRequest):
+    """
+    API endpoint to generate website code.
+    Requires a secret key for authentication.
+    """
+    print(f"API Request received for prompt: {payload.prompt[:50]}...")
+
+    # --- Authentication ---
+    if payload.secret_key != API_SECRET_KEY:
+        print("API Authentication failed: Invalid secret key.")
+        raise HTTPException(status_code=403, detail="Invalid secret key")
+
+    # Check if HF client is available
+    if client is None:
+        print("API Error: AI Client not initialized.")
+        raise HTTPException(status_code=503, detail="AI Model Client not initialized. Service unavailable.")
+
+    print("API Authentication successful.")
+
+    # --- Call the generator function and consume it ---
+    final_code = ""
+    try:
+        # Consume the generator to get the last yielded value (the complete code)
+        code_generator = generate_code(
+            prompt=payload.prompt,
+            backend_choice=payload.backend_choice,
+            file_structure=payload.file_structure,
+            max_tokens=payload.max_tokens,
+            temperature=payload.temperature,
+            top_p=payload.top_p,
+        )
+        for code_chunk in code_generator:
+            final_code = code_chunk  # Keep overwriting until the last one
+
+        # Check if the final result indicates an error from within generate_code
+        if final_code.strip().startswith("## Error"):
+            print(f"API Error during generation: {final_code}")
+            # Extract reason if possible, otherwise return generic error
+            reason = final_code.split("Reason:**")[-1].strip() if "Reason:**" in final_code else "Generation failed internally."
+            raise HTTPException(status_code=500, detail=f"Code generation failed: {reason}")

+        print(f"API generated code length: {len(final_code)}")
+        # --- Return the final code ---
+        return JSONResponse(content={"generated_code": final_code})
+
+    except HTTPException as http_exc:
+        # Re-raise HTTPException if it's already one (like auth failure or internal error)
+        raise http_exc
+    except Exception as e:
+        # Catch any other unexpected errors during generation/consumption
+        print(f"API - Unexpected Error during generation: {e}")
+        raise HTTPException(status_code=500, detail=f"An unexpected error occurred: {str(e)}")
+
+
+# --- Mount the Gradio app onto the FastAPI app ---
+# The Gradio UI will be available at the root path "/"
+app = gr.mount_gradio_app(app, demo, path="/")
+
 # --- Launch ---
+# Use Uvicorn to run the FastAPI app (which now includes Gradio)
 if __name__ == "__main__":
-    print("Starting Gradio App...")
-    demo.queue()
-    demo.launch()
+    import uvicorn
+    print("Starting FastAPI server with Gradio mounted...")
+    # Recommended settings for Hugging Face Spaces: host="0.0.0.0", port=7860
+    # You might need to adjust port if running locally and 7860 is taken.
+    uvicorn.run(app, host="0.0.0.0", port=7860)
+    # Note: demo.launch() is no longer used directly here, as uvicorn runs the combined app.
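With the Gradio UI mounted at "/" and the new route at /api/generate, the endpoint can be exercised like this (a sketch assuming the server from this commit is running locally on port 7860 and the requests package is installed; omitted fields fall back to the Pydantic defaults):

    import requests

    resp = requests.post(
        "http://localhost:7860/api/generate",
        json={
            "prompt": "A landing page for a bakery",
            "file_structure": "Single File",
            "secret_key": "onlyfordearygt",
        },
    )
    resp.raise_for_status()  # 403 on a wrong key, 503 if the inference client never initialized
    print(resp.json()["generated_code"][:200])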