Update app.py
app.py (CHANGED)

Old version (removed):

@@ -1,243 +1,50 @@
import gradio as gr
from huggingface_hub import InferenceClient
import os
import re

# --- Configuration ---
API_TOKEN = os.getenv("HF_TOKEN", None)

MODEL = "Qwen/Qwen2-7B-Instruct"  # Example: Switched to a smaller, faster model for potentially better backend handling
# MODEL = "Qwen/Qwen2.5-Coder-32B-Instruct"  # Or keep your original model

# --- Initialize Inference Client ---
try:
    print(f"Initializing Inference Client for model: {MODEL}")
    if API_TOKEN:
        print("Using HF Token found in environment.")
        client = InferenceClient(model=MODEL, token=API_TOKEN)
    else:
        print("HF Token not found. Running without token (may lead to rate limits).")
        client = InferenceClient(model=MODEL)
    print("Inference Client initialized successfully.")
except Exception as e:
    raise gr.Error(f"Failed to initialize the AI model client for '{MODEL}'. Check model name, network, and HF_TOKEN secret if applicable. Error: {e}")

# --- Helper Function for Parsing ---
def parse_code_blocks(text, file_structure, backend_choice):
    """Parses the generated text into code blocks based on markers."""
    if file_structure == "Single File":
        # Everything goes into HTML for single file mode
        return {
            "html": text.strip(),
            "css": "/* CSS is embedded in HTML */",
            "js": "// JavaScript is embedded in HTML",
            "backend": f"// No backend file generated for 'Single File' mode."
        }

    # Default markers
    markers = {
        "html": r"<!--\s*index\.html\s*-->",
        "css": r"/\*\s*style\.css\s*\*/",
        "js": r"//\s*script\.js\s*//",  # Added trailing // to potentially help delimit
    }
    # Add backend markers based on choice
    if backend_choice == "Flask":
        markers["backend"] = r"#\s*app\.py\s*#"  # Using # marker #
    elif backend_choice == "Node.js":
        markers["backend"] = r"//\s*(server|app)\.js\s*//"  # Using // marker //

    # Find all marker positions
    marker_positions = {}
    for key, pattern in markers.items():
        match = re.search(pattern, text, re.IGNORECASE)
        if match:
            marker_positions[key] = match.start()

    # If no markers found, assume it's all HTML (fallback)
    if not marker_positions:
        print("Warning: No file markers found in the output. Assuming all content is HTML.")
        # Check if it looks like CSS or JS first before defaulting to HTML
        cleaned_text = text.strip()
        if cleaned_text.startswith(("{", ".", "#", "/*")) and "{" in cleaned_text and "}" in cleaned_text:
            print("Heuristic: Output looks like CSS.")
            return {"html": "", "css": cleaned_text, "js": "", "backend": ""}
        elif cleaned_text.startswith(("function", "const", "let", "var", "//", "import")) and ("(" in cleaned_text or "{" in cleaned_text):
            print("Heuristic: Output looks like JS.")
            return {"html": "", "css": "", "js": cleaned_text, "backend": ""}
        else:  # Default to HTML
            return {"html": cleaned_text, "css": "", "js": "", "backend": ""}

    # Sort markers by their position
    sorted_markers = sorted(marker_positions.items(), key=lambda item: item[1])

    # Extract code blocks
    code_blocks = {key: "" for key in markers}  # Initialize all keys
    for i, (key, start_pos) in enumerate(sorted_markers):
        # Find the start of the code block (after the marker)
        marker_match = re.search(markers[key], text, re.IGNORECASE)  # Find the specific marker text
        code_start = marker_match.end()

        # Find the end of the code block (start of the next marker or end of text)
        if i + 1 < len(sorted_markers):
            next_marker_key, next_marker_pos = sorted_markers[i + 1]
            code_end = next_marker_pos
        else:
            code_end = len(text)

        # Extract and clean the code
        code = text[code_start:code_end].strip()
        code_blocks[key] = code

    # Fill potential missing keys if they existed in original markers dict
    final_blocks = {
        "html": code_blocks.get("html", ""),
        "css": code_blocks.get("css", ""),
        "js": code_blocks.get("js", ""),
        "backend": code_blocks.get("backend", "")
    }

    # If backend is static but backend code was somehow generated, clear it
    if backend_choice == "Static":
        final_blocks["backend"] = "// No backend file needed for 'Static' mode."

    # Fallback if HTML is empty but others aren't (marker parsing failed maybe?)
    if not final_blocks["html"] and (final_blocks["css"] or final_blocks["js"] or final_blocks["backend"]):
        # Check if the original text looks like HTML
        if text.strip().startswith("<!DOCTYPE html") or text.strip().startswith("<html"):
            print("Warning: Marker parsing might have failed, but text looks like HTML. Assigning full text to HTML.")
            final_blocks["html"] = text.strip()
            final_blocks["css"] = ""  # Clear others to avoid duplication if parsing failed badly
            final_blocks["js"] = ""
            final_blocks["backend"] = ""

    return final_blocks


# --- Core Code Generation Function ---
def generate_code(
    prompt: str,
    backend_choice: str,
    file_structure: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
):
    """
    Generates website code based on user prompt and choices.
    Aims for richer CSS, emphasizes completeness, and strictly outputs ONLY raw code.
    Parses output into separate files for the UI tabs when 'Multiple Files' is selected.
    Yields cumulative raw code to the first tab for live updates, then returns parsed blocks.
    """
    print(f"--- Generating Code ---")
    print(f"Prompt: {prompt[:100]}...")
    print(f"Backend Context: {backend_choice}")
    print(f"File Structure: {file_structure}")
    print(f"Settings: Max Tokens={max_tokens}, Temp={temperature}, Top-P={top_p}")

    # --- Dynamically Build System Message ---
    backend_instructions = ""
    file_markers = ["<!-- index.html -->", "/* style.css */", "// script.js //"]  # Base markers

    if backend_choice == "Static":
        backend_instructions = (
            f"- **Backend is '{backend_choice}':** Generate ONLY frontend code (HTML, CSS, JS). Do NOT generate any server-side files or logic.\n"
        )
        file_structure_detail = (
            "Generate code for `index.html`, `style.css`, and `script.js` (if JS is needed). "
            "Use these EXACT markers to separate the files:\n"
            "   `<!-- index.html -->`\n"
            "   `/* style.css */`\n"
            "   `// script.js //` (only include if JS is generated)\n"
            "- Place the corresponding code directly after each marker.\n"
            "- Inside `index.html`, link `style.css` in the `<head>` and include `script.js` before `</body>` if generated."
        )
    elif backend_choice == "Flask":
        backend_instructions = (
            f"- **Backend is '{backend_choice}':** Generate a basic Python Flask application (`app.py`).\n"
            "  - Include necessary imports (`Flask`, `render_template`).\n"
            "  - Create a simple Flask app instance.\n"
            "  - Define a root route (`@app.route('/')`) that renders `index.html`.\n"
            "  - Include the standard `if __name__ == '__main__': app.run(debug=True)` block.\n"
            "- **HTML Templates:** Modify the generated `index.html` to be a Flask template.\n"
            "  - Use Jinja2 syntax (e.g., `{{ variable }}`) *if* the prompt implies dynamic data, otherwise generate static HTML structure within the template.\n"
            "  - Link CSS using `url_for('static', filename='style.css')`.\n"
            "  - Include JS using `url_for('static', filename='script.js')`.\n"
            "- Assume CSS and JS are served from a `static` folder (but generate the code for `style.css` and `script.js` directly).\n"
        )
        file_markers.append("# app.py #")  # Add Flask marker
        file_structure_detail = (
            "Generate code for `index.html` (as a Flask template), `style.css`, `script.js` (if JS is needed), and `app.py`.\n"
            "Use these EXACT markers to separate the files:\n"
            "   `<!-- index.html -->`\n"
            "   `/* style.css */`\n"
            "   `// script.js //` (only include if JS is generated)\n"
            "   `# app.py #`\n"
            "- Place the corresponding code directly after each marker."
        )
    elif backend_choice == "Node.js":
        backend_instructions = (
            f"- **Backend is '{backend_choice}':** Generate a basic Node.js Express application (`server.js` or `app.js`).\n"
            "  - Include necessary requires (`express`, `path`).\n"
            "  - Create an Express app instance.\n"
            "  - Configure middleware to serve static files from a `public` directory (e.g., `app.use(express.static('public'))`).\n"
            "  - Define a root route (`app.get('/')`) that sends `index.html` (located in `public`).\n"
            "  - Start the server (`app.listen(...)`).\n"
            "- **HTML:** Generate a standard `index.html` file. Link CSS (`/style.css`) and JS (`/script.js`) assuming they are in the `public` folder.\n"
        )
        file_markers.append("// server.js //")  # Add Node marker
        file_structure_detail = (
            "Generate code for `index.html`, `style.css`, `script.js` (if JS is needed), and `server.js` (or `app.js`).\n"
            "Use these EXACT markers to separate the files:\n"
            "   `<!-- index.html -->`\n"
            "   `/* style.css */`\n"
            "   `// script.js //` (only include if JS is generated)\n"
            "   `// server.js //`\n"
            "- Place the corresponding code directly after each marker."
        )

    # File Structure Instructions
    if file_structure == "Single File":
        file_structure_instruction = (
            "- **File Structure is 'Single File':** Generate ONLY a single, complete `index.html` file. "
            "Embed ALL CSS directly within `<style>` tags inside the `<head>`. "
            "Embed ALL necessary JavaScript directly within `<script>` tags just before the closing `</body>` tag. "
            "Do NOT use any file separation markers. Ignore backend instructions if any were implied."
        )
    else:  # Multiple Files
        file_structure_instruction = (
            f"- **File Structure is 'Multiple Files':** {file_structure_detail}"
        )

    # Assemble the full system message
    system_message = (
        "5. **IMMEDIATE CODE END:** The response MUST end *immediately* after the very last character of the generated code (`</html>`, `}`, `;`, `})`, etc.). DO NOT add *any* text, spaces, or newlines after the code concludes.\n"
        f"{backend_instructions}"  # Inject backend specific instructions
        f"6. **FILE STRUCTURE ({file_structure}):** Strictly follow ONLY the instructions for the *selected* file structure:\n"
        f"   {file_structure_instruction}\n"
        "7. **ACCURACY:** Generate functional code addressing the user's prompt, respecting the chosen backend context (templating, file serving).\n\n"
        "REMEMBER: Output ONLY raw code. Respect the chosen backend and file structure. Use the specified markers EXACTLY if generating multiple files. START immediately with code. FINISH the entire code generation. END immediately with code. NO extra text/tags."
    )

    messages = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": prompt},
    ]

    raw_response = ""
    token_count = 0

    try:
        print("Sending request to Hugging Face Inference API...")
        stream = client.chat_completion(
            messages=messages,
            max_tokens=max_tokens,
@@ -248,173 +55,83 @@ def generate_code(
        for message in stream:
            token = message.choices[0].delta.content
            if isinstance(token, str):
                raw_response += token
                token_count += 1
                yield (raw_response, "Streaming...", "Streaming...", "Streaming...")

        if token_count >= max_tokens - 15:  # Check if close to the limit
            print(f"WARNING: Generation might have been cut short due to reaching max_tokens limit ({max_tokens}).")
            raw_response += "\n\n<!-- WARNING: Output may be incomplete due to max_tokens limit. -->"

        # --- Post-Processing (Basic Cleanup - Less aggressive now) ---
        cleaned_response = raw_response.strip()
        # Remove potential markdown code blocks (less likely with strict prompt but good safety)
        cleaned_response = re.sub(r"^\s*```[a-z]*\s*\n?", "", cleaned_response)
        cleaned_response = re.sub(r"\n?\s*```\s*$", "", cleaned_response)

        cleaned_response = re.sub(r"<\|(user|assistant)\|>", "", cleaned_response, flags=re.IGNORECASE)

        parsed_code = parse_code_blocks(cleaned_response, file_structure, backend_choice)

        return (
            parsed_code["html"],
            parsed_code["css"],
            parsed_code["js"],
            parsed_code["backend"]  # Will be empty or placeholder for Static/Single File
        )

    except Exception as e:
        error_message = f"Error during code generation: {e}"
        print(error_message)
        # Return error message to all tabs
        error_output = f"## Error\n\nFailed to generate code.\n**Reason:** {e}\n\nPlease check the model status, your connection, and API token (if applicable)."
        return (error_output, error_output, error_output, error_output)


with gr.Blocks() as demo:
    gr.Markdown("# ✨ Website Code Generator v2 ✨")
    gr.Markdown(
        "Describe the website you want. "
        "3. **If code seems cut off**, increase 'Max New Tokens' and regenerate!"
    )

    with gr.Row():
        with gr.Column(scale=2):
            prompt_input = gr.Textbox(
                label="Website Description",
                placeholder="e.g., A ...",
                lines=6,
            )
            backend_radio = gr.Radio(
                ["Static", "Flask", "Node.js"],
            )
            file_structure_radio = gr.Radio(
                ["Multiple Files", "Single File"], label="Output File Structure", value="Multiple Files",
                info="Single: All in index.html. Multiple: Separated into tabs (HTML, CSS, JS, Backend if applicable)."
            )
            generate_button = gr.Button("✨ Generate Website Code", variant="primary")

        with gr.Column(scale=3):
            with gr.Tabs():
                with gr.Tab("HTML", elem_id="html-tab"):
                    html_output = gr.Code(
                        label="index.html",
                        language="html",
                        lines=28,  # Adjust lines per tab
                        interactive=False,
                        show_label=True
                    )
                with gr.Tab("CSS", elem_id="css-tab"):
                    css_output = gr.Code(
                        label="style.css",
                        language="css",
                        lines=28,
                        interactive=False,
                        show_label=True
                    )
                with gr.Tab("JavaScript", elem_id="js-tab"):
                    js_output = gr.Code(
                        label="script.js",
                        language="javascript",
                        lines=28,
                        interactive=False,
                        show_label=True
                    )
                with gr.Tab("Backend", elem_id="backend-tab"):
                    # Label will indicate file type
                    backend_output = gr.Code(
                        label="app.py / server.js",
                        language="python",  # Default, can maybe be dynamic later if needed
                        lines=28,
                        interactive=False,
                        show_label=True
                    )

    with gr.Accordion("Advanced Settings", open=False):
        max_tokens_slider = gr.Slider(
            minimum=512,
            maximum=4096,
            value=3072,
            step=256,
            label="Max New Tokens"
        )
        temperature_slider = gr.Slider(
            minimum=0.1, maximum=1.2, value=0.7, step=0.1, label="Temperature"
        )
        top_p_slider = gr.Slider(
            minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-P"
        )

    # --- Connect Inputs/Outputs ---
    generate_button.click(
        fn=generate_code,
        inputs=[
            prompt_input,
            backend_radio,
            file_structure_radio,
            max_tokens_slider,
            temperature_slider,
            top_p_slider,
        ],
        # Output to the individual code blocks within the tabs
        outputs=[
            html_output,
            css_output,
            js_output,
            backend_output,
        ],
    )

    # --- Examples ---
    gr.Examples(
        examples=[
            # Static Examples
            ["A simple counter page with a number display, an increment button, and a decrement button. Style the buttons nicely and center everything.", "Static", "Single File"],
            ["A responsive product grid for an e-commerce site. Each card needs an image, title, price, and 'Add to Cart' button with a hover effect. Use modern CSS.", "Static", "Multiple Files"],
            # Flask Example
            ["A personal blog homepage using Flask. Include a clean header with nav links, a main area for post summaries (use Jinja loops for placeholder posts like {{ post.title }}), and a simple footer.", "Flask", "Multiple Files"],
            # Node.js Example
            ["A 'Coming Soon' page using Node.js/Express to serve the static files. Include a large countdown timer (use JS), a background image, and an email signup form. Make it look sleek.", "Node.js", "Multiple Files"],
            # More Complex Examples
            ["A simple Flask app for a to-do list. The main page shows the list (use Jinja). Include a form to add new items (POST request handled by Flask). Store items in a simple Python list in memory for now.", "Flask", "Multiple Files"],
            ["A portfolio website using Static generation. Sections for Hero, About Me, Projects (grid layout), and Contact Form. Add subtle scroll animations.", "Static", "Multiple Files"],
        ],
        inputs=[prompt_input, backend_radio, file_structure_radio],
        label="Example Prompts (Try Different Backends!)"
    )

# --- Launch ---
if __name__ == "__main__":
    # Ensure queue is enabled for Spaces, might need higher concurrency if backend generation is slow
    demo.queue(max_size=10).launch()
    print("Gradio app launched.")
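As an aside (not part of either version of app.py): a minimal, standalone sketch of the marker-based splitting that the removed parse_code_blocks helper performed. The marker regexes are copied from the old code above; the sample model output string is invented for illustration.

import re

# Toy model output using the exact markers the removed parser looks for.
sample = (
    "<!-- index.html -->\n<!DOCTYPE html>\n<html><body><h1>Hi</h1></body></html>\n"
    "/* style.css */\nh1 { color: teal; }\n"
    "// script.js //\nconsole.log('loaded');"
)

markers = {
    "html": r"<!--\s*index\.html\s*-->",
    "css": r"/\*\s*style\.css\s*\*/",
    "js": r"//\s*script\.js\s*//",
}

# Locate each marker, sort by position, then slice the text between consecutive markers.
found = sorted(
    (m.start(), m.end(), key)
    for key, pattern in markers.items()
    if (m := re.search(pattern, sample, re.IGNORECASE))
)
blocks = {}
for i, (_, end, key) in enumerate(found):
    next_start = found[i + 1][0] if i + 1 < len(found) else len(sample)
    blocks[key] = sample[end:next_start].strip()

print(blocks["css"])  # -> h1 { color: teal; }
print(blocks["js"])   # -> console.log('loaded');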
New version (added):

import gradio as gr
from huggingface_hub import InferenceClient
import os
import re

# --- Configuration ---
API_TOKEN = os.getenv("HF_TOKEN", None)
MODEL = "Qwen/Qwen2.5-Coder-32B-Instruct"

# --- Initialize Inference Client ---
try:
    print(f"Initializing Inference Client for model: {MODEL}")
    client = InferenceClient(model=MODEL, token=API_TOKEN) if API_TOKEN else InferenceClient(model=MODEL)
except Exception as e:
    raise gr.Error(f"Failed to initialize model client. Error: {e}")

# --- Core Code Generation Function ---
def generate_code(
    prompt: str,
    backend_choice: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
):
    print(f"Generating code for: {prompt[:100]}... | Backend: {backend_choice}")

    # --- Dynamically Build System Message ---
    system_message = (
        "you are an ai that is supposed to generate websites, you must not say anything except giving code , "
        "user can select backend like static , flask , nodejs only , you should always keep the website sfw and minimal errors, "
        "you must create an index.html following the user prompt, "
        "if the user asks you create an code that's not about an website you should say "
        "'hey there! am here to create websites for you unfortunately am programmed to not create codes! otherwise I would go on the naughty list :-(', "
        "your code always must have no useless comments you should only add comments where users are required to modify the code."
    )

    user_prompt = f"USER_PROMPT = {prompt}\nUSER_BACKEND = {backend_choice}"

    messages = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": user_prompt}
    ]

    response_stream = ""
    full_response = ""

    try:
        stream = client.chat_completion(
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=True,
        )
        for message in stream:
            token = message.choices[0].delta.content
            if isinstance(token, str):
                response_stream += token
                full_response += token
                yield response_stream

        cleaned_response = full_response.strip()
        cleaned_response = re.sub(r"^\s*```[a-z]*\s*\n?", "", cleaned_response)
        cleaned_response = re.sub(r"\n?\s*```\s*$", "", cleaned_response)
        cleaned_response = re.sub(r"<\s*\|?\s*(user|assistant)\s*\|?\s*>", "", cleaned_response, flags=re.IGNORECASE)

        common_phrases = [
            "Here is the code:", "Okay, here is the code:", "Here's the code:",
            "Sure, here is the code you requested:", "Let me know if you need anything else."
        ]
        for phrase in common_phrases:
            if cleaned_response.lower().startswith(phrase.lower()):
                cleaned_response = cleaned_response[len(phrase):].lstrip()

        yield cleaned_response.strip()

    except Exception as e:
        yield f"## Error\n\nFailed to generate code.\n**Reason:** {e}"
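As an aside (not part of app.py): a standalone sketch of what the post-processing above does to a typical fenced reply, using the same regexes and phrase list; the raw sample string is invented. The new file's Gradio interface continues below.

import re

# Hypothetical raw model reply wrapped in a markdown code fence.
raw = "```html\n<!DOCTYPE html>\n<html><body><h1>Hello</h1></body></html>\n```"

cleaned = raw.strip()
cleaned = re.sub(r"^\s*```[a-z]*\s*\n?", "", cleaned)   # drop a leading ``` fence
cleaned = re.sub(r"\n?\s*```\s*$", "", cleaned)          # drop a trailing ``` fence
cleaned = re.sub(r"<\s*\|?\s*(user|assistant)\s*\|?\s*>", "", cleaned, flags=re.IGNORECASE)

# Chatty lead-ins that survive the fence stripping are trimmed the same way.
for phrase in ["Here is the code:", "Okay, here is the code:", "Here's the code:"]:
    if cleaned.lower().startswith(phrase.lower()):
        cleaned = cleaned[len(phrase):].lstrip()

print(cleaned)  # -> the bare HTML document, ready for the gr.Code output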
# --- Build Gradio Interface ---
with gr.Blocks(css=".gradio-container { max-width: 90% !important; }") as demo:
    gr.Markdown("# ✨ Website Code Generator ✨")
    gr.Markdown(
        "Describe the website you want. The AI will generate a **single-file** `index.html` website.\n\n"
        "**Rules:**\n"
        "- Backend hint (Static / Flask / Node.js).\n"
        "- Always fully SFW and minimal errors.\n"
        "- Only generates websites. No other codes.\n"
        "- Minimal necessary comments only."
    )

    with gr.Row():
        with gr.Column(scale=2):
            prompt_input = gr.Textbox(
                label="Website Description",
                placeholder="e.g., A simple landing page with a hero section and contact form.",
                lines=6,
            )
            backend_radio = gr.Radio(
                ["Static", "Flask", "Node.js"],
                label="Backend Context",
                value="Static",
                info="Hint only. Always generates only index.html."
            )
            generate_button = gr.Button("✨ Generate Website Code", variant="primary")

        with gr.Column(scale=3):
            code_output = gr.Code(
                label="Generated index.html",
                language="html",
                lines=30,
                interactive=False,
            )

    with gr.Accordion("Advanced Settings", open=False):
        max_tokens_slider = gr.Slider(
            minimum=512,
            maximum=4096,
            value=3072,
            step=256,
            label="Max New Tokens"
        )
        temperature_slider = gr.Slider(
            minimum=0.1, maximum=1.2, value=0.7, step=0.1, label="Temperature"
        )
        top_p_slider = gr.Slider(
            minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-P"
        )

    generate_button.click(
        fn=generate_code,
        inputs=[prompt_input, backend_radio, max_tokens_slider, temperature_slider, top_p_slider],
        outputs=code_output,
    )

if __name__ == "__main__":
    demo.queue(max_size=10).launch()
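A possible way to smoke-test the new streaming generator outside the Gradio UI, assuming the new app.py above is saved locally as app.py and HF_TOKEN is exported; the prompt text and output filename here are made up for illustration. Each yielded chunk is the cumulative text, and the final yield is the cleaned code.

# Local smoke test for generate_code, bypassing the Gradio interface.
from app import generate_code  # assumes the new version above is saved as app.py

final_code = ""
for chunk in generate_code(
    prompt="A one-page portfolio with a dark hero section and a contact form.",
    backend_choice="Static",
    max_tokens=2048,
    temperature=0.7,
    top_p=0.9,
):
    final_code = chunk  # each yield is cumulative; the last yield is the cleaned code

with open("index.html", "w", encoding="utf-8") as f:
    f.write(final_code)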