import gradio as gr
from huggingface_hub import InferenceClient
import os
import re  # For post-processing and parsing

# --- Configuration ---
API_TOKEN = os.getenv("HF_TOKEN", None)
# Consider using a model known for stronger coding capabilities if backend generation is complex
MODEL = "Qwen/Qwen2-7B-Instruct"  # Example: Switched to a smaller, faster model for potentially better backend handling
# MODEL = "Qwen/Qwen2.5-Coder-32B-Instruct"  # Or keep your original model

# --- Initialize Inference Client ---
try:
    print(f"Attempting to initialize Inference Client for model: {MODEL}")
    if API_TOKEN:
        print("Using HF token found in environment.")
        client = InferenceClient(model=MODEL, token=API_TOKEN)
    else:
        print("HF token not found. Running without a token (may lead to rate limits).")
        client = InferenceClient(model=MODEL)
    print("Inference Client initialized successfully.")
except Exception as e:
    print(f"Error initializing Inference Client: {e}")
    raise gr.Error(f"Failed to initialize the AI model client for '{MODEL}'. Check model name, network, and HF_TOKEN secret if applicable. Error: {e}")
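
# NOTE: constructing InferenceClient does not validate the model or token, so issues
# such as a missing/gated model or rate limits typically surface only when
# client.chat_completion(...) is called further below, not here at startup.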

# --- Helper Function for Parsing ---
def parse_code_blocks(text, file_structure, backend_choice):
    """Parses the generated text into code blocks based on markers."""
    if file_structure == "Single File":
        # Everything goes into HTML for single file mode
        return {
            "html": text.strip(),
            "css": "/* CSS is embedded in HTML */",
            "js": "// JavaScript is embedded in HTML",
            "backend": "// No backend file generated for 'Single File' mode.",
        }

    # Default markers
    markers = {
        "html": r"<!--\s*index\.html\s*-->",
        "css": r"/\*\s*style\.css\s*\*/",
        "js": r"//\s*script\.js\s*//",  # Added trailing // to potentially help delimit
    }
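    # For reference, these patterns match literal markers such as:
    #   <!-- index.html -->
    #   /* style.css */
    #   // script.js //
    # (plus "# app.py #" or "// server.js //" when a backend is selected below).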

    # Add backend markers based on choice
    if backend_choice == "Flask":
        markers["backend"] = r"#\s*app\.py\s*#"  # Using # marker #
    elif backend_choice == "Node.js":
        markers["backend"] = r"//\s*(server|app)\.js\s*//"  # Using // marker //

    # Find all marker positions
    marker_positions = {}
    for key, pattern in markers.items():
        match = re.search(pattern, text, re.IGNORECASE)
        if match:
            marker_positions[key] = match.start()

    # If no markers found, fall back to heuristics before assuming it's all HTML
    if not marker_positions:
        print("Warning: No file markers found in the output. Assuming all content is HTML.")
        # Check if it looks like CSS or JS first before defaulting to HTML
        cleaned_text = text.strip()
        if cleaned_text.startswith(("{", ".", "#", "/*")) and "{" in cleaned_text and "}" in cleaned_text:
            print("Heuristic: Output looks like CSS.")
            return {"html": "", "css": cleaned_text, "js": "", "backend": ""}
        elif cleaned_text.startswith(("function", "const", "let", "var", "//", "import")) and ("(" in cleaned_text or "{" in cleaned_text):
            print("Heuristic: Output looks like JS.")
            return {"html": "", "css": "", "js": cleaned_text, "backend": ""}
        else:  # Default to HTML
            return {"html": cleaned_text, "css": "", "js": "", "backend": ""}

    # Sort markers by their position
    sorted_markers = sorted(marker_positions.items(), key=lambda item: item[1])

    # Extract code blocks
    code_blocks = {key: "" for key in markers}  # Initialize all keys
    for i, (key, start_pos) in enumerate(sorted_markers):
        # Find the start of the code block (after the marker)
        marker_match = re.search(markers[key], text, re.IGNORECASE)  # Find the specific marker text
        code_start = marker_match.end()
        # Find the end of the code block (start of the next marker or end of text)
        if i + 1 < len(sorted_markers):
            next_marker_key, next_marker_pos = sorted_markers[i + 1]
            code_end = next_marker_pos
        else:
            code_end = len(text)
        # Extract and clean the code
        code = text[code_start:code_end].strip()
        code_blocks[key] = code

    # Fill potential missing keys if they existed in original markers dict
    final_blocks = {
        "html": code_blocks.get("html", ""),
        "css": code_blocks.get("css", ""),
        "js": code_blocks.get("js", ""),
        "backend": code_blocks.get("backend", ""),
    }

    # If backend is static but backend code was somehow generated, clear it
    if backend_choice == "Static":
        final_blocks["backend"] = "// No backend file needed for 'Static' mode."

    # Fallback if HTML is empty but others aren't (marker parsing failed maybe?)
    if not final_blocks["html"] and (final_blocks["css"] or final_blocks["js"] or final_blocks["backend"]):
        # Check if the original text looks like HTML
        if text.strip().startswith("<!DOCTYPE html") or text.strip().startswith("<html"):
            print("Warning: Marker parsing might have failed, but text looks like HTML. Assigning full text to HTML.")
            final_blocks["html"] = text.strip()
            final_blocks["css"] = ""  # Clear others to avoid duplication if parsing failed badly
            final_blocks["js"] = ""
            final_blocks["backend"] = ""

    return final_blocks
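
# Illustrative example (not executed): for a "Flask" / "Multiple Files" response such as
#
#   <!-- index.html -->
#   <!DOCTYPE html>...
#   /* style.css */
#   body { margin: 0; }
#   // script.js //
#   console.log("hi");
#   # app.py #
#   from flask import Flask
#
# parse_code_blocks() returns a dict with the keys "html", "css", "js", and "backend",
# each holding the stripped text between its marker and the next one (or a placeholder).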

# --- Core Code Generation Function ---
def generate_code(
    prompt: str,
    backend_choice: str,
    file_structure: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
):
    """
    Generates website code based on the user prompt and choices.
    Aims for richer CSS, emphasizes completeness, and strictly outputs ONLY raw code.
    Parses output into separate files for the UI tabs when 'Multiple Files' is selected.
    Yields cumulative raw code to the first tab for live updates, then yields the parsed blocks.
    """
    print("--- Generating Code ---")
    print(f"Prompt: {prompt[:100]}...")
    print(f"Backend Context: {backend_choice}")
    print(f"File Structure: {file_structure}")
    print(f"Settings: Max Tokens={max_tokens}, Temp={temperature}, Top-P={top_p}")

    # --- Dynamically Build System Message ---
    backend_instructions = ""
    file_markers = ["<!-- index.html -->", "/* style.css */", "// script.js //"]  # Base markers
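    # NOTE: file_markers is kept for reference only -- it is appended to below but not
    # read again; the prompt text spells the markers out explicitly for the model.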

    if backend_choice == "Static":
        backend_instructions = (
            f"- **Backend is '{backend_choice}':** Generate ONLY frontend code (HTML, CSS, JS). Do NOT generate any server-side files or logic.\n"
        )
        file_structure_detail = (
            "Generate code for `index.html`, `style.css`, and `script.js` (if JS is needed). "
            "Use these EXACT markers to separate the files:\n"
            " `<!-- index.html -->`\n"
            " `/* style.css */`\n"
            " `// script.js //` (only include if JS is generated)\n"
            "- Place the corresponding code directly after each marker.\n"
            "- Inside `index.html`, link `style.css` in the `<head>` and include `script.js` before `</body>` if generated."
        )
    elif backend_choice == "Flask":
        backend_instructions = (
            f"- **Backend is '{backend_choice}':** Generate a basic Python Flask application (`app.py`).\n"
            " - Include necessary imports (`Flask`, `render_template`).\n"
            " - Create a simple Flask app instance.\n"
            " - Define a root route (`@app.route('/')`) that renders `index.html`.\n"
            " - Include the standard `if __name__ == '__main__': app.run(debug=True)` block.\n"
            "- **HTML Templates:** Modify the generated `index.html` to be a Flask template.\n"
            " - Use Jinja2 syntax (e.g., `{{ variable }}`) *if* the prompt implies dynamic data, otherwise generate static HTML structure within the template.\n"
            " - Link CSS using `url_for('static', filename='style.css')`.\n"
            " - Include JS using `url_for('static', filename='script.js')`.\n"
            "- Assume CSS and JS are served from a `static` folder (but generate the code for `style.css` and `script.js` directly).\n"
        )
        file_markers.append("# app.py #")  # Add Flask marker
        file_structure_detail = (
            "Generate code for `index.html` (as a Flask template), `style.css`, `script.js` (if JS is needed), and `app.py`.\n"
            "Use these EXACT markers to separate the files:\n"
            " `<!-- index.html -->`\n"
            " `/* style.css */`\n"
            " `// script.js //` (only include if JS is generated)\n"
            " `# app.py #`\n"
            "- Place the corresponding code directly after each marker."
        )
    elif backend_choice == "Node.js":
        backend_instructions = (
            f"- **Backend is '{backend_choice}':** Generate a basic Node.js Express application (`server.js` or `app.js`).\n"
            " - Include necessary requires (`express`, `path`).\n"
            " - Create an Express app instance.\n"
            " - Configure middleware to serve static files from a `public` directory (e.g., `app.use(express.static('public'))`).\n"
            " - Define a root route (`app.get('/')`) that sends `index.html` (located in `public`).\n"
            " - Start the server (`app.listen(...)`).\n"
            "- **HTML:** Generate a standard `index.html` file. Link CSS (`/style.css`) and JS (`/script.js`) assuming they are in the `public` folder.\n"
        )
        file_markers.append("// server.js //")  # Add Node marker
        file_structure_detail = (
            "Generate code for `index.html`, `style.css`, `script.js` (if JS is needed), and `server.js` (or `app.js`).\n"
            "Use these EXACT markers to separate the files:\n"
            " `<!-- index.html -->`\n"
            " `/* style.css */`\n"
            " `// script.js //` (only include if JS is generated)\n"
            " `// server.js //`\n"
            "- Place the corresponding code directly after each marker."
        )

    # File Structure Instructions
    if file_structure == "Single File":
        file_structure_instruction = (
            "- **File Structure is 'Single File':** Generate ONLY a single, complete `index.html` file. "
            "Embed ALL CSS directly within `<style>` tags inside the `<head>`. "
            "Embed ALL necessary JavaScript directly within `<script>` tags just before the closing `</body>` tag. "
            "Do NOT use any file separation markers. Ignore backend instructions if any were implied."
        )
    else:  # Multiple Files
        file_structure_instruction = (
            f"- **File Structure is 'Multiple Files':** {file_structure_detail}"
        )

    # Assemble the full system message
    system_message = (
        "You are an expert web developer AI. Your goal is to generate **complete, functional, and visually appealing** code based *only* on the user's description and selected options (Backend, File Structure). "
        "Follow ALL these rules with EXTREME STRICTNESS:\n"
        "1. **STYLE & DETAIL:** Generate rich frontend code. Use **plenty of CSS** for layout, spacing, typography, colors, and effects.\n"
        "2. **COMPLETENESS:** Generate the *entire* requested code structure for ALL specified files. Ensure proper syntax and closing tags/brackets. **DO NOT STOP GENERATING PREMATURELY.**\n"
        "3. **RAW CODE ONLY:** Your *entire* response MUST consist *only* of the requested source code. NO extra text, NO explanations, NO apologies, NO introductions, NO summaries, NO comments about the code (except standard code comments), NO MARKDOWN formatting (like ```html), and ***ABSOLUTELY NO CONVERSATIONAL TEXT OR TAGS*** like `<|user|>` or `<|assistant|>`.\n"
        "4. **IMMEDIATE CODE START:** The response MUST begin *directly* with the first character of the code (e.g., `<!DOCTYPE html>` or the first file marker like `<!-- index.html -->`).\n"
        "5. **IMMEDIATE CODE END:** The response MUST end *immediately* after the very last character of the generated code (`</html>`, `}`, `;`, `})`, etc.). DO NOT add *any* text, spaces, or newlines after the code concludes.\n"
        f"{backend_instructions}"  # Inject backend specific instructions
        f"6. **FILE STRUCTURE ({file_structure}):** Strictly follow ONLY the instructions for the *selected* file structure:\n"
        f" {file_structure_instruction}\n"
        "7. **ACCURACY:** Generate functional code addressing the user's prompt, respecting the chosen backend context (templating, file serving).\n\n"
        "REMEMBER: Output ONLY raw code. Respect the chosen backend and file structure. Use the specified markers EXACTLY if generating multiple files. START immediately with code. FINISH the entire code generation. END immediately with code. NO extra text/tags."
    )

    messages = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": f"Generate the complete website code for: {prompt}"}
    ]

    # --- Stream the response ---
    raw_response = ""
    token_count = 0
    try:
        print("Sending request to Hugging Face Inference API...")
        stream = client.chat_completion(
            messages=messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        )
        for message in stream:
            token = message.choices[0].delta.content
            if isinstance(token, str):
                token_count += 1
                raw_response += token
                # Yield the raw, cumulative response to the first tab for live feedback
                yield (raw_response, "Streaming...", "Streaming...", "Streaming...")

        print(f"API stream finished. Received ~{token_count} tokens. Raw length: {len(raw_response)}")
        if token_count >= max_tokens - 15:  # Check if close to the limit
            print(f"WARNING: Generation might have been cut short due to reaching max_tokens limit ({max_tokens}).")
            raw_response += "\n\n<!-- WARNING: Output may be incomplete due to max_tokens limit. -->"

        # --- Post-Processing (Basic Cleanup - Less aggressive now) ---
        cleaned_response = raw_response.strip()
        # Remove potential markdown code blocks (less likely with strict prompt but good safety)
        cleaned_response = re.sub(r"^\s*```[a-z]*\s*\n?", "", cleaned_response)
        cleaned_response = re.sub(r"\n?\s*```\s*$", "", cleaned_response)
        # Remove assistant tags (less likely but possible)
        cleaned_response = re.sub(r"<\|(user|assistant)\|>", "", cleaned_response, flags=re.IGNORECASE)
print("Parsing response into code blocks...")
parsed_code = parse_code_blocks(cleaned_response, file_structure, backend_choice)
print("Parsing complete.")
# Return the parsed code blocks for the respective tabs
return (
parsed_code["html"],
parsed_code["css"],
parsed_code["js"],
parsed_code["backend"] # Will be empty or placeholder for Static/Single File
)
except Exception as e:
error_message = f"An error occurred during the API call or processing: {e}"
print(error_message)
# Return error message to all tabs
error_output = f"## Error\n\nFailed to generate code.\n**Reason:** {e}\n\nPlease check the model status, your connection, and API token (if applicable)."
return (error_output, error_output, error_output, error_output)
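
# Illustrative (hypothetical) way to exercise the generator outside Gradio, e.g. as a
# quick local smoke test -- the last yielded tuple holds the parsed code blocks:
#
#   result = None
#   for result in generate_code(
#       "A simple landing page", "Static", "Multiple Files",
#       max_tokens=1024, temperature=0.7, top_p=0.9,
#   ):
#       pass
#   html_code, css_code, js_code, backend_code = result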

# --- Build Gradio Interface using Blocks ---
with gr.Blocks(css=".gradio-container { max-width: 95% !important; }") as demo:
    gr.Markdown("# ✨ Website Code Generator v2 ✨")
    gr.Markdown(
        "Describe the website you want. Select a backend context hint (Static, Flask, Node.js) and file structure. "
        "The AI will generate code, attempting to respect the backend choice (e.g., basic server file, template syntax). "
        "**Output appears in the tabs below.**\n"
        "**Important:**\n"
        "1. Backend generation is experimental. The AI might produce basic structures but not fully complex applications.\n"
        "2. For 'Multiple Files', code is split based on markers (`<!-- index.html -->`, `/* style.css */`, etc.). Check tabs for results.\n"
        "3. **If code seems cut off**, increase 'Max New Tokens' and regenerate!"
    )

    with gr.Row():
        with gr.Column(scale=2):
            prompt_input = gr.Textbox(
                label="Website Description",
                placeholder="e.g., A Flask app portfolio site with nav, hero, project cards (use Jinja placeholders for data), and contact form.",
                lines=6,
            )
            backend_radio = gr.Radio(
                ["Static", "Flask", "Node.js"], label="Backend Context", value="Static",
                info="Static: Frontend only. Flask/Node: Attempts basic backend file + frontend integration (experimental)."
            )
            file_structure_radio = gr.Radio(
                ["Multiple Files", "Single File"], label="Output File Structure", value="Multiple Files",
                info="Single: All in index.html. Multiple: Separated into tabs (HTML, CSS, JS, Backend if applicable)."
            )
            generate_button = gr.Button("Generate Code", variant="primary")

        with gr.Column(scale=3):
            # Use Tabs for output
            with gr.Tabs(elem_id="code-tabs"):
                with gr.Tab("HTML", elem_id="html-tab"):
                    html_output = gr.Code(
                        label="index.html",  # Label clarifies file even if tab name is generic
                        language="html",
                        lines=28,  # Adjust lines per tab
                        interactive=False,
                        show_label=True
                    )
                with gr.Tab("CSS", elem_id="css-tab"):
                    css_output = gr.Code(
                        label="style.css",
                        language="css",
                        lines=28,
                        interactive=False,
                        show_label=True
                    )
                with gr.Tab("JavaScript", elem_id="js-tab"):
                    js_output = gr.Code(
                        label="script.js",
                        language="javascript",
                        lines=28,
                        interactive=False,
                        show_label=True
                    )
                with gr.Tab("Backend", elem_id="backend-tab"):
                    # Label will indicate file type
                    backend_output = gr.Code(
                        label="app.py / server.js",
                        language="python",  # Default, can maybe be dynamic later if needed
                        lines=28,
                        interactive=False,
                        show_label=True
                    )

    with gr.Accordion("Advanced Generation Settings", open=False):
        max_tokens_slider = gr.Slider(
            minimum=512,
            maximum=4096,  # Adjust based on model's max context if needed
            value=3072,
            step=256,
            label="Max New Tokens",
            info="Max length of generated code (all files combined). Increase if output is cut off!"
        )
        temperature_slider = gr.Slider(
            minimum=0.1, maximum=1.2, value=0.7, step=0.1, label="Temperature",
            info="Controls randomness. Lower=focused, Higher=creative."
        )
        top_p_slider = gr.Slider(
            minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-P",
            info="Alternative randomness control."
        )

    # --- Connect Inputs/Outputs ---
    generate_button.click(
        fn=generate_code,
        inputs=[
            prompt_input,
            backend_radio,
            file_structure_radio,
            max_tokens_slider,
            temperature_slider,
            top_p_slider,
        ],
        # Output to the individual code blocks within the tabs
        outputs=[
            html_output,
            css_output,
            js_output,
            backend_output,
        ],
    )
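    # Note: the four outputs above must stay in the same order as the 4-tuples
    # yielded by generate_code (html, css, js, backend).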

    # --- Examples ---
    gr.Examples(
        examples=[
            # Static Examples
            ["A simple counter page with a number display, an increment button, and a decrement button. Style the buttons nicely and center everything.", "Static", "Single File"],
            ["A responsive product grid for an e-commerce site. Each card needs an image, title, price, and 'Add to Cart' button with a hover effect. Use modern CSS.", "Static", "Multiple Files"],
            # Flask Example
            ["A personal blog homepage using Flask. Include a clean header with nav links, a main area for post summaries (use Jinja loops for placeholder posts like {{ post.title }}), and a simple footer.", "Flask", "Multiple Files"],
            # Node.js Example
            ["A 'Coming Soon' page using Node.js/Express to serve the static files. Include a large countdown timer (use JS), a background image, and an email signup form. Make it look sleek.", "Node.js", "Multiple Files"],
            # More Complex Examples
            ["A simple Flask app for a to-do list. The main page shows the list (use Jinja). Include a form to add new items (POST request handled by Flask). Store items in a simple Python list in memory for now.", "Flask", "Multiple Files"],
            ["A portfolio website using Static generation. Sections for Hero, About Me, Projects (grid layout), and Contact Form. Add subtle scroll animations.", "Static", "Multiple Files"],
        ],
        inputs=[prompt_input, backend_radio, file_structure_radio],
        label="Example Prompts (Try Different Backends!)"
    )

# --- Launch ---
if __name__ == "__main__":
    print("Starting Gradio app...")
    # Ensure queue is enabled for Spaces; might need higher concurrency if backend generation is slow
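    # When run locally, this serves on Gradio's default port (usually http://localhost:7860).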
    demo.queue(max_size=10).launch()
    print("Gradio app launched.")