MINEOGO committed on
Commit
f5a64b7
·
verified ·
1 Parent(s): e13fef7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +280 -100
app.py CHANGED
@@ -1,11 +1,13 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  import os
4
- import re # For post-processing fallback
5
 
6
  # --- Configuration ---
7
  API_TOKEN = os.getenv("HF_TOKEN", None)
8
- MODEL = """Qwen/Qwen2.5-Coder-32B-Instruct""" # Or choose another suitable model
 
 
9
 
10
  # --- Initialize Inference Client ---
11
  try:
@@ -21,6 +23,100 @@ except Exception as e:
21
  print(f"Error initializing Inference Client: {e}")
22
  raise gr.Error(f"Failed to initialize the AI model client for '{MODEL}'. Check model name, network, and HF_TOKEN secret if applicable. Error: {e}")
23
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
  # --- Core Code Generation Function ---
25
  def generate_code(
26
  prompt: str,
@@ -33,65 +129,118 @@ def generate_code(
33
  """
34
  Generates website code based on user prompt and choices.
35
  Aims for richer CSS, emphasizes completeness, and strictly outputs ONLY raw code.
36
- Yields the code token by token for live updates.
37
- under development not fully completed!
38
  """
39
  print(f"--- Generating Code ---")
40
  print(f"Prompt: {prompt[:100]}...")
41
  print(f"Backend Context: {backend_choice}")
42
  print(f"File Structure: {file_structure}")
43
- # Log the max_tokens value being used for this request
44
  print(f"Settings: Max Tokens={max_tokens}, Temp={temperature}, Top-P={top_p}")
45
 
46
  # --- Dynamically Build System Message ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
  if file_structure == "Single File":
48
  file_structure_instruction = (
49
  "- **File Structure is 'Single File':** Generate ONLY a single, complete `index.html` file. "
50
  "Embed ALL CSS directly within `<style>` tags inside the `<head>`. "
51
  "Embed ALL necessary JavaScript directly within `<script>` tags just before the closing `</body>` tag. "
52
- "Do NOT use file separation markers."
53
  )
54
  else: # Multiple Files
55
  file_structure_instruction = (
56
- "- **File Structure is 'Multiple Files':** Generate code for `index.html`, `style.css`, and `script.js` (if JS is needed). "
57
- "Use these EXACT markers: `<!-- index.html -->`, `/* style.css */`, `// script.js` (only if JS is needed).\n"
58
- "- Place the corresponding code directly after each marker.\n"
59
- "- Inside `index.html`, link `style.css` in the `<head>` and include `script.js` before `</body>` if generated."
60
  )
61
 
62
- # Assemble the full system message - Emphasizing completeness and NO premature stopping
63
  system_message = (
64
- "You are an expert frontend web developer AI. Your primary goal is to generate **complete, visually appealing, modern, and well-styled** frontend code (HTML, CSS, client-side JS) based *only* on the user's description and selected options. "
65
  "Follow ALL these rules with EXTREME STRICTNESS:\n"
66
- "1. **STYLE & DETAIL:** Generate rich, detailed code. Use **plenty of CSS** for layout, spacing, typography, colors, and effects. Aim for a high-quality visual result.\n"
67
- "2. **COMPLETENESS:** Generate the *entire* requested code structure. Ensure all files/sections are fully generated and properly closed (e.g., closing HTML tags `</html>`, CSS braces `}`, script tags `</script>`). **DO NOT STOP GENERATING PREMATURELY.** Finish the whole task.\n"
68
  "3. **RAW CODE ONLY:** Your *entire* response MUST consist *only* of the requested source code. NO extra text, NO explanations, NO apologies, NO introductions, NO summaries, NO comments about the code (except standard code comments), NO MARKDOWN formatting (like ```html), and ***ABSOLUTELY NO CONVERSATIONAL TEXT OR TAGS*** like `<|user|>` or `<|assistant|>`.\n"
69
- "4. **IMMEDIATE CODE START:** The response MUST begin *directly* with the first character of the code (`<!DOCTYPE html>` or `<!-- index.html -->`).\n"
70
- "5. **IMMEDIATE CODE END:** The response MUST end *immediately* after the very last character of the generated code (`</html>`, `}`, `;`, etc.). DO NOT add *any* text, spaces, or newlines after the code concludes.\n"
71
- "6. **MANDATORY `index.html`:** Always generate the content for `index.html`.\n"
72
- f"7. **FILE STRUCTURE ({file_structure}):** Strictly follow ONLY the instructions for the *selected* file structure:\n"
73
  f" {file_structure_instruction}\n"
74
- "8. **BACKEND CONTEXT ({backend_choice}):** Use as a hint for frontend structure only. Generate ONLY frontend code.\n"
75
- "9. **FRONTEND ONLY:** Do NOT generate server-side code.\n"
76
- "10. **ACCURACY:** Generate functional code addressing the user's prompt.\n\n"
77
- "REMEMBER: Create COMPLETE, visually appealing code. Output ONLY raw code. START immediately with code. FINISH the entire code generation. END immediately with code. NO extra text/tags."
78
  )
79
 
80
- # --- Construct the messages for the API ---
81
  messages = [
82
  {"role": "system", "content": system_message},
83
- {"role": "user", "content": f"Generate the complete website frontend code for: {prompt}"}
84
  ]
85
 
86
- # --- Stream the response from the API ---
87
- response_stream = ""
88
- full_response_for_cleaning = ""
89
- token_count = 0 # Add a simple counter for debugging
90
  try:
91
  print("Sending request to Hugging Face Inference API...")
92
  stream = client.chat_completion(
93
  messages=messages,
94
- max_tokens=max_tokens, # Use the value from the slider
95
  stream=True,
96
  temperature=temperature,
97
  top_p=top_p,
@@ -99,96 +248,121 @@ def generate_code(
99
  for message in stream:
100
  token = message.choices[0].delta.content
101
  if isinstance(token, str):
102
- token_count += 1 # Crude approximation of tokens received
103
- response_stream += token
104
- full_response_for_cleaning += token
105
- # Log progress occasionally for debugging if needed
106
- # if token_count % 100 == 0:
107
- # print(f"Stream progress: Received ~{token_count} tokens...")
108
- yield response_stream # Yield cumulative response for live update
109
-
110
- print(f"API stream finished. Received ~{token_count} tokens. Raw length: {len(full_response_for_cleaning)}")
111
- # Check if received tokens are close to max_tokens, indicating potential cutoff
112
- if token_count >= max_tokens - 10: # Check if close to the limit (allowing for slight variations)
113
  print(f"WARNING: Generation might have been cut short due to reaching max_tokens limit ({max_tokens}).")
114
- # Optionally, append a warning to the output itself, though it violates the "code only" rule
115
- # full_response_for_cleaning += "\n\n<!-- WARNING: Output may be incomplete due to max_tokens limit. -->"
116
 
117
 
118
- # --- Post-Processing (Fallback Safety Net) ---
119
- cleaned_response = full_response_for_cleaning.strip()
 
120
  cleaned_response = re.sub(r"^\s*```[a-z]*\s*\n?", "", cleaned_response)
121
  cleaned_response = re.sub(r"\n?\s*```\s*$", "", cleaned_response)
122
- cleaned_response = re.sub(r"<\s*\|?\s*(user|assistant)\s*\|?\s*>", "", cleaned_response, flags=re.IGNORECASE)
123
- common_phrases = [
124
- "Here is the code:", "Okay, here is the code:", "Here's the code:",
125
- "Sure, here is the code you requested:", "Let me know if you need anything else."
126
- ]
127
- # Simple check, might need more robust cleaning if issues persist
128
- for phrase in common_phrases:
129
- # Check start
130
- if cleaned_response.lower().startswith(phrase.lower()):
131
- cleaned_response = cleaned_response[len(phrase):].lstrip()
132
- # Check end - be careful not to remove parts of valid code
133
- # This end check is risky, might remove valid closing comments or similar.
134
- # Consider removing if it causes issues.
135
- # if cleaned_response.lower().endswith(phrase.lower()):
136
- # cleaned_response = cleaned_response[:-len(phrase)].rstrip()
137
-
138
-
139
- yield cleaned_response.strip() # Yield final cleaned response
140
 
141
  except Exception as e:
142
- error_message = f"An error occurred during the API call: {e}"
143
  print(error_message)
144
- yield f"## Error\n\nFailed to generate code.\n**Reason:** {e}\n\nPlease check the model status, your connection, and API token (if applicable)."
 
 
145
 
146
 
147
  # --- Build Gradio Interface using Blocks ---
148
- with gr.Blocks(css=".gradio-container { max-width: 90% !important; }") as demo:
149
- gr.Markdown("# ✨ Website Code Generator ✨")
150
  gr.Markdown(
151
- "Describe the website you want. The AI will generate **visually styled** frontend code (HTML, CSS, JS) using **plenty of CSS**. "
152
- "The code appears live below.\n"
 
153
  "**Important:**\n"
154
- "1. This generator creates code based *only* on your initial description. To refine, modify your description and generate again.\n"
155
- "2. **If the code output stops abruptly**, it likely hit the 'Max New Tokens' limit. **Increase the slider value below** and try again!" # Added explanation
 
156
  )
157
 
158
  with gr.Row():
159
  with gr.Column(scale=2):
160
  prompt_input = gr.Textbox(
161
  label="Website Description",
162
- placeholder="e.g., A modern portfolio landing page with smooth scroll nav, stylish hero, project cards with hover effects, contact form.",
163
  lines=6,
164
  )
165
  backend_radio = gr.Radio(
166
- ["Static", "Flask", "Node.js"], label="Backend Context Hint", value="Static",
167
- info="Hint for AI (e.g., {{var}}) - generates ONLY frontend code."
168
  )
169
  file_structure_radio = gr.Radio(
170
  ["Multiple Files", "Single File"], label="Output File Structure", value="Multiple Files",
171
- info="Choose 'Single File' (all in index.html) or 'Multiple Files' (separate css/js)."
172
  )
173
- generate_button = gr.Button("🎨 Generate Stylish Website Code", variant="primary")
174
 
175
  with gr.Column(scale=3):
176
- code_output = gr.Code(
177
- label="Generated Code (Raw Output - Aiming for Style!)",
178
- language="html",
179
- lines=30,
180
- interactive=False,
181
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
182
 
183
  with gr.Accordion("Advanced Generation Settings", open=False):
184
- # INCREASED max_tokens range and default value
185
  max_tokens_slider = gr.Slider(
186
  minimum=512,
187
- maximum=4096, # Set maximum to model's limit (Zephyr 7B can handle this)
188
- value=3072, # Increased default significantly
189
- step=256, # Larger steps might be practical
190
  label="Max New Tokens",
191
- info="Max length of generated code. Increase if output is cut off!" # Updated info
192
  )
193
  temperature_slider = gr.Slider(
194
  minimum=0.1, maximum=1.2, value=0.7, step=0.1, label="Temperature",
@@ -210,31 +384,37 @@ with gr.Blocks(css=".gradio-container { max-width: 90% !important; }") as demo:
210
  temperature_slider,
211
  top_p_slider,
212
  ],
213
- outputs=code_output,
 
 
 
 
 
 
214
  )
215
 
216
  # --- Examples ---
217
  gr.Examples(
218
  examples=[
 
219
  ["A simple counter page with a number display, an increment button, and a decrement button. Style the buttons nicely and center everything.", "Static", "Single File"],
220
  ["A responsive product grid for an e-commerce site. Each card needs an image, title, price, and 'Add to Cart' button with a hover effect. Use modern CSS.", "Static", "Multiple Files"],
221
- ["A personal blog homepage featuring a clean header with navigation, a main content area for post summaries (placeholders ok), and a simple footer. Use a nice font.", "Flask", "Multiple Files"],
222
- ["A 'Coming Soon' page with a large countdown timer (use JS), a background image, and an email signup form. Make it look sleek.", "Static", "Multiple Files"],
223
- ["A to-do list app with add, edit, and delete features. Use JavaScript for interactivity. Save data in localStorage.", "Static", "Single File"],
224
- ["A weather dashboard that fetches current weather using an API and shows it with icons. Style with CSS grid.", "Static", "Multiple Files"],
225
- ["A personal portfolio site with a hero section, skills, project gallery, and contact form. Add animations using AOS or GSAP.", "Static", "Multiple Files"],
226
- ["A freelancer portfolio template with service cards, testimonials, and pricing sections. Use Tailwind CSS.", "Static", "Multiple Files"],
227
- ["A real-time chat web app using Flask-SocketIO. Basic login and support for multiple rooms.", "Flask", "Multiple Files"],
228
- ["A quiz app with multiple-choice questions, score tracking, and a final result page. Use JS to manage quiz logic.", "Static", "Single File"],
229
- ["A modern developer portfolio with sections for resume download, tech stack icons, GitHub activity, and a dark mode toggle.", "Static", "Multiple Files"]
230
  ],
231
  inputs=[prompt_input, backend_radio, file_structure_radio],
232
- label="Example Prompts (Aiming for Style)"
233
  )
234
-
235
  # --- Launch ---
236
  if __name__ == "__main__":
237
  print("Starting Gradio app...")
238
- # Ensure queue is enabled for Spaces
239
  demo.queue(max_size=10).launch()
240
- print("Gradio app launchdd.")
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  import os
4
+ import re # For post-processing and parsing
5
 
6
  # --- Configuration ---
7
  API_TOKEN = os.getenv("HF_TOKEN", None)
8
+ # Consider using a model known for stronger coding capabilities if backend generation is complex
9
+ MODEL = "Qwen/Qwen2-7B-Instruct" # Example: Switched to a smaller, faster model for potentially better backend handling
10
+ # MODEL = "Qwen/Qwen2.5-Coder-32B-Instruct" # Or keep your original model
11
 
12
  # --- Initialize Inference Client ---
13
  try:
 
23
  print(f"Error initializing Inference Client: {e}")
24
  raise gr.Error(f"Failed to initialize the AI model client for '{MODEL}'. Check model name, network, and HF_TOKEN secret if applicable. Error: {e}")
25
 
26
# --- Helper Function for Parsing ---
def parse_code_blocks(text, file_structure, backend_choice):
    """Split the model's raw output into per-file code blocks.

    Args:
        text: Raw generated text, possibly containing file markers such as
            ``<!-- index.html -->``, ``/* style.css */``, ``// script.js //``,
            and a backend marker (``# app.py #`` or ``// server.js //``).
        file_structure: "Single File" or "Multiple Files".
        backend_choice: "Static", "Flask", or "Node.js".

    Returns:
        dict with keys "html", "css", "js", "backend" mapping to code strings
        (placeholder comments where no separate file applies).
    """
    if file_structure == "Single File":
        # Everything is embedded in index.html in single-file mode.
        return {
            "html": text.strip(),
            "css": "/* CSS is embedded in HTML */",
            "js": "// JavaScript is embedded in HTML",
            "backend": "// No backend file generated for 'Single File' mode.",
        }

    # Marker regexes for the frontend files; trailing // on the JS marker
    # helps delimit it from ordinary // comments.
    markers = {
        "html": r"<!--\s*index\.html\s*-->",
        "css": r"/\*\s*style\.css\s*\*/",
        "js": r"//\s*script\.js\s*//",
    }
    # Backend marker depends on the selected backend context.
    if backend_choice == "Flask":
        markers["backend"] = r"#\s*app\.py\s*#"
    elif backend_choice == "Node.js":
        markers["backend"] = r"//\s*(server|app)\.js\s*//"

    # Locate the first occurrence of each marker once; keep the match object
    # so both start (for ordering) and end (for slicing) are available
    # without re-searching later.
    marker_matches = {}
    for key, pattern in markers.items():
        match = re.search(pattern, text, re.IGNORECASE)
        if match:
            marker_matches[key] = match

    # Fallback when no markers were emitted at all: guess the content type.
    if not marker_matches:
        print("Warning: No file markers found in the output. Assuming all content is HTML.")
        cleaned_text = text.strip()
        # Heuristic: CSS tends to open with a selector/at-rule and contain braces.
        if cleaned_text.startswith(("{", ".", "#", "/*")) and "{" in cleaned_text and "}" in cleaned_text:
            print("Heuristic: Output looks like CSS.")
            return {"html": "", "css": cleaned_text, "js": "", "backend": ""}
        # Heuristic: JS tends to open with a declaration keyword or comment.
        elif cleaned_text.startswith(("function", "const", "let", "var", "//", "import")) and ("(" in cleaned_text or "{" in cleaned_text):
            print("Heuristic: Output looks like JS.")
            return {"html": "", "css": "", "js": cleaned_text, "backend": ""}
        else:  # Default to HTML
            return {"html": cleaned_text, "css": "", "js": "", "backend": ""}

    # Process markers in the order they appear in the text; each block runs
    # from the end of its marker to the start of the next marker (or EOF).
    sorted_matches = sorted(marker_matches.items(), key=lambda item: item[1].start())

    code_blocks = {key: "" for key in markers}  # Initialize all expected keys
    for i, (key, match) in enumerate(sorted_matches):
        code_start = match.end()
        if i + 1 < len(sorted_matches):
            code_end = sorted_matches[i + 1][1].start()
        else:
            code_end = len(text)
        code_blocks[key] = text[code_start:code_end].strip()

    # Normalize to the four keys the UI tabs expect.
    final_blocks = {
        "html": code_blocks.get("html", ""),
        "css": code_blocks.get("css", ""),
        "js": code_blocks.get("js", ""),
        "backend": code_blocks.get("backend", ""),
    }

    # Static mode never ships a backend file, even if one was generated.
    if backend_choice == "Static":
        final_blocks["backend"] = "// No backend file needed for 'Static' mode."

    # Fallback: markers matched but the HTML slot came out empty while others
    # did not — if the whole text looks like an HTML document, marker parsing
    # probably misfired, so assign everything to HTML and clear the rest to
    # avoid duplicated content.
    if not final_blocks["html"] and (final_blocks["css"] or final_blocks["js"] or final_blocks["backend"]):
        if text.strip().startswith("<!DOCTYPE html") or text.strip().startswith("<html"):
            print("Warning: Marker parsing might have failed, but text looks like HTML. Assigning full text to HTML.")
            final_blocks["html"] = text.strip()
            final_blocks["css"] = ""
            final_blocks["js"] = ""
            final_blocks["backend"] = ""

    return final_blocks
120
  # --- Core Code Generation Function ---
121
  def generate_code(
122
  prompt: str,
 
129
  """
130
  Generates website code based on user prompt and choices.
131
  Aims for richer CSS, emphasizes completeness, and strictly outputs ONLY raw code.
132
+ Parses output into separate files for the UI tabs when 'Multiple Files' is selected.
133
+ Yields cumulative raw code to the first tab for live updates, then returns parsed blocks.
134
  """
135
  print(f"--- Generating Code ---")
136
  print(f"Prompt: {prompt[:100]}...")
137
  print(f"Backend Context: {backend_choice}")
138
  print(f"File Structure: {file_structure}")
 
139
  print(f"Settings: Max Tokens={max_tokens}, Temp={temperature}, Top-P={top_p}")
140
 
141
  # --- Dynamically Build System Message ---
142
+ backend_instructions = ""
143
+ file_markers = ["<!-- index.html -->", "/* style.css */", "// script.js //"] # Base markers
144
+
145
+ if backend_choice == "Static":
146
+ backend_instructions = (
147
+ f"- **Backend is '{backend_choice}':** Generate ONLY frontend code (HTML, CSS, JS). Do NOT generate any server-side files or logic.\n"
148
+ )
149
+ file_structure_detail = (
150
+ "Generate code for `index.html`, `style.css`, and `script.js` (if JS is needed). "
151
+ "Use these EXACT markers to separate the files:\n"
152
+ " `<!-- index.html -->`\n"
153
+ " `/* style.css */`\n"
154
+ " `// script.js //` (only include if JS is generated)\n"
155
+ "- Place the corresponding code directly after each marker.\n"
156
+ "- Inside `index.html`, link `style.css` in the `<head>` and include `script.js` before `</body>` if generated."
157
+ )
158
+ elif backend_choice == "Flask":
159
+ backend_instructions = (
160
+ f"- **Backend is '{backend_choice}':** Generate a basic Python Flask application (`app.py`).\n"
161
+ " - Include necessary imports (`Flask`, `render_template`).\n"
162
+ " - Create a simple Flask app instance.\n"
163
+ " - Define a root route (`@app.route('/')`) that renders `index.html`.\n"
164
+ " - Include the standard `if __name__ == '__main__': app.run(debug=True)` block.\n"
165
+ "- **HTML Templates:** Modify the generated `index.html` to be a Flask template.\n"
166
+ " - Use Jinja2 syntax (e.g., `{{ variable }}`) *if* the prompt implies dynamic data, otherwise generate static HTML structure within the template.\n"
167
+ " - Link CSS using `url_for('static', filename='style.css')`.\n"
168
+ " - Include JS using `url_for('static', filename='script.js')`.\n"
169
+ "- Assume CSS and JS are served from a `static` folder (but generate the code for `style.css` and `script.js` directly).\n"
170
+ )
171
+ file_markers.append("# app.py #") # Add Flask marker
172
+ file_structure_detail = (
173
+ "Generate code for `index.html` (as a Flask template), `style.css`, `script.js` (if JS is needed), and `app.py`.\n"
174
+ "Use these EXACT markers to separate the files:\n"
175
+ " `<!-- index.html -->`\n"
176
+ " `/* style.css */`\n"
177
+ " `// script.js //` (only include if JS is generated)\n"
178
+ " `# app.py #`\n"
179
+ "- Place the corresponding code directly after each marker."
180
+ )
181
+ elif backend_choice == "Node.js":
182
+ backend_instructions = (
183
+ f"- **Backend is '{backend_choice}':** Generate a basic Node.js Express application (`server.js` or `app.js`).\n"
184
+ " - Include necessary requires (`express`, `path`).\n"
185
+ " - Create an Express app instance.\n"
186
+ " - Configure middleware to serve static files from a `public` directory (e.g., `app.use(express.static('public'))`).\n"
187
+ " - Define a root route (`app.get('/')`) that sends `index.html` (located in `public`).\n"
188
+ " - Start the server (`app.listen(...)`).\n"
189
+ "- **HTML:** Generate a standard `index.html` file. Link CSS (`/style.css`) and JS (`/script.js`) assuming they are in the `public` folder.\n"
190
+ )
191
+ file_markers.append("// server.js //") # Add Node marker
192
+ file_structure_detail = (
193
+ "Generate code for `index.html`, `style.css`, `script.js` (if JS is needed), and `server.js` (or `app.js`).\n"
194
+ "Use these EXACT markers to separate the files:\n"
195
+ " `<!-- index.html -->`\n"
196
+ " `/* style.css */`\n"
197
+ " `// script.js //` (only include if JS is generated)\n"
198
+ " `// server.js //`\n"
199
+ "- Place the corresponding code directly after each marker."
200
+ )
201
+
202
+ # File Structure Instructions
203
  if file_structure == "Single File":
204
  file_structure_instruction = (
205
  "- **File Structure is 'Single File':** Generate ONLY a single, complete `index.html` file. "
206
  "Embed ALL CSS directly within `<style>` tags inside the `<head>`. "
207
  "Embed ALL necessary JavaScript directly within `<script>` tags just before the closing `</body>` tag. "
208
+ "Do NOT use any file separation markers. Ignore backend instructions if any were implied."
209
  )
210
  else: # Multiple Files
211
  file_structure_instruction = (
212
+ f"- **File Structure is 'Multiple Files':** {file_structure_detail}"
 
 
 
213
  )
214
 
215
+ # Assemble the full system message
216
  system_message = (
217
+ "You are an expert web developer AI. Your goal is to generate **complete, functional, and visually appealing** code based *only* on the user's description and selected options (Backend, File Structure). "
218
  "Follow ALL these rules with EXTREME STRICTNESS:\n"
219
+ "1. **STYLE & DETAIL:** Generate rich frontend code. Use **plenty of CSS** for layout, spacing, typography, colors, and effects.\n"
220
+ "2. **COMPLETENESS:** Generate the *entire* requested code structure for ALL specified files. Ensure proper syntax and closing tags/brackets. **DO NOT STOP GENERATING PREMATURELY.**\n"
221
  "3. **RAW CODE ONLY:** Your *entire* response MUST consist *only* of the requested source code. NO extra text, NO explanations, NO apologies, NO introductions, NO summaries, NO comments about the code (except standard code comments), NO MARKDOWN formatting (like ```html), and ***ABSOLUTELY NO CONVERSATIONAL TEXT OR TAGS*** like `<|user|>` or `<|assistant|>`.\n"
222
+ "4. **IMMEDIATE CODE START:** The response MUST begin *directly* with the first character of the code (e.g., `<!DOCTYPE html>` or the first file marker like `<!-- index.html -->`).\n"
223
+ "5. **IMMEDIATE CODE END:** The response MUST end *immediately* after the very last character of the generated code (`</html>`, `}`, `;`, `})`, etc.). DO NOT add *any* text, spaces, or newlines after the code concludes.\n"
224
+ f"{backend_instructions}" # Inject backend specific instructions
225
+ f"6. **FILE STRUCTURE ({file_structure}):** Strictly follow ONLY the instructions for the *selected* file structure:\n"
226
  f" {file_structure_instruction}\n"
227
+ "7. **ACCURACY:** Generate functional code addressing the user's prompt, respecting the chosen backend context (templating, file serving).\n\n"
228
+ "REMEMBER: Output ONLY raw code. Respect the chosen backend and file structure. Use the specified markers EXACTLY if generating multiple files. START immediately with code. FINISH the entire code generation. END immediately with code. NO extra text/tags."
 
 
229
  )
230
 
 
231
  messages = [
232
  {"role": "system", "content": system_message},
233
+ {"role": "user", "content": f"Generate the complete website code for: {prompt}"}
234
  ]
235
 
236
+ # --- Stream the response ---
237
+ raw_response = ""
238
+ token_count = 0
 
239
  try:
240
  print("Sending request to Hugging Face Inference API...")
241
  stream = client.chat_completion(
242
  messages=messages,
243
+ max_tokens=max_tokens,
244
  stream=True,
245
  temperature=temperature,
246
  top_p=top_p,
 
248
  for message in stream:
249
  token = message.choices[0].delta.content
250
  if isinstance(token, str):
251
+ token_count += 1
252
+ raw_response += token
253
+ # Yield the raw, cumulative response to the first tab for live feedback
254
+ yield (raw_response, "Streaming...", "Streaming...", "Streaming...")
255
+
256
+ print(f"API stream finished. Received ~{token_count} tokens. Raw length: {len(raw_response)}")
257
+ if token_count >= max_tokens - 15: # Check if close to the limit
 
 
 
 
258
  print(f"WARNING: Generation might have been cut short due to reaching max_tokens limit ({max_tokens}).")
259
+ raw_response += "\n\n<!-- WARNING: Output may be incomplete due to max_tokens limit. -->"
 
260
 
261
 
262
+ # --- Post-Processing (Basic Cleanup - Less aggressive now) ---
263
+ cleaned_response = raw_response.strip()
264
+ # Remove potential markdown code blocks (less likely with strict prompt but good safety)
265
  cleaned_response = re.sub(r"^\s*```[a-z]*\s*\n?", "", cleaned_response)
266
  cleaned_response = re.sub(r"\n?\s*```\s*$", "", cleaned_response)
267
+ # Remove assistant tags (less likely but possible)
268
+ cleaned_response = re.sub(r"<\|(user|assistant)\|>", "", cleaned_response, flags=re.IGNORECASE)
269
+
270
+ print("Parsing response into code blocks...")
271
+ parsed_code = parse_code_blocks(cleaned_response, file_structure, backend_choice)
272
+ print("Parsing complete.")
273
+
274
+ # Return the parsed code blocks for the respective tabs
275
+ return (
276
+ parsed_code["html"],
277
+ parsed_code["css"],
278
+ parsed_code["js"],
279
+ parsed_code["backend"] # Will be empty or placeholder for Static/Single File
280
+ )
 
 
 
 
281
 
282
  except Exception as e:
283
+ error_message = f"An error occurred during the API call or processing: {e}"
284
  print(error_message)
285
+ # Return error message to all tabs
286
+ error_output = f"## Error\n\nFailed to generate code.\n**Reason:** {e}\n\nPlease check the model status, your connection, and API token (if applicable)."
287
+ return (error_output, error_output, error_output, error_output)
288
 
289
 
290
  # --- Build Gradio Interface using Blocks ---
291
+ with gr.Blocks(css=".gradio-container { max-width: 95% !important; }") as demo:
292
+ gr.Markdown("# ✨ Website Code Generator v2 ✨")
293
  gr.Markdown(
294
+ "Describe the website you want. Select a backend context hint (Static, Flask, Node.js) and file structure. "
295
+ "The AI will generate code, attempting to respect the backend choice (e.g., basic server file, template syntax). "
296
+ "**Output appears in the tabs below.**\n"
297
  "**Important:**\n"
298
+ "1. Backend generation is experimental. The AI might produce basic structures but not fully complex applications.\n"
299
+ "2. For 'Multiple Files', code is split based on markers (`<!-- index.html -->`, `/* style.css */`, etc.). Check tabs for results.\n"
300
+ "3. **If code seems cut off**, increase 'Max New Tokens' and regenerate!"
301
  )
302
 
303
  with gr.Row():
304
  with gr.Column(scale=2):
305
  prompt_input = gr.Textbox(
306
  label="Website Description",
307
+ placeholder="e.g., A Flask app portfolio site with nav, hero, project cards (use Jinja placeholders for data), and contact form.",
308
  lines=6,
309
  )
310
  backend_radio = gr.Radio(
311
+ ["Static", "Flask", "Node.js"], label="Backend Context", value="Static",
312
+ info="Static: Frontend only. Flask/Node: Attempts basic backend file + frontend integration (experimental)."
313
  )
314
  file_structure_radio = gr.Radio(
315
  ["Multiple Files", "Single File"], label="Output File Structure", value="Multiple Files",
316
+ info="Single: All in index.html. Multiple: Separated into tabs (HTML, CSS, JS, Backend if applicable)."
317
  )
318
+ generate_button = gr.Button("🚀 Generate Code", variant="primary")
319
 
320
  with gr.Column(scale=3):
321
+ # Use Tabs for output
322
+ with gr.Tabs(elem_id="code-tabs"):
323
+ with gr.Tab("HTML", elem_id="html-tab"):
324
+ html_output = gr.Code(
325
+ label="index.html", # Label clarifies file even if tab name is generic
326
+ language="html",
327
+ lines=28, # Adjust lines per tab
328
+ interactive=False,
329
+ show_label=True
330
+ )
331
+ with gr.Tab("CSS", elem_id="css-tab"):
332
+ css_output = gr.Code(
333
+ label="style.css",
334
+ language="css",
335
+ lines=28,
336
+ interactive=False,
337
+ show_label=True
338
+ )
339
+ with gr.Tab("JavaScript", elem_id="js-tab"):
340
+ js_output = gr.Code(
341
+ label="script.js",
342
+ language="javascript",
343
+ lines=28,
344
+ interactive=False,
345
+ show_label=True
346
+ )
347
+ with gr.Tab("Backend", elem_id="backend-tab"):
348
+ # Label will indicate file type
349
+ backend_output = gr.Code(
350
+ label="app.py / server.js",
351
+ language="python", # Default, can maybe be dynamic later if needed
352
+ lines=28,
353
+ interactive=False,
354
+ show_label=True
355
+ )
356
+
357
 
358
  with gr.Accordion("Advanced Generation Settings", open=False):
 
359
  max_tokens_slider = gr.Slider(
360
  minimum=512,
361
+ maximum=4096, # Adjust based on model's max context if needed
362
+ value=3072,
363
+ step=256,
364
  label="Max New Tokens",
365
+ info="Max length of generated code (all files combined). Increase if output is cut off!"
366
  )
367
  temperature_slider = gr.Slider(
368
  minimum=0.1, maximum=1.2, value=0.7, step=0.1, label="Temperature",
 
384
  temperature_slider,
385
  top_p_slider,
386
  ],
387
+ # Output to the individual code blocks within the tabs
388
+ outputs=[
389
+ html_output,
390
+ css_output,
391
+ js_output,
392
+ backend_output,
393
+ ],
394
  )
395
 
396
  # --- Examples ---
397
  gr.Examples(
398
  examples=[
399
+ # Static Examples
400
  ["A simple counter page with a number display, an increment button, and a decrement button. Style the buttons nicely and center everything.", "Static", "Single File"],
401
  ["A responsive product grid for an e-commerce site. Each card needs an image, title, price, and 'Add to Cart' button with a hover effect. Use modern CSS.", "Static", "Multiple Files"],
402
+ # Flask Example
403
+ ["A personal blog homepage using Flask. Include a clean header with nav links, a main area for post summaries (use Jinja loops for placeholder posts like {{ post.title }}), and a simple footer.", "Flask", "Multiple Files"],
404
+ # Node.js Example
405
+ ["A 'Coming Soon' page using Node.js/Express to serve the static files. Include a large countdown timer (use JS), a background image, and an email signup form. Make it look sleek.", "Node.js", "Multiple Files"],
406
+ # More Complex Examples
407
+ ["A simple Flask app for a to-do list. The main page shows the list (use Jinja). Include a form to add new items (POST request handled by Flask). Store items in a simple Python list in memory for now.", "Flask", "Multiple Files"],
408
+ ["A portfolio website using Static generation. Sections for Hero, About Me, Projects (grid layout), and Contact Form. Add subtle scroll animations.", "Static", "Multiple Files"],
409
+
 
410
  ],
411
  inputs=[prompt_input, backend_radio, file_structure_radio],
412
+ label="Example Prompts (Try Different Backends!)"
413
  )
414
+
415
  # --- Launch ---
416
  if __name__ == "__main__":
417
  print("Starting Gradio app...")
418
+ # Ensure queue is enabled for Spaces, might need higher concurrency if backend generation is slow
419
  demo.queue(max_size=10).launch()
420
+ print("Gradio app launched.")