MINEOGO committed
Commit de1581d · verified · 1 Parent(s): 244ea60

Update app.py

Files changed (1)
  1. app.py +67 -50
app.py CHANGED
@@ -32,17 +32,17 @@ def generate_code(
 ):
     """
     Generates website code based on user prompt and choices.
-    Aims for richer CSS and strictly outputs ONLY raw code.
+    Aims for richer CSS, emphasizes completeness, and strictly outputs ONLY raw code.
     Yields the code token by token for live updates.
     """
     print(f"--- Generating Code ---")
     print(f"Prompt: {prompt[:100]}...")
     print(f"Backend Context: {backend_choice}")
     print(f"File Structure: {file_structure}")
+    # Log the max_tokens value being used for this request
     print(f"Settings: Max Tokens={max_tokens}, Temp={temperature}, Top-P={top_p}")

-    # --- Dynamically Build System Message Based on File Structure & Style Request ---
-
+    # --- Dynamically Build System Message ---
     if file_structure == "Single File":
         file_structure_instruction = (
             "- **File Structure is 'Single File':** Generate ONLY a single, complete `index.html` file. "
@@ -58,75 +58,84 @@ def generate_code(
             "- Inside `index.html`, link `style.css` in the `<head>` and include `script.js` before `</body>` if generated."
         )

-    # Assemble the full system message with enhanced style guidance and stricter output rules
+    # Assemble the full system message - Emphasizing completeness and NO premature stopping
     system_message = (
-        "You are an expert frontend web developer AI. Your primary goal is to generate **visually appealing, modern, and well-styled** frontend code (HTML, CSS, client-side JS) based *only* on the user's description and selected options. "
+        "You are an expert frontend web developer AI. Your primary goal is to generate **complete, visually appealing, modern, and well-styled** frontend code (HTML, CSS, client-side JS) based *only* on the user's description and selected options. "
         "Follow ALL these rules with EXTREME STRICTNESS:\n"
-        "1. **STYLE & DETAIL:** Generate rich, detailed code. Don't just make minimal examples. Use **plenty of CSS** for layout (Flexbox/Grid), spacing (padding/margin), typography (fonts), colors, and consider adding subtle transitions or effects for a polished look. Aim for a high-quality visual result.\n"
-        "2. **RAW CODE ONLY:** Your *entire* response MUST consist *only* of the requested source code. NO extra text, NO explanations, NO apologies, NO introductions ('Here is the code...'), NO summaries, NO comments about the code (except standard code comments), NO MARKDOWN formatting (like ```html), and ***ABSOLUTELY NO CONVERSATIONAL TEXT OR TAGS*** like `<|user|>` or `<|assistant|>` before, during, or after the code.\n"
-        "3. **IMMEDIATE CODE START:** The response MUST begin *directly* with the first character of the code (e.g., `<!DOCTYPE html>` or `<!-- index.html -->`). NO leading spaces, newlines, or any other characters.\n"
-        "4. **IMMEDIATE CODE END:** The response MUST end *immediately* after the very last character of the generated code (e.g., the final `</html>`, `}`, or `;`). DO NOT add *any* text, spaces, or newlines after the code concludes.\n"
-        "5. **MANDATORY `index.html`:** Always generate the content for `index.html`.\n"
-        f"6. **FILE STRUCTURE ({file_structure}):** Strictly follow ONLY the instructions for the *selected* file structure below:\n"
-        f" {file_structure_instruction}\n" # Insert the specific instruction here
-        "7. **BACKEND CONTEXT ({backend_choice}):** Use this as a hint for frontend structure (e.g., placeholders like `{{ variable }}` if 'Flask' is chosen), but ONLY generate the static frontend code (HTML, CSS, client-side JS).\n"
-        "8. **FRONTEND ONLY:** Do NOT generate server-side code (Python, Node.js, etc.).\n"
-        "9. **ACCURACY:** Generate functional code that directly addresses the user's prompt.\n\n"
-        "REMEMBER: Create visually appealing code. Output ONLY the raw code. START immediately with code. END immediately with code. NO extra text or tags EVER." # Final reinforcement
+        "1. **STYLE & DETAIL:** Generate rich, detailed code. Use **plenty of CSS** for layout, spacing, typography, colors, and effects. Aim for a high-quality visual result.\n"
+        "2. **COMPLETENESS:** Generate the *entire* requested code structure. Ensure all files/sections are fully generated and properly closed (e.g., closing HTML tags `</html>`, CSS braces `}`, script tags `</script>`). **DO NOT STOP GENERATING PREMATURELY.** Finish the whole task.\n"
+        "3. **RAW CODE ONLY:** Your *entire* response MUST consist *only* of the requested source code. NO extra text, NO explanations, NO apologies, NO introductions, NO summaries, NO comments about the code (except standard code comments), NO MARKDOWN formatting (like ```html), and ***ABSOLUTELY NO CONVERSATIONAL TEXT OR TAGS*** like `<|user|>` or `<|assistant|>`.\n"
+        "4. **IMMEDIATE CODE START:** The response MUST begin *directly* with the first character of the code (`<!DOCTYPE html>` or `<!-- index.html -->`).\n"
+        "5. **IMMEDIATE CODE END:** The response MUST end *immediately* after the very last character of the generated code (`</html>`, `}`, `;`, etc.). DO NOT add *any* text, spaces, or newlines after the code concludes.\n"
+        "6. **MANDATORY `index.html`:** Always generate the content for `index.html`.\n"
+        f"7. **FILE STRUCTURE ({file_structure}):** Strictly follow ONLY the instructions for the *selected* file structure:\n"
+        f" {file_structure_instruction}\n"
+        "8. **BACKEND CONTEXT ({backend_choice}):** Use as a hint for frontend structure only. Generate ONLY frontend code.\n"
+        "9. **FRONTEND ONLY:** Do NOT generate server-side code.\n"
+        "10. **ACCURACY:** Generate functional code addressing the user's prompt.\n\n"
+        "REMEMBER: Create COMPLETE, visually appealing code. Output ONLY raw code. START immediately with code. FINISH the entire code generation. END immediately with code. NO extra text/tags."
     )

     # --- Construct the messages for the API ---
     messages = [
         {"role": "system", "content": system_message},
-        {"role": "user", "content": f"Generate the website frontend code for: {prompt}"} # Slightly rephrased user message
+        {"role": "user", "content": f"Generate the complete website frontend code for: {prompt}"}
     ]

     # --- Stream the response from the API ---
     response_stream = ""
     full_response_for_cleaning = ""
+    token_count = 0 # Add a simple counter for debugging
     try:
         print("Sending request to Hugging Face Inference API...")
-        for message in client.chat_completion(
+        stream = client.chat_completion(
             messages=messages,
-            max_tokens=max_tokens,
+            max_tokens=max_tokens, # Use the value from the slider
             stream=True,
-            temperature=temperature, # User controlled - 0.7 is a reasonable default balance
+            temperature=temperature,
             top_p=top_p,
-        ):
+        )
+        for message in stream:
             token = message.choices[0].delta.content
             if isinstance(token, str):
+                token_count += 1 # Crude approximation of tokens received
                 response_stream += token
                 full_response_for_cleaning += token
+                # Log progress occasionally for debugging if needed
+                # if token_count % 100 == 0:
+                # print(f"Stream progress: Received ~{token_count} tokens...")
                 yield response_stream # Yield cumulative response for live update

-        print(f"API stream finished. Raw length: {len(full_response_for_cleaning)}")
+        print(f"API stream finished. Received ~{token_count} tokens. Raw length: {len(full_response_for_cleaning)}")
+        # Check if received tokens are close to max_tokens, indicating potential cutoff
+        if token_count >= max_tokens - 10: # Check if close to the limit (allowing for slight variations)
+            print(f"WARNING: Generation might have been cut short due to reaching max_tokens limit ({max_tokens}).")
+            # Optionally, append a warning to the output itself, though it violates the "code only" rule
+            # full_response_for_cleaning += "\n\n<!-- WARNING: Output may be incomplete due to max_tokens limit. -->"
+

         # --- Post-Processing (Fallback Safety Net) ---
-        # Primarily rely on the prompt, but clean common issues just in case.
         cleaned_response = full_response_for_cleaning.strip()
-
-        # Remove potential leading/trailing markdown code fences
         cleaned_response = re.sub(r"^\s*```[a-z]*\s*\n?", "", cleaned_response)
         cleaned_response = re.sub(r"\n?\s*```\s*$", "", cleaned_response)
-
-        # Remove potential conversational tags if they slip through (less likely now)
         cleaned_response = re.sub(r"<\s*\|?\s*(user|assistant)\s*\|?\s*>", "", cleaned_response, flags=re.IGNORECASE)
-
-        # Remove common introductory/closing phrases if they slip through
         common_phrases = [
             "Here is the code:", "Okay, here is the code:", "Here's the code:",
             "Sure, here is the code you requested:", "Let me know if you need anything else."
-            # Add more if needed
         ]
-        temp_response = cleaned_response.lower()
+        # Simple check, might need more robust cleaning if issues persist
         for phrase in common_phrases:
-            if temp_response.startswith(phrase.lower()):
+            # Check start
+            if cleaned_response.lower().startswith(phrase.lower()):
                 cleaned_response = cleaned_response[len(phrase):].lstrip()
-            if temp_response.endswith(phrase.lower()):
-                cleaned_response = cleaned_response[:-len(phrase)].rstrip()
+            # Check end - be careful not to remove parts of valid code
+            # This end check is risky, might remove valid closing comments or similar.
+            # Consider removing if it causes issues.
+            # if cleaned_response.lower().endswith(phrase.lower()):
+            # cleaned_response = cleaned_response[:-len(phrase)].rstrip()
+

-        # Yield the final cleaned response *once* after streaming.
-        yield cleaned_response.strip()
+        yield cleaned_response.strip() # Yield final cleaned response

     except Exception as e:
         error_message = f"An error occurred during the API call: {e}"
@@ -136,46 +145,53 @@ def generate_code(

 # --- Build Gradio Interface using Blocks ---
 with gr.Blocks(css=".gradio-container { max-width: 90% !important; }") as demo:
-    gr.Markdown("# ✨ Website Code Generator ✨") # Added some flair
+    gr.Markdown("# ✨ Website Code Generator ✨")
     gr.Markdown(
         "Describe the website you want. The AI will generate **visually styled** frontend code (HTML, CSS, JS) using **plenty of CSS**. "
-        "The code appears live below. \n"
-        "**Important:** This generator creates code based *only* on your initial description. To refine the output, modify your description and generate again." # Added clarification
+        "The code appears live below.\n"
+        "**Important:**\n"
+        "1. This generator creates code based *only* on your initial description. To refine, modify your description and generate again.\n"
+        "2. **If the code output stops abruptly**, it likely hit the 'Max New Tokens' limit. **Increase the slider value below** and try again!" # Added explanation
     )

     with gr.Row():
         with gr.Column(scale=2):
             prompt_input = gr.Textbox(
                 label="Website Description",
-                placeholder="e.g., A modern portfolio landing page with a smooth scroll navigation, a stylish hero section, project cards with hover effects, and a contact form.", # More ambitious placeholder
-                lines=6, # Slightly more lines
+                placeholder="e.g., A modern portfolio landing page with smooth scroll nav, stylish hero, project cards with hover effects, contact form.",
+                lines=6,
             )
             backend_radio = gr.Radio(
                 ["Static", "Flask", "Node.js"], label="Backend Context Hint", value="Static",
-                info="Hint for AI (e.g., template placeholders) - generates ONLY frontend code."
+                info="Hint for AI (e.g., {{var}}) - generates ONLY frontend code."
             )
             file_structure_radio = gr.Radio(
                 ["Multiple Files", "Single File"], label="Output File Structure", value="Multiple Files",
                 info="Choose 'Single File' (all in index.html) or 'Multiple Files' (separate css/js)."
             )
-            generate_button = gr.Button("🎨 Generate Stylish Website Code", variant="primary") # Updated button text
+            generate_button = gr.Button("🎨 Generate Stylish Website Code", variant="primary")

         with gr.Column(scale=3):
             code_output = gr.Code(
-                label="Generated Code (Raw Output - Aiming for Style!)", # Updated label
+                label="Generated Code (Raw Output - Aiming for Style!)",
                 language="html",
-                lines=30, # More lines for potentially longer code
+                lines=30,
                 interactive=False,
             )

     with gr.Accordion("Advanced Generation Settings", open=False):
+        # INCREASED max_tokens range and default value
         max_tokens_slider = gr.Slider(
-            minimum=512, maximum=4096, value=2560, step=128, label="Max New Tokens", # Increased default
-            info="Max length of generated code. Increase for complex pages."
+            minimum=512,
+            maximum=4096, # Set maximum to model's limit (Zephyr 7B can handle this)
+            value=3072, # Increased default significantly
+            step=256, # Larger steps might be practical
+            label="Max New Tokens",
+            info="Max length of generated code. Increase if output is cut off!" # Updated info
         )
         temperature_slider = gr.Slider(
-            minimum=0.1, maximum=1.2, value=0.7, step=0.1, label="Temperature", # Default 0.7 is often good
-            info="Controls randomness. Lower=more predictable, Higher=more creative."
+            minimum=0.1, maximum=1.2, value=0.7, step=0.1, label="Temperature",
+            info="Controls randomness. Lower=focused, Higher=creative."
         )
         top_p_slider = gr.Slider(
             minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-P",
@@ -205,11 +221,12 @@ with gr.Blocks(css=".gradio-container { max-width: 90% !important; }") as demo:
             ["A 'Coming Soon' page with a large countdown timer (use JS), a background image, and an email signup form. Make it look sleek.", "Static", "Multiple Files"]
         ],
         inputs=[prompt_input, backend_radio, file_structure_radio],
-        label="Example Prompts (Aiming for Style)" # Updated label
+        label="Example Prompts (Aiming for Style)"
     )

 # --- Launch ---
 if __name__ == "__main__":
     print("Starting Gradio app...")
+    # Ensure queue is enabled for Spaces
     demo.queue(max_size=10).launch()
     print("Gradio app launched.")