MINEOGO committed on
Commit
4e60047
·
verified ·
1 Parent(s): 0e4a7a5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +91 -97
app.py CHANGED
@@ -1,10 +1,9 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  import os
 
4
 
5
  # --- Configuration ---
6
- # Use environment variable for token, fallback to default if not set
7
- # For Spaces, set the HF_TOKEN secret
8
  API_TOKEN = os.getenv("HF_TOKEN", None)
9
  MODEL = "HuggingFaceH4/zephyr-7b-beta" # Or choose another suitable model
10
 
@@ -17,13 +16,10 @@ try:
17
  else:
18
  print("HF Token not found. Running without token (may lead to rate limits).")
19
  client = InferenceClient(model=MODEL)
20
- # Optional: Add a quick health check if needed, though client init usually suffices
21
- # client.get_model_status(MODEL) # Example check, might raise if model invalid
22
  print("Inference Client initialized successfully.")
23
  except Exception as e:
24
  print(f"Error initializing Inference Client: {e}")
25
- # Provide a more informative error in the Gradio interface
26
- raise gr.Error(f"Failed to initialize the AI model client for '{MODEL}'. Please check the model name and ensure network connectivity. If running locally without a token, you might hit rate limits. If using HF Spaces, ensure the HF_TOKEN secret is set correctly. Original Error: {e}")
27
 
28
  # --- Core Code Generation Function ---
29
  def generate_code(
@@ -37,50 +33,64 @@ def generate_code(
37
  """
38
  Generates website code based on user prompt and choices.
39
  Yields the code token by token for live updates.
 
40
  """
41
  print(f"--- Generating Code ---")
42
- print(f"Prompt: {prompt[:100]}...") # Log truncated prompt
43
  print(f"Backend Context: {backend_choice}")
44
- print(f"File Structure: {file_structure}")
45
  print(f"Settings: Max Tokens={max_tokens}, Temp={temperature}, Top-P={top_p}")
46
 
47
- # --- System Message (Internal) ---
48
- # Guides the AI's behavior. Not user-editable in the UI.
49
- # Refined system prompt for clarity and stricter output formatting
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
  system_message = (
51
- "You are an expert frontend web developer AI. Your task is to generate HTML, CSS, and potentially JavaScript code "
52
- "for a website based ONLY on the user's description. Adhere strictly to the following constraints:\n"
53
- "1. **Output ONLY Code:** Generate only the raw code for the requested files (`index.html`, `style.css`, `script.js`). Do NOT include any introductory text, explanations, apologies, markdown formatting (like ```html), or closing remarks. Your response must start *immediately* with the code (e.g., `<!DOCTYPE html>` or `<!-- index.html -->`).\n"
54
- "2. **index.html is Mandatory:** ALWAYS generate a complete `index.html` file.\n"
55
- "3. **File Structure:**\n"
56
- f" - If '{file_structure}' is 'Multiple Files':\n"
57
- " - Use clear markers EXACTLY as follows:\n"
58
- " `<!-- index.html -->`\n"
59
- " `/* style.css */`\n"
60
- " `// script.js` (only include if JavaScript is necessary for the described functionality)\n"
61
- " - Place the corresponding code directly after each marker.\n"
62
- " - Link the CSS (`<link rel='stylesheet' href='style.css'>`) in the `<head>` of `index.html`.\n"
63
- " - Include the JS (`<script src='script.js'></script>`) just before the closing `</body>` tag in `index.html` if `script.js` is generated.\n"
64
- f" - If '{file_structure}' is 'Single File':\n"
65
- " - Embed ALL CSS within `<style>` tags inside the `<head>` of the `index.html` file.\n"
66
- " - Embed ALL necessary JavaScript within `<script>` tags just before the closing `</body>` tag of the `index.html` file.\n"
67
- "4. **Backend Context ({backend_choice}):** This choice provides context. For 'Flask' or 'Node.js', you might include standard template placeholders (like `{{ variable }}` for Flask/Jinja2 or similar patterns for Node templating engines if appropriate for the frontend structure), but primarily focus on generating the static frontend assets (HTML structure, CSS styling, client-side JS interactions). For 'Static', generate standard HTML/CSS/JS without backend-specific placeholders.\n"
68
- "5. **Focus on Frontend:** Generate the client-side code. Do not generate server-side Flask or Node.js code.\n"
69
- "6. **Completeness:** Generate functional code based on the prompt. If the prompt is vague, create a reasonable default structure."
70
  )
71
 
72
-
73
  # --- Construct the messages for the API ---
74
  messages = [
75
  {"role": "system", "content": system_message},
76
- {"role": "user", "content": f"Create a website based on this description: {prompt}"} # Make user role explicit
 
77
  ]
78
 
79
  # --- Stream the response from the API ---
80
  response_stream = ""
 
81
  try:
82
  print("Sending request to Hugging Face Inference API...")
83
- # CORRECTED: Removed stop_sequences
84
  for message in client.chat_completion(
85
  messages=messages,
86
  max_tokens=max_tokens,
@@ -89,95 +99,85 @@ def generate_code(
89
  top_p=top_p,
90
  ):
91
  token = message.choices[0].delta.content
92
- # Basic check to ensure token is a string (it should be)
93
  if isinstance(token, str):
94
  response_stream += token
95
- # Clean potential unwanted prefixes sometimes added by models
96
- # if response_stream.strip().startswith(("```html", "```")):
97
- # response_stream = response_stream.split("\n", 1)[-1]
98
  yield response_stream # Yield the cumulative response for live update
99
 
100
- print(f"API stream finished. Total length: {len(response_stream)}")
101
- # Optional: Post-process to remove leading/trailing whitespace or markdown
102
- final_response = response_stream.strip()
103
- # More aggressive cleaning if needed:
104
- # if final_response.startswith("```html"):
105
- # final_response = final_response[7:]
106
- # if final_response.endswith("```"):
107
- # final_response = final_response[:-3]
108
- # yield final_response.strip() # Yield final cleaned response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
109
 
110
  except Exception as e:
111
  error_message = f"An error occurred during the API call: {e}"
112
  print(error_message)
113
- # Display the error clearly in the output box
114
  yield f"## Error\n\nFailed to generate code.\n**Reason:** {e}\n\nPlease check the model status, your connection, and API token (if applicable)."
115
 
 
116
  # --- Build Gradio Interface using Blocks ---
117
- with gr.Blocks(css=".gradio-container { max-width: 90% !important; }") as demo: # Add CSS for wider layout
118
  gr.Markdown("# Website Code Generator 🚀")
119
  gr.Markdown(
120
- "Describe the website you want, choose your options, and the AI will generate the frontend code (HTML, CSS, JS). "
121
- "The code will appear live in the text editor below. **Note:** The AI generates only frontend code based on your description."
122
  )
123
 
124
  with gr.Row():
125
  with gr.Column(scale=2):
126
  prompt_input = gr.Textbox(
127
  label="Website Description",
128
- placeholder="e.g., A simple landing page with a navigation bar (Home, About, Contact), a hero section with a title and button, and a simple footer.",
129
- lines=5, # Increased lines for better prompt visibility
130
  )
131
  backend_radio = gr.Radio(
132
  ["Static", "Flask", "Node.js"],
133
  label="Backend Context Hint",
134
  value="Static",
135
- info="Hint for AI: influences potential template placeholders (e.g., {{var}}) but AI generates ONLY frontend code.",
136
  )
137
  file_structure_radio = gr.Radio(
138
- ["Multiple Files", "Single File"], # Default to multiple for clarity
139
  label="Output File Structure",
140
  value="Multiple Files",
141
- info="Generate separate files (index.html, style.css, script.js) or embed all in index.html?",
142
  )
143
  generate_button = gr.Button("Generate Website Code", variant="primary")
144
 
145
  with gr.Column(scale=3):
146
- # Use Code component which is better suited for displaying code
147
  code_output = gr.Code(
148
- label="Generated Code",
149
- language="html", # Base language, will contain CSS/JS markers if multiple files
150
- lines=28, # Increased lines
151
- interactive=False, # Read-only display
152
  )
153
 
154
  with gr.Accordion("Advanced Generation Settings", open=False):
155
- max_tokens_slider = gr.Slider(
156
- minimum=512, # Increased minimum for potentially complex sites
157
- maximum=4096, # Match common context lengths
158
- value=2048, # Increased default
159
- step=128,
160
- label="Max New Tokens",
161
- info="Maximum number of tokens (approx. words/code elements) the AI can generate."
162
- )
163
- temperature_slider = gr.Slider(
164
- minimum=0.1,
165
- maximum=1.2, # Allow slightly higher for more creativity if needed
166
- value=0.6, # Slightly lower default for more predictable code
167
- step=0.1,
168
- label="Temperature",
169
- info="Controls randomness. Lower values (e.g., 0.2) make output more focused, higher values (e.g., 0.9) make it more creative/random."
170
- )
171
- top_p_slider = gr.Slider(
172
- minimum=0.1,
173
- maximum=1.0,
174
- value=0.9, # Slightly lower default top-p
175
- step=0.05,
176
- label="Top-P (Nucleus Sampling)",
177
- info="Alternative to temperature for controlling randomness. Considers only the most probable tokens with cumulative probability p."
178
- )
179
 
180
- # --- Connect Inputs/Outputs to the Function ---
181
  generate_button.click(
182
  fn=generate_code,
183
  inputs=[
@@ -189,29 +189,23 @@ with gr.Blocks(css=".gradio-container { max-width: 90% !important; }") as demo:
189
  top_p_slider,
190
  ],
191
  outputs=code_output,
192
- #api_name="generate_website_code" # Optional: for API usage
193
  )
194
 
195
- # Add examples for guidance
196
  gr.Examples(
197
  examples=[
198
  ["A simple counter page with a number display, an increment button, and a decrement button. Use Javascript for the logic.", "Static", "Single File"],
199
  ["A login form with fields for username and password, and a submit button. Basic styling.", "Static", "Multiple Files"],
200
- ["Product cards display grid. Each card should show an image, product name, price, and an 'Add to Cart' button. Make it responsive.", "Static", "Multiple Files"],
201
  ["A personal blog homepage with a header, a list of recent posts (just placeholders), and a sidebar with categories.", "Flask", "Multiple Files"],
 
202
  ],
203
  inputs=[prompt_input, backend_radio, file_structure_radio],
204
- label="Example Prompts" # Optional label for the examples section
205
  )
206
 
207
-
208
- # --- Launch the App ---
209
  if __name__ == "__main__":
210
  print("Starting Gradio app...")
211
- # Enable queuing for handling multiple users, essential for Spaces
212
- # Increase concurrency count if needed and if your hardware/Space plan supports it
213
- demo.queue(max_size=10).launch(
214
- # debug=True, # Set debug=False for production/Spaces deployment
215
- # share=False # Set share=True to create a temporary public link (useful for local testing)
216
- )
217
  print("Gradio app launched.")
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  import os
4
+ import re # Import regex for potential cleaning, although prompt is the primary fix
5
 
6
  # --- Configuration ---
 
 
7
  API_TOKEN = os.getenv("HF_TOKEN", None)
8
  MODEL = "HuggingFaceH4/zephyr-7b-beta" # Or choose another suitable model
9
 
 
16
  else:
17
  print("HF Token not found. Running without token (may lead to rate limits).")
18
  client = InferenceClient(model=MODEL)
 
 
19
  print("Inference Client initialized successfully.")
20
  except Exception as e:
21
  print(f"Error initializing Inference Client: {e}")
22
+ raise gr.Error(f"Failed to initialize the AI model client for '{MODEL}'. Check model name, network, and HF_TOKEN secret if applicable. Error: {e}")
 
23
 
24
  # --- Core Code Generation Function ---
25
  def generate_code(
 
33
  """
34
  Generates website code based on user prompt and choices.
35
  Yields the code token by token for live updates.
36
+ Strives to output ONLY raw code.
37
  """
38
  print(f"--- Generating Code ---")
39
+ print(f"Prompt: {prompt[:100]}...")
40
  print(f"Backend Context: {backend_choice}")
41
+ print(f"File Structure: {file_structure}") # Crucial input
42
  print(f"Settings: Max Tokens={max_tokens}, Temp={temperature}, Top-P={top_p}")
43
 
44
+ # --- Dynamically Build System Message Based on File Structure ---
45
+
46
+ # Define specific instructions based on the user's choice
47
+ if file_structure == "Single File":
48
+ file_structure_instruction = (
49
+ "- **File Structure is 'Single File':** Generate ONLY a single, complete `index.html` file. "
50
+ "Embed ALL CSS directly within `<style>` tags inside the `<head>`. "
51
+ "Embed ALL necessary JavaScript directly within `<script>` tags just before the closing `</body>` tag. "
52
+ "Do NOT use markers like `<!-- index.html -->` or `/* style.css */`."
53
+ )
54
+ else: # Multiple Files
55
+ file_structure_instruction = (
56
+ "- **File Structure is 'Multiple Files':** Generate code for `index.html`, `style.css`, and `script.js` (if JS is needed). "
57
+ "Use these EXACT markers to separate the files:\n"
58
+ " `<!-- index.html -->`\n"
59
+ " `/* style.css */`\n"
60
+ " `// script.js` (ONLY include this marker and the JS code if JavaScript is necessary for the requested functionality).\n"
61
+ "- Place the corresponding code directly after each marker.\n"
62
+ "- Inside the `index.html` code block, ensure you correctly link the CSS (`<link rel='stylesheet' href='style.css'>`) in the `<head>`.\n"
63
+ "- Inside the `index.html` code block, ensure you correctly include the JS (`<script src='script.js'></script>`) just before the closing `</body>` tag *if* the `// script.js` marker and code are present."
64
+ )
65
+
66
+ # Assemble the full system message with the dynamic instruction
67
+ # Emphasize constraints VERY strongly
68
  system_message = (
69
+ "You are an expert frontend web developer AI. Your SOLE task is to generate RAW SOURCE CODE (HTML, CSS, JavaScript) based on the user's request and selected options. "
70
+ "You MUST follow ALL these rules ABSOLUTELY:\n"
71
+ "1. **RAW CODE ONLY:** Your *entire* response MUST consist *only* of the requested code. NO extra text, NO explanations, NO apologies, NO introductions (like 'Here is the code...', 'Okay, here is the code...'), NO summaries, NO comments about the code (unless it's a standard code comment like `<!-- comment -->`), and ABSOLUTELY NO MARKDOWN FORMATTING like ```html, ```css, ```javascript, or ```.\n"
72
+ "2. **IMMEDIATE CODE START:** The response MUST begin *directly* with the first character of the code (e.g., `<!DOCTYPE html>` or `<!-- index.html -->`). NO leading spaces or lines.\n"
73
+ "3. **MANDATORY `index.html`:** Always generate the content for `index.html`.\n"
74
+ f"4. **FILE STRUCTURE ({file_structure}):** Strictly follow ONLY the instructions for the *selected* file structure below:\n"
75
+ f" {file_structure_instruction}\n" # Insert the specific instruction here
76
+ "5. **BACKEND CONTEXT ({backend_choice}):** Use this as a hint for frontend structure (e.g., placeholders like `{{ variable }}` if 'Flask' is chosen), but ONLY generate the static frontend code (HTML, CSS, client-side JS).\n"
77
+ "6. **FRONTEND ONLY:** Do NOT generate server-side code (Python, Node.js, etc.).\n"
78
+ "7. **ACCURACY:** Generate functional code that directly addresses the user's prompt.\n\n"
79
+ "REMEMBER: ONLY CODE. NO OTHER TEXT. START IMMEDIATELY WITH CODE." # Final reinforcement
 
 
 
 
 
 
 
 
80
  )
81
 
 
82
  # --- Construct the messages for the API ---
83
  messages = [
84
  {"role": "system", "content": system_message},
85
+ # Make user prompt clearer
86
+ {"role": "user", "content": f"Generate the website frontend code based on this description: {prompt}"}
87
  ]
88
 
89
  # --- Stream the response from the API ---
90
  response_stream = ""
91
+ full_response_for_cleaning = ""
92
  try:
93
  print("Sending request to Hugging Face Inference API...")
 
94
  for message in client.chat_completion(
95
  messages=messages,
96
  max_tokens=max_tokens,
 
99
  top_p=top_p,
100
  ):
101
  token = message.choices[0].delta.content
 
102
  if isinstance(token, str):
103
  response_stream += token
104
+ full_response_for_cleaning += token # Keep a separate copy for potential final cleaning
 
 
105
  yield response_stream # Yield the cumulative response for live update
106
 
107
+ print(f"API stream finished. Raw length: {len(full_response_for_cleaning)}")
108
+
109
+ # --- Basic Post-Processing (Attempt to remove backticks if prompt fails) ---
110
+ # While the prompt *should* handle this, add a safety net.
111
+ cleaned_response = full_response_for_cleaning.strip()
112
+
113
+ # Remove potential leading/trailing markdown code fences more robustly
114
+ # Matches ``` followed by optional language identifier and newline, or just ```
115
+ cleaned_response = re.sub(r"^\s*```[a-z]*\s*\n?", "", cleaned_response)
116
+ cleaned_response = re.sub(r"\n?\s*```\s*$", "", cleaned_response)
117
+
118
+ # Also remove common introductory phrases if they slip through (less likely with strong prompt)
119
+ common_intros = [
120
+ "Here is the code:", "Okay, here is the code:", "Here's the code:",
121
+ "```html", "```css", "```javascript" # Also catch these if regex missed them
122
+ ]
123
+ for intro in common_intros:
124
+ if cleaned_response.lower().startswith(intro.lower()):
125
+ cleaned_response = cleaned_response[len(intro):].lstrip()
126
+
127
+ # Yield the final potentially cleaned response *once* after streaming is done
128
+ # This replaces the last yielded value from the loop if cleaning occurred
129
+ yield cleaned_response.strip() # Ensure no trailing whitespace after cleaning
130
+
131
 
132
  except Exception as e:
133
  error_message = f"An error occurred during the API call: {e}"
134
  print(error_message)
 
135
  yield f"## Error\n\nFailed to generate code.\n**Reason:** {e}\n\nPlease check the model status, your connection, and API token (if applicable)."
136
 
137
+
138
  # --- Build Gradio Interface using Blocks ---
139
+ with gr.Blocks(css=".gradio-container { max-width: 90% !important; }") as demo:
140
  gr.Markdown("# Website Code Generator 🚀")
141
  gr.Markdown(
142
+ "Describe the website, choose options, and get ONLY the raw frontend code. "
143
+ "Code appears live below. **Select 'Single File' or 'Multiple Files' carefully.**"
144
  )
145
 
146
  with gr.Row():
147
  with gr.Column(scale=2):
148
  prompt_input = gr.Textbox(
149
  label="Website Description",
150
+ placeholder="e.g., A simple landing page with a navbar, hero section, and footer.",
151
+ lines=5,
152
  )
153
  backend_radio = gr.Radio(
154
  ["Static", "Flask", "Node.js"],
155
  label="Backend Context Hint",
156
  value="Static",
157
+ info="Hint for AI (e.g., template placeholders) - generates ONLY frontend code.",
158
  )
159
  file_structure_radio = gr.Radio(
160
+ ["Multiple Files", "Single File"], # Default: Multiple
161
  label="Output File Structure",
162
  value="Multiple Files",
163
+ info="Choose 'Single File' for everything in index.html OR 'Multiple Files' for separate css/js.", # Clarified info
164
  )
165
  generate_button = gr.Button("Generate Website Code", variant="primary")
166
 
167
  with gr.Column(scale=3):
 
168
  code_output = gr.Code(
169
+ label="Generated Code (Raw Output)", # Updated label
170
+ language="html",
171
+ lines=28,
172
+ interactive=False,
173
  )
174
 
175
  with gr.Accordion("Advanced Generation Settings", open=False):
176
+ max_tokens_slider = gr.Slider(minimum=512, maximum=4096, value=2048, step=128, label="Max New Tokens")
177
+ temperature_slider = gr.Slider(minimum=0.1, maximum=1.2, value=0.6, step=0.1, label="Temperature")
178
+ top_p_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-P")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
179
 
180
+ # --- Connect Inputs/Outputs ---
181
  generate_button.click(
182
  fn=generate_code,
183
  inputs=[
 
189
  top_p_slider,
190
  ],
191
  outputs=code_output,
 
192
  )
193
 
194
+ # --- Examples ---
195
  gr.Examples(
196
  examples=[
197
  ["A simple counter page with a number display, an increment button, and a decrement button. Use Javascript for the logic.", "Static", "Single File"],
198
  ["A login form with fields for username and password, and a submit button. Basic styling.", "Static", "Multiple Files"],
199
+ ["Product cards display grid. Each card shows an image, product name, price, and an 'Add to Cart' button. Make it responsive.", "Static", "Multiple Files"],
200
  ["A personal blog homepage with a header, a list of recent posts (just placeholders), and a sidebar with categories.", "Flask", "Multiple Files"],
201
+ ["A very basic HTML page with just a title 'My App' and a heading 'Welcome'. No CSS or JS.", "Static", "Single File"]
202
  ],
203
  inputs=[prompt_input, backend_radio, file_structure_radio],
204
+ label="Example Prompts"
205
  )
206
 
207
+ # --- Launch ---
 
208
  if __name__ == "__main__":
209
  print("Starting Gradio app...")
210
+ demo.queue(max_size=10).launch()
 
 
 
 
 
211
  print("Gradio app launched.")