MINEOGO committed on
Commit fe916a7 · verified · 1 Parent(s): 9376840

Update app.py

Files changed (1): app.py +29 -113
app.py CHANGED
@@ -1,50 +1,31 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 import re
-import time # For potential brief pauses if needed
 
-# --- Hugging Face Token (Optional but Recommended) ---
-# from huggingface_hub import login
-# login("YOUR_HUGGINGFACE_TOKEN") # Replace with your token if needed
-
-# --- Inference Client ---
 try:
-    # You might need to specify the model URL directly if the alias isn't working
-    # client = InferenceClient(model="https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta")
     client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-    client.timeout = 120 # Increase timeout for potentially long generations
+    client.timeout = 120
 except Exception as e:
     print(f"Error initializing InferenceClient: {e}")
-    client = None # Set client to None if initialization fails
+    client = None
 
-# --- Parsing Function ---
 def parse_files(raw_response):
-    """
-    Parses filenames and code blocks from the raw AI output.
-    """
     if not raw_response:
         return []
-
-    # Pattern: Look for a filename line followed by content until the next filename line or end of string.
     pattern = re.compile(
-        r"^\s*([\w\-.\/\\]+\.\w+)\s*\n" # Filename line (must have an extension)
-        r"(.*?)" # Capture content (non-greedy)
-        r"(?=\n\s*[\w\-.\/\\]+\.\w+\s*\n|\Z)", # Lookahead for next filename or end of string
+        r"^\s*([\w\-.\/\\]+\.\w+)\s*\n"
+        r"(.*?)"
+        r"(?=\n\s*[\w\-.\/\\]+\.\w+\s*\n|\Z)",
         re.DOTALL | re.MULTILINE
     )
     files = pattern.findall(raw_response)
-
     cleaned_files = []
     for name, content in files:
-        # Remove common code block markers (``` optionally followed by lang)
         content_cleaned = re.sub(r"^\s*```[a-zA-Z]*\n?", "", content, flags=re.MULTILINE)
         content_cleaned = re.sub(r"\n?```\s*$", "", content_cleaned, flags=re.MULTILINE)
         cleaned_files.append((name.strip(), content_cleaned.strip()))
-
-    # Fallback if no files parsed but content exists
     if not cleaned_files and raw_response.strip():
         if any(c in raw_response for c in ['<','>','{','}',';','(',')']):
-            print("Warning: No filenames found, defaulting to index.html")
            lang = "html"
            if "{" in raw_response and "}" in raw_response and ":" in raw_response: lang = "css"
            elif "function" in raw_response or "const" in raw_response or "let" in raw_response: lang = "javascript"
@@ -52,28 +33,16 @@ def parse_files(raw_response):
            if lang == "css": default_filename = "style.css"
            elif lang == "javascript": default_filename = "script.js"
            cleaned_files.append((default_filename, raw_response.strip()))
-
     return cleaned_files
 
-# --- Streaming and Parsing Orchestrator ---
 def stream_and_parse_code(prompt, backend, system_message, max_tokens, temperature, top_p):
-    """
-    Streams raw output to one component and generates final tabs for another.
-    This function acts as the main callback for the button click.
-    Yields dictionary updates for Gradio components.
-    """
-    # Check if client initialized correctly
     if not client:
         error_msg = "Error: Inference Client not available. Check API token or model name."
-        # Yield updates to both components indicating the error
         yield {
             live_output: gr.update(value=error_msg),
             final_tabs: gr.Tabs(tabs=[gr.TabItem(label="Error", children=[gr.Textbox(value=error_msg)])])
         }
-        return # Stop execution
-
-    # --- Prepare for Streaming ---
-    # Construct the system prompt dynamically
+        return
     full_sys_msg = f"""
 You are a code generation AI. Given a prompt, generate the necessary files for a website using the {backend} backend.
 Always include an index.html file.
@@ -93,94 +62,65 @@ console.log("Hello");
 Ensure the code is complete. NO commentary, NO explanations, NO markdown formatting like backticks (```).
 Start generating the files now.
 """.strip()
-    if system_message: # Append user's system message if provided
+    if system_message:
         full_sys_msg += "\n\n" + system_message
-
     messages = [
         {"role": "system", "content": full_sys_msg},
         {"role": "user", "content": prompt}
     ]
-
     full_raw_response = ""
     error_occurred = False
     error_message = ""
-
-    # Initial state update: Clear previous output and show generating status
     yield {
         live_output: gr.update(value="Generating stream..."),
-        final_tabs: gr.Tabs(tabs=[gr.TabItem(label="Generating...")]) # Indicate loading in tabs
+        final_tabs: gr.Tabs(tabs=[gr.TabItem(label="Generating...")])
     }
-
-    # --- Streaming Loop ---
     try:
-        # Start the streaming call
         stream = client.chat_completion(
             messages,
-            max_tokens=int(max_tokens), # Ensure max_tokens is an integer
+            max_tokens=int(max_tokens),
             stream=True,
             temperature=temperature,
             top_p=top_p
         )
-        # Process each chunk received from the stream
         for chunk in stream:
             token = chunk.choices[0].delta.content
             if token:
                 full_raw_response += token
-                # Yield updates for the live raw output component only
-                # Keep tabs in a 'streaming' state during the stream
                 yield {
                     live_output: gr.update(value=full_raw_response),
-                    # No update needed for final_tabs here, or keep showing streaming state
-                    # final_tabs: gr.Tabs(tabs=[gr.TabItem(label="Streaming...")]) # Optional: update tabs state
                 }
-                # time.sleep(0.01) # Optional: small delay if updates are too fast and causing UI lag
-
     except Exception as e:
-        # Handle errors during the API call or streaming process
         print(f"Error during AI streaming: {e}")
         error_message = f"Error during AI generation: {e}\n\nPartial Response (if any):\n{full_raw_response}"
         error_occurred = True
-        # Update live output with error, prepare error tab
         yield {
             live_output: gr.update(value=error_message),
-            final_tabs: gr.Tabs(tabs=[gr.TabItem(label="Error")]) # Indicate error state in tabs
+            final_tabs: gr.Tabs(tabs=[gr.TabItem(label="Error")])
         }
-
-    # --- Post-Streaming: Parsing and Final Tab Generation ---
     if error_occurred:
-        # If an error happened during stream, create a final error tab
-        final_tabs_update = gr.Tabs(tabs=[
+        final_tabs_update = gr.Tabs(tabs=[
            gr.TabItem(label="Error", children=[gr.Textbox(value=error_message, label="Generation Error", lines=10)])
        ])
    else:
-        # If streaming succeeded, parse the complete raw response
        print("\n--- Final Raw AI Response ---")
        print(full_raw_response)
        print("--------------------------\n")
        files = parse_files(full_raw_response)
-
        if not files:
-            # Handle case where parsing failed or AI gave empty/invalid response
            no_files_msg = "AI finished, but did not return recognizable file content or the response was empty. See raw output above."
            final_tabs_update = gr.Tabs(tabs=[
                gr.TabItem(label="Output", children=[gr.Textbox(value=no_files_msg, label="Result")])
            ])
-            # Update live output as well to make the message clear
            yield { live_output: gr.update(value=full_raw_response + "\n\n" + no_files_msg), final_tabs: final_tabs_update }
-            return # Exit if no files
+            return
-
-        # --- Create Tabs (if files were parsed successfully) ---
        tabs_content = []
        for name, content in files:
            name = name.strip()
            content = content.strip()
-            # Skip if filename or content is empty after stripping
            if not name or not content:
-                print(f"Skipping file with empty name or content: Name='{name}'")
                continue
-
-            # Determine language for syntax highlighting
-            lang = "plaintext" # Default
+            lang = "plaintext"
            if name.endswith((".html", ".htm")): lang = "html"
            elif name.endswith(".css"): lang = "css"
            elif name.endswith(".js"): lang = "javascript"
@@ -190,43 +130,29 @@ Start generating the files now.
            elif name.endswith((".sh", ".bash")): lang = "bash"
            elif name.endswith((".xml", ".xaml", ".svg")): lang = "xml"
            elif name.endswith(".yaml") or name.endswith(".yml"): lang = "yaml"
-
-            # Ensure elem_id is unique and valid (replace problematic characters)
            elem_id = f"tab_{re.sub(r'[^a-zA-Z0-9_-]', '_', name)}"
-
            tab_item = gr.TabItem(label=name, elem_id=elem_id, children=[
-                gr.Code(value=content, language=lang, label=name, interactive=False) # Show code in Code block
+                gr.Code(value=content, language=lang, label=name, interactive=False)
            ])
            tabs_content.append(tab_item)
-
-        # Handle case where parsing found files, but they were all filtered out (empty name/content)
        if not tabs_content:
            final_tabs_update = gr.Tabs(tabs=[gr.TabItem(label="Output", children=[gr.Textbox(value="No valid files generated after filtering.", label="Result")])])
        else:
-            final_tabs_update = gr.Tabs(tabs=tabs_content) # Create the final Tabs component with content
+            final_tabs_update = gr.Tabs(tabs=tabs_content)
-
-    # --- Final Update ---
-    # Yield the final state for both components
-    # Use gr.update for live_output if you only want to set its value without recreating it
-    # Directly return the new final_tabs component
     yield {
-        live_output: gr.update(value=full_raw_response if not error_occurred else error_message), # Show final raw response or error
-        final_tabs: final_tabs_update # Update the final_tabs component completely
+        live_output: gr.update(value=full_raw_response if not error_occurred else error_message),
+        final_tabs: final_tabs_update
     }
 
-
-# --- Gradio UI Definition ---
-with gr.Blocks(css=".gradio-container { max-width: 95% !important; }") as demo: # Use more screen width
+with gr.Blocks(css=".gradio-container { max-width: 95% !important; }") as demo:
     gr.Markdown("## WebGen AI — One Prompt → Full Website Generator")
     gr.Markdown("Generates website code based on your description. Raw output streams live, final files appear in tabs below.")
-
     with gr.Row():
-        # Column for inputs and controls
         with gr.Column(scale=2):
             prompt = gr.Textbox(
                 label="Describe your website",
                 placeholder="E.g., a simple landing page for a coffee shop with sections for menu, about, and contact.",
-                lines=3 # Allow more lines for the prompt
+                lines=3
             )
             backend = gr.Dropdown(
                 ["Static", "Flask", "Node.js"],
@@ -242,14 +168,14 @@ with gr.Blocks(css=".gradio-container { max-width: 95% !important; }") as demo:
             )
             max_tokens = gr.Slider(
                 minimum=256,
-                maximum=4096, # Increased max tokens for complex sites
-                value=2048, # Increased default
+                maximum=4096,
+                value=2048,
                 step=64,
                 label="Max Tokens (Output Length)"
             )
             temperature = gr.Slider(
                 minimum=0.1,
-                maximum=1.5, # Allow slightly higher temperature
+                maximum=1.5,
                 value=0.7,
                 step=0.1,
                 label="Temperature (Creativity)"
@@ -261,34 +187,24 @@ with gr.Blocks(css=".gradio-container { max-width: 95% !important; }") as demo:
                 step=0.05,
                 label="Top-p (Sampling Focus)"
             )
-            generate_button = gr.Button("✨ Generate Code ✨", variant="primary") # Make button primary
+            generate_button = gr.Button("✨ Generate Code ✨", variant="primary")
-
-        # Column for live output
         with gr.Column(scale=3):
             gr.Markdown("#### Live Raw Output Stream")
-            # Component to show the live, unparsed stream - CORRECTED LANGUAGE
             live_output = gr.Code(
                 label="Raw AI Stream",
-                language="plaintext", # Use "plaintext" for generic text
-                lines=20, # Increased lines for visibility
-                interactive=False # Output only
+                language="plaintext",
+                lines=20,
+                interactive=False
             )
-
-    gr.Markdown("---") # Separator
+    gr.Markdown("---")
     gr.Markdown("#### Final Generated Files (Tabs)")
-    # Placeholder for the final structured tabs - will be replaced by the output yield
     final_tabs = gr.Tabs(elem_id="output_tabs")
-
-
-    # Button click action - uses the orchestrator function
     generate_button.click(
-        stream_and_parse_code, # Call the main function that handles streaming and parsing
+        stream_and_parse_code,
         inputs=[prompt, backend, system_message, max_tokens, temperature, top_p],
-        # Outputs dictionary maps function yields to components by variable name
         outputs=[live_output, final_tabs],
-        show_progress="hidden" # Hide default Gradio progress bar as we show live stream
+        show_progress="hidden"
     )
 
 if __name__ == "__main__":
-    # Launch the Gradio app with debug=True for development
     demo.launch(debug=True)
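For reference, the diff relies on Gradio's generator callbacks: the click handler yields dicts keyed by output components, and Gradio applies each yield as a partial update. A minimal sketch of that mechanism with a simplified stand-in UI (not the commit's components). Note that building tabs inline via `gr.Tabs(tabs=...)` and `gr.TabItem(children=...)`, as the code above does, is not the context-manager form Gradio documents, and whether a freshly constructed `gr.Tabs` yielded as an update re-renders its children depends on the Gradio version:

```python
import time
import gradio as gr

with gr.Blocks() as demo:
    btn = gr.Button("Run")
    status = gr.Textbox(label="Status")
    result = gr.Textbox(label="Result")

    # The documented way to declare tabs is the context-manager form:
    # with gr.Tabs():
    #     with gr.TabItem("index.html"):
    #         gr.Code(...)

    def run():
        # Each yielded dict is a partial update keyed by component.
        yield {status: "working...", result: ""}
        time.sleep(1)  # stand-in for the token-streaming loop
        yield {status: "done", result: "hello"}

    # Dict-style yields require the targeted components to be listed in outputs.
    btn.click(run, inputs=None, outputs=[status, result])

if __name__ == "__main__":
    demo.launch()
```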
 