MINEOGO committed on
Commit
58a5e73
·
verified ·
1 Parent(s): cd13883

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +155 -132
app.py CHANGED
@@ -1,51 +1,39 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  import re
4
- import os # Good practice to import os if needed, though not strictly used here yet
5
 
6
  # --- Hugging Face Token (Optional but Recommended) ---
7
- # It's better to use a token, especially for private models or higher rate limits
8
  # from huggingface_hub import login
9
- # login("YOUR_HUGGINGFACE_TOKEN") # Replace with your actual token or set HF_TOKEN env var
10
 
11
  # --- Inference Client ---
12
- # Consider adding error handling for client initialization if needed
13
  try:
 
 
14
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
15
  except Exception as e:
16
  print(f"Error initializing InferenceClient: {e}")
17
- # Optionally, raise the exception or handle it gracefully in the UI
18
- # For now, we'll let it proceed and potentially fail later if client is None
19
  client = None
20
 
21
- # --- Parsing Function ---
22
  def parse_files(raw_response):
23
  """
24
  Parses filenames and code blocks from the raw AI output.
25
- Assumes format:
26
- filename1.ext
27
- ```lang # Optional code block marker
28
- code for file1
29
- ``` # Optional code block marker
30
- filename2.ext
31
- code for file2
32
- ...
33
  """
34
  if not raw_response:
35
  return []
36
 
37
- # Improved pattern to handle optional code blocks and leading/trailing whitespace
38
- # It looks for a filename line followed by content until the next filename line or end of string.
39
  pattern = re.compile(
40
  r"^\s*([\w\-.\/\\]+\.\w+)\s*\n" # Filename line (must have an extension)
41
  r"(.*?)" # Capture content (non-greedy)
42
  r"(?=\n\s*[\w\-.\/\\]+\.\w+\s*\n|\Z)", # Lookahead for next filename or end of string
43
  re.DOTALL | re.MULTILINE
44
  )
45
-
46
  files = pattern.findall(raw_response)
47
 
48
- # Clean up content: remove potential code block markers and extra whitespace
49
  cleaned_files = []
50
  for name, content in files:
51
  # Remove common code block markers (``` optionally followed by lang)
@@ -53,34 +41,35 @@ def parse_files(raw_response):
53
  content_cleaned = re.sub(r"\n?```\s*$", "", content_cleaned, flags=re.MULTILINE)
54
  cleaned_files.append((name.strip(), content_cleaned.strip()))
55
 
56
- # Handle case where the AI might just output code without filenames
57
  if not cleaned_files and raw_response.strip():
58
- # Basic check if it looks like code (e.g., contains common HTML/CSS/JS chars)
59
  if any(c in raw_response for c in ['<','>','{','}',';','(',')']):
60
- # Default to index.html if no files parsed but content exists
61
  print("Warning: No filenames found, defaulting to index.html")
62
- lang = "html" # Guess language, default to html
63
- if "{" in raw_response and "}" in raw_response and ":" in raw_response:
64
- lang = "css"
65
- elif "function" in raw_response or "const" in raw_response or "let" in raw_response:
66
- lang = "javascript"
67
- # Determine a default filename based on guessed language
68
  default_filename = "index.html"
69
  if lang == "css": default_filename = "style.css"
70
  elif lang == "javascript": default_filename = "script.js"
71
-
72
  cleaned_files.append((default_filename, raw_response.strip()))
73
 
74
-
75
  return cleaned_files
76
 
77
- # --- Code Generation Function ---
78
- def generate_code(prompt, backend, system_message, max_tokens, temperature, top_p):
79
- """Generates code using the InferenceClient."""
 
 
 
80
  if not client:
81
- # Return an error structure if client failed to initialize
82
- return "Error: Inference Client not available.", []
83
-
 
 
 
 
 
84
  full_sys_msg = f"""
85
  You are a code generation AI. Given a prompt, generate the necessary files for a website using the {backend} backend.
86
  Always include an index.html file.
@@ -89,124 +78,158 @@ Each file must start with its filename on a new line. Example:
89
 
90
  index.html
91
  <!DOCTYPE html>
92
- <html>
93
- <head><title>My Site</title></head>
94
- <body><h1>Hello</h1></body>
95
- </html>
96
 
97
  style.css
98
- body {{
99
- font-family: sans-serif;
100
- }}
101
-
102
- script.js
103
- console.log('Hello World!');
104
 
105
- Ensure the code is complete and functional for each file. NO commentary, NO explanations, NO markdown formatting like backticks (```).
106
  Start generating the files now.
107
- """.strip()
108
 
109
  messages = [
110
- {"role": "system", "content": full_sys_msg + ("\n" + system_message if system_message else "")},
111
  {"role": "user", "content": prompt}
112
  ]
113
 
 
 
 
 
 
 
 
 
 
 
 
 
114
  try:
115
- response = client.chat_completion(
116
- messages=messages,
117
- max_tokens=int(max_tokens), # Ensure max_tokens is int
 
118
  temperature=temperature,
119
- top_p=top_p,
120
- stream=False # Ensure streaming is off for this logic
121
  )
122
- raw = response.choices[0].message.content
123
- print("\n--- Raw AI Response ---")
124
- print(raw)
125
- print("----------------------\n")
126
- files = parse_files(raw)
127
- return None, files # Return None for error, and the list of files
 
 
 
 
 
128
 
129
  except Exception as e:
130
- print(f"Error during AI generation: {e}")
131
- return f"Error during AI generation: {e}", [] # Return error message
132
-
133
- # --- Gradio Event Handler ---
134
- def on_generate(prompt, backend, system_message, max_tokens, temperature, top_p):
135
- """Callback function for the generate button."""
136
- error_msg, files = generate_code(prompt, backend, system_message, max_tokens, temperature, top_p)
137
-
138
- if error_msg:
139
- # Display error in a single tab if generation failed
140
- error_tab = gr.TabItem(label="Error", children=[gr.Textbox(value=error_msg, label="Generation Error")])
141
- return gr.Tabs(tabs=[error_tab]) # Return a Tabs component with the error tab
142
-
143
- if not files:
144
- # Display message if no files were parsed
145
- no_files_tab = gr.TabItem(label="Output", children=[gr.Textbox(value="AI did not return recognizable file content. Check raw output in console.", label="Result")])
146
- return gr.Tabs(tabs=[no_files_tab]) # Return a Tabs component with this message
147
-
148
- tabs = []
149
- for name, content in files:
150
- name = name.strip()
151
- content = content.strip()
152
- if not name or not content: # Skip empty names or content
153
- print(f"Skipping file with empty name or content: Name='{name}'")
154
- continue
155
-
156
- # Determine language for syntax highlighting
157
- lang = "text" # Default
158
- if name.endswith(".html") or name.endswith(".htm"):
159
- lang = "html"
160
- elif name.endswith(".css"):
161
- lang = "css"
162
- elif name.endswith(".js"):
163
- lang = "javascript"
164
- elif name.endswith(".py"):
165
- lang = "python"
166
- elif name.endswith(".json"):
167
- lang = "json"
168
- elif name.endswith(".md"):
169
- lang = "markdown"
170
- elif name.endswith(".sh") or name.endswith(".bash"):
171
- lang = "bash"
172
-
173
- tab_item = gr.TabItem(label=name, elem_id=f"tab_{name.replace('.', '_')}", children=[ # Ensure unique elem_id
174
- gr.Code(value=content, language=lang, label=name) # Add label to Code block
175
  ])
176
- tabs.append(tab_item)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
177
 
178
- # *** The Key Fix ***
179
- # Return a new gr.Tabs component instance containing the generated TabItems
180
- return gr.Tabs(tabs=tabs)
181
 
182
  # --- Gradio UI Definition ---
183
- with gr.Blocks() as demo:
184
- gr.Markdown("### Website Generator (Static / Flask / Node.js)")
185
- gr.Markdown("Describe the website you want to create. The AI will generate the necessary files.")
186
 
187
  with gr.Row():
188
- prompt = gr.Textbox(label="Describe your website", placeholder="E.g., a simple portfolio site with a contact form", scale=3)
189
- backend = gr.Dropdown(["Static", "Flask", "Node.js"], value="Static", label="Backend Technology", scale=1)
190
-
191
- with gr.Accordion("Advanced Options", open=False):
192
- system_message = gr.Textbox(label="Extra instructions for the AI (System Message)", placeholder="Optional: e.g., 'Use Bootstrap 5', 'Prefer functional components in React'", value="")
193
- max_tokens = gr.Slider(minimum=256, maximum=4096, value=1536, step=64, label="Max Tokens (Length)") # Increased max
194
- temperature = gr.Slider(minimum=0.1, maximum=1.5, value=0.7, step=0.1, label="Temperature (Creativity)")
195
- top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (Sampling)")
196
-
197
- generate_button = gr.Button("✨ Generate Code ✨", variant="primary")
198
-
199
- gr.Markdown("#### Generated Files")
200
- # Define the Tabs component placeholder. It will be replaced by the output of on_generate.
201
- out_tabs = gr.Tabs(elem_id="output_tabs")
202
-
203
- # Button click action
 
 
 
 
 
 
204
  generate_button.click(
205
- on_generate,
206
  inputs=[prompt, backend, system_message, max_tokens, temperature, top_p],
207
- outputs=[out_tabs], # Output the new Tabs component to replace the placeholder
208
- show_progress="full" # Show progress during generation
 
209
  )
210
 
211
  if __name__ == "__main__":
212
- demo.launch(debug=True) # Use debug=True for more detailed error messages in console
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  import re
4
+ import time # For potential brief pauses if needed
5
 
6
  # --- Hugging Face Token (Optional but Recommended) ---
 
7
  # from huggingface_hub import login
8
+ # login("YOUR_HUGGINGFACE_TOKEN")
9
 
10
  # --- Inference Client ---
 
11
# Global handle used by stream_and_parse_code(); None signals that
# initialization failed and the UI should surface an error instead.
try:
    # If the model alias stops resolving, the full inference URL can be
    # passed instead:
    # client = InferenceClient(model="https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta")
    # timeout=120 gives long generations room to finish; passing it to the
    # constructor (rather than assigning client.timeout afterwards) keeps the
    # object fully configured even if later statements were to fail.
    client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", timeout=120)
except Exception as e:  # broad on purpose: best-effort startup, UI reports failure
    print(f"Error initializing InferenceClient: {e}")
    client = None
19
 
20
# --- Parsing Function ---
def parse_files(raw_response):
    """Parse "filename then code" sections out of the raw AI output.

    Expected format: each file starts with its bare filename (a token with an
    extension) on its own line, followed by that file's code, until the next
    filename line or the end of the string.

    Args:
        raw_response: Raw model output; may be None or empty.

    Returns:
        List of (filename, code) tuples; empty when nothing was recognized.
        If no filename lines are found but the text still looks like code, a
        single default file (index.html / style.css / script.js) is returned.
    """
    if not raw_response:
        return []

    # A filename line, then non-greedy content up to the next filename line
    # or the end of the string.
    pattern = re.compile(
        r"^\s*([\w\-.\/\\]+\.\w+)\s*\n"         # filename line (must have an extension)
        r"(.*?)"                                # file content (non-greedy)
        r"(?=\n\s*[\w\-.\/\\]+\.\w+\s*\n|\Z)",  # lookahead: next filename or end
        re.DOTALL | re.MULTILINE,
    )
    files = pattern.findall(raw_response)

    cleaned_files = []
    for name, content in files:
        # Strip stray markdown code fences (``` optionally followed by a
        # language tag) that the model may emit despite instructions.
        content_cleaned = re.sub(r"^\s*```[\w+-]*\s*\n?", "", content)
        content_cleaned = re.sub(r"\n?```\s*$", "", content_cleaned, flags=re.MULTILINE)
        cleaned_files.append((name.strip(), content_cleaned.strip()))

    # Fallback: no filenames recognized, but the text still looks like code.
    if not cleaned_files and raw_response.strip():
        if any(c in raw_response for c in ['<', '>', '{', '}', ';', '(', ')']):
            print("Warning: No filenames found, defaulting to index.html")
            # Crude language sniffing (plain substring checks) just to pick
            # a sensible default filename.
            lang = "html"
            if "{" in raw_response and "}" in raw_response and ":" in raw_response:
                lang = "css"
            elif "function" in raw_response or "const" in raw_response or "let" in raw_response:
                lang = "javascript"
            default_filename = "index.html"
            if lang == "css":
                default_filename = "style.css"
            elif lang == "javascript":
                default_filename = "script.js"
            cleaned_files.append((default_filename, raw_response.strip()))

    return cleaned_files
57
 
58
# --- Streaming and Parsing Orchestrator ---
def stream_and_parse_code(prompt, backend, system_message, max_tokens, temperature, top_p):
    """Stream raw model output to the live view, then publish parsed file tabs.

    Generator callback for the Generate button. Yields dict updates keyed by
    the module-level Gradio components ``live_output`` (raw text stream) and
    ``final_tabs`` (per-file code tabs); those components are defined later in
    the ``gr.Blocks`` context, but exist by the time this callback runs.

    Args:
        prompt: User description of the website to generate.
        backend: Backend technology name interpolated into the system prompt
            (e.g. "Static", "Flask", "Node.js").
        system_message: Optional extra instructions appended to the system prompt.
        max_tokens: Maximum tokens to generate (cast to int for the API).
        temperature: Sampling temperature forwarded to ``chat_completion``.
        top_p: Nucleus-sampling parameter forwarded to ``chat_completion``.
    """
    if not client:
        # Client failed to initialize at import time; surface the error in both views.
        error_msg = "Error: Inference Client not available."
        yield {
            live_output: error_msg,
            final_tabs: gr.Tabs(tabs=[gr.TabItem(label="Error", children=[gr.Textbox(value=error_msg)])]),
        }
        return

    full_sys_msg = f"""
You are a code generation AI. Given a prompt, generate the necessary files for a website using the {backend} backend.
Always include an index.html file.
Each file must start with its filename on a new line. Example:

index.html
<!DOCTYPE html>
<html></html>

style.css
body {{}}

Ensure the code is complete. NO commentary, NO explanations, NO markdown formatting like backticks (```).
Start generating the files now.
""".strip() + ("\n" + system_message if system_message else "")

    messages = [
        {"role": "system", "content": full_sys_msg},
        {"role": "user", "content": prompt},
    ]

    full_raw_response = ""
    error_occurred = False
    error_message = ""

    # Initial state: tell the user streaming is about to start.
    yield {
        live_output: "Generating stream...",
        final_tabs: gr.Tabs(tabs=[gr.TabItem(label="Generating...")]),
    }

    # --- Streaming loop ---
    try:
        stream = client.chat_completion(
            messages,
            max_tokens=int(max_tokens),  # sliders deliver floats; the API wants int
            stream=True,
            temperature=temperature,
            top_p=top_p,
        )
        for chunk in stream:
            token = chunk.choices[0].delta.content
            if token:  # delta.content can be None on role/finish chunks
                full_raw_response += token
                # Push the growing raw text; keep tabs in a placeholder state.
                yield {
                    live_output: full_raw_response,
                    final_tabs: gr.Tabs(tabs=[gr.TabItem(label="Streaming...")]),
                }
    except Exception as e:
        print(f"Error during AI streaming: {e}")
        error_message = f"Error during AI generation: {e}\n\nPartial Response:\n{full_raw_response}"
        error_occurred = True
        yield {
            live_output: error_message,
            final_tabs: gr.Tabs(tabs=[gr.TabItem(label="Error")]),
        }

    # --- Post-streaming: parse the full response and build the final tabs ---
    if error_occurred:
        final_tabs_update = gr.Tabs(tabs=[
            gr.TabItem(label="Error", children=[gr.Textbox(value=error_message, label="Generation Error")]),
        ])
    else:
        print("\n--- Final Raw AI Response ---")
        print(full_raw_response)
        print("--------------------------\n")
        files = parse_files(full_raw_response)

        if not files:
            # Parsing failed or the AI gave an empty/unrecognizable response.
            no_files_msg = "AI finished, but did not return recognizable file content. See raw output above."
            final_tabs_update = gr.Tabs(tabs=[
                gr.TabItem(label="Output", children=[gr.Textbox(value=no_files_msg, label="Result")]),
            ])
            yield {live_output: full_raw_response + "\n\n" + no_files_msg, final_tabs: final_tabs_update}
            return

        # File extension -> gr.Code language for syntax highlighting.
        ext_to_lang = {
            "html": "html", "htm": "html", "css": "css", "js": "javascript",
            "py": "python", "json": "json", "md": "markdown",
            "sh": "bash", "bash": "bash",
        }
        tabs_content = []
        for name, content in files:
            name = name.strip()
            content = content.strip()
            if not name or not content:
                print(f"Skipping file with empty name or content: Name='{name}'")
                continue
            lang = ext_to_lang.get(name.rsplit(".", 1)[-1].lower(), "text")
            # elem_id must be unique and id-safe, so dots/slashes become underscores.
            tab_item = gr.TabItem(label=name, elem_id=f"tab_{name.replace('.', '_').replace('/', '_')}", children=[
                gr.Code(value=content, language=lang, label=name),
            ])
            tabs_content.append(tab_item)

        if not tabs_content:  # files parsed, but all were filtered out as empty
            final_tabs_update = gr.Tabs(tabs=[gr.TabItem(label="Output", children=[gr.Textbox(value="No valid files generated.", label="Result")])])
        else:
            final_tabs_update = gr.Tabs(tabs=tabs_content)

    # --- Final update for both components ---
    yield {
        live_output: error_message if error_occurred else full_raw_response,
        final_tabs: final_tabs_update,
    }
196
 
 
 
 
197
 
198
# --- Gradio UI Definition ---
# Layout: inputs in the left column, live raw stream in the right column,
# parsed file tabs across the bottom. Widened container so code panes get room.
with gr.Blocks(css=".gradio-container { max-width: 95% !important; }") as demo:
    gr.Markdown("## WebGen AI One Prompt Full Website Generator")
    gr.Markdown("Generates website code based on your description. Raw output streams live, final files appear in tabs below.")

    with gr.Row():
        with gr.Column(scale=2):
            # Prompt and generation controls.
            prompt = gr.Textbox(label="Describe your website", placeholder="E.g., a simple portfolio site with a dark mode toggle", lines=3)
            backend = gr.Dropdown(["Static", "Flask", "Node.js"], value="Static", label="Backend Technology")
            with gr.Accordion("Advanced Options", open=False):
                system_message = gr.Textbox(label="Extra instructions for the AI (System Message)", placeholder="Optional: e.g., 'Use Bootstrap 5', 'Prefer functional components in React'", value="")
                max_tokens = gr.Slider(minimum=256, maximum=4096, value=1536, step=64, label="Max Tokens (Length)")
                temperature = gr.Slider(minimum=0.1, maximum=1.5, value=0.7, step=0.1, label="Temperature (Creativity)")
                top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (Sampling)")
            generate_button = gr.Button("✨ Generate Code ✨", variant="primary")

        with gr.Column(scale=3):
            gr.Markdown("#### Live Raw Output Stream")
            # Read-only pane fed token-by-token by stream_and_parse_code().
            live_output = gr.Code(label="Raw AI Stream", language="text", lines=15, interactive=False)

    gr.Markdown("---")
    gr.Markdown("#### Final Generated Files (Tabs)")
    # Placeholder component, replaced with the generated per-file tabs.
    final_tabs = gr.Tabs(elem_id="output_tabs")

    generate_button.click(
        stream_and_parse_code,  # generator: streams live text, then final tabs
        inputs=[prompt, backend, system_message, max_tokens, temperature, top_p],
        outputs=[live_output, final_tabs],
        show_progress="hidden",  # the live stream itself shows progress
    )

if __name__ == "__main__":
    demo.launch(debug=True)