MINEOGO committed on
Commit
38f5ac1
·
verified ·
1 Parent(s): c87b43d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +131 -86
app.py CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  import os
4
  import re
 
5
 
6
  API_TOKEN = os.getenv("HF_TOKEN", None)
7
  MODEL = "Qwen/Qwen2.5-Coder-32B-Instruct"
@@ -12,52 +13,64 @@ try:
12
  except Exception as e:
13
  raise gr.Error(f"Failed to initialize model client for {MODEL}. Error: {e}. Check HF_TOKEN and model availability.")
14
 
15
def parse_code_into_files(raw_response: str) -> dict:
    """
    Split a complete AI response into per-file code blocks.

    The text before the first `.TAB[NAME=<filename>]` separator becomes
    `index.html`; the text between the first and second separators (or to the
    end of the response) becomes the backend file.

    Returns a dict that may contain the keys 'index.html', 'backend_file',
    'backend_filename' and 'backend_language' — each only when the
    corresponding section is non-empty.
    """
    files = {}
    separator_pattern = r'\.TAB\[NAME=([^\]]+)\]\n?'
    separators = list(re.finditer(separator_pattern, raw_response))

    # Everything up to the first separator (or the whole text) is frontend.
    frontend_end = separators[0].start() if separators else len(raw_response)
    frontend = raw_response[:frontend_end].strip()
    if frontend:
        files["index.html"] = frontend

    if not separators:
        return files

    first = separators[0]
    backend_filename = first.group(1).strip()
    # Backend code runs until the second separator, if any, else to the end.
    backend_end = separators[1].start() if len(separators) > 1 else len(raw_response)
    backend_code = raw_response[first.end():backend_end].strip()

    if backend_code:
        files['backend_file'] = backend_code
        files['backend_filename'] = backend_filename
        # Map the filename suffix to a syntax-highlighting language.
        suffix_map = {'.py': 'python', '.js': 'javascript', '.css': 'css'}
        files['backend_language'] = next(
            (lang for ext, lang in suffix_map.items() if backend_filename.endswith(ext)),
            None,
        )

    return files
48
 
 
 
 
 
 
 
 
 
49
  def generate_code(
50
  prompt: str,
51
  backend_choice: str,
52
  max_tokens: int,
53
  temperature: float,
54
  top_p: float,
55
- progress=gr.Progress(track_tqdm=True)
56
  ):
57
- print(f"Generating code for: {prompt[:100]}... | Backend: {backend_choice}")
58
- progress(0, desc="Initializing Request...")
59
 
60
- system_message = (
61
  "You are an AI that generates website code. You MUST ONLY output the raw code, without any conversational text like 'Here is the code' or explanations before or after the code blocks. "
62
  "You MUST NOT wrap the code in markdown fences like ```html, ```python, or ```js. "
63
  "If the user requests 'Static' or the prompt clearly implies only frontend code, generate ONLY the content for the `index.html` file. "
@@ -81,11 +94,17 @@ def generate_code(
81
  ]
82
 
83
  full_response = ""
84
- token_count = 0
85
- est_total_tokens = max_tokens
 
 
 
 
 
 
 
86
 
87
  try:
88
- progress(0.1, desc="Sending Request to Model...")
89
  stream = client.chat_completion(
90
  messages=messages,
91
  max_tokens=max_tokens,
@@ -94,65 +113,86 @@ def generate_code(
94
  top_p=top_p,
95
  )
96
 
97
- progress(0.2, desc="Receiving Stream...")
98
  for message in stream:
99
  token = message.choices[0].delta.content
100
  if isinstance(token, str):
101
  full_response += token
102
- token_count += 1
103
- prog = min(0.2 + (token_count / est_total_tokens) * 0.7, 0.9)
104
- progress(prog, desc="Generating Code...")
105
-
106
-
107
- progress(0.9, desc="Processing Response...")
108
- cleaned_response = full_response.strip()
109
- cleaned_response = re.sub(r"^\s*```[a-z]*\s*\n?", "", cleaned_response)
110
- cleaned_response = re.sub(r"\n?\s*```\s*$", "", cleaned_response)
111
- cleaned_response = re.sub(r"<\s*\|?\s*(user|system|assistant)\s*\|?\s*>", "", cleaned_response, flags=re.IGNORECASE).strip()
112
- common_phrases = [
113
- "Here is the code:", "Okay, here is the code:", "Here's the code:",
114
- "Sure, here is the code you requested:",
115
- ]
116
- temp_response_lower = cleaned_response.lower()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
117
  for phrase in common_phrases:
118
- if temp_response_lower.startswith(phrase.lower()):
119
- cleaned_response = cleaned_response[len(phrase):].lstrip()
120
- temp_response_lower = cleaned_response.lower()
121
 
 
122
  refusal_message = "hey there! am here to create websites for you unfortunately am programmed to not create codes! otherwise I would go on the naughty list :-("
123
- if refusal_message in full_response:
124
- progress(1, desc="Refusal Message Generated")
125
- return gr.update(value=refusal_message, language=None, visible=True), gr.update(value="", visible=False, label="Backend")
126
-
127
- parsed_files = parse_code_into_files(cleaned_response)
128
-
129
- html_code = parsed_files.get("index.html", "")
130
- backend_code = parsed_files.get("backend_file", "")
131
- backend_filename = parsed_files.get("backend_filename", "Backend")
132
- backend_language = parsed_files.get("backend_language", None)
133
-
134
- html_update = gr.update(value=html_code, language='html', visible=True)
135
-
136
- if backend_code:
137
- backend_update = gr.update(value=backend_code, language=backend_language, label=backend_filename, visible=True)
138
- else:
139
- backend_update = gr.update(value="", visible=False, label="Backend")
140
 
141
- progress(1, desc="Done")
142
- return html_update, backend_update
143
 
144
  except Exception as e:
145
- print(f"ERROR during code generation: {e}")
146
- progress(1, desc="Error Occurred")
147
- error_message = f"## Error\n\nFailed to generate or process code.\n**Reason:** {e}"
148
- return gr.update(value=error_message, language=None, visible=True), gr.update(value="", visible=False, label="Backend")
 
149
 
150
 
 
151
  with gr.Blocks(css=".gradio-container { max-width: 90% !important; }") as demo:
152
  gr.Markdown("# ✨ Website Code Generator ✨")
153
  gr.Markdown(
154
- "Describe the website you want. The AI will generate the necessary code.\n"
155
- "If multiple files are generated (e.g., for Flask/Node.js), they will appear in separate tabs below."
156
  )
157
 
158
  with gr.Row():
@@ -171,7 +211,9 @@ with gr.Blocks(css=".gradio-container { max-width: 90% !important; }") as demo:
171
  generate_button = gr.Button("✨ Generate Website Code", variant="primary")
172
 
173
  with gr.Column(scale=3):
174
- with gr.Tabs(elem_id="code-tabs") as code_tabs:
 
 
175
  with gr.Tab("index.html", elem_id="html-tab") as html_tab:
176
  html_code_output = gr.Code(
177
  label="index.html",
@@ -180,14 +222,15 @@ with gr.Blocks(css=".gradio-container { max-width: 90% !important; }") as demo:
180
  interactive=False,
181
  elem_id="html_code",
182
  )
 
183
  with gr.Tab("Backend", elem_id="backend-tab", visible=False) as backend_tab:
184
  backend_code_output = gr.Code(
185
- label="Backend",
186
- language=None,
187
  lines=25,
188
  interactive=False,
189
  elem_id="backend_code",
190
- visible=False
191
  )
192
 
193
  with gr.Accordion("Advanced Settings", open=False):
@@ -201,10 +244,12 @@ with gr.Blocks(css=".gradio-container { max-width: 90% !important; }") as demo:
201
  minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-P"
202
  )
203
 
 
204
  generate_button.click(
205
  fn=generate_code,
206
  inputs=[prompt_input, backend_radio, max_tokens_slider, temperature_slider, top_p_slider],
207
- outputs=[html_code_output, backend_code_output],
 
208
  )
209
 
210
  if __name__ == "__main__":
 
2
  from huggingface_hub import InferenceClient
3
  import os
4
  import re
5
+ # import traceback # Optional for debugging
6
 
7
  API_TOKEN = os.getenv("HF_TOKEN", None)
8
  MODEL = "Qwen/Qwen2.5-Coder-32B-Instruct"
 
13
  except Exception as e:
14
  raise gr.Error(f"Failed to initialize model client for {MODEL}. Error: {e}. Check HF_TOKEN and model availability.")
15
 
16
+ # --- Helper Function to Parse Code during Streaming ---
17
def parse_streaming_code(current_response: str) -> dict:
    """
    Split a (possibly partial) AI response stream into frontend/backend parts.

    Searches for the first `.TAB[NAME=<filename>]` separator. Text before it
    is treated as index.html content; text after it as backend code. While no
    separator has arrived yet, everything is considered HTML and the backend
    tab stays hidden.

    Returns a dict with the keys: 'html_code', 'backend_code',
    'backend_filename', 'backend_language' and 'backend_visible'.
    """
    result = {
        'html_code': '',
        'backend_code': '',
        'backend_filename': 'Backend',  # default tab label
        'backend_language': None,
        'backend_visible': False,       # default visibility
    }

    separator_pattern = r'\.TAB\[NAME=([^\]]+)\]\n?'
    separator = re.search(separator_pattern, current_response)

    if separator is None:
        # No separator seen yet: everything so far belongs to index.html.
        result['html_code'] = current_response.strip()
        return result

    filename = separator.group(1).strip()
    result['html_code'] = current_response[:separator.start()].strip()
    result['backend_code'] = current_response[separator.end():].strip()
    result['backend_filename'] = filename
    result['backend_visible'] = True

    # Map the filename suffix to a syntax-highlighting language.
    suffix_map = {'.py': 'python', '.js': 'javascript', '.css': 'css'}
    for suffix, language in suffix_map.items():
        if filename.endswith(suffix):
            result['backend_language'] = language
            break

    return result
55
 
56
+ # --- Minimal Cleaning for Intermediate Stream Chunks ---
57
def clean_intermediate_stream(text: str) -> str:
    """
    Remove chat-role markers (e.g. <|user|>, <system>) from a stream chunk.

    Deliberately performs no whitespace stripping: aggressive trimming of an
    in-progress stream could cut off partially generated code.
    """
    role_marker = re.compile(r"<\s*\|?\s*(user|system|assistant)\s*\|?\s*>", re.IGNORECASE)
    return role_marker.sub("", text)
62
+
63
+ # --- Core Code Generation Function - Modified for Streaming UI Updates ---
64
  def generate_code(
65
  prompt: str,
66
  backend_choice: str,
67
  max_tokens: int,
68
  temperature: float,
69
  top_p: float,
 
70
  ):
71
+ print(f"Streaming code generation for: {prompt[:100]}... | Backend: {backend_choice}")
 
72
 
73
+ system_message = ( # System message remains the same
74
  "You are an AI that generates website code. You MUST ONLY output the raw code, without any conversational text like 'Here is the code' or explanations before or after the code blocks. "
75
  "You MUST NOT wrap the code in markdown fences like ```html, ```python, or ```js. "
76
  "If the user requests 'Static' or the prompt clearly implies only frontend code, generate ONLY the content for the `index.html` file. "
 
94
  ]
95
 
96
  full_response = ""
97
+ # Initialize state for UI updates
98
+ current_html = ""
99
+ current_backend = ""
100
+ current_backend_label = "Backend"
101
+ current_backend_lang = None
102
+ is_backend_visible = False
103
+
104
+ # Initial clear of outputs
105
+ yield gr.update(value="", visible=True), gr.update(visible=False), gr.update(value="", visible=False)
106
 
107
  try:
 
108
  stream = client.chat_completion(
109
  messages=messages,
110
  max_tokens=max_tokens,
 
113
  top_p=top_p,
114
  )
115
 
 
116
  for message in stream:
117
  token = message.choices[0].delta.content
118
  if isinstance(token, str):
119
  full_response += token
120
+ # Clean intermediate response minimally
121
+ cleaned_response_chunk = clean_intermediate_stream(full_response)
122
+
123
+ # Parse the *entire accumulated* cleaned response on each iteration
124
+ parsed_state = parse_streaming_code(cleaned_response_chunk)
125
+
126
+ # Update state variables
127
+ current_html = parsed_state['html_code']
128
+ current_backend = parsed_state['backend_code']
129
+ current_backend_label = parsed_state['backend_filename']
130
+ current_backend_lang = parsed_state['backend_language']
131
+ is_backend_visible = parsed_state['backend_visible'] # This determines visibility
132
+
133
+ # Prepare Gradio updates based on the *current* parsed state
134
+ html_update = gr.update(value=current_html)
135
+ # Update the backend tab's visibility
136
+ tab_update = gr.update(visible=is_backend_visible)
137
+ # Update the backend code block's content, label, language, and visibility
138
+ backend_code_update = gr.update(
139
+ value=current_backend,
140
+ label=current_backend_label,
141
+ language=current_backend_lang,
142
+ visible=is_backend_visible # Make code block visible *if* tab is visible
143
+ )
144
+
145
+ # Yield updates for html_code, backend_tab, backend_code
146
+ yield html_update, tab_update, backend_code_update
147
+
148
+ # --- Final Clean and Update after Stream ---
149
+ # Ensure the final state is clean and fully parsed
150
+ final_cleaned_response = full_response.strip()
151
+ # Remove fences/phrases missed during stream (optional but good practice)
152
+ final_cleaned_response = re.sub(r"^\s*```[a-z]*\s*\n?", "", final_cleaned_response)
153
+ final_cleaned_response = re.sub(r"\n?\s*```\s*$", "", final_cleaned_response)
154
+ common_phrases = ["Here is the code:", "Okay, here is the code:", "Here's the code:", "Sure, here is the code you requested:"]
155
+ temp_lower = final_cleaned_response.lower()
156
  for phrase in common_phrases:
157
+ if temp_lower.startswith(phrase.lower()):
158
+ final_cleaned_response = final_cleaned_response[len(phrase):].lstrip()
159
+ temp_lower = final_cleaned_response.lower()
160
 
161
+ # Check for refusal message in the final response
162
  refusal_message = "hey there! am here to create websites for you unfortunately am programmed to not create codes! otherwise I would go on the naughty list :-("
163
+ if refusal_message in final_cleaned_response:
164
+ yield gr.update(value=refusal_message), gr.update(visible=False), gr.update(value="", visible=False)
165
+ return # Stop processing
166
+
167
+ # Final parse
168
+ final_parsed_state = parse_streaming_code(final_cleaned_response)
169
+
170
+ # Final updates to ensure everything is correct
171
+ final_html_update = gr.update(value=final_parsed_state['html_code'])
172
+ final_tab_update = gr.update(visible=final_parsed_state['backend_visible'])
173
+ final_backend_code_update = gr.update(
174
+ value=final_parsed_state['backend_code'],
175
+ label=final_parsed_state['backend_filename'],
176
+ language=final_parsed_state['backend_language'],
177
+ visible=final_parsed_state['backend_visible']
178
+ )
179
+ yield final_html_update, final_tab_update, final_backend_code_update
180
 
 
 
181
 
182
  except Exception as e:
183
+ print(f"ERROR during code generation stream: {e}")
184
+ # traceback.print_exc() # Uncomment for detailed traceback
185
+ error_message = f"## Error\n\nFailed during streaming.\n**Reason:** {e}"
186
+ # Show error in HTML block, hide backend tab and code
187
+ yield gr.update(value=error_message), gr.update(visible=False), gr.update(value="", visible=False)
188
 
189
 
190
+ # --- Build Gradio Interface ---
191
  with gr.Blocks(css=".gradio-container { max-width: 90% !important; }") as demo:
192
  gr.Markdown("# ✨ Website Code Generator ✨")
193
  gr.Markdown(
194
+ "Describe the website you want. See code generated live.\n"
195
+ "If backend code is generated, a second tab will appear."
196
  )
197
 
198
  with gr.Row():
 
211
  generate_button = gr.Button("✨ Generate Website Code", variant="primary")
212
 
213
  with gr.Column(scale=3):
214
+ # Define Tabs structure
215
+ with gr.Tabs(elem_id="code-tabs"):
216
+ # Tab 1: Always present for HTML
217
  with gr.Tab("index.html", elem_id="html-tab") as html_tab:
218
  html_code_output = gr.Code(
219
  label="index.html",
 
222
  interactive=False,
223
  elem_id="html_code",
224
  )
225
+ # Tab 2: Backend - defined but starts hidden
226
  with gr.Tab("Backend", elem_id="backend-tab", visible=False) as backend_tab:
227
  backend_code_output = gr.Code(
228
+ label="Backend", # Label updated dynamically if tab becomes visible
229
+ language=None, # Language updated dynamically
230
  lines=25,
231
  interactive=False,
232
  elem_id="backend_code",
233
+ visible=False # Code block also starts hidden
234
  )
235
 
236
  with gr.Accordion("Advanced Settings", open=False):
 
244
  minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-P"
245
  )
246
 
247
+ # Click function now targets html_code_output, backend_tab, and backend_code_output
248
  generate_button.click(
249
  fn=generate_code,
250
  inputs=[prompt_input, backend_radio, max_tokens_slider, temperature_slider, top_p_slider],
251
+ # Outputs MUST match the number and order of updates yielded by the function
252
+ outputs=[html_code_output, backend_tab, backend_code_output],
253
  )
254
 
255
  if __name__ == "__main__":