MINEOGO committed
Commit 4d76afc · verified · 1 Parent(s): e8a0246

Update app.py

Files changed (1)
  1. app.py +99 -161
app.py CHANGED
@@ -3,30 +3,59 @@ from huggingface_hub import InferenceClient
 import os
 import re
 
+# --- Configuration ---
 API_TOKEN = os.getenv("HF_TOKEN", None)
-MODEL = "Qwen/Qwen2.5-Coder-32B-Instruct"
+# Using a model known for better instruction following might be beneficial
+MODEL = "Qwen/Qwen2.5-Coder-32B-Instruct"  # Kept your original choice, but consider testing others if needed
 
+# --- Initialize Inference Client ---
 try:
+    print(f"Initializing Inference Client for model: {MODEL}")
     client = InferenceClient(model=MODEL, token=API_TOKEN) if API_TOKEN else InferenceClient(model=MODEL)
 except Exception as e:
-    raise gr.Error(f"Failed to initialize model client. Error: {e}")
-
-def generate_code(prompt: str, backend_choice: str, max_tokens: int, temperature: float, top_p: float):
+    # Provide a more specific error message if possible
+    raise gr.Error(f"Failed to initialize model client for {MODEL}. Error: {e}. Check HF_TOKEN and model availability.")
+
+# --- Core Code Generation Function ---
+def generate_code(
+    prompt: str,
+    backend_choice: str,
+    max_tokens: int,
+    temperature: float,
+    top_p: float,
+):
+    print(f"Generating code for: {prompt[:100]}... | Backend: {backend_choice}")
+
+    # --- Dynamically Build System Message ---
+    # Modified to include the specific formatting rules
     system_message = (
-        "You are an AI assistant programmed to generate website codes only. "
-        "You must not use triple backticks (```html, ```python, etc.). "
-        "If multiple files are needed, separate them clearly using:\n"
-        "TAB.NAME={filename}\n"
-        "Only generate code. No explanations, no phrases like 'Here is the code'. "
-        "If user asks non-website code, reply:\n"
-        "'hey there! am here to create websites for you unfortunately am programmed to not create codes! otherwise I would go on the naughty list :-('."
+        "You are an AI that generates website code. You MUST ONLY output the raw code, without any conversational text like 'Here is the code' or explanations before or after the code blocks. "
+        "You MUST NOT wrap the code in markdown fences like ```html, ```python, or ```js. "  # Explicit instruction to omit fences
+        "The user can select a backend hint (Static, Flask, Node.js). "
+        "If the user requests 'Static' or the prompt clearly implies only frontend code, generate ONLY the content for the `index.html` file. "
+        "If the user requests 'Flask' or 'Node.js' and the prompt requires backend logic (like handling forms, APIs, databases), you MUST generate both the `index.html` content AND the corresponding main backend file content (e.g., `app.py` for Flask, `server.js` or `app.js` for Node.js). "
+        "When generating multiple files, you MUST separate them EXACTLY as follows: "
+        "1. Output the complete code for the first file (e.g., `index.html`). "
+        "2. On a new line immediately after the first file's code, add the separator '.TAB[NAME=filename.ext]' (e.g., '.TAB[NAME=app.py]' or '.TAB[NAME=server.js]'). "  # Specific separator format
+        "3. On the next line, immediately start the code for the second file. "
+        "Generate only the necessary files (usually index.html and potentially one backend file). "
+        "The generated website code must be SFW (safe for work) and have minimal errors. "
+        "Only include comments where user modification is strictly required (e.g., API keys, database paths). Avoid explanatory comments. "
+        "If the user asks you to create code that is NOT for a website, you MUST respond ONLY with the exact phrase: "  # Specific refusal phrase
+        "'hey there! am here to create websites for you unfortunately am programmed to not create codes! otherwise I would go on the naughty list :-('"
     )
+
+    # User prompt remains the same, passing the raw request and backend choice
     user_prompt = f"USER_PROMPT = {prompt}\nUSER_BACKEND = {backend_choice}"
+
     messages = [
         {"role": "system", "content": system_message},
         {"role": "user", "content": user_prompt}
     ]
+
+    response_stream = ""
     full_response = ""
+
     try:
         stream = client.chat_completion(
             messages=messages,
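The new system prompt moves file splitting out of app code and into the prompt contract: files are separated by a `.TAB[NAME=filename.ext]` line, and (unlike the `split_files` helper removed in the next hunk) nothing in app.py parses it anymore. A minimal sketch of how a consumer of the raw output could recover the files, assuming the model emits the separator on its own line as instructed (hypothetical helper, not part of this commit):

import re

def split_generated_files(raw: str) -> dict:
    # Split on '.TAB[NAME=filename.ext]' separator lines, per the new system prompt.
    parts = re.split(r"^\.TAB\[NAME=(.+?)\]\s*$", raw, flags=re.MULTILINE)
    # Assumption: the chunk before the first separator is index.html, per the prompt's file order.
    files = {"index.html": parts[0].strip()}
    for name, body in zip(parts[1::2], parts[2::2]):
        files[name.strip()] = body.strip()
    return files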
@@ -38,205 +67,114 @@ def generate_code(prompt: str, backend_choice: str, max_tokens: int, temperature
         for message in stream:
             token = message.choices[0].delta.content
             if isinstance(token, str):
+                response_stream += token
                 full_response += token
+                # Yield intermediate stream for responsiveness
+                yield response_stream
 
+        # --- Post-processing (Refined) ---
+        # Primarily focus on stripping whitespace and potential leftover model markers.
+        # The fence removal is kept as a fallback in case the model doesn't fully comply.
         cleaned_response = full_response.strip()
+
+        # Fallback fence removal (hopefully not needed often with the new prompt)
         cleaned_response = re.sub(r"^\s*```[a-z]*\s*\n?", "", cleaned_response)
         cleaned_response = re.sub(r"\n?\s*```\s*$", "", cleaned_response)
-        cleaned_response = re.sub(r"<\s*\|?\s*(user|assistant|system|endoftext)\s*\|?\s*>", "", cleaned_response, flags=re.IGNORECASE)
-        cleaned_response = cleaned_response.replace("<|im_end|>", "").replace("<|im_start|>", "").strip()
 
+        # Remove potential chat markers (like <|user|>, <|assistant|>)
+        cleaned_response = re.sub(r"<\s*\|?\s*(user|system|assistant)\s*\|?\s*>", "", cleaned_response, flags=re.IGNORECASE).strip()
+
+        # Remove common conversational phrases if they somehow slip through despite the prompt
         common_phrases = [
             "Here is the code:", "Okay, here is the code:", "Here's the code:",
             "Sure, here is the code you requested:", "Let me know if you need anything else.",
-            "Here is the website code you requested:", "Here are the files for your website:",
-            "Okay, here are the files:"
+            "```html", "```python", "```javascript", "```",  # Adding fences here just in case they appear standalone
         ]
-        lower_response = cleaned_response.lower()
+        # Use lower() for case-insensitive matching of leading phrases
+        temp_response_lower = cleaned_response.lower()
         for phrase in common_phrases:
-            if lower_response.startswith(phrase.lower()):
+            if temp_response_lower.startswith(phrase.lower()):
+                # Use original case length for slicing
                 cleaned_response = cleaned_response[len(phrase):].lstrip()
-                lower_response = cleaned_response.lower()
+                temp_response_lower = cleaned_response.lower()  # Update lower version after stripping
 
-        if not cleaned_response:
-            return "Error: Empty response from model after cleaning."
+        # Ensure the specific refusal message isn't accidentally cleaned
+        refusal_message = "hey there! am here to create websites for you unfortunately am programmed to not create codes! otherwise I would go on the naughty list :-("
+        if refusal_message in full_response:  # Check if the refusal message was generated
+            yield refusal_message  # Yield the exact refusal message
+        else:
+            yield cleaned_response  # Yield the cleaned code
 
-        return cleaned_response
     except Exception as e:
-        return f"## Error\n\nFailed to generate code.\n**Reason:** {e}"
+        # Log the full error for debugging on the server side
+        print(f"ERROR during code generation: {e}")
+        # Provide a user-friendly error message
+        yield f"## Error\n\nFailed to generate code.\n**Reason:** An unexpected error occurred. Please check the console logs or try again later."
+        # Consider raising a gr.Error for critical failures if preferred
+        # raise gr.Error(f"Code generation failed: {e}")
-
-def split_files(full_code_text):
-    file_blocks = []
-    splits = re.split(r'(TAB\.NAME=\{.+?\})', full_code_text)
-    initial_content = splits[0].strip()
-
-    if len(splits) == 1:
-        if initial_content:
-            default_name = "index.html"
-            if "def " in initial_content or "import " in initial_content: default_name = "app.py"
-            elif "function " in initial_content or "const " in initial_content or "let " in initial_content: default_name = "script.js"
-            elif "<!DOCTYPE html>" in initial_content or "<html" in initial_content: default_name = "index.html"
-            elif "@app.route" in initial_content: default_name = "app.py"
-            elif "require(" in initial_content or "module.exports" in initial_content: default_name = "server.js"
-            elif "<?php" in initial_content: default_name = "index.php"
-            elif "package main" in initial_content: default_name = "main.go"
-
-            file_blocks.append((default_name, initial_content))
-    else:
-        for i in range(1, len(splits), 2):
-            marker = splits[i]
-            content = splits[i+1].strip() if (i+1) < len(splits) else ""
-            filename_match = re.search(r'TAB\.NAME=\{(.+?)\}', marker)
-            if filename_match:
-                filename = filename_match.group(1).strip()
-                if content:
-                    file_blocks.append((filename, content))
-            elif i == 1 and initial_content:  # Handle content before the first explicit marker
-                file_blocks.append(("file_0.txt", initial_content))
-                if content:  # Add the content after the first marker if it exists
-                    filename_match_fallback = re.search(r'TAB\.NAME=\{(.+?)\}', marker)
-                    if filename_match_fallback:
-                        filename = filename_match_fallback.group(1).strip()
-                        file_blocks.append((filename, content))
-
-    return file_blocks
 
 
 
 
+
+# --- Build Gradio Interface ---
 with gr.Blocks(css=".gradio-container { max-width: 90% !important; }") as demo:
     gr.Markdown("# ✨ Website Code Generator ✨")
     gr.Markdown(
-        "Describe the website you want. The AI will generate website code.\n\n"
+        "Describe the website you want. The AI will generate the necessary code.\n"
+        "It will aim for `index.html` for 'Static', and potentially `index.html` + a backend file (like `app.py` or `server.js`) for 'Flask'/'Node.js'.\n"
+        "**Output Format:**\n"
+        "- No explanations, just code.\n"
+        "- Multiple files separated by `.TAB[NAME=filename.ext]` on its own line.\n"
+        "- Minimal necessary comments only.\n\n"
         "**Rules:**\n"
-        "- Provide a backend hint (Static / Flask / Node.js).\n"
-        "- Generated code should be functional and SFW.\n"
-        "- Only generates website-related code.\n"
-        "- If multiple files are generated, they will be separated below using the format `TAB.NAME={filename}`."
+        "- Backend choice guides the AI on whether to include server-side code.\n"
+        "- Always SFW and aims for minimal errors.\n"
+        "- Only generates website-related code. No other types of code."
     )
+
     with gr.Row():
         with gr.Column(scale=2):
             prompt_input = gr.Textbox(
                 label="Website Description",
-                placeholder="e.g., A simple Flask app with one route that displays 'Hello World'.",
+                placeholder="e.g., A Flask app with a form that stores data in a variable.",
                 lines=6,
             )
             backend_radio = gr.Radio(
                 ["Static", "Flask", "Node.js"],
-                label="Backend Context / Hint",
+                label="Backend Context",
                 value="Static",
+                info="Guides AI if backend code (like Python/JS) is needed alongside HTML."  # Updated info text
             )
             generate_button = gr.Button("✨ Generate Website Code", variant="primary")
+
         with gr.Column(scale=3):
-            main_output_label = gr.Markdown("### Full Generated Code / Main File")
-            main_output_code = gr.Code(
-                label="Generated Code",  # Label is less prominent now
-                language="html",
-                lines=15,
+            code_output = gr.Code(
+                label="Generated Code",  # Changed label slightly
+                language=None,  # Set language to None for plain text display, better for mixed content
+                lines=30,
                 interactive=False,
             )
-            extra_outputs_column = gr.Column(visible=False)  # Initially hidden
 
     with gr.Accordion("Advanced Settings", open=False):
         max_tokens_slider = gr.Slider(
             minimum=512,
-            maximum=8192,
-            value=4096,
+            maximum=4096,  # Adjust max based on model limits if necessary
+            value=3072,
             step=256,
             label="Max New Tokens"
         )
         temperature_slider = gr.Slider(
-            minimum=0.1,
-            maximum=1.2,
-            value=0.7,
-            step=0.1,
-            label="Temperature"
+            minimum=0.1, maximum=1.2, value=0.7, step=0.1, label="Temperature"
         )
         top_p_slider = gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.9,
-            step=0.05,
-            label="Top-P"
+            minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-P"
         )
 
-    def get_language(filename):
-        if filename.endswith(".html") or filename.endswith(".htm"): return "html"
-        if filename.endswith(".css"): return "css"
-        if filename.endswith(".js"): return "javascript"
-        if filename.endswith(".py"): return "python"
-        if filename.endswith(".json"): return "json"
-        if filename.endswith(".sql"): return "sql"
-        if filename.endswith(".php"): return "php"
-        if filename.endswith(".go"): return "go"
-        if filename.endswith(".java"): return "java"
-        if filename.endswith(".rb"): return "ruby"
-        if filename.endswith(".sh"): return "shell"
-        if filename.endswith(".yml") or filename.endswith(".yaml"): return "yaml"
-        if filename.endswith(".md"): return "markdown"
-        return "text"
-
-    def generate_and_display(prompt, backend, max_tokens, temperature, top_p):
-        full_code = generate_code(prompt, backend, max_tokens, temperature, top_p)
-
-        if full_code.startswith("## Error"):
-            return {
-                main_output_label: gr.Markdown.update(value="### Error Occurred"),
-                main_output_code: gr.Code.update(value=full_code, language="markdown"),
-                extra_outputs_column: gr.Column.update(visible=False, children=[])
-            }
-        if full_code.startswith("Error: Empty response"):
-            return {
-                main_output_label: gr.Markdown.update(value="### Error Occurred"),
-                main_output_code: gr.Code.update(value=full_code, language="text"),
-                extra_outputs_column: gr.Column.update(visible=False, children=[])
-            }
-
-        files = split_files(full_code)
-        dynamic_components = []
-        main_file_content = full_code
-        main_file_lang = "text"
-        main_file_label = "### Full Generated Code"
-
-        if not files:
-            main_file_content = full_code  # Show full code if split failed but we got output
-            main_file_lang = get_language("output.txt")  # Basic guess
-            main_file_label = "### Full Generated Output (No Files Detected)"
-            return {
-                main_output_label: gr.Markdown.update(value=main_file_label),
-                main_output_code: gr.Code.update(value=main_file_content, language=main_file_lang),
-                extra_outputs_column: gr.Column.update(visible=False, children=[])
-            }
-
-        if len(files) == 1:
-            main_file_content = files[0][1]
-            main_file_lang = get_language(files[0][0])
-            main_file_label = f"### File: {files[0][0]}"
-            return {
-                main_output_label: gr.Markdown.update(value=main_file_label),
-                main_output_code: gr.Code.update(value=main_file_content, language=main_file_lang),
-                extra_outputs_column: gr.Column.update(visible=False, children=[])
-            }
-        else:
-            main_file_content = files[0][1]
-            main_file_lang = get_language(files[0][0])
-            main_file_label = f"### File: {files[0][0]}"
-
-            for i, (filename, content) in enumerate(files[1:], start=1):  # Start from the second file for extras
-                lang = get_language(filename)
-                dynamic_components.append(gr.Markdown(f"### File: {filename}"))
-                dynamic_components.append(gr.Code(value=content, language=lang, label=filename, interactive=False))
-
-            return {
-                main_output_label: gr.Markdown.update(value=main_file_label),
-                main_output_code: gr.Code.update(value=main_file_content, language=main_file_lang),
-                extra_outputs_column: gr.Column.update(visible=True, children=dynamic_components)
-            }
-
-
     generate_button.click(
-        fn=generate_and_display,
+        fn=generate_code,
         inputs=[prompt_input, backend_radio, max_tokens_slider, temperature_slider, top_p_slider],
-        outputs=[main_output_label, main_output_code, extra_outputs_column]
+        outputs=code_output,
    )
 
 if __name__ == "__main__":
-    demo.queue().launch()
+    if not API_TOKEN:
+        print("Warning: HF_TOKEN environment variable not set. Using anonymous access.")
+    demo.queue(max_size=10).launch()
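Since the click handler now wires the generator straight into a single gr.Code output, the function can also be smoke-tested without the UI by draining it. A hedged sketch, assuming this file is saved as app.py and HF_TOKEN is set so the client initializes (importing app.py builds the Blocks UI but does not launch it, thanks to the __main__ guard):

# Hypothetical smoke test, not part of the commit.
from app import generate_code

last = ""
for partial in generate_code(
    "A landing page with a contact form",
    "Static",
    max_tokens=1024,
    temperature=0.7,
    top_p=0.9,
):
    last = partial  # intermediate yields are the raw stream; the final yield is the cleaned result

print(last[:500])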