Update app.py

app.py CHANGED
@@ -2,55 +2,39 @@ import gradio as gr
 from huggingface_hub import InferenceClient
 import os
 import re
-# import traceback # Optional: for more detailed error logging if needed
 
-# --- Configuration ---
 API_TOKEN = os.getenv("HF_TOKEN", None)
 MODEL = "Qwen/Qwen2.5-Coder-32B-Instruct"
 
-# --- Initialize Inference Client ---
 try:
     print(f"Initializing Inference Client for model: {MODEL}")
     client = InferenceClient(model=MODEL, token=API_TOKEN) if API_TOKEN else InferenceClient(model=MODEL)
 except Exception as e:
     raise gr.Error(f"Failed to initialize model client for {MODEL}. Error: {e}. Check HF_TOKEN and model availability.")
 
-
-# --- Helper Function to Parse Code into Files ---
 def parse_code_into_files(raw_response: str) -> dict:
-    """
-    Parses raw AI output containing .TAB separators
-    into a dictionary where keys are filenames and values are code blocks.
-    Returns keys like 'index.html', 'backend_file', 'backend_filename', 'backend_language'.
-    """
     files = {}
-    # Default filename for the first block if no TAB is present or before the first TAB
     default_first_filename = "index.html"
-    separator_pattern = r'\.TAB\[NAME=([^\]]+)\]\n?'
+    separator_pattern = r'\.TAB\[NAME=([^\]]+)\]\n?'
 
-    # Find all separators and their positions
     matches = list(re.finditer(separator_pattern, raw_response))
 
     start_index = 0
-    # Handle the first file (always assume index.html for now)
     first_separator_pos = matches[0].start() if matches else len(raw_response)
     first_block = raw_response[start_index:first_separator_pos].strip()
     if first_block:
         files[default_first_filename] = first_block
 
-    # Handle the second file (if separator exists)
     if matches:
-        backend_filename = matches[0].group(1).strip()
-        start_index = matches[0].end()
+        backend_filename = matches[0].group(1).strip()
+        start_index = matches[0].end()
 
-        # Find the position of the *next* separator, or end of string
         second_separator_pos = matches[1].start() if len(matches) > 1 else len(raw_response)
         backend_code = raw_response[start_index:second_separator_pos].strip()
 
         if backend_code:
             files['backend_file'] = backend_code
             files['backend_filename'] = backend_filename
-            # Determine language from filename extension
             if backend_filename.endswith(".py"):
                 files['backend_language'] = 'python'
             elif backend_filename.endswith(".js"):
@@ -58,26 +42,21 @@ def parse_code_into_files(raw_response: str) -> dict:
             elif backend_filename.endswith(".css"):
                 files['backend_language'] = 'css'
             else:
-                files['backend_language'] = None
-
-    # If more files were generated (more separators), they are currently ignored by this simple parser.
+                files['backend_language'] = None
 
     return files
 
-
-# --- Core Code Generation Function ---
 def generate_code(
     prompt: str,
     backend_choice: str,
     max_tokens: int,
     temperature: float,
     top_p: float,
-    progress=gr.Progress(
+    progress=gr.Progress(track_tqdm=True)
 ):
     print(f"Generating code for: {prompt[:100]}... | Backend: {backend_choice}")
     progress(0, desc="Initializing Request...")
 
-    # System message remains the same - instructing the AI on format
     system_message = (
         "You are an AI that generates website code. You MUST ONLY output the raw code, without any conversational text like 'Here is the code' or explanations before or after the code blocks. "
         "You MUST NOT wrap the code in markdown fences like ```html, ```python, or ```js. "
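
For context on the two hunks above: parse_code_into_files splits a single raw model response on .TAB[NAME=...] separators into at most two blocks, the HTML before the first separator and one backend file after it. A minimal sketch with a made-up response string (the sample content is hypothetical; the dictionary keys match the code above):

# Hypothetical model output in the .TAB[NAME=...] convention described above.
raw = (
    "<!DOCTYPE html><html><body>Hello</body></html>\n"
    ".TAB[NAME=app.py]\n"
    "from flask import Flask\n"
    "app = Flask(__name__)\n"
)

files = parse_code_into_files(raw)
# files["index.html"]       -> the HTML block before the first separator
# files["backend_file"]     -> the Flask snippet after the separator
# files["backend_filename"] -> "app.py"
# files["backend_language"] -> "python" (inferred from the .py extension)
# A third .TAB[NAME=...] block, if the model emitted one, would be ignored.
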
@@ -103,7 +82,7 @@ def generate_code(
 
     full_response = ""
     token_count = 0
-    est_total_tokens = max_tokens
+    est_total_tokens = max_tokens
 
     try:
         progress(0.1, desc="Sending Request to Model...")
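
est_total_tokens is simply the max_tokens slider value, so the per-token progress update in the next hunk maps the stream onto a fixed 20-90% band. A worked example with assumed numbers:

# Assume the slider is at 1000 tokens and 500 have streamed so far.
max_tokens = 1000                  # est_total_tokens = max_tokens
token_count = 500
prog = min(0.2 + (token_count / max_tokens) * 0.7, 0.9)
# 0.2 + 0.5 * 0.7 = 0.55 -> the bar sits at 55%
# The min(..., 0.9) cap keeps the last 10% in reserve for post-processing,
# even if the model streams more tokens than estimated.
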
@@ -121,22 +100,16 @@
             if isinstance(token, str):
                 full_response += token
                 token_count += 1
-                # Update progress based on tokens received vs max_tokens
-                # Adjust the scaling factor (e.g., 0.7) as needed
                 prog = min(0.2 + (token_count / est_total_tokens) * 0.7, 0.9)
                 progress(prog, desc="Generating Code...")
 
 
         progress(0.9, desc="Processing Response...")
-        # --- Post-processing ---
         cleaned_response = full_response.strip()
-        # Fallback fence removal
         cleaned_response = re.sub(r"^\s*```[a-z]*\s*\n?", "", cleaned_response)
         cleaned_response = re.sub(r"\n?\s*```\s*$", "", cleaned_response)
-        # Remove potential chat markers
         cleaned_response = re.sub(r"<\s*\|?\s*(user|system|assistant)\s*\|?\s*>", "", cleaned_response, flags=re.IGNORECASE).strip()
-
-        common_phrases = [ # Simplified list as prompt should handle most
+        common_phrases = [
            "Here is the code:", "Okay, here is the code:", "Here's the code:",
             "Sure, here is the code you requested:",
         ]
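
The two fence-stripping re.sub calls kept in this hunk are a fallback for responses that ignore the "no markdown fences" system instruction. A quick sketch of what they remove (the sample string is illustrative):

import re

raw = "```html\n<h1>Hi</h1>\n```"
cleaned = re.sub(r"^\s*```[a-z]*\s*\n?", "", raw)   # strips the leading "```html" line
cleaned = re.sub(r"\n?\s*```\s*$", "", cleaned)     # strips the trailing "```"
print(cleaned)                                      # -> <h1>Hi</h1>
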
@@ -146,14 +119,11 @@
                 cleaned_response = cleaned_response[len(phrase):].lstrip()
                 temp_response_lower = cleaned_response.lower()
 
-        # Check for refusal message
         refusal_message = "hey there! am here to create websites for you unfortunately am programmed to not create codes! otherwise I would go on the naughty list :-("
         if refusal_message in full_response:
-            # Return updates to clear both code blocks and show refusal in the first
             progress(1, desc="Refusal Message Generated")
-            return gr.update(value=refusal_message, language=None, visible=True), gr.update(value="", visible=False)
+            return gr.update(value=refusal_message, language=None, visible=True), gr.update(value="", visible=False, label="Backend")
 
-        # --- PARSE the final cleaned response into files ---
         parsed_files = parse_code_into_files(cleaned_response)
 
         html_code = parsed_files.get("index.html", "")
@@ -161,35 +131,28 @@
         backend_filename = parsed_files.get("backend_filename", "Backend")
         backend_language = parsed_files.get("backend_language", None)
 
-        # --- Prepare Gradio Updates ---
-        # Update for the HTML code block (always visible)
         html_update = gr.update(value=html_code, language='html', visible=True)
 
-        # Update for the Backend code block (visible only if backend code exists)
         if backend_code:
             backend_update = gr.update(value=backend_code, language=backend_language, label=backend_filename, visible=True)
         else:
-            backend_update = gr.update(value="", visible=False)
+            backend_update = gr.update(value="", visible=False, label="Backend")
 
         progress(1, desc="Done")
-        # Return tuple of updates for the outputs list
         return html_update, backend_update
 
     except Exception as e:
-        print(f"ERROR during code generation: {e}")
-        # traceback.print_exc() # Uncomment for full traceback
+        print(f"ERROR during code generation: {e}")
         progress(1, desc="Error Occurred")
         error_message = f"## Error\n\nFailed to generate or process code.\n**Reason:** {e}"
-
-        return gr.update(value=error_message, language=None, visible=True), gr.update(value="", visible=False)
+        return gr.update(value=error_message, language=None, visible=True), gr.update(value="", visible=False, label="Backend")
 
 
-# --- Build Gradio Interface ---
 with gr.Blocks(css=".gradio-container { max-width: 90% !important; }") as demo:
     gr.Markdown("# ✨ Website Code Generator ✨")
     gr.Markdown(
         "Describe the website you want. The AI will generate the necessary code.\n"
-        "If multiple files are generated (e.g., for Flask/Node.js), they will appear in separate tabs below."
+        "If multiple files are generated (e.g., for Flask/Node.js), they will appear in separate tabs below."
     )
 
     with gr.Row():
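
Every return path in generate_code now sends one gr.update(...) per wired output, and the change adds label="Backend" when hiding the second block so a stale filename label is not left behind. A minimal sketch of the pattern (the component values here are illustrative). One caveat: only the two gr.Code components appear in outputs=[...] below, so the enclosing Backend tab, created with visible=False, is seemingly never unhidden by this handler.

import gradio as gr

def toggle_outputs(show_backend: bool):
    # One update per component in outputs=[...]; gr.update changes only
    # the properties you pass and leaves the rest of the component alone.
    html_update = gr.update(value="<h1>Hi</h1>", language="html", visible=True)
    if show_backend:
        backend_update = gr.update(value="print('hi')", language="python",
                                   label="app.py", visible=True)
    else:
        backend_update = gr.update(value="", visible=False, label="Backend")
    return html_update, backend_update
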
@@ -208,29 +171,24 @@ with gr.Blocks(css=".gradio-container { max-width: 90% !important; }") as demo:
             generate_button = gr.Button("✨ Generate Website Code", variant="primary")
 
         with gr.Column(scale=3):
-            # Define Tabs to hold the code outputs
             with gr.Tabs(elem_id="code-tabs") as code_tabs:
-                # Tab 1: Always present for HTML
                 with gr.Tab("index.html", elem_id="html-tab") as html_tab:
                     html_code_output = gr.Code(
-                        label="index.html",
+                        label="index.html",
                         language="html",
-                        lines=25,
+                        lines=25,
                         interactive=False,
-                        elem_id="html_code",
+                        elem_id="html_code",
                     )
-                # Tab 2: For Backend code, initially hidden
                 with gr.Tab("Backend", elem_id="backend-tab", visible=False) as backend_tab:
                     backend_code_output = gr.Code(
-                        label="Backend
-                        language=None,
+                        label="Backend",
+                        language=None,
                         lines=25,
                         interactive=False,
-                        elem_id="backend_code",
-                        visible=False
+                        elem_id="backend_code",
+                        visible=False
                     )
-                # Add more tabs here if needed (e.g., for CSS) following the same pattern
-
 
     with gr.Accordion("Advanced Settings", open=False):
         max_tokens_slider = gr.Slider(
@@ -243,15 +201,13 @@
             minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-P"
         )
 
-    # The click function now targets the specific code blocks within the tabs
     generate_button.click(
         fn=generate_code,
         inputs=[prompt_input, backend_radio, max_tokens_slider, temperature_slider, top_p_slider],
-        # The outputs list MUST match the order and number of code blocks we want to update
         outputs=[html_code_output, backend_code_output],
     )
 
 if __name__ == "__main__":
     if not API_TOKEN:
         print("Warning: HF_TOKEN environment variable not set. Using anonymous access.")
-    demo.queue(max_size=10).launch()
+    demo.queue(max_size=10).launch()
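
On the signature change above: gr.Progress(track_tqdm=True) additionally mirrors any tqdm loops inside the function into the UI bar, while explicit progress(fraction, desc=...) calls like the ones in generate_code keep working either way. A minimal sketch (the loop body is a stand-in):

import time
import gradio as gr

def slow_task(steps: int, progress=gr.Progress(track_tqdm=True)):
    progress(0, desc="Starting...")
    for i in range(steps):
        time.sleep(0.1)                              # stand-in for real work
        progress((i + 1) / steps, desc="Working...")
    return "done"

# As in app.py, the app must run behind demo.queue(...) for these
# incremental progress updates to reach the browser.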