Update app.py
app.py
CHANGED
@@ -2,83 +2,72 @@ import gradio as gr
Previous version of app.py (removed lines prefixed with -):

 from huggingface_hub import InferenceClient
 import re

-client = InferenceClient("

-def clean_code_blocks(
     """
-
-    Expected format:
     index.html
     <code>
-
-    static/style.css
     <code>
     """
-
-
-    for i in range(1, len(parts), 2):
-        filename = parts[i].strip()
-        code = parts[i + 1].strip()
-        if filename and code:
-            file_blocks[filename] = code
-    return file_blocks

 def generate_code(prompt, system_message, max_tokens, temperature, top_p, backend):
-
-    You are a code-generation
-
-

-
-    <

-
-

     messages = [
-        {"role": "system", "content":
         {"role": "user", "content": prompt}
     ]

-
-
-
-
-        stream=True,
-        temperature=temperature,
-        top_p=top_p
-    ):
-        token = chunk.choices[0].delta.content
-        if token:
-            response += token
-
-    # Parse and display each file in its own tab
     files = clean_code_blocks(response)
-
-
-
-
-

 with gr.Blocks() as demo:
-    gr.Markdown("

     with gr.Row():
-        prompt = gr.Textbox(label="
-        backend = gr.Dropdown(["Flask", "Static", "Node.js"], value="Static", label="
-
-    system_msg = gr.Textbox(value="You are a helpful assistant.", label="System Message")
-    max_tokens = gr.Slider(1, 2048, value=1024, label="Max Tokens")
-    temperature = gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature")
-    top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")

-
-

-
-

-    run_btn.click(

 if __name__ == "__main__":
     demo.launch()
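Several of the removed lines above were only partially captured. Judging from the fragments that survive (stream=True, chunk.choices[0].delta.content, response += token), the old generate_code most likely streamed the completion token by token from the same InferenceClient. A rough, hypothetical sketch of what that truncated call may have looked like inside generate_code (the exact argument list is an assumption):

    # Hypothetical reconstruction of the truncated streaming call, not the verbatim original.
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p
    ):
        token = chunk.choices[0].delta.content
        if token:
            response += token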
Updated version of app.py (added lines prefixed with +):

 from huggingface_hub import InferenceClient
 import re

+client = InferenceClient("http://localhost:5000") # change this if needed

+def clean_code_blocks(response):
     """
+    Parses AI response to extract file blocks like:
     index.html
     <code>
+    style.css
     <code>
     """
+    pattern = re.compile(r"^([^\s\/\\]+\.?[a-zA-Z0-9]*)\n([\s\S]+?)(?=\n\S|$)", re.MULTILINE)
+    return pattern.findall(response)

 def generate_code(prompt, system_message, max_tokens, temperature, top_p, backend):
+    system_prompt = f"""
+    You are a code-generation assistant.
+    Respond ONLY with raw code for a website with multiple files (like index.html, style.css, script.js, app.py, etc.).
+    Use the {backend} backend ONLY.
+
+    Do NOT use markdown formatting. Do NOT add backticks. Do NOT add any explanations. Just output file names followed by their code, like:

+    index.html
+    <html>...

+    style.css
+    body {{...}}
+
+    app.py
+    from flask import Flask...
+    """.strip()

     messages = [
+        {"role": "system", "content": system_prompt + "\n\n" + system_message},
         {"role": "user", "content": prompt}
     ]

+    # Single non-streaming call since we're parsing files
+    completion = client.chat_completion(messages, max_tokens=max_tokens, temperature=temperature, top_p=top_p)
+    response = completion.choices[0].message.content
+
     files = clean_code_blocks(response)
+
+    with gr.Tabs() as tabset:
+        for filename, code in files:
+            with gr.Tab(label=filename.strip()):
+                gr.Code(value=code.strip(), language="html" if filename.endswith(".html") else "css" if filename.endswith(".css") else "javascript" if filename.endswith(".js") else "python" if filename.endswith(".py") else "text")
+
+    return tabset

 with gr.Blocks() as demo:
+    gr.Markdown("### WebGen: Prompt → Multi-File Website (with Flask, Node.js, Static support)")

     with gr.Row():
+        prompt = gr.Textbox(label="Prompt", lines=2)
+        backend = gr.Dropdown(["Flask", "Static", "Node.js"], value="Static", label="Backend")

+    with gr.Accordion("Advanced Settings", open=False):
+        system_msg = gr.Textbox(value="", label="System Message")
+        max_tokens = gr.Slider(128, 2048, value=1024, step=1, label="Max Tokens")
+        temperature = gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature")
+        top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")

+    output = gr.Column()
+    run_btn = gr.Button("Generate")

+    run_btn.click(generate_code, inputs=[prompt, system_msg, max_tokens, temperature, top_p, backend], outputs=output)

 if __name__ == "__main__":
     demo.launch()
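A quick sanity check of the new clean_code_blocks regex; the sample response below is made up for illustration and assumes the model follows the "filename on its own line, then code" format the system prompt asks for:

# Hypothetical sample response in the raw, markdown-free format requested above.
sample = "index.html\n<h1>Hello</h1>\n\nstyle.css\nh1 { color: teal; }\n"

for name, code in clean_code_blocks(sample):
    print(name, "->", code)
# index.html -> <h1>Hello</h1>
# style.css -> h1 { color: teal; }

One caveat: with re.MULTILINE the $ in the lookahead matches at the end of every line, so each captured block stops at its first line break. Files whose code spans several lines come back with only the first line, and a later flush-left line containing no spaces or slashes (e.g. print(x)) can itself be picked up as a "filename". If whole multi-line files are needed, one possible (untested) alternative is to end each block at the next filename-looking line instead:

pattern = re.compile(r"^(\S+\.\w+)\n(.*?)(?=^\S+\.\w+$|\Z)", re.MULTILINE | re.DOTALL)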