import gradio as gr
from huggingface_hub import InferenceClient
import re
client = InferenceClient("http://localhost:5000") # change this if needed
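# Alternatively, the client can target a hosted chat model on the Hub instead of a local
# server, e.g.: client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")  # hypothetical model choice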

def clean_code_blocks(response):
    """
    Parses the AI response to extract file blocks like:

        index.html
        <code>
        style.css
        <code>
    """
    pattern = re.compile(r"^([^\s\/\\]+\.?[a-zA-Z0-9]*)\n([\s\S]+?)(?=\n\S|$)", re.MULTILINE)
    return pattern.findall(response)
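
# Illustrative example (hypothetical model output) of what clean_code_blocks returns:
#   clean_code_blocks("index.html\n<html></html>\nstyle.css\nbody {}")
#   -> [("index.html", "<html></html>"), ("style.css", "body {}")]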

def generate_code(prompt, system_message, max_tokens, temperature, top_p, backend):
    system_prompt = f"""
You are a code-generation assistant.
Respond ONLY with raw code for a website with multiple files (like index.html, style.css, script.js, app.py, etc.).
Use the {backend} backend ONLY.
Do NOT use markdown formatting. Do NOT add backticks. Do NOT add any explanations. Just output file names followed by their code, like:
index.html
<html>...
style.css
body {{...}}
app.py
from flask import Flask...
    """.strip()

    messages = [
        {"role": "system", "content": system_prompt + "\n\n" + system_message},
        {"role": "user", "content": prompt},
    ]

    # Single non-streaming call since we're parsing files from the full response
    completion = client.chat_completion(
        messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )
    response = completion.choices[0].message.content
    files = clean_code_blocks(response)

    # One tab per generated file; highlight based on the file extension (None = plain text)
    lang_by_ext = {".html": "html", ".css": "css", ".js": "javascript", ".py": "python"}
    with gr.Tabs() as tabset:
        for filename, code in files:
            filename = filename.strip()
            language = next((lang for ext, lang in lang_by_ext.items() if filename.endswith(ext)), None)
            with gr.Tab(label=filename):
                gr.Code(value=code.strip(), language=language)
    return tabset

with gr.Blocks() as demo:
    gr.Markdown("### WebGen: Prompt → Multi-File Website (with Flask, Node.js, Static support)")

    with gr.Row():
        prompt = gr.Textbox(label="Prompt", lines=2)
        backend = gr.Dropdown(["Flask", "Static", "Node.js"], value="Static", label="Backend")

    with gr.Accordion("Advanced Settings", open=False):
        system_msg = gr.Textbox(value="", label="System Message")
        max_tokens = gr.Slider(128, 2048, value=1024, step=1, label="Max Tokens")
        temperature = gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature")
        top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")

    output = gr.Column()
    run_btn = gr.Button("Generate")
    run_btn.click(
        generate_code,
        inputs=[prompt, system_msg, max_tokens, temperature, top_p, backend],
        outputs=output,
    )

if __name__ == "__main__":
    demo.launch()