import gradio as gr
from huggingface_hub import InferenceClient
import re

# Hosted inference client for the Zephyr-7B-beta chat model.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


def parse_files(raw_response):
    # Split the model's raw output into (filename, code) pairs: a bare
    # filename on its own line, followed by that file's contents.
    pattern = re.compile(r"(?:(?:\n|^)([\w\-.\/\\]+)\n)(.+?)(?=\n[\w\-.\/\\]+\n|\Z)", re.DOTALL)
    return pattern.findall(raw_response)

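# Illustrative example (hypothetical text, not a captured model response) of
# what parse_files() extracts from a two-file reply:
#
#   index.html
#   <html><body>Hello</body></html>
#   style.css
#   body { background: #111; }
#
# parse_files() on that text returns:
#   [("index.html", "<html><body>Hello</body></html>"),
#    ("style.css", "body { background: #111; }")]
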
def generate_code(prompt, backend, system_message, max_tokens, temperature, top_p):
    # Build a strict "code only" system prompt for the selected backend.
    full_sys_msg = f"""
You are a code-only AI. Given a prompt, generate a full website using the {backend} backend.
Always include an index.html file.
Respond ONLY with filenames and raw code. NO commentary, NO backticks, NO markdown.
Example:
index.html
<html>...</html>
style.css
body {{ ... }}
Start now.
""".strip()

    messages = [
        {"role": "system", "content": full_sys_msg + "\n" + system_message},
        {"role": "user", "content": prompt},
    ]

    # Query the hosted model through the chat-completion API.
    response = client.chat_completion(
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )
    raw = response.choices[0].message.content
    files = parse_files(raw)
    return files

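# Illustrative call (hypothetical values taken from the UI defaults below;
# requires network access to the Hugging Face Inference API):
#   generate_code("a portfolio site with dark mode", "Static", "", 1024, 0.7, 0.95)
#   -> e.g. [("index.html", "<html>...</html>"), ("style.css", "body { ... }")]
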
def on_generate(prompt, backend, system_message, max_tokens, temperature, top_p):
    # Click handler: return the (filename, code) pairs; the gr.render block
    # below turns them into one code tab per file.
    return generate_code(prompt, backend, system_message, max_tokens, temperature, top_p)


with gr.Blocks() as demo:
    gr.Markdown("### Website Generator (Static / Flask / Node.js)")

    with gr.Row():
        prompt = gr.Textbox(label="Describe your website", placeholder="E.g. a portfolio site with dark mode")
        backend = gr.Dropdown(["Static", "Flask", "Node.js"], value="Static", label="Backend")

    with gr.Accordion("Advanced Options", open=False):
        system_message = gr.Textbox(label="Extra instructions for the AI", value="")
        max_tokens = gr.Slider(256, 2048, value=1024, label="Max Tokens")
        temperature = gr.Slider(0.1, 2.0, value=0.7, label="Temperature")
        top_p = gr.Slider(0.1, 1.0, value=0.95, label="Top-p")

    generate_button = gr.Button("Generate Code")

    # Event handlers cannot return a variable number of new components, so the
    # generated files are kept in per-session state and the gr.render block
    # below rebuilds one tab per file whenever this state changes
    # (gr.render is available in recent Gradio 4.x/5.x releases).
    files_state = gr.State([])

    generate_button.click(
        on_generate,
        inputs=[prompt, backend, system_message, max_tokens, temperature, top_p],
        outputs=files_state,
        show_progress="full",
    )

    @gr.render(inputs=[files_state])
    def show_files(files):
        if not files:
            gr.Markdown("Generated files will appear here.")
            return
        with gr.Tabs():
            for name, content in files:
                name = name.strip()
                # Map file extension to a gr.Code language (None = plain text).
                lang = "html" if name.endswith(".html") else \
                       "css" if name.endswith(".css") else \
                       "javascript" if name.endswith(".js") else \
                       "python" if name.endswith(".py") else None
                with gr.Tab(label=name, elem_id=name):
                    gr.Code(value=content.strip(), language=lang)

if __name__ == "__main__":
    demo.launch()
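# Usage note (assuming this file is the Space's app.py): `python app.py`
# starts the Gradio server locally; on Hugging Face Spaces it is launched
# automatically. Pass share=True to demo.launch() for a temporary public URL.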