import gradio as gr
from huggingface_hub import InferenceClient
import re

# Serverless Inference API client; any chat-capable model on the Hub can be
# substituted for the default Zephyr model below.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

def parse_files(raw_response):
    # Split the raw model output into (filename, content) pairs: a bare
    # filename on its own line, followed by everything up to the next
    # filename-like line (or the end of the text).
    pattern = re.compile(r"(?:(?:\n|^)([\w\-.\/\\]+)\n)(.+?)(?=\n[\w\-.\/\\]+\n|\Z)", re.DOTALL)
    return pattern.findall(raw_response)
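
# Illustrative sketch (an assumption for clarity, not invoked by the app): on a
# response shaped like the system-prompt example, parse_files yields one
# (filename, content) tuple per file, e.g.
#   parse_files("index.html\n<html>...</html>\n\nstyle.css\nbody { ... }")
#   # -> [("index.html", "<html>...</html>\n"), ("style.css", "body { ... }")]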

def generate_code(prompt, backend, system_message, max_tokens, temperature, top_p):
    full_sys_msg = f"""
You are a code-only AI. Given a prompt, generate a full website using the {backend} backend.
Always include an index.html file.

Respond ONLY with filenames and raw code. NO commentary, NO backticks, NO markdown.

Example:
index.html
<html>...</html>

style.css
body {{ ... }}

Start now.
""".strip()

    messages = [
        {"role": "system", "content": full_sys_msg + "\n" + system_message},
        {"role": "user", "content": prompt}
    ]

    # Query the hosted model; chat_completion returns an OpenAI-style response
    # object with the generated text under choices[0].message.content.
    response = client.chat_completion(
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )

    raw = response.choices[0].message.content
    files = parse_files(raw)

    return files

def lang_for(filename):
    # Map a file extension to a gr.Code syntax-highlighting language.
    if filename.endswith(".html"):
        return "html"
    if filename.endswith(".css"):
        return "css"
    if filename.endswith(".js"):
        return "javascript"
    if filename.endswith(".py"):
        return "python"
    return None  # plain text


def on_generate(prompt, backend, system_message, max_tokens, temperature, top_p):
    # Components such as gr.TabItem cannot be built and returned from a plain
    # callback, so this handler only returns the parsed files; the per-file
    # tabs are rebuilt in the gr.render block inside the Blocks layout below.
    return generate_code(prompt, backend, system_message, max_tokens, temperature, top_p)

with gr.Blocks() as demo:
    gr.Markdown("### Website Generator (Static / Flask / Node.js)")

    with gr.Row():
        prompt = gr.Textbox(label="Describe your website", placeholder="E.g. a portfolio site with dark mode")
        backend = gr.Dropdown(["Static", "Flask", "Node.js"], value="Static", label="Backend")

    with gr.Accordion("Advanced Options", open=False):
        system_message = gr.Textbox(label="Extra instructions for the AI", value="")
        max_tokens = gr.Slider(256, 2048, value=1024, label="Max Tokens")
        temperature = gr.Slider(0.1, 2.0, value=0.7, label="Temperature")
        top_p = gr.Slider(0.1, 1.0, value=0.95, label="Top-p")

    files_state = gr.State([])  # parsed (filename, content) pairs from the last run

    generate_button = gr.Button("Generate Code")

    # Button click action: store the parsed files in state
    generate_button.click(
        on_generate,
        inputs=[prompt, backend, system_message, max_tokens, temperature, top_p],
        outputs=files_state,
        show_progress=True
    )

    # Dynamic per-file tabs have to be created inside gr.render (recent Gradio)
    @gr.render(inputs=files_state)
    def show_files(file_list):
        with gr.Tabs():
            for name, content in file_list:
                name = name.strip()
                with gr.TabItem(label=name):
                    gr.Code(value=content.strip(), language=lang_for(name))

if __name__ == "__main__":
    demo.launch()
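
# Usage notes (assumptions, not part of the original app): run with
# `python app.py` after installing the `gradio` and `huggingface_hub`
# packages; the hosted Inference API may require authentication first
# (e.g. `huggingface-cli login`) or an HF access token in the environment.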