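"""WebGen AI: a Gradio chat app that asks HuggingFaceH4/zephyr-7b-beta
to generate a complete website (including an index.html) for a selected
backend, streaming the raw code back into the chat.

Assumes the `gradio` and `huggingface_hub` packages are installed; the
hosted inference endpoint may also require a Hugging Face token in the
environment, depending on the model's access settings.
"""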
import gradio as gr
from huggingface_hub import InferenceClient

# Streaming chat client for the hosted zephyr-7b-beta endpoint.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

def respond(message, history, system_message, max_tokens, temperature, top_p, backend):
    # Hard-wire the model into code-generation mode for the chosen backend;
    # the user-supplied system message is appended below as extra guidance.
    forced_system = f"""
You are a code-generation AI. You MUST generate a full website including an index.html file.
Use only the {backend} backend structure.
Respond ONLY with raw code and file/folder structure. Do NOT explain or add commentary.
""".strip()

    system_message = forced_system + "\n\n" + system_message

    # Rebuild the OpenAI-style message list from the [user, assistant] pairs.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:
            response += token
            yield response

with gr.Blocks() as demo:
    gr.Markdown("# WebGen AI\nGenerate a complete website with your selected backend.")

    with gr.Row():
        system_msg = gr.Textbox(value="You are a helpful assistant.", label="System Message")
        backend = gr.Dropdown(["Flask", "Static", "Node.js"], value="Static", label="Backend")

    with gr.Row():
        max_tokens = gr.Slider(1, 2048, value=512, label="Max Tokens")
        temperature = gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature")
        top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")

    # Pair-style ([user, assistant]) chat history rendered by the Chatbot.
    chatbot = gr.Chatbot()
    user_input = gr.Textbox(label="Your Prompt", placeholder="Ask the AI to generate a website...")

    # Module-level history is shared across all sessions; a per-user app
    # would hold this in gr.State instead.
    history = []

    def chat_submit(message):
        # `history` is only mutated in place, never rebound, so no global or
        # nonlocal declaration is needed (`nonlocal` at module scope is a
        # SyntaxError anyway). Append a pending row; the assistant slot is
        # filled in by run_response below.
        history.append([message, None])
        return "", history

    send_btn = gr.Button("Send")

    def run_response(system_msg, max_tokens, temperature, top_p, backend):
        # chat_submit has already cleared the textbox by the time this chained
        # handler runs, so recover the prompt from the pending history row
        # rather than from the (now empty) input component.
        message = history[-1][0]
        # Pass history without the pending [message, None] row so the current
        # prompt is not sent to the model twice.
        response_generator = respond(message, history[:-1], system_msg, max_tokens, temperature, top_p, backend)
        final_response = ""
        for chunk in response_generator:
            final_response = chunk
            yield history[:-1] + [[message, chunk]]
        history[-1][1] = final_response

    send_btn.click(
        chat_submit,
        inputs=[user_input],
        outputs=[user_input, chatbot]
    ).then(
        run_response,
        inputs=[system_msg, max_tokens, temperature, top_p, backend],
        outputs=chatbot
    )

if __name__ == "__main__":
    demo.launch()
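
# To try the app locally (assuming this file is saved as app.py and the
# dependencies noted in the module docstring are installed):
#   python app.py
# then open the printed URL (Gradio defaults to http://127.0.0.1:7860).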