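"""Gradio app that streams website code from Qwen2.5-Coder-32B-Instruct via the
Hugging Face Inference API and renders multi-file answers split on
TAB.NAME={filename} markers."""
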
import gradio as gr
from huggingface_hub import InferenceClient
import os
import re

API_TOKEN = os.getenv("HF_TOKEN", None)
MODEL = "Qwen/Qwen2.5-Coder-32B-Instruct"

# A single client is shared across requests; HF_TOKEN is optional but avoids
# anonymous rate limits on the Inference API.
try:
    client = InferenceClient(model=MODEL, token=API_TOKEN)
except Exception as e:
    raise RuntimeError(f"Failed to initialize model client: {e}")

def generate_code(prompt: str, backend_choice: str, max_tokens: int, temperature: float, top_p: float):
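    """Stream the raw model output as it arrives, then yield the cleaned final text."""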
    system_message = (
        "You are an AI assistant programmed to generate website code only. "
        "You must not use triple backticks (```html, ```python, etc.). "
        "If multiple files are needed, separate them clearly using:\n"
        "TAB.NAME={filename}\n"
        "Only generate code. No explanations, no phrases like 'Here is the code'. "
        "If the user asks for non-website code, reply:\n"
        "'hey there! am here to create websites for you unfortunately am programmed to not create codes! otherwise I would go on the naughty list :-('."
    )
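    # Illustrative (not model-guaranteed) shape of a multi-file response:
    #   TAB.NAME={index.html}
    #   <!DOCTYPE html>...
    #   TAB.NAME={script.js}
    #   document.addEventListener(...)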
    user_prompt = f"USER_PROMPT = {prompt}\nUSER_BACKEND = {backend_choice}"
    messages = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": user_prompt}
    ]
    response_stream = ""
    full_response = ""
    try:
        stream = client.chat_completion(
            messages=messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        )
        for message in stream:
            token = message.choices[0].delta.content
            if isinstance(token, str):
                response_stream += token
                full_response += token
                yield {"raw": response_stream}
        cleaned_response = full_response.strip()
        cleaned_response = re.sub(r"^\s*```[a-z]*\s*\n?", "", cleaned_response)
        cleaned_response = re.sub(r"\n?\s*```\s*$", "", cleaned_response)
        cleaned_response = re.sub(r"<\s*\|?\s*(user|assistant)\s*\|?\s*>", "", cleaned_response, flags=re.IGNORECASE)
        common_phrases = [
            "Here is the code:", "Okay, here is the code:", "Here's the code:",
            "Sure, here is the code you requested:", "Let me know if you need anything else."
        ]
        for phrase in common_phrases:
            if cleaned_response.lower().startswith(phrase.lower()):
                cleaned_response = cleaned_response[len(phrase):].lstrip()
        yield {"final": cleaned_response.strip()}
    except Exception as e:
        yield {"error": f"## Error\n\nFailed to generate code.\n**Reason:** {e}"}

def display_output(cleaned_text: str) -> str:
    """Render the cleaned response as Markdown, one fenced code block per file."""
    if cleaned_text.startswith("Error:"):
        return cleaned_text
    # re.split with a capture group yields [prefix, name1, body1, name2, body2, ...].
    file_splits = re.split(r"TAB\.NAME=\{(.+?)\}", cleaned_text)
    if len(file_splits) == 1:
        # No TAB.NAME markers: treat the whole answer as a single HTML file.
        return f"```html\n{cleaned_text}\n```"
    prefix = file_splits.pop(0).strip()
    sections = []
    if prefix:
        # Text before the first marker has no filename; show it as plain text.
        sections.append(f"```text\n{prefix}\n```")
    for i in range(0, len(file_splits), 2):
        filename = file_splits[i].strip()
        content = file_splits[i + 1].strip() if (i + 1) < len(file_splits) else ""
        language = (
            "html" if filename.endswith(".html")
            else "python" if filename.endswith(".py")
            else "javascript" if filename.endswith(".js")
            else "text"
        )
        sections.append(f"### {filename}\n```{language}\n{content}\n```")
    return "\n\n".join(sections)
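# Example of the marker splitting on a hypothetical single-file response:
#   display_output("TAB.NAME={index.html}\n<h1>Hi</h1>")
#   -> "### index.html\n```html\n<h1>Hi</h1>\n```"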

with gr.Blocks(css=".gradio-container { max-width: 90% !important; }") as demo:
    gr.Markdown("# ✨ Website Code Generator ✨")
    gr.Markdown(
        "Describe the website you want and the AI will generate the code for it.\n\n"
        "**Rules:**\n"
        "- Pick a backend hint (Static / Flask / Node.js).\n"
        "- Output is always SFW and aims for minimal errors.\n"
        "- Websites only; other kinds of code are refused.\n"
        "- Multi-file output is separated with TAB.NAME={filename} markers."
    )
    with gr.Row():
        with gr.Column(scale=2):
            prompt_input = gr.Textbox(
                label="Website Description",
                placeholder="e.g., A simple landing page with a hero section and contact form.",
                lines=6,
            )
            backend_radio = gr.Radio(
                ["Static", "Flask", "Node.js"],
                label="Backend Context",
                value="Static",
            )
            generate_button = gr.Button("✨ Generate Website Code", variant="primary")
        with gr.Column(scale=3):
            # Raw text streams here while generating; once finished, gr.Code
            # also serves as the input to display_output below.
            output_display = gr.Code(label="Generated Code", language="html", interactive=False)
            formatted_display = gr.Markdown()
    with gr.Accordion("Advanced Settings", open=False):
        max_tokens_slider = gr.Slider(
            minimum=512,
            maximum=4096,
            value=3072,
            step=256,
            label="Max New Tokens"
        )
        temperature_slider = gr.Slider(
            minimum=0.1, maximum=1.2,
            value=0.7,
            step=0.1,
            label="Temperature"
        )
        top_p_slider = gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.9,
            step=0.05,
            label="Top-P"
        )
    generate_button.click(
        fn=generate_code,
        inputs=[prompt_input, backend_radio, max_tokens_slider, temperature_slider, top_p_slider],
        outputs=output_display,
    ).then(
        fn=display_output,
        inputs=output_display,
        outputs=formatted_display,
    )

if __name__ == "__main__":
    demo.queue(max_size=10).launch()
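# To run locally (assuming this file is saved as app.py):
#   HF_TOKEN=hf_xxx python app.py   # the token is optional but avoids rate limits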