import gradio as gr
from huggingface_hub import InferenceClient
import os
import re
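
# Optional Hugging Face API token (set HF_TOKEN as a secret); without it the client falls back to unauthenticated access.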
API_TOKEN = os.getenv("HF_TOKEN", None)
MODEL = "Qwen/Qwen2.5-Coder-32B-Instruct"
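
# Initialize the Inference API client, passing the token only when one is available.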
try:
    client = InferenceClient(model=MODEL, token=API_TOKEN) if API_TOKEN else InferenceClient(model=MODEL)
except Exception as e:
    raise gr.Error(f"Failed to initialize model client. Error: {e}")
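
# Streaming generator: sends the prompt to the model, yields the response as it grows,
# and finally yields a cleaned-up version of the generated code.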
def generate_code(prompt: str, backend_choice: str, max_tokens: int, temperature: float, top_p: float):
    system_message = (
        "You are an AI assistant programmed to generate website codes only. "
        "You must not use triple backticks (```html, ```python, ```js, etc.) anywhere. "
        "If multiple files are needed, separate them clearly using:\n"
        "{index.html}\n.TAB{NAME=nextfile.ext}\n{anotherfile.ext}\n"
        "You must not add unnecessary comments; only critical modification instructions are allowed. "
        "Only generate code and nothing else — no explanations, no phrases like 'Here is the code'. "
        "The user will select backend type (Static / Flask / Node.js), and you must generate accordingly: "
        "- For Static: simple index.html.\n"
        "- For Flask or Node.js: include minimal backend scripts with index.html frontend.\n"
        "If the user requests non-website code, reply with:\n"
        "'hey there! am here to create websites for you unfortunately am programmed to not create codes! otherwise I would go on the naughty list :-('."
    )
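
    # Combine the user's description and chosen backend into a single user message.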
    user_prompt = f"USER_PROMPT = {prompt}\nUSER_BACKEND = {backend_choice}"
    messages = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": user_prompt}
    ]
    response_stream = ""
    full_response = ""
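
    # Request a streaming chat completion so partial output can be shown while the model generates.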
    try:
        stream = client.chat_completion(
            messages=messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        )
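        # Accumulate tokens as they arrive and yield the growing text for live UI updates.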
        for message in stream:
            token = message.choices[0].delta.content
            if isinstance(token, str):
                response_stream += token
                full_response += token
                yield response_stream
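
        # Strip any stray code fences or chat-role tags the model emitted despite the system prompt.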
        cleaned_response = full_response.strip()
        cleaned_response = re.sub(r"^\s*```[a-z]*\s*\n?", "", cleaned_response)
        cleaned_response = re.sub(r"\n?\s*```\s*$", "", cleaned_response)
        cleaned_response = re.sub(r"<\s*\|?\s*(user|assistant)\s*\|?\s*>", "", cleaned_response, flags=re.IGNORECASE)
        common_phrases = [
            "Here is the code:", "Okay, here is the code:", "Here's the code:",
            "Sure, here is the code you requested:", "Let me know if you need anything else."
        ]
        for phrase in common_phrases:
            if cleaned_response.lower().startswith(phrase.lower()):
                cleaned_response = cleaned_response[len(phrase):].lstrip()
        yield cleaned_response.strip()
    except Exception as e:
        yield f"## Error\n\nFailed to generate code.\n**Reason:** {e}"
with gr.Blocks(css=".gradio-container { max-width: 90% !important; }") as demo:
gr.Markdown("# ✨ Website Code Generator ✨")
gr.Markdown(
"Describe the website you want. The AI will generate a **single-file** `index.html` website.\n\n"
"**Rules:**\n"
"- Backend hint (Static / Flask / Node.js).\n"
"- Always fully SFW and minimal errors.\n"
"- Only generates websites. No other codes.\n"
"- Minimal necessary comments only."
)
with gr.Row():
with gr.Column(scale=2):
prompt_input = gr.Textbox(
label="Website Description",
placeholder="e.g., A simple landing page with a hero section and contact form.",
lines=6,
)
backend_radio = gr.Radio(
["Static", "Flask", "Node.js"],
label="Backend Context",
value="Static",
info="Hint only. Always generates only index.html."
)
generate_button = gr.Button("✨ Generate Website Code", variant="primary")
with gr.Column(scale=3):
code_output = gr.Code(
label="Generated Code",
language="html",
lines=30,
interactive=False,
)
    with gr.Accordion("Advanced Settings", open=False):
        max_tokens_slider = gr.Slider(
            minimum=512,
            maximum=4096,
            value=3072,
            step=256,
            label="Max New Tokens"
        )
        temperature_slider = gr.Slider(
            minimum=0.1, maximum=1.2, value=0.7, step=0.1, label="Temperature"
        )
        top_p_slider = gr.Slider(
            minimum=0.1, maximum=1.0,
            value=0.9,
            step=0.05,
            label="Top-P"
        )
    generate_button.click(
        fn=generate_code,
        inputs=[prompt_input, backend_radio, max_tokens_slider, temperature_slider, top_p_slider],
        outputs=code_output,
    )
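
# Cap the request queue at 10 pending jobs and launch the app.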
if __name__ == "__main__":
    demo.queue(max_size=10).launch()