# NOTE: The original capture of this file included Hugging Face Space page
# chrome (running-status lines, a commit-hash gutter, and a line-number
# gutter). That residue was not part of the program and has been removed.
import gradio as gr
from huggingface_hub import InferenceClient
import os
import re
# Optional Hugging Face API token from the environment; None means anonymous access.
API_TOKEN = os.getenv("HF_TOKEN", None)
# Code-generation model served via the HF Inference API.
MODEL = "Qwen/Qwen2.5-Coder-32B-Instruct"
try:
    # Use an authenticated client when a token is configured, anonymous otherwise.
    if API_TOKEN:
        client = InferenceClient(model=MODEL, token=API_TOKEN)
    else:
        client = InferenceClient(model=MODEL)
except Exception as e:
    # Surface initialization failures in the Gradio UI rather than a raw traceback.
    raise gr.Error(f"Failed to initialize model client. Error: {e}")
def generate_code(prompt: str, backend_choice: str, max_tokens: int, temperature: float, top_p: float):
    """Stream a website-code completion from the model and return the cleaned text.

    Returns the generated code on success, or an error string (starting with
    "## Error" or "Error:") that the caller can detect and display.
    """
    system_message = (
        "You are an AI assistant programmed to generate website codes only. "
        "You must not use triple backticks (```html, ```python, etc.). "
        "If multiple files are needed, separate them clearly using:\n"
        "TAB.NAME=(unknown)\n"
        "Only generate code. No explanations, no phrases like 'Here is the code'. "
        "If user asks non-website code, reply:\n"
        "'hey there! am here to create websites for you unfortunately am programmed to not create codes! otherwise I would go on the naughty list :-('."
    )
    chat = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": f"USER_PROMPT = {prompt}\nUSER_BACKEND = {backend_choice}"},
    ]
    buffer = ""
    try:
        stream = client.chat_completion(
            messages=chat,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        )
        # Accumulate streamed delta tokens; some chunks carry a None delta.
        for chunk in stream:
            piece = chunk.choices[0].delta.content
            if isinstance(piece, str):
                buffer += piece

        # Strip markdown code fences and chat-template control tokens the model may emit.
        cleaned = buffer.strip()
        cleaned = re.sub(r"^\s*```[a-z]*\s*\n?", "", cleaned)
        cleaned = re.sub(r"\n?\s*```\s*$", "", cleaned)
        cleaned = re.sub(r"<\s*\|?\s*(user|assistant|system|endoftext)\s*\|?\s*>", "", cleaned, flags=re.IGNORECASE)
        cleaned = cleaned.replace("<|im_end|>", "").replace("<|im_start|>", "").strip()

        # Peel off leading conversational boilerplate, one phrase at a time.
        boilerplate = [
            "Here is the code:", "Okay, here is the code:", "Here's the code:",
            "Sure, here is the code you requested:", "Let me know if you need anything else.",
            "Here is the website code you requested:", "Here are the files for your website:",
            "Okay, here are the files:"
        ]
        lowered = cleaned.lower()
        for phrase in boilerplate:
            if lowered.startswith(phrase.lower()):
                cleaned = cleaned[len(phrase):].lstrip()
                lowered = cleaned.lower()

        if not cleaned:
            return "Error: Empty response from model after cleaning."
        return cleaned
    except Exception as e:
        return f"## Error\n\nFailed to generate code.\n**Reason:** {e}"
def _guess_filename(content):
    """Guess a default filename from file content (checks in the original priority order)."""
    if "def " in content or "import " in content:
        return "app.py"
    if "function " in content or "const " in content or "let " in content:
        return "script.js"
    if "<!DOCTYPE html>" in content or "<html" in content:
        return "index.html"
    if "@app.route" in content:
        return "app.py"
    if "require(" in content or "module.exports" in content:
        return "server.js"
    if "<?php" in content:
        return "index.php"
    if "package main" in content:
        return "main.go"
    return "index.html"


def split_files(full_code_text):
    """Split model output into a list of ``(filename, content)`` pairs.

    Files are delimited by markers of the form ``TAB.NAME={filename}``.
    Output containing no marker becomes a single file whose name is guessed
    from its content. Content appearing before the first marker is kept as
    ``file_0.txt``.

    Bug fix: the old implementation dropped content before the first marker —
    its ``elif i == 1`` fallback was unreachable because ``re.split`` with a
    capturing group always yields markers that match the filename regex.
    """
    marker_re = re.compile(r'TAB\.NAME=\{(.+?)\}')
    splits = re.split(r'(TAB\.NAME=\{.+?\})', full_code_text)
    initial_content = splits[0].strip()
    file_blocks = []

    # No markers at all: treat the whole output as one file.
    if len(splits) == 1:
        if initial_content:
            file_blocks.append((_guess_filename(initial_content), initial_content))
        return file_blocks

    # Preserve any content emitted before the first explicit marker.
    if initial_content:
        file_blocks.append(("file_0.txt", initial_content))

    # Markers occupy the odd indices; each marker's content follows it.
    for i in range(1, len(splits), 2):
        match = marker_re.fullmatch(splits[i])
        content = splits[i + 1].strip() if (i + 1) < len(splits) else ""
        if match and content:
            file_blocks.append((match.group(1).strip(), content))
    return file_blocks
with gr.Blocks(css=".gradio-container { max-width: 90% !important; }") as demo:
    gr.Markdown("# ✨ Website Code Generator ✨")
    gr.Markdown(
        "Describe the website you want. The AI will generate website code.\n\n"
        "**Rules:**\n"
        "- Provide a backend hint (Static / Flask / Node.js).\n"
        "- Generated code should be functional and SFW.\n"
        "- Only generates website-related code.\n"
        "- If multiple files are generated, they will be separated below using the format `TAB.NAME=(unknown)`."
    )

    with gr.Row():
        # Left column: prompt, backend hint, and the trigger button.
        with gr.Column(scale=2):
            prompt_input = gr.Textbox(
                label="Website Description",
                placeholder="e.g., A simple Flask app with one route that displays 'Hello World'.",
                lines=6,
            )
            backend_radio = gr.Radio(
                ["Static", "Flask", "Node.js"],
                label="Backend Context / Hint",
                value="Static",
            )
            generate_button = gr.Button("✨ Generate Website Code", variant="primary")

        # Right column: the generated-code viewer.
        with gr.Column(scale=3):
            main_output_label = gr.Markdown("### Full Generated Code / Main File")
            main_output_code = gr.Code(
                label="Generated Code",  # less prominent than the Markdown heading above
                language="html",
                lines=15,
                interactive=False,
            )
            # Initially-hidden placeholder column for extra generated files.
            # NOTE(review): nesting here assumed from context — the scraped
            # source lost its indentation; confirm against the live Space.
            extra_outputs_column = gr.Column(visible=False)

    with gr.Accordion("Advanced Settings", open=False):
        max_tokens_slider = gr.Slider(
            minimum=512, maximum=8192, value=4096, step=256, label="Max New Tokens"
        )
        temperature_slider = gr.Slider(
            minimum=0.1, maximum=1.2, value=0.7, step=0.1, label="Temperature"
        )
        top_p_slider = gr.Slider(
            minimum=0.1, maximum=1.0, value=0.9, step=0.05, label="Top-P"
        )
def get_language(filename):
    """Map a filename's extension to a ``gr.Code`` language identifier ("text" if unknown)."""
    suffix_table = (
        ((".html", ".htm"), "html"),
        ((".css",), "css"),
        ((".js",), "javascript"),
        ((".py",), "python"),
        ((".json",), "json"),
        ((".sql",), "sql"),
        ((".php",), "php"),
        ((".go",), "go"),
        ((".java",), "java"),
        ((".rb",), "ruby"),
        ((".sh",), "shell"),
        ((".yml", ".yaml"), "yaml"),
        ((".md",), "markdown"),
    )
    for suffixes, language in suffix_table:
        if filename.endswith(suffixes):
            return language
    return "text"
def generate_and_display(prompt, backend, max_tokens, temperature, top_p):
    """Generate code, split multi-file output, and return UI updates.

    Returns a dict keyed by the three output components:
    ``main_output_label``, ``main_output_code``, ``extra_outputs_column``.

    Fixes over the previous version:
    - Uses ``gr.update(...)`` instead of the deprecated per-component
      ``Component.update(...)`` API (removed in Gradio 4).
    - Components instantiated inside an event handler are never mounted
      into the page, and Column updates cannot replace children, so the old
      dynamic-components approach silently dropped every file after the
      first. All files are now rendered into the single code pane instead.
    """
    full_code = generate_code(prompt, backend, max_tokens, temperature, top_p)

    # Error paths: show the error message in the code pane.
    if full_code.startswith("## Error"):
        return {
            main_output_label: gr.update(value="### Error Occurred"),
            main_output_code: gr.update(value=full_code, language="markdown"),
            extra_outputs_column: gr.update(visible=False),
        }
    if full_code.startswith("Error: Empty response"):
        return {
            main_output_label: gr.update(value="### Error Occurred"),
            main_output_code: gr.update(value=full_code, language="text"),
            extra_outputs_column: gr.update(visible=False),
        }

    files = split_files(full_code)
    if not files:
        # Split failed but we got output: show everything as plain text.
        return {
            main_output_label: gr.update(value="### Full Generated Output (No Files Detected)"),
            main_output_code: gr.update(value=full_code, language="text"),
            extra_outputs_column: gr.update(visible=False),
        }

    first_name, first_content = files[0]
    if len(files) == 1:
        label = f"### File: {first_name}"
        body = first_content
    else:
        # Concatenate all files into one pane, each under a banner line,
        # so no generated file is lost.
        label = "### Files: " + ", ".join(name for name, _ in files)
        body = "\n\n".join(f"===== {name} =====\n{content}" for name, content in files)

    return {
        main_output_label: gr.update(value=label),
        main_output_code: gr.update(value=body, language=get_language(first_name)),
        extra_outputs_column: gr.update(visible=False),
    }
# Re-enter the Blocks context so the event listener is registered on `demo`.
with demo:
    generate_button.click(
        fn=generate_and_display,
        inputs=[prompt_input, backend_radio, max_tokens_slider, temperature_slider, top_p_slider],
        outputs=[main_output_label, main_output_code, extra_outputs_column],
    )

if __name__ == "__main__":
    demo.queue().launch()