import re

import gradio as gr
from huggingface_hub import InferenceClient

try:
    client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
    client.timeout = 120  # generous timeout for long generations
except Exception as e:
    print(f"Error initializing InferenceClient: {e}")
    client = None  # the UI surfaces a friendly error instead of crashing
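
# Note: with no explicit token, InferenceClient falls back to any locally
# saved Hugging Face credentials (e.g. from `huggingface-cli login` or the
# HF_TOKEN environment variable); unauthenticated calls may be rate limited.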

def parse_files(raw_response):
    """Split the raw model output into a list of (filename, content) pairs."""
    if not raw_response:
        return []
    # A file is a filename-looking line followed by everything up to the
    # next filename-looking line (or the end of the response).
    pattern = re.compile(
        r"^\s*([\w\-.\/\\]+\.\w+)\s*\n"           # filename on its own line
        r"(.*?)"                                  # file body (non-greedy)
        r"(?=\n\s*[\w\-.\/\\]+\.\w+\s*\n|\Z)",    # next filename or end of input
        re.DOTALL | re.MULTILINE,
    )
    files = pattern.findall(raw_response)
    cleaned_files = []
    for name, content in files:
        # Strip any markdown code fences the model emitted despite instructions.
        content_cleaned = re.sub(r"^\s*```[a-zA-Z]*\n?", "", content, flags=re.MULTILINE)
        content_cleaned = re.sub(r"\n?```\s*$", "", content_cleaned, flags=re.MULTILINE)
        cleaned_files.append((name.strip(), content_cleaned.strip()))
    # Fallback: no filenames were found, but the response still looks like code.
    if not cleaned_files and raw_response.strip():
        if any(c in raw_response for c in ['<', '>', '{', '}', ';', '(', ')']):
            lang = "html"
            # Check JS keywords (with word boundaries) before the brace/colon
            # CSS heuristic, since JavaScript usually contains those as well.
            if re.search(r"\b(?:function|const|let)\b", raw_response):
                lang = "javascript"
            elif "{" in raw_response and "}" in raw_response and ":" in raw_response:
                lang = "css"
            default_filename = "index.html"
            if lang == "css":
                default_filename = "style.css"
            elif lang == "javascript":
                default_filename = "script.js"
            cleaned_files.append((default_filename, raw_response.strip()))
    return cleaned_files
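
# Illustrative sketch of the expected behaviour (comment only, not executed):
#
#   parse_files("index.html\n<h1>Hi</h1>\nstyle.css\nbody {}")
#   -> [("index.html", "<h1>Hi</h1>"), ("style.css", "body {}")]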

def stream_and_parse_code(prompt, backend, system_message, max_tokens, temperature, top_p):
    # live_output and final_tabs are defined in the Blocks UI below; yielding
    # dicts keyed by components updates only those outputs on each step.
    if not client:
        error_msg = "Error: Inference Client not available. Check API token or model name."
        yield {
            live_output: gr.update(value=error_msg),
            final_tabs: gr.Tabs(tabs=[gr.TabItem(label="Error", children=[gr.Textbox(value=error_msg)])]),
        }
        return
full_sys_msg = f"""
You are a code generation AI. Given a prompt, generate the necessary files for a website using the {backend} backend.
Always include an index.html file.
Respond ONLY with filenames and the raw code for each file.
Each file must start with its filename on a new line. Example:
index.html
<!DOCTYPE html>
<html></html>
style.css
body {{}}
script.js
console.log("Hello");
Ensure the code is complete. NO commentary, NO explanations, NO markdown formatting like backticks (```).
Start generating the files now.
""".strip()

    if system_message:
        full_sys_msg += "\n\n" + system_message

    messages = [
        {"role": "system", "content": full_sys_msg},
        {"role": "user", "content": prompt},
    ]

    full_raw_response = ""
    error_occurred = False
    error_message = ""

    yield {
        live_output: gr.update(value="Generating stream..."),
        final_tabs: gr.Tabs(tabs=[gr.TabItem(label="Generating...")]),
    }

    try:
        stream = client.chat_completion(
            messages,
            max_tokens=int(max_tokens),
            stream=True,
            temperature=temperature,
            top_p=top_p,
        )
        for chunk in stream:
            # Guard against keep-alive chunks that carry no choices.
            if not chunk.choices:
                continue
            token = chunk.choices[0].delta.content
            if token:
                full_raw_response += token
                yield {live_output: gr.update(value=full_raw_response)}
    except Exception as e:
        error_message = f"Error during AI generation: {e}\n\nPartial Response (if any):\n{full_raw_response}"
        error_occurred = True
        yield {
            live_output: gr.update(value=error_message),
            final_tabs: gr.Tabs(tabs=[gr.TabItem(label="Error")]),
        }

    if error_occurred:
        final_tabs_update = gr.Tabs(tabs=[
            gr.TabItem(label="Error", children=[gr.Textbox(value=error_message, lines=10)])
        ])
    else:
        files = parse_files(full_raw_response)
        if not files:
            no_files_msg = (
                "AI finished, but did not return recognizable file content "
                "or the response was empty. See raw output above."
            )
            final_tabs_update = gr.Tabs(tabs=[
                gr.TabItem(label="Output", children=[gr.Textbox(value=no_files_msg)])
            ])
            yield {
                live_output: gr.update(value=full_raw_response + "\n\n" + no_files_msg),
                final_tabs: final_tabs_update,
            }
            return

        tabs_content = []
        for name, content in files:
            name = name.strip()
            content = content.strip()
            if not name or not content:
                continue
            # Pick a syntax-highlighting language from the file extension.
            lang = "plaintext"
            if name.endswith((".html", ".htm")):
                lang = "html"
            elif name.endswith(".css"):
                lang = "css"
            elif name.endswith(".js"):
                lang = "javascript"
            elif name.endswith(".py"):
                lang = "python"
            elif name.endswith(".json"):
                lang = "json"
            elif name.endswith(".md"):
                lang = "markdown"
            elif name.endswith((".sh", ".bash")):
                lang = "bash"
            elif name.endswith((".xml", ".xaml", ".svg")):
                lang = "xml"
            elif name.endswith((".yaml", ".yml")):
                lang = "yaml"
            # Element ids must be unique and HTML-safe.
            elem_id = f"tab_{re.sub(r'[^a-zA-Z0-9_-]', '_', name)}"
            tab_item = gr.TabItem(label=name, elem_id=elem_id, children=[
                gr.Code(value=content, language=lang, label=name, interactive=False)
            ])
            tabs_content.append(tab_item)

        final_tabs_update = gr.Tabs(tabs=tabs_content) if tabs_content else gr.Tabs(tabs=[
            gr.TabItem(label="Output", children=[gr.Textbox(value="No valid files generated after filtering.")])
        ])

    yield {
        live_output: gr.update(value=full_raw_response if not error_occurred else error_message),
        final_tabs: final_tabs_update,
    }

with gr.Blocks(css=".gradio-container { max-width: 95% !important; }") as demo:
    gr.Markdown("## WebGen AI: One Prompt → Full Website Generator")
    gr.Markdown("Generates website code based on your description. Raw output streams live; final files appear in tabs below.")
    with gr.Row():
        with gr.Column(scale=2):
            prompt = gr.Textbox(label="Describe your website", placeholder="E.g., a simple landing page...", lines=3)
            backend = gr.Dropdown(["Static", "Flask", "Node.js"], value="Static", label="Backend Technology")
            with gr.Accordion("Advanced Options", open=False):
                system_message = gr.Textbox(label="Extra instructions for the AI (System Message)", placeholder="Optional", value="", lines=2)
                max_tokens = gr.Slider(minimum=256, maximum=4096, value=2048, step=64, label="Max Tokens (Output Length)")
                temperature = gr.Slider(minimum=0.1, maximum=1.5, value=0.7, step=0.1, label="Temperature (Creativity)")
                top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (Sampling Focus)")
            generate_button = gr.Button("✨ Generate Code ✨", variant="primary")
        with gr.Column(scale=3):
            gr.Markdown("#### Live Raw Output Stream")
            live_output = gr.Textbox(label="Raw AI Stream", lines=20, interactive=False)
            gr.Markdown("---")
            gr.Markdown("#### Final Generated Files (Tabs)")
            final_tabs = gr.Tabs(elem_id="output_tabs")

    generate_button.click(
        stream_and_parse_code,
        inputs=[prompt, backend, system_message, max_tokens, temperature, top_p],
        outputs=[live_output, final_tabs],
        show_progress="hidden",
    )

if __name__ == "__main__":
    demo.launch(debug=True)