# Hugging Face Spaces status banner captured with the source page (not code):
# Spaces: Running
# Running
import os
import re

import gradio as gr
from huggingface_hub import InferenceClient

# --- Hugging Face Token (Optional but Recommended) ---
# A token raises rate limits and allows private models. Provide it via the
# HF_TOKEN environment variable (or huggingface_hub.login before launch).

# --- Inference Client ---
try:
    # token=None (env var unset) preserves the previous anonymous behavior.
    client = InferenceClient(
        "HuggingFaceH4/zephyr-7b-beta",
        token=os.environ.get("HF_TOKEN"),
    )
except Exception as e:
    print(f"Error initializing InferenceClient: {e}")
    # Downstream code checks `client` for None and surfaces the failure in the UI.
    client = None
# --- Parsing Function ---
def parse_files(raw_response):
    """
    Extract (filename, code) pairs from the model's raw text output.

    Expected layout: each file starts with its filename (which must contain
    an extension) alone on a line, followed by that file's code, optionally
    wrapped in ``` fences. If nothing parses but the text still looks like
    code, fall back to a single file with a guessed default name.

    Returns a list of (filename, cleaned_code) tuples; empty list when the
    input is empty or contains nothing recognizable.
    """
    if not raw_response:
        return []

    # A filename line, then everything (non-greedy) up to the next filename
    # line or the end of the string.
    file_pattern = re.compile(
        r"^\s*([\w\-.\/\\]+\.\w+)\s*\n"
        r"(.*?)"
        r"(?=\n\s*[\w\-.\/\\]+\.\w+\s*\n|\Z)",
        re.DOTALL | re.MULTILINE
    )

    def strip_fences(text):
        # Remove leading/trailing ``` markers (with optional language tag).
        text = re.sub(r"^\s*```[a-zA-Z]*\n?", "", text, flags=re.MULTILINE)
        return re.sub(r"\n?```\s*$", "", text, flags=re.MULTILINE)

    parsed = [
        (name.strip(), strip_fences(body).strip())
        for name, body in file_pattern.findall(raw_response)
    ]
    if parsed:
        return parsed

    text = raw_response.strip()
    if not text:
        return parsed

    # No filenames found; only fall back when the text resembles code at all.
    if not any(marker in raw_response for marker in ['<', '>', '{', '}', ';', '(', ')']):
        return parsed

    print("Warning: No filenames found, defaulting to index.html")
    # Crude content sniffing: CSS-ish braces+colons, then JS keywords, else HTML.
    if "{" in raw_response and "}" in raw_response and ":" in raw_response:
        fallback_name = "style.css"
    elif "function" in raw_response or "const" in raw_response or "let" in raw_response:
        fallback_name = "script.js"
    else:
        fallback_name = "index.html"
    return [(fallback_name, text)]
# --- Code Generation Function ---
def generate_code(prompt, backend, system_message, max_tokens, temperature, top_p):
    """Ask the chat model for website files and parse the reply.

    Returns a tuple ``(error_message, files)``: ``error_message`` is None on
    success, and ``files`` is the list of (filename, code) pairs produced by
    parse_files().
    """
    if not client:
        # Client failed to initialize at module load; report instead of crashing.
        return "Error: Inference Client not available.", []

    full_sys_msg = f"""
You are a code generation AI. Given a prompt, generate the necessary files for a website using the {backend} backend.
Always include an index.html file.
Respond ONLY with filenames and the raw code for each file.
Each file must start with its filename on a new line. Example:
index.html
<!DOCTYPE html>
<html>
<head><title>My Site</title></head>
<body><h1>Hello</h1></body>
</html>
style.css
body {{
font-family: sans-serif;
}}
script.js
console.log('Hello World!');
Ensure the code is complete and functional for each file. NO commentary, NO explanations, NO markdown formatting like backticks (```).
Start generating the files now.
""".strip()

    # Append any user-supplied extra instructions to the base system prompt.
    system_content = full_sys_msg
    if system_message:
        system_content = full_sys_msg + "\n" + system_message

    messages = [
        {"role": "system", "content": system_content},
        {"role": "user", "content": prompt},
    ]

    try:
        response = client.chat_completion(
            messages=messages,
            max_tokens=int(max_tokens),  # sliders deliver floats; API wants int
            temperature=temperature,
            top_p=top_p,
            stream=False,  # full text is needed before parsing into files
        )
        raw_output = response.choices[0].message.content
        print("\n--- Raw AI Response ---")
        print(raw_output)
        print("----------------------\n")
        return None, parse_files(raw_output)
    except Exception as e:
        # Boundary handler: report the failure to the UI rather than crashing.
        print(f"Error during AI generation: {e}")
        return f"Error during AI generation: {e}", []
# --- Gradio Event Handler ---
def on_generate(prompt, backend, system_message, max_tokens, temperature, top_p):
    """Generate-button callback: run the model and build one tab per file.

    NOTE(review): this constructs gr.Tabs/gr.TabItem instances inside a
    callback and returns them as the output value — confirm the installed
    Gradio version supports replacing layout components this way.
    """
    error_msg, files = generate_code(prompt, backend, system_message, max_tokens, temperature, top_p)

    if error_msg:
        # Generation failed: show the error message in a single tab.
        return gr.Tabs(tabs=[
            gr.TabItem(label="Error", children=[gr.Textbox(value=error_msg, label="Generation Error")])
        ])

    if not files:
        # Nothing parseable came back: show a hint instead of file tabs.
        return gr.Tabs(tabs=[
            gr.TabItem(label="Output", children=[gr.Textbox(value="AI did not return recognizable file content. Check raw output in console.", label="Result")])
        ])

    # Filename suffix -> gr.Code syntax-highlight language (first match wins).
    suffix_langs = [
        ((".html", ".htm"), "html"),
        ((".css",), "css"),
        ((".js",), "javascript"),
        ((".py",), "python"),
        ((".json",), "json"),
        ((".md",), "markdown"),
        ((".sh", ".bash"), "bash"),
    ]

    rendered = []
    for raw_name, raw_content in files:
        name = raw_name.strip()
        content = raw_content.strip()
        if not name or not content:
            print(f"Skipping file with empty name or content: Name='{name}'")
            continue

        lang = "text"  # default when no suffix matches
        for suffixes, candidate in suffix_langs:
            if name.endswith(suffixes):
                lang = candidate
                break

        rendered.append(gr.TabItem(
            label=name,
            elem_id=f"tab_{name.replace('.', '_')}",  # dots are invalid in elem ids
            children=[gr.Code(value=content, language=lang, label=name)],
        ))

    # Return a fresh gr.Tabs instance to replace the placeholder component.
    return gr.Tabs(tabs=rendered)
# --- Gradio UI Definition ---
with gr.Blocks() as demo:
    gr.Markdown("### Website Generator (Static / Flask / Node.js)")
    gr.Markdown("Describe the website you want to create. The AI will generate the necessary files.")

    with gr.Row():
        prompt_input = gr.Textbox(
            label="Describe your website",
            placeholder="E.g., a simple portfolio site with a contact form",
            scale=3,
        )
        backend_choice = gr.Dropdown(
            ["Static", "Flask", "Node.js"],
            value="Static",
            label="Backend Technology",
            scale=1,
        )

    with gr.Accordion("Advanced Options", open=False):
        system_message_input = gr.Textbox(
            label="Extra instructions for the AI (System Message)",
            placeholder="Optional: e.g., 'Use Bootstrap 5', 'Prefer functional components in React'",
            value="",
        )
        max_tokens_slider = gr.Slider(minimum=256, maximum=4096, value=1536, step=64, label="Max Tokens (Length)")
        temperature_slider = gr.Slider(minimum=0.1, maximum=1.5, value=0.7, step=0.1, label="Temperature (Creativity)")
        top_p_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (Sampling)")

    generate_button = gr.Button("✨ Generate Code ✨", variant="primary")

    gr.Markdown("#### Generated Files")
    # Placeholder: on_generate returns a new gr.Tabs instance to replace it.
    out_tabs = gr.Tabs(elem_id="output_tabs")

    generate_button.click(
        on_generate,
        inputs=[prompt_input, backend_choice, system_message_input, max_tokens_slider, temperature_slider, top_p_slider],
        outputs=[out_tabs],
        show_progress="full",  # show a progress indicator while generating
    )

if __name__ == "__main__":
    # debug=True surfaces detailed errors in the console during development.
    demo.launch(debug=True)