MINEOGO committed on
Commit 4917053 · verified · 1 Parent(s): 71765cf

Update app.py

Files changed (1):
  app.py +142 -159
app.py CHANGED
@@ -1,180 +1,163 @@
  import gradio as gr
  from huggingface_hub import InferenceClient
- import re

  try:
-     client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-     client.timeout = 120
  except Exception as e:
-     print(f"Error initializing InferenceClient: {e}")
-     client = None
-
- def parse_files(raw_response):
-     if not raw_response:
-         return []
-     pattern = re.compile(
-         r"^\s*([\w\-.\/\\]+\.\w+)\s*\n"
-         r"(.*?)"
-         r"(?=\n\s*[\w\-.\/\\]+\.\w+\s*\n|\Z)",
-         re.DOTALL | re.MULTILINE
-     )
-     files = pattern.findall(raw_response)
-     cleaned_files = []
-     for name, content in files:
-         content_cleaned = re.sub(r"^\s*```[a-zA-Z]*\n?", "", content, flags=re.MULTILINE)
-         content_cleaned = re.sub(r"\n?```\s*$", "", content_cleaned, flags=re.MULTILINE)
-         cleaned_files.append((name.strip(), content_cleaned.strip()))
-     if not cleaned_files and raw_response.strip():
-         if any(c in raw_response for c in ['<','>','{','}',';','(',')']):
-             lang = "html"
-             if "{" in raw_response and "}" in raw_response and ":" in raw_response:
-                 lang = "css"
-             elif "function" in raw_response or "const" in raw_response or "let" in raw_response:
-                 lang = "javascript"
-             default_filename = "index.html"
-             if lang == "css":
-                 default_filename = "style.css"
-             elif lang == "javascript":
-                 default_filename = "script.js"
-             cleaned_files.append((default_filename, raw_response.strip()))
-     return cleaned_files
-
- def stream_and_parse_code(prompt, backend, system_message, max_tokens, temperature, top_p):
-     if not client:
-         error_msg = "Error: Inference Client not available. Check API token or model name."
-         yield {
-             live_output: gr.update(value=error_msg),
-             final_tabs: gr.Tabs(tabs=[gr.TabItem(label="Error", children=[gr.Textbox(value=error_msg)])])
-         }
-         return
-     full_sys_msg = f"""
- You are a code generation AI. Given a prompt, generate the necessary files for a website using the {backend} backend.
- Always include an index.html file.
- Respond ONLY with filenames and the raw code for each file.
- Each file must start with its filename on a new line. Example:
-
- index.html
- <!DOCTYPE html>
- <html></html>
-
- style.css
- body {{}}
-
- script.js
- console.log("Hello");
-
- Ensure the code is complete. NO commentary, NO explanations, NO markdown formatting like backticks (```).
- Start generating the files now.
- """.strip()
-     if system_message:
-         full_sys_msg += "\n\n" + system_message
      messages = [
-         {"role": "system", "content": full_sys_msg},
          {"role": "user", "content": prompt}
      ]
-     full_raw_response = ""
-     error_occurred = False
-     error_message = ""
-     yield {
-         live_output: gr.update(value="Generating stream..."),
-         final_tabs: gr.Tabs(tabs=[gr.TabItem(label="Generating...")])
-     }
      try:
-         stream = client.chat_completion(
-             messages,
-             max_tokens=int(max_tokens),
              stream=True,
              temperature=temperature,
-             top_p=top_p
-         )
-         for chunk in stream:
-             token = chunk.choices[0].delta.content
-             if token:
-                 full_raw_response += token
-                 yield {
-                     live_output: gr.update(value=full_raw_response)
-                 }
      except Exception as e:
-         error_message = f"Error during AI generation: {e}\n\nPartial Response (if any):\n{full_raw_response}"
-         error_occurred = True
-         yield {
-             live_output: gr.update(value=error_message),
-             final_tabs: gr.Tabs(tabs=[gr.TabItem(label="Error")])
-         }
-     if error_occurred:
-         final_tabs_update = gr.Tabs(tabs=[
-             gr.TabItem(label="Error", children=[gr.Textbox(value=error_message, lines=10)])
-         ])
-     else:
-         files = parse_files(full_raw_response)
-         if not files:
-             no_files_msg = "AI finished, but did not return recognizable file content or the response was empty. See raw output above."
-             final_tabs_update = gr.Tabs(tabs=[
-                 gr.TabItem(label="Output", children=[gr.Textbox(value=no_files_msg)])
-             ])
-             yield {
-                 live_output: gr.update(value=full_raw_response + "\n\n" + no_files_msg),
-                 final_tabs: final_tabs_update
-             }
-             return
-         tabs_content = []
-         for name, content in files:
-             name = name.strip()
-             content = content.strip()
-             if not name or not content:
-                 continue
-             lang = "plaintext"
-             if name.endswith((".html", ".htm")):
-                 lang = "html"
-             elif name.endswith(".css"):
-                 lang = "css"
-             elif name.endswith(".js"):
-                 lang = "javascript"
-             elif name.endswith(".py"):
-                 lang = "python"
-             elif name.endswith(".json"):
-                 lang = "json"
-             elif name.endswith(".md"):
-                 lang = "markdown"
-             elif name.endswith((".sh", ".bash")):
-                 lang = "bash"
-             elif name.endswith((".xml", ".xaml", ".svg")):
-                 lang = "xml"
-             elif name.endswith((".yaml", ".yml")):
-                 lang = "yaml"
-             elem_id = f"tab_{re.sub(r'[^a-zA-Z0-9_-]', '_', name)}"
-             tab_item = gr.TabItem(label=name, elem_id=elem_id, children=[
-                 gr.Code(value=content, language=lang, label=name, interactive=False)
-             ])
-             tabs_content.append(tab_item)
-         final_tabs_update = gr.Tabs(tabs=tabs_content) if tabs_content else gr.Tabs(tabs=[
-             gr.TabItem(label="Output", children=[gr.Textbox(value="No valid files generated after filtering.")])
-         ])
-     yield {
-         live_output: gr.update(value=full_raw_response if not error_occurred else error_message),
-         final_tabs: final_tabs_update
-     }

- with gr.Blocks(css=".gradio-container { max-width: 95% !important; }") as demo:
-     gr.Markdown("## WebGen AI — One Prompt → Full Website Generator")
-     gr.Markdown("Generates website code based on your description. Raw output streams live, final files appear in tabs below.")
      with gr.Row():
          with gr.Column(scale=2):
-             prompt = gr.Textbox(label="Describe your website", placeholder="E.g., a simple landing page...", lines=3)
-             backend = gr.Dropdown(["Static", "Flask", "Node.js"], value="Static", label="Backend Technology")
-             with gr.Accordion("Advanced Options", open=False):
-                 system_message = gr.Textbox(label="Extra instructions for the AI (System Message)", placeholder="Optional", value="", lines=2)
-                 max_tokens = gr.Slider(minimum=256, maximum=4096, value=2048, step=64, label="Max Tokens (Output Length)")
-                 temperature = gr.Slider(minimum=0.1, maximum=1.5, value=0.7, step=0.1, label="Temperature (Creativity)")
-                 top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (Sampling Focus)")
-             generate_button = gr.Button(" Generate Code ✨", variant="primary")
          with gr.Column(scale=3):
-             gr.Markdown("#### Live Raw Output Stream")
-             live_output = gr.Textbox(label="Raw AI Stream", lines=20, interactive=False)
-             gr.Markdown("---")
-             gr.Markdown("#### Final Generated Files (Tabs)")
-             final_tabs = gr.Tabs(elem_id="output_tabs")
-     generate_button.click(stream_and_parse_code, inputs=[prompt, backend, system_message, max_tokens, temperature, top_p], outputs=[live_output, final_tabs], show_progress="hidden")

  if __name__ == "__main__":
-     demo.launch(debug=True)
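For reference, the removed parse_files() expected the model to answer in a bare "filename on its own line, then raw code" layout. A minimal sketch of that contract (the sample response below is hypothetical, not output captured from the app):

# Hypothetical response in the old "filename, then code" format:
sample = """index.html
<!DOCTYPE html>
<html></html>

style.css
body { margin: 0; }"""

# parse_files(sample) would return:
# [('index.html', '<!DOCTYPE html>\n<html></html>'), ('style.css', 'body { margin: 0; }')]

The added version of app.py follows.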
  import gradio as gr
  from huggingface_hub import InferenceClient
+ import os

+ # --- Configuration ---
+ # Use environment variable for token, fallback to default if not set
+ # For Spaces, set the HF_TOKEN secret
+ API_TOKEN = os.getenv("HF_TOKEN", None)
+ MODEL = "HuggingFaceH4/zephyr-7b-beta"
+
+ # --- Initialize Inference Client ---
  try:
+     if API_TOKEN:
+         print("Using HF Token.")
+         client = InferenceClient(model=MODEL, token=API_TOKEN)
+     else:
+         print("HF Token not found. Running without token (may lead to rate limits).")
+         client = InferenceClient(model=MODEL)
  except Exception as e:
+     print(f"Error initializing Inference Client: {e}")
+     # Optionally, raise the error or handle it gracefully
+     raise gr.Error(f"Failed to initialize the AI model client. Please check model name and token. Error: {e}")

+ # --- Core Code Generation Function ---
+ def generate_code(
+     prompt: str,
+     backend_choice: str,
+     file_structure: str,
+     max_tokens: int,
+     temperature: float,
+     top_p: float,
+ ):
+     """
+     Generates website code based on user prompt and choices.
+     Yields the code token by token for live updates.
+     """
+     print(f"Received prompt: {prompt}")
+     print(f"Backend choice: {backend_choice}")
+     print(f"File structure: {file_structure}")
+     print(f"Max tokens: {max_tokens}, Temp: {temperature}, Top-p: {top_p}")

+     # --- System Message (Internal) ---
+     # This guides the AI's behavior. It's not user-editable in the UI.
+     system_message = (
+         "You are an expert frontend web developer AI. Your task is to generate HTML, CSS, and JavaScript code "
+         "for a website based on the user's description. \n"
+         "**Constraints:**\n"
+         "- ALWAYS generate a complete `index.html` file.\n"
+         "- ONLY output the raw code for the requested files.\n"
+         "- Do NOT include any explanations, comments about the code, or introductory sentences.\n"
+         "- Start the response *directly* with the code.\n"
+         "- If 'Multiple Files' is selected, structure the output clearly:\n"
+         "  - Start the HTML section with a clear marker like `<!-- index.html -->`\n"
+         "  - Start the CSS section with a clear marker like `/* style.css */`\n"
+         "  - Start the JavaScript section (if needed) with `// script.js`\n"
+         "  - Ensure each file's code follows its respective marker.\n"
+         "- If 'Single File' is selected, embed CSS within `<style>` tags in the `<head>` and JavaScript within `<script>` tags "
+         "at the end of the `<body>` of the `index.html` file.\n"
+         f"- Consider the user's choice of backend context ('{backend_choice}') when generating placeholders or structure, but focus on the frontend code.\n"
+         f"- Generate based on the file structure preference: '{file_structure}'."
+     )

+     # --- Construct the messages for the API ---
      messages = [
+         {"role": "system", "content": system_message},
          {"role": "user", "content": prompt}
      ]

+     # --- Stream the response from the API ---
+     response_stream = ""
      try:
+         print("Sending request to API...")
+         for message in client.chat_completion(
+             messages=messages,
+             max_tokens=max_tokens,
              stream=True,
              temperature=temperature,
+             top_p=top_p,
+             stop=["<|endoftext|>", "<|im_end|>", "</s>"]  # Add common stop tokens (chat_completion's keyword is `stop`)
+         ):
+             token = message.choices[0].delta.content
+             # Basic check to ensure token is a string (it should be)
+             if isinstance(token, str):
+                 response_stream += token
+                 yield response_stream  # Yield the cumulative response for live update
+             # Add a small safety break if generation seems stuck or too long (optional)
+             # if len(response_stream) > max_tokens * 5:  # Heuristic limit
+             #     print("Warning: Exceeded heuristic length limit, stopping generation.")
+             #     break

+         print("API stream finished.")

      except Exception as e:
+         print(f"Error during API call: {e}")
+         yield f"An error occurred while generating the code: {e}"

+ # --- Build Gradio Interface using Blocks ---
+ with gr.Blocks() as demo:
+     gr.Markdown("# Website Code Generator 🚀")
+     gr.Markdown(
+         "Describe the website you want, choose your options, and the AI will generate the frontend code (HTML, CSS, JS). "
+         "The code will appear live in the text editor below."
+     )

      with gr.Row():
          with gr.Column(scale=2):
+             prompt_input = gr.Textbox(
+                 label="Website Description",
+                 placeholder="e.g., A simple portfolio website with a header, an 'About Me' section, a project grid, and a contact form.",
+                 lines=4,
+             )
+             backend_radio = gr.Radio(
+                 ["Static", "Flask", "Node.js"],
+                 label="Backend Context",
+                 value="Static",
+                 info="How should the AI structure the frontend (e.g., template placeholders for Flask)?",
+             )
+             file_structure_radio = gr.Radio(
+                 ["Single File", "Multiple Files"],
+                 label="Output File Structure",
+                 value="Multiple Files",
+                 info="Generate everything in index.html or separate CSS/JS files?",
+             )
+             generate_button = gr.Button("Generate Website Code", variant="primary")

          with gr.Column(scale=3):
+             code_output = gr.Code(
+                 label="Generated Code",
+                 language="html",  # Start with html, might contain css/js too
+                 lines=25,
+                 interactive=False,  # Read-only display
+             )

+     with gr.Accordion("Advanced Generation Settings", open=False):
+         max_tokens_slider = gr.Slider(
+             minimum=256, maximum=4096, value=1500, step=64, label="Max New Tokens"
+         )
+         temperature_slider = gr.Slider(
+             minimum=0.1, maximum=1.5, value=0.7, step=0.1, label="Temperature"
+         )
+         top_p_slider = gr.Slider(
+             minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P (Nucleus Sampling)"
+         )

+     # --- Connect Inputs/Outputs to the Function ---
+     generate_button.click(
+         fn=generate_code,
+         inputs=[
+             prompt_input,
+             backend_radio,
+             file_structure_radio,
+             max_tokens_slider,
+             temperature_slider,
+             top_p_slider,
+         ],
+         outputs=code_output,
+         api_name="generate_website_code"  # Optional: for API usage
+     )

+ # --- Launch the App ---
  if __name__ == "__main__":
+     demo.queue()  # Enable queuing for handling multiple users
+     demo.launch(debug=True)  # Set debug=False for production/Spaces
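The new system message replaces the old filename-header contract with comment markers (`<!-- index.html -->`, `/* style.css */`, `// script.js`) but still displays the output as one blob in the gr.Code viewer. A minimal sketch, assuming the model reproduces those markers verbatim, of how the blob could be split back into files; split_generated_code is a hypothetical helper, not part of this commit:

def split_generated_code(blob: str) -> dict:
    # Markers the new system prompt asks the model to emit (assumption:
    # the model places each one at the start of its section).
    markers = {
        "index.html": "<!-- index.html -->",
        "style.css": "/* style.css */",
        "script.js": "// script.js",
    }
    # Locate each marker, then slice the blob between consecutive markers.
    found = sorted(
        (blob.find(m), name, m) for name, m in markers.items() if m in blob
    )
    files = {}
    for i, (start, name, marker) in enumerate(found):
        end = found[i + 1][0] if i + 1 < len(found) else len(blob)
        files[name] = blob[start + len(marker):end].strip()
    return files

# Example (hypothetical model output):
# blob = "<!-- index.html -->\n<!DOCTYPE html>...\n/* style.css */\nbody {}"
# split_generated_code(blob) == {"index.html": "<!DOCTYPE html>...", "style.css": "body {}"}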