MINEOGO committed on
Commit
cd13883
·
verified ·
1 Parent(s): f4eb547

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +172 -45
app.py CHANGED
@@ -1,85 +1,212 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  import re
4
-
5
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
6
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  def parse_files(raw_response):
8
- # Parse files and code from raw AI output
9
- pattern = re.compile(r"(?:(?:\n|^)([\w\-.\/\\]+)\n)(.+?)(?=\n[\w\-.\/\\]+\n|\Z)", re.DOTALL)
10
- return pattern.findall(raw_response)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  def generate_code(prompt, backend, system_message, max_tokens, temperature, top_p):
 
 
 
 
 
13
  full_sys_msg = f"""
14
- You are a code-only AI. Given a prompt, generate a full website using the {backend} backend.
15
  Always include an index.html file.
 
 
16
 
17
- Respond ONLY with filenames and raw code. NO commentary, NO backticks, NO markdown.
18
-
19
- Example:
20
  index.html
21
- <html>...</html>
 
 
 
 
22
 
23
  style.css
24
- body {{ ... }}
 
 
 
 
 
25
 
26
- Start now.
 
27
  """.strip()
28
 
29
  messages = [
30
- {"role": "system", "content": full_sys_msg + "\n" + system_message},
31
  {"role": "user", "content": prompt}
32
  ]
33
 
34
- response = client.chat_completion(
35
- messages=messages,
36
- max_tokens=max_tokens,
37
- temperature=temperature,
38
- top_p=top_p,
39
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
 
41
- raw = response.choices[0].message.content
42
- files = parse_files(raw)
 
 
43
 
44
- return files
 
 
 
45
 
46
- def on_generate(prompt, backend, system_message, max_tokens, temperature, top_p):
47
- files = generate_code(prompt, backend, system_message, max_tokens, temperature, top_p)
48
  tabs = []
49
  for name, content in files:
50
- lang = "html" if name.endswith(".html") else \
51
- "css" if name.endswith(".css") else \
52
- "javascript" if name.endswith(".js") else \
53
- "python" if name.endswith(".py") else "text"
54
- tabs.append(gr.TabItem(label=name.strip(), elem_id=name.strip(), children=[
55
- gr.Code(value=content.strip(), language=lang)
56
- ]))
57
- return tabs # Return tabs directly
58
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59
  with gr.Blocks() as demo:
60
  gr.Markdown("### Website Generator (Static / Flask / Node.js)")
 
61
 
62
  with gr.Row():
63
- prompt = gr.Textbox(label="Describe your website", placeholder="E.g. a portfolio site with dark mode")
64
- backend = gr.Dropdown(["Static", "Flask", "Node.js"], value="Static", label="Backend")
65
 
66
  with gr.Accordion("Advanced Options", open=False):
67
- system_message = gr.Textbox(label="Extra instructions for the AI", value="")
68
- max_tokens = gr.Slider(256, 2048, value=1024, label="Max Tokens")
69
- temperature = gr.Slider(0.1, 2.0, value=0.7, label="Temperature")
70
- top_p = gr.Slider(0.1, 1.0, value=0.95, label="Top-p")
71
 
72
- out_tabs = gr.Tabs() # Define the Tabs component
73
 
74
- generate_button = gr.Button("Generate Code")
 
 
75
 
76
  # Button click action
77
  generate_button.click(
78
  on_generate,
79
  inputs=[prompt, backend, system_message, max_tokens, temperature, top_p],
80
- outputs=out_tabs, # Output goes directly to the Tabs component
81
- show_progress=True
82
  )
83
 
84
  if __name__ == "__main__":
85
- demo.launch()
 
import gradio as gr
from huggingface_hub import InferenceClient
import re
import os

# --- Inference Client ---
# An HF token is optional but recommended (private models, higher rate limits).
# Read it from the standard HF_TOKEN environment variable when present;
# token=None keeps the previous anonymous-access behavior.
try:
    client = InferenceClient(
        "HuggingFaceH4/zephyr-7b-beta",
        token=os.environ.get("HF_TOKEN"),
    )
except Exception as e:
    # Keep the app importable even if the client cannot be created;
    # downstream code checks `client` for None and reports the failure in the UI.
    print(f"Error initializing InferenceClient: {e}")
    client = None
21
# --- Parsing Function ---
def parse_files(raw_response):
    """Extract (filename, code) pairs from the model's raw text output.

    Expected layout: a bare ``name.ext`` line, followed by that file's code
    (optionally wrapped in ``` fences), repeated for each file. If no
    filename lines are found but the text still looks like code, the whole
    response is returned as a single file with a guessed default name.

    Returns a list of (filename, content) tuples, both stripped.
    """
    if not raw_response:
        return []

    # A file starts at a line holding only a dotted filename; its body runs
    # until the next such line or the end of the response.
    file_pattern = re.compile(
        r"^\s*([\w\-.\/\\]+\.\w+)\s*\n"       # filename line (must have an extension)
        r"(.*?)"                               # file body (non-greedy)
        r"(?=\n\s*[\w\-.\/\\]+\.\w+\s*\n|\Z)", # up to next filename or end
        re.DOTALL | re.MULTILINE,
    )

    def _strip_fences(text):
        # Drop an opening ```lang fence and a trailing ``` closer if the
        # model ignored the "no markdown" instruction.
        text = re.sub(r"^\s*```[a-zA-Z]*\n?", "", text, flags=re.MULTILINE)
        return re.sub(r"\n?```\s*$", "", text, flags=re.MULTILINE)

    parsed = [
        (fname.strip(), _strip_fences(body).strip())
        for fname, body in file_pattern.findall(raw_response)
    ]
    if parsed or not raw_response.strip():
        return parsed

    # Fallback: no filename lines at all, but there is non-blank content.
    # If it resembles code, wrap the whole response as one file whose name
    # is guessed from crude content heuristics (CSS checked before JS).
    if any(ch in raw_response for ch in ['<', '>', '{', '}', ';', '(', ')']):
        print("Warning: No filenames found, defaulting to index.html")
        if "{" in raw_response and "}" in raw_response and ":" in raw_response:
            fallback_name = "style.css"
        elif "function" in raw_response or "const" in raw_response or "let" in raw_response:
            fallback_name = "script.js"
        else:
            fallback_name = "index.html"
        parsed.append((fallback_name, raw_response.strip()))

    return parsed
76
+
77
# --- Code Generation Function ---
def generate_code(prompt, backend, system_message, max_tokens, temperature, top_p):
    """Generate website files for *prompt* via the shared InferenceClient.

    Returns a pair ``(error, files)``: ``error`` is None on success or a
    human-readable message on failure; ``files`` is the list produced by
    parse_files (empty on failure).
    """
    if not client:
        # Client failed to initialize at import time — surface a UI-friendly error.
        return "Error: Inference Client not available.", []

    full_sys_msg = f"""
You are a code generation AI. Given a prompt, generate the necessary files for a website using the {backend} backend.
Always include an index.html file.
Respond ONLY with filenames and the raw code for each file.
Each file must start with its filename on a new line. Example:

index.html
<!DOCTYPE html>
<html>
<head><title>My Site</title></head>
<body><h1>Hello</h1></body>
</html>

style.css
body {{
font-family: sans-serif;
}}

script.js
console.log('Hello World!');

Ensure the code is complete and functional for each file. NO commentary, NO explanations, NO markdown formatting like backticks (```).
Start generating the files now.
""".strip()

    # Append the user's extra instructions to the system prompt when provided.
    system_content = full_sys_msg
    if system_message:
        system_content = system_content + "\n" + system_message

    messages = [
        {"role": "system", "content": system_content},
        {"role": "user", "content": prompt},
    ]

    try:
        response = client.chat_completion(
            messages=messages,
            max_tokens=int(max_tokens),  # sliders may hand back floats
            temperature=temperature,
            top_p=top_p,
            stream=False,  # this flow needs the full completion at once
        )
        raw = response.choices[0].message.content
        # Echo the raw completion to the console for debugging parse issues.
        print("\n--- Raw AI Response ---")
        print(raw)
        print("----------------------\n")
        return None, parse_files(raw)

    except Exception as e:
        # Any failure (network, API, malformed response) becomes an error message.
        print(f"Error during AI generation: {e}")
        return f"Error during AI generation: {e}", []
132
+
133
# --- Gradio Event Handler ---
def on_generate(prompt, backend, system_message, max_tokens, temperature, top_p):
    """Button callback: run generation and present each file in its own tab."""
    error_msg, files = generate_code(prompt, backend, system_message, max_tokens, temperature, top_p)

    if error_msg:
        # Generation failed — show the error text in a single tab.
        error_tab = gr.TabItem(label="Error", children=[gr.Textbox(value=error_msg, label="Generation Error")])
        return gr.Tabs(tabs=[error_tab])

    if not files:
        # Nothing parseable came back — point the user at the console dump.
        no_files_tab = gr.TabItem(label="Output", children=[gr.Textbox(value="AI did not return recognizable file content. Check raw output in console.", label="Result")])
        return gr.Tabs(tabs=[no_files_tab])

    # Extension -> gr.Code language for syntax highlighting; unknown -> "text".
    suffix_languages = (
        ((".html", ".htm"), "html"),
        ((".css",), "css"),
        ((".js",), "javascript"),
        ((".py",), "python"),
        ((".json",), "json"),
        ((".md",), "markdown"),
        ((".sh", ".bash"), "bash"),
    )

    tabs = []
    for name, content in files:
        name = name.strip()
        content = content.strip()
        if not name or not content:
            # Skip degenerate entries rather than rendering empty tabs.
            print(f"Skipping file with empty name or content: Name='{name}'")
            continue

        lang = next(
            (language for suffixes, language in suffix_languages if name.endswith(suffixes)),
            "text",
        )
        tabs.append(
            gr.TabItem(
                label=name,
                elem_id=f"tab_{name.replace('.', '_')}",  # dots are invalid in elem ids
                children=[gr.Code(value=content, language=lang, label=name)],
            )
        )

    # NOTE(review): gr.TabItem(children=...) and gr.Tabs(tabs=...) constructor
    # kwargs look unsupported in current Gradio releases (layout components are
    # normally populated via `with` context managers) — confirm against the
    # installed Gradio version.
    return gr.Tabs(tabs=tabs)
181
+
182
# --- Gradio UI Definition ---
# Layout: prompt + backend picker, collapsible generation settings, a
# generate button, and a Tabs placeholder that on_generate repopulates.
with gr.Blocks() as demo:
    gr.Markdown("### Website Generator (Static / Flask / Node.js)")
    gr.Markdown("Describe the website you want to create. The AI will generate the necessary files.")

    with gr.Row():
        # scale=3 vs scale=1 gives the prompt box most of the row width.
        prompt = gr.Textbox(label="Describe your website", placeholder="E.g., a simple portfolio site with a contact form", scale=3)
        backend = gr.Dropdown(["Static", "Flask", "Node.js"], value="Static", label="Backend Technology", scale=1)

    with gr.Accordion("Advanced Options", open=False):
        system_message = gr.Textbox(label="Extra instructions for the AI (System Message)", placeholder="Optional: e.g., 'Use Bootstrap 5', 'Prefer functional components in React'", value="")
        max_tokens = gr.Slider(minimum=256, maximum=4096, value=1536, step=64, label="Max Tokens (Length)")  # Increased max
        temperature = gr.Slider(minimum=0.1, maximum=1.5, value=0.7, step=0.1, label="Temperature (Creativity)")
        top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (Sampling)")

    generate_button = gr.Button("✨ Generate Code ✨", variant="primary")

    gr.Markdown("#### Generated Files")
    # Placeholder Tabs component; on_generate returns a new gr.Tabs to replace it.
    # NOTE(review): replacing a Tabs container via an event output may not be
    # supported by the installed Gradio version — confirm this pattern works.
    out_tabs = gr.Tabs(elem_id="output_tabs")

    # Button click action
    generate_button.click(
        on_generate,
        inputs=[prompt, backend, system_message, max_tokens, temperature, top_p],
        outputs=[out_tabs],  # Output the new Tabs component to replace the placeholder
        show_progress="full"  # Show progress during generation
    )

if __name__ == "__main__":
    demo.launch(debug=True)  # Use debug=True for more detailed error messages in console