MINEOGO committed on
Commit 853d569 · verified · 1 Parent(s): 5f4f3c1

Update app.py

Files changed (1)
  1. app.py +43 -54
app.py CHANGED
@@ -2,83 +2,72 @@ import gradio as gr
 from huggingface_hub import InferenceClient
 import re
 
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+client = InferenceClient("http://localhost:5000")  # change this if needed
 
-def clean_code_blocks(raw_response):
+def clean_code_blocks(response):
     """
-    Extract code for each file from a structured LLM response
-    Expected format:
+    Parses AI response to extract file blocks like:
     index.html
     <code>
-
-    static/style.css
+    style.css
     <code>
     """
-    parts = re.split(r"(?:\n|^)([^\n\/\\<>:\"|?*]+(?:\.[a-z]+)?(?:\/[^\n]+)?)\n", raw_response)
-    file_blocks = {}
-    for i in range(1, len(parts), 2):
-        filename = parts[i].strip()
-        code = parts[i + 1].strip()
-        if filename and code:
-            file_blocks[filename] = code
-    return file_blocks
+    pattern = re.compile(r"^([^\s\/\\]+\.?[a-zA-Z0-9]*)\n([\s\S]+?)(?=\n\S|$)", re.MULTILINE)
+    return pattern.findall(response)
 
 def generate_code(prompt, system_message, max_tokens, temperature, top_p, backend):
-    full_system_prompt = f"""
-You are a code-generation AI. You MUST generate a full website including an index.html file.
-Use ONLY the {backend} backend structure.
-Output all code for each file separately using this format:
-
-filename.ext
-<code without backticks>
-
-Do NOT add commentary, do NOT use markdown. Output raw code only.
-""".strip() + "\n\n" + system_message
+    system_prompt = f"""
+You are a code-generation assistant.
+Respond ONLY with raw code for a website with multiple files (like index.html, style.css, script.js, app.py, etc.).
+Use the {backend} backend ONLY.
+
+Do NOT use markdown formatting. Do NOT add backticks. Do NOT add any explanations. Just output file names followed by their code, like:
+
+index.html
+<html>...
+
+style.css
+body {{...}}
+
+app.py
+from flask import Flask...
+""".strip()
 
     messages = [
-        {"role": "system", "content": full_system_prompt},
+        {"role": "system", "content": system_prompt + "\n\n" + system_message},
         {"role": "user", "content": prompt}
     ]
 
-    response = ""
-    for chunk in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p
-    ):
-        token = chunk.choices[0].delta.content
-        if token:
-            response += token
-
-    # Parse and display each file in its own tab
+    # Single non-streaming call since we're parsing files
+    completion = client.chat_completion(messages, max_tokens=max_tokens, temperature=temperature, top_p=top_p)
+    response = completion.choices[0].message.content
+
     files = clean_code_blocks(response)
-    tabs = []
-    for filename, code in files.items():
-        tabs.append(gr.TabItem(label=filename, elem_id=filename))
-        tabs.append(gr.Code(value=code, language="html" if filename.endswith(".html") else "python" if filename.endswith(".py") else "javascript" if filename.endswith(".js") else "css", label=filename))
-    return tabs
+
+    with gr.Tabs() as tabset:
+        for filename, code in files:
+            with gr.Tab(label=filename.strip()):
+                gr.Code(value=code.strip(), language="html" if filename.endswith(".html") else "css" if filename.endswith(".css") else "javascript" if filename.endswith(".js") else "python" if filename.endswith(".py") else "text")
+
+    return tabset
 
 with gr.Blocks() as demo:
-    gr.Markdown("## WebGen AI — One Prompt → Multi-File Website Generator")
+    gr.Markdown("### WebGen: Prompt → Multi-File Website (with Flask, Node.js, Static support)")
 
     with gr.Row():
-        prompt = gr.Textbox(label="Enter Prompt", placeholder="Describe the website you want...")
-        backend = gr.Dropdown(["Flask", "Static", "Node.js"], value="Static", label="Select Backend")
-
-    system_msg = gr.Textbox(value="You are a helpful assistant.", label="System Message")
-    max_tokens = gr.Slider(1, 2048, value=1024, label="Max Tokens")
-    temperature = gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature")
-    top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")
+        prompt = gr.Textbox(label="Prompt", lines=2)
+        backend = gr.Dropdown(["Flask", "Static", "Node.js"], value="Static", label="Backend")
 
-    run_btn = gr.Button("Generate Code")
-    tabs_output = gr.Group()
+    with gr.Accordion("Advanced Settings", open=False):
+        system_msg = gr.Textbox(value="", label="System Message")
+        max_tokens = gr.Slider(128, 2048, value=1024, step=1, label="Max Tokens")
+        temperature = gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature")
+        top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")
 
-    def wrapper(*args):
-        return generate_code(*args)
+    output = gr.Column()
+    run_btn = gr.Button("Generate")
 
-    run_btn.click(wrapper, inputs=[prompt, system_msg, max_tokens, temperature, top_p, backend], outputs=tabs_output)
+    run_btn.click(generate_code, inputs=[prompt, system_msg, max_tokens, temperature, top_p, backend], outputs=output)
 
 if __name__ == "__main__":
     demo.launch()
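
Two notes on the changes above, with small sketches that are not part of the commit.

First, the client: InferenceClient accepts either a Hub model id (the old "HuggingFaceH4/zephyr-7b-beta") or the base URL of an inference server, which is what the new code passes. A minimal sketch of the new non-streaming call path, assuming a chat-capable server really is listening on localhost:5000:

from huggingface_hub import InferenceClient

# Base URL of a local inference server (the commit's placeholder value);
# a Hub model id would work here as well.
client = InferenceClient("http://localhost:5000")

# Single blocking chat_completion call, mirroring the commit's move away
# from streaming: the whole response is needed before file parsing can run.
completion = client.chat_completion(
    [{"role": "user", "content": "Generate a tiny static site."}],
    max_tokens=256,
    temperature=0.7,
    top_p=0.95,
)
print(completion.choices[0].message.content)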
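Second, the parser: the new clean_code_blocks pattern returns (filename, code) pairs straight from findall. A quick check on a hypothetical response in the format the system prompt requests; note that under re.MULTILINE the $ anchor also matches before every newline, so the lazy body capture stops at the first line break and multi-line file bodies get truncated to their first line. Single-line bodies, as below, round-trip cleanly:

import re

# The commit's pattern: a filename-like line, then a lazily captured body
# that ends at the next end-of-line ($ under re.MULTILINE) or flush-left line.
pattern = re.compile(r"^([^\s\/\\]+\.?[a-zA-Z0-9]*)\n([\s\S]+?)(?=\n\S|$)", re.MULTILINE)

# Hypothetical model output in the requested filename-then-code format.
sample = """index.html
<!doctype html><html><body>Hello</body></html>

style.css
body { color: teal; }

script.js
console.log("hi");"""

for filename, code in pattern.findall(sample):
    print(filename, "->", code)
# index.html -> <!doctype html><html><body>Hello</body></html>
# style.css -> body { color: teal; }
# script.js -> console.log("hi");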