MINEOGO committed on
Commit
83207ef
·
verified ·
1 Parent(s): 853d569

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +63 -52
app.py CHANGED
@@ -2,72 +2,83 @@ import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  import re
4
 
5
# Chat-completion backend; points at a locally hosted inference server.
# Override the URL to target a different deployment.
client = InferenceClient("http://localhost:5000") # change this if needed
6
-
7
def clean_code_blocks(response):
    """Extract (filename, code) pairs from a raw model reply.

    The reply is expected to interleave bare filenames and code, e.g.::

        index.html
        <code>
        style.css
        <code>

    Returns a list of (filename, code) tuples; code for each file runs up
    to the next non-indented line (or end of input).
    """
    file_section = re.compile(
        r"^([^\s\/\\]+\.?[a-zA-Z0-9]*)\n([\s\S]+?)(?=\n\S|$)",
        re.MULTILINE,
    )
    return [match.groups() for match in file_section.finditer(response)]
17
-
18
def generate_code(prompt, system_message, max_tokens, temperature, top_p, backend):
    """Ask the model for a multi-file website and render each file in a code tab.

    Args:
        prompt: User description of the site to generate.
        system_message: Extra instructions appended to the fixed system prompt.
        max_tokens / temperature / top_p: Sampling parameters forwarded to the model.
        backend: Backend name ("Flask", "Static", "Node.js") injected into the prompt.

    Returns:
        A gr.Tabs container with one tab per parsed file.
    """
    # Fixed instruction block steering the model to emit bare "filename\ncode"
    # sections with no markdown fences, for the selected backend only.
    system_prompt = f"""
You are a code-generation assistant.
Respond ONLY with raw code for a website with multiple files (like index.html, style.css, script.js, app.py, etc.).
Use the {backend} backend ONLY.

Do NOT use markdown formatting. Do NOT add backticks. Do NOT add any explanations. Just output file names followed by their code, like:

index.html
<html>...

style.css
body {{...}}

app.py
from flask import Flask...
""".strip()

    messages = [
        {"role": "system", "content": system_prompt + "\n\n" + system_message},
        {"role": "user", "content": prompt}
    ]

    # Single non-streaming call since we're parsing files
    completion = client.chat_completion(messages, max_tokens=max_tokens, temperature=temperature, top_p=top_p)
    response = completion.choices[0].message.content

    files = clean_code_blocks(response)

    # NOTE(review): constructing gr.Tabs inside an event callback and returning
    # it relies on Gradio's dynamic-rendering support — confirm this works with
    # the installed Gradio version.
    with gr.Tabs() as tabset:
        for filename, code in files:
            with gr.Tab(label=filename.strip()):
                # Choose syntax highlighting from the file extension; anything
                # unrecognized falls back to plain text.
                gr.Code(value=code.strip(), language="html" if filename.endswith(".html") else "css" if filename.endswith(".css") else "javascript" if filename.endswith(".js") else "python" if filename.endswith(".py") else "text")

    return tabset
 
 
 
 
 
 
 
 
 
 
 
 
53
 
54
# UI layout: statement order inside the Blocks context determines the
# rendered page layout, so do not reorder these component definitions.
with gr.Blocks() as demo:
    gr.Markdown("### WebGen: Prompt Multi-File Website (with Flask, Node.js, Static support)")

    with gr.Row():
        prompt = gr.Textbox(label="Prompt", lines=2)
        backend = gr.Dropdown(["Flask", "Static", "Node.js"], value="Static", label="Backend")

    # Sampling knobs, collapsed by default.
    with gr.Accordion("Advanced Settings", open=False):
        system_msg = gr.Textbox(value="", label="System Message")
        max_tokens = gr.Slider(128, 2048, value=1024, step=1, label="Max Tokens")
        temperature = gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature")
        top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")

    # Placeholder container that receives the generated tab set from the callback.
    output = gr.Column()
    run_btn = gr.Button("Generate")

    run_btn.click(generate_code, inputs=[prompt, system_msg, max_tokens, temperature, top_p, backend], outputs=output)

if __name__ == "__main__":
    demo.launch()
 
2
  from huggingface_hub import InferenceClient
3
  import re
4
 
5
# Hosted model on the Hugging Face Inference API used for all chat completions.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
def parse_files(raw_response):
    """Split the model's raw reply into (filename, code) pairs.

    A "file" is a line containing only a path-like token (letters, digits,
    dots, dashes, slashes) followed by its code, which runs until the next
    such filename line or the end of the reply.
    """
    file_block = re.compile(
        r"(?:(?:\n|^)([\w\-.\/\\]+)\n)(.+?)(?=\n[\w\-.\/\\]+\n|\Z)",
        re.DOTALL,
    )
    return [(m.group(1), m.group(2)) for m in file_block.finditer(raw_response)]
10
+
11
def generate_code(prompt, backend, system_message, max_tokens, temperature, top_p):
    """Query the chat model for a multi-file website and parse the reply.

    Args:
        prompt: User description of the site to generate.
        backend: Backend name ("Static", "Flask", "Node.js") injected into the prompt.
        system_message: Extra instructions appended after the fixed system prompt.
        max_tokens / temperature / top_p: Sampling parameters forwarded to the model.

    Returns:
        A list of (filename, code) tuples extracted by parse_files.
    """
    # Fixed system prompt pinning the output format: bare filenames followed
    # by raw code, no markdown, always including index.html.
    full_sys_msg = f"""
You are a code-only AI. Given a prompt, generate a full website using the {backend} backend.
Always include an index.html file.

Respond ONLY with filenames and raw code. NO commentary, NO backticks, NO markdown.

Example:
index.html
<html>...</html>

style.css
body {{ ... }}

Start now.
""".strip()

    # Caller-supplied extra instructions ride along after the fixed prompt.
    chat = [
        {"role": "system", "content": full_sys_msg + "\n" + system_message},
        {"role": "user", "content": prompt},
    ]

    completion = client.chat_completion(
        messages=chat,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )

    return parse_files(completion.choices[0].message.content)
44
+
45
def on_generate(prompt, backend, system_message, max_tokens, temperature, top_p):
    """Generate the site and wrap every returned file in its own code tab."""
    # Extension → syntax-highlighting language; checked in insertion order,
    # anything unmatched falls back to plain text.
    ext_lang = {".html": "html", ".css": "css", ".js": "javascript", ".py": "python"}

    tab_items = []
    for filename, code in generate_code(prompt, backend, system_message, max_tokens, temperature, top_p):
        language = "text"
        for ext, lang_name in ext_lang.items():
            if filename.endswith(ext):
                language = lang_name
                break
        # NOTE(review): gr.TabItem(children=...) and returning a freshly built
        # gr.Group from a callback depend on Gradio's dynamic-render support —
        # confirm this works with the installed Gradio version.
        tab_items.append(gr.TabItem(label=filename.strip(), elem_id=filename.strip(), children=[
            gr.Code(value=code.strip(), language=language)
        ]))
    return gr.Group(tab_items)
57
 
58
# UI layout: statement order inside the Blocks context determines the
# rendered page layout, so do not reorder these component definitions.
with gr.Blocks() as demo:
    gr.Markdown("### Website Generator (Static / Flask / Node.js)")

    with gr.Row():
        prompt = gr.Textbox(label="Describe your website", placeholder="E.g. a portfolio site with dark mode")
        backend = gr.Dropdown(["Static", "Flask", "Node.js"], value="Static", label="Backend")

    # Sampling knobs, collapsed by default.
    with gr.Accordion("Advanced Options", open=False):
        system_message = gr.Textbox(label="Extra instructions for the AI", value="")
        max_tokens = gr.Slider(256, 2048, value=1024, label="Max Tokens")
        temperature = gr.Slider(0.1, 2.0, value=0.7, label="Temperature")
        top_p = gr.Slider(0.1, 1.0, value=0.95, label="Top-p")

    # Placeholder container that receives the generated tabs from the callback.
    output = gr.Group()
    generate_button = gr.Button("Generate Code")

    generate_button.click(
        on_generate,
        inputs=[prompt, backend, system_message, max_tokens, temperature, top_p],
        outputs=output,
        show_progress=True
    )

    # Fix: the original called output.render() here, but components created
    # inside a gr.Blocks context are rendered automatically; a second render()
    # raises DuplicateBlockError and crashes the app at startup.

if __name__ == "__main__":
    demo.launch()