MINEOGO committed
Commit a922063 · verified · 1 Parent(s): 801d673

Update app.py

Files changed (1): app.py +16 -28
app.py CHANGED
@@ -1,22 +1,19 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
+def respond(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p, backend):
+    # Force the AI to generate a website with index.html and specified backend
+    forced_instruction = f"""
+    You must generate a complete website structure including at least an index.html.
+    Use the following backend structure: {backend}.
+    Only use {backend} relevant code and structure, and don't include any other type.
+    """
+    system_message = forced_instruction + "\n\n" + system_message
 
+    messages = [{"role": "system", "content": system_message}]
     for val in history:
         if val[0]:
             messages.append({"role": "user", "content": val[0]})
@@ -26,7 +23,6 @@ def respond(
     messages.append({"role": "user", "content": message})
 
     response = ""
-
     for message in client.chat_completion(
         messages,
         max_tokens=max_tokens,
@@ -35,30 +31,22 @@ def respond(
         top_p=top_p,
     ):
         token = message.choices[0].delta.content
-
         response += token
         yield response
 
 
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
 demo = gr.ChatInterface(
-    respond,
+    fn=respond,
     additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+        gr.Textbox(value="You are a helpful assistant.", label="System message"),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
+        gr.Dropdown(choices=["Flask", "Static", "Node.js"], value="Static", label="Website Backend"),
     ],
+    title="WebGen AI",
+    description="Ask the AI to build a website with a specific backend (Flask, Static, or Node.js). It will always include index.html.",
 )
 
-
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
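
Note: Gradio passes `additional_inputs` to `fn` positionally after `message` and `history`, so the new `gr.Dropdown` value arrives as the final `backend` argument of `respond`. For the UI defaults, the composed system prompt looks like the output of this illustrative snippet (it just reproduces the commit's f-string outside the app; the variable values are the widget defaults):

backend = "Static"                               # default Dropdown value
system_message = "You are a helpful assistant."  # default Textbox value

# Same composition as in respond(): forced instruction first, then the
# user-editable system message.
forced_instruction = f"""
You must generate a complete website structure including at least an index.html.
Use the following backend structure: {backend}.
Only use {backend} relevant code and structure, and don't include any other type.
"""
print(forced_instruction + "\n\n" + system_message)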
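
One caveat a reviewer might flag: with `huggingface_hub`'s streaming `chat_completion`, a chunk's `delta.content` can be `None` (the first chunk may carry only the role), which would make `response += token` raise a TypeError; the loop variable `message` also shadows the user-message parameter. A minimal defensive sketch of the streaming loop, not part of this commit, assuming the call uses `stream=True` as in the stock Gradio template this file is based on:

    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,          # assumed, as in the stock template
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:             # skip chunks whose delta has no text
            response += token
            yield response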