TheMaisk committed on
Commit
2d100fa
·
verified ·
1 Parent(s): 7b77c74

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -14
app.py CHANGED
@@ -3,12 +3,12 @@ from huggingface_hub import InferenceClient
3
  import gradio as gr
4
  import os
5
 
6
- # Load prompts from JSON file
7
  def load_prompts_from_json(file_path):
8
  with open(file_path, 'r') as file:
9
  return json.load(file)
10
 
11
- # Load prompts from 'prompts.json'
12
  prompts = load_prompts_from_json('prompts.json')
13
 
14
  # Inference client
@@ -26,7 +26,7 @@ def format_prompt(new_message, history, prompt_type='default'):
26
  return prompt
27
 
28
  def generate(prompt, history, temperature=0.25, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0, prompt_type='default'):
29
- # Configuration of parameters
30
  temperature = float(temperature)
31
  if temperature < 1e-2:
32
  temperature = 1e-2
@@ -45,20 +45,20 @@ def generate(prompt, history, temperature=0.25, max_new_tokens=512, top_p=0.95,
45
  for response in stream:
46
  output += response.token.text
47
  yield output
48
- return output
49
 
50
- # Chatbot without avatars and with transparent design
51
  samir_chatbot = gr.Chatbot(bubble_full_width=True, show_label=False, show_copy_button=False, likeable=False)
52
 
53
- # Dropdown for prompt types
54
  prompt_type_dropdown = gr.Dropdown(choices=list(prompts.keys()), label="Prompt Type", value='default')
55
 
56
- # Minimalistic theme and Gradio demo configuration
57
  theme = 'syddharth/gray-minimal'
58
  demo = gr.Interface(
59
  fn=generate,
60
  inputs=[
61
- gr.Textbox(lines=2, label="Input"),
62
  "state" # State input for conversation history
63
  ],
64
  outputs=[samir_chatbot],
@@ -66,9 +66,5 @@ demo = gr.Interface(
66
  theme=theme
67
  )
68
 
69
- # Inside generate function:
70
- def generate(prompt, history, ...):
71
- # Update history using the state object
72
- # ...
73
-
74
- return output, updated_history # Return bo
 
3
  import gradio as gr
4
  import os
5
 
6
+ # Laden der Prompts aus der JSON-Datei
7
  def load_prompts_from_json(file_path):
8
  with open(file_path, 'r') as file:
9
  return json.load(file)
10
 
11
+ # Laden der Prompts aus 'prompts.json'
12
  prompts = load_prompts_from_json('prompts.json')
13
 
14
  # Inference client
 
26
  return prompt
27
 
28
  def generate(prompt, history, temperature=0.25, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0, prompt_type='default'):
29
+ # Konfiguration der Parameter
30
  temperature = float(temperature)
31
  if temperature < 1e-2:
32
  temperature = 1e-2
 
45
  for response in stream:
46
  output += response.token.text
47
  yield output
48
+ return output, history + [(prompt, output)] # Speichere die Konversation
49
 
50
+ # Chatbot ohne Avatare und mit transparentem Design
51
  samir_chatbot = gr.Chatbot(bubble_full_width=True, show_label=False, show_copy_button=False, likeable=False)
52
 
53
+ # Dropdown für Prompt-Typen
54
  prompt_type_dropdown = gr.Dropdown(choices=list(prompts.keys()), label="Prompt Type", value='default')
55
 
56
+ # Minimalistisches Theme und Gradio Demo Konfiguration
57
  theme = 'syddharth/gray-minimal'
58
  demo = gr.Interface(
59
  fn=generate,
60
  inputs=[
61
+ gr.Textbox(lines=2, label="Eingabe"),
62
  "state" # State input for conversation history
63
  ],
64
  outputs=[samir_chatbot],
 
66
  theme=theme
67
  )
68
 
69
+ # Launch the demo with the queue
70
+ demo.queue().launch(show_api=False)