TheMaisk committed on
Commit
7cdb4a4
·
verified ·
1 Parent(s): f5a4880

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -10
app.py CHANGED
@@ -3,18 +3,18 @@ from huggingface_hub import InferenceClient
3
  import gradio as gr
4
  import os
5
 
6
- # Laden der Prompts aus der JSON-Datei
7
  def load_prompts_from_json(file_path):
8
  with open(file_path, 'r') as file:
9
  return json.load(file)
10
 
11
- # Angenommen, Sie haben eine JSON-Datei namens 'prompts.json'
12
  prompts = load_prompts_from_json('prompts.json')
13
 
14
- # Klient für die Inferenz
15
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
16
 
17
- # Geheime Eingabeaufforderung aus Umgebungsvariablen
18
  secret_prompt = os.getenv("SECRET_PROMPT")
19
 
20
  def format_prompt(new_message, history, prompt_type='default'):
@@ -26,7 +26,7 @@ def format_prompt(new_message, history, prompt_type='default'):
26
  return prompt
27
 
28
  def generate(prompt, history, temperature=0.25, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0, prompt_type='default'):
29
- # Konfiguration der Parameter
30
  temperature = float(temperature)
31
  if temperature < 1e-2:
32
  temperature = 1e-2
@@ -47,18 +47,18 @@ def generate(prompt, history, temperature=0.25, max_new_tokens=512, top_p=0.95,
47
  yield output
48
  return output
49
 
50
- # Chatbot ohne Avatare und mit transparentem Design
51
  samir_chatbot = gr.Chatbot(bubble_full_width=True, show_label=False, show_copy_button=False, likeable=False)
52
 
53
- # Dropdown für Prompt-Typen
54
- prompt_type_dropdown = gr.Dropdown(choices=list(prompts.keys()), label="Prompt-Typ", value='default')
55
 
56
- # Minimalistisches Theme und Konfiguration der Gradio-Demo
57
  theme = 'syddharth/gray-minimal'
58
  demo = gr.Interface(
59
  fn=generate,
60
  inputs=[
61
- gr.Textbox(lines=2, label="Eingabe"),
62
  "state",
63
  gr.Slider(0, 1, value=0.25, label="Temperature"),
64
  gr.Slider(1, 2048, value=512, step=1, label="Max Tokens"),
@@ -71,4 +71,5 @@ demo = gr.Interface(
71
  theme=theme
72
  )
73
 
 
74
  demo.queue().launch(show_api=False)
 
3
  import gradio as gr
4
  import os
5
 
6
# Load prompts from JSON file
def load_prompts_from_json(file_path):
    """Return the prompt mapping parsed from the JSON file at *file_path*.

    Raises FileNotFoundError if the file does not exist and
    json.JSONDecodeError if its contents are not valid JSON.
    """
    # NOTE(review): `json` is not among the imports visible in this diff
    # (only huggingface_hub, gradio, os) — confirm `import json` exists
    # at the top of app.py.
    # Explicit encoding: the platform-default codec could mis-decode
    # non-ASCII prompt text; JSON files are conventionally UTF-8.
    with open(file_path, 'r', encoding='utf-8') as file:
        return json.load(file)
10
 
11
+ # Load prompts from 'prompts.json'
12
  prompts = load_prompts_from_json('prompts.json')
13
 
14
+ # Inference client
15
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
16
 
17
+ # Secret prompt from environment variable
18
  secret_prompt = os.getenv("SECRET_PROMPT")
19
 
20
  def format_prompt(new_message, history, prompt_type='default'):
 
26
  return prompt
27
 
28
  def generate(prompt, history, temperature=0.25, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0, prompt_type='default'):
29
+ # Configuration of parameters
30
  temperature = float(temperature)
31
  if temperature < 1e-2:
32
  temperature = 1e-2
 
47
  yield output
48
  return output
49
 
50
+ # Chatbot without avatars and with transparent design
51
  samir_chatbot = gr.Chatbot(bubble_full_width=True, show_label=False, show_copy_button=False, likeable=False)
52
 
53
+ # Dropdown for prompt types
54
+ prompt_type_dropdown = gr.Dropdown(choices=list(prompts.keys()), label="Prompt Type", value='default')
55
 
56
+ # Minimalistic theme and Gradio demo configuration
57
  theme = 'syddharth/gray-minimal'
58
  demo = gr.Interface(
59
  fn=generate,
60
  inputs=[
61
+ gr.Textbox(lines=2, label="Input"),
62
  "state",
63
  gr.Slider(0, 1, value=0.25, label="Temperature"),
64
  gr.Slider(1, 2048, value=512, step=1, label="Max Tokens"),
 
71
  theme=theme
72
  )
73
 
74
+ # Launch the demo with the queue
75
  demo.queue().launch(show_api=False)