TheMaisk committed on
Commit
86cf89c
·
verified ·
1 Parent(s): 90c20fb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -9
app.py CHANGED
@@ -1,6 +1,15 @@
 
1
  from huggingface_hub import InferenceClient
2
  import gradio as gr
3
- import os
 
 
 
 
 
 
 
 
4
 
5
  # Klient für die Inferenz
6
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
@@ -8,21 +17,20 @@ client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
8
  # Geheime Eingabeaufforderung aus Umgebungsvariablen
9
  secret_prompt = os.getenv("SECRET_PROMPT")
10
 
11
- def format_prompt(new_message, history):
12
- prompt = secret_prompt
13
  for user_msg, bot_msg in history:
14
  prompt += f"[INST] {user_msg} [/INST]"
15
  prompt += f" {bot_msg}</s> "
16
  prompt += f"[INST] {new_message} [/INST]"
17
  return prompt
18
 
19
- def generate(prompt, history, temperature=0.25, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0):
20
  # Konfiguration der Parameter
21
  temperature = float(temperature)
22
  if temperature < 1e-2:
23
  temperature = 1e-2
24
  top_p = float(top_p)
25
-
26
  generate_kwargs = dict(
27
  temperature=temperature,
28
  max_new_tokens=max_new_tokens,
@@ -31,11 +39,9 @@ def generate(prompt, history, temperature=0.25, max_new_tokens=512, top_p=0.95,
31
  do_sample=True,
32
  seed=727,
33
  )
34
-
35
- formatted_prompt = format_prompt(prompt, history)
36
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
37
  output = ""
38
-
39
  for response in stream:
40
  output += response.token.text
41
  yield output
@@ -44,8 +50,25 @@ def generate(prompt, history, temperature=0.25, max_new_tokens=512, top_p=0.95,
44
  # Chatbot ohne Avatare und mit transparentem Design
45
  samir_chatbot = gr.Chatbot(bubble_full_width=True, show_label=False, show_copy_button=False, likeable=False)
46
 
 
 
 
47
  # Minimalistisches Theme und Konfiguration der Gradio-Demo
48
  theme = 'syddharth/gray-minimal'
49
- demo = gr.ChatInterface(fn=generate, chatbot=samir_chatbot, title="Tutorial Master", theme=theme)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
 
51
  demo.queue().launch(show_api=False)
 
1
+ import json
2
  from huggingface_hub import InferenceClient
3
  import gradio as gr
4
+ import os
5
+
6
# Load the named system prompts from a JSON file.
def load_prompts_from_json(file_path):
    """Return the prompt mapping stored in the JSON file at *file_path*.

    The file is expected to hold a single JSON object mapping a
    prompt-type name (e.g. "default") to its prompt text.

    Raises:
        OSError: if the file cannot be opened.
        json.JSONDecodeError: if the file is not valid JSON.
    """
    # Explicit UTF-8 so the result does not depend on the platform's
    # default locale encoding (the original relied on the default).
    with open(file_path, 'r', encoding='utf-8') as file:
        return json.load(file)
10
+
11
# Load the named prompts once at startup from 'prompts.json'.
# Fall back to an empty mapping when the file is missing or malformed so
# the app can still start; format_prompt() then falls back to the
# secret prompt for every prompt type. (Previously, a missing file
# crashed the whole app at import time.)
try:
    prompts = load_prompts_from_json('prompts.json')
except (OSError, ValueError):
    # json.JSONDecodeError is a subclass of ValueError.
    prompts = {}
13
 
14
# Inference client pinned to the Mixtral-8x7B instruct model.
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
 
17
# Secret system prompt read from the environment; None when the
# SECRET_PROMPT variable is unset. Used by format_prompt() as the
# fallback when the requested prompt type is not in `prompts`.
secret_prompt = os.getenv("SECRET_PROMPT")
19
 
20
def format_prompt(new_message, history, prompt_type='default'):
    """Assemble a Mixtral-instruct prompt from the chat history.

    Starts from the prompt selected by *prompt_type* (falling back to
    the secret prompt for unknown types), appends every past exchange
    wrapped in [INST] ... [/INST] tags with the bot reply terminated by
    </s>, and finally appends the new user message.
    """
    parts = [prompts.get(prompt_type, secret_prompt)]
    for question, answer in history:
        parts.append(f"[INST] {question} [/INST] {answer}</s> ")
    parts.append(f"[INST] {new_message} [/INST]")
    return "".join(parts)
27
 
28
+ def generate(prompt, history, temperature=0.25, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0, prompt_type='default'):
29
  # Konfiguration der Parameter
30
  temperature = float(temperature)
31
  if temperature < 1e-2:
32
  temperature = 1e-2
33
  top_p = float(top_p)
 
34
  generate_kwargs = dict(
35
  temperature=temperature,
36
  max_new_tokens=max_new_tokens,
 
39
  do_sample=True,
40
  seed=727,
41
  )
42
+ formatted_prompt = format_prompt(prompt, history, prompt_type)
 
43
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
44
  output = ""
 
45
  for response in stream:
46
  output += response.token.text
47
  yield output
 
50
# Chat widget without avatars: full-width bubbles, no label,
# no copy button, no like/dislike buttons.
samir_chatbot = gr.Chatbot(bubble_full_width=True, show_label=False, show_copy_button=False, likeable=False)
52
 
53
# Dropdown for selecting which prompt from prompts.json to use;
# the choices are the keys of the loaded prompt mapping.
prompt_type_dropdown = gr.Dropdown(choices=list(prompts.keys()), label="Prompt-Typ", value='default')
55
+
56
# Minimalist theme and Gradio demo configuration.
theme = 'syddharth/gray-minimal'

# Use gr.ChatInterface (as the pre-refactor version did) rather than a
# plain gr.Interface: `generate` yields plain strings, which
# ChatInterface streams into the managed chat history. The gr.Interface
# wiring fed the yielded string straight into a gr.Chatbot output
# (which expects a list of message pairs) and used a bare "state" input
# that nothing ever updated, so the conversation log never grew.
# additional_inputs are passed to `generate` after (message, history),
# matching its (temperature, max_new_tokens, top_p, repetition_penalty,
# prompt_type) parameter order.
demo = gr.ChatInterface(
    fn=generate,
    chatbot=samir_chatbot,
    additional_inputs=[
        gr.Slider(0, 1, value=0.25, label="Temperature"),
        gr.Slider(1, 2048, value=512, step=1, label="Max Tokens"),
        gr.Slider(0, 1, value=0.95, label="Top P"),
        gr.Slider(1, 2, value=1.0, label="Repetition Penalty"),
        prompt_type_dropdown,
    ],
    title="Tutorial Master",
    theme=theme,
)
73
 
# Enable request queuing and launch the app; show_api=False hides the
# auto-generated API endpoint documentation from the UI.
demo.queue().launch(show_api=False)