awacke1 committed
Commit c5b563a Β· verified Β· 1 Parent(s): 0906c19

Update app.py

Files changed (1)
  1. app.py +9 -23
app.py CHANGED
@@ -14,21 +14,18 @@ import base64
 import io
 import json
 
-# 🌍 The world's most basic language setup (aka "English or bust!")
+# 🌍 Language setup
 default_lang = "en"
 engines = { default_lang: Model(default_lang) }
 
-# 🎀 Turn gibberish into text (hopefully)
 def transcribe(audio):
     lang = "en"
     model = engines[lang]
     text = model.stt_file(audio)[0]
     return text
 
-# πŸ”‘ Secret sauce (shhh, don't tell anyone)
 HF_TOKEN = os.environ.get("HF_TOKEN", None)
 
-# πŸ€– Pick your poison (I mean, AI model)
 def client_fn(model):
     if "Mixtral" in model:
         return InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
@@ -41,12 +38,10 @@ def client_fn(model):
     else:
         return InferenceClient("microsoft/Phi-3-mini-4k-instruct")
 
-# 🎲 Randomize like it's Vegas, baby!
 def randomize_seed_fn(seed: int) -> int:
     seed = random.randint(0, 999999)
     return seed
 
-# πŸ‘©β€πŸ”¬ Dr. Nova Quantum's brain dump (warning: may cause existential crisis)
 system_instructions1 = """
 [SYSTEM] Answer as Dr. Nova Quantum, a brilliant 50-something scientist specializing in quantum computing and artificial intelligence. Your responses should reflect your vast knowledge and experience in cutting-edge technology and scientific advancements. Maintain a professional yet approachable demeanor, offering insights that blend theoretical concepts with practical applications. Your goal is to educate and inspire, making complex topics accessible without oversimplifying. Draw from your decades of research and innovation to provide nuanced, forward-thinking answers. Remember, you're not just sharing information, but guiding others towards a deeper understanding of our technological future.
 Keep conversations engaging, clear, and concise.
@@ -55,16 +50,14 @@ Respond in a manner that reflects your expertise and wisdom.
 [USER]
 """
 
-# πŸ“š Where we keep all the juicy conversations (for blackmail, obviously)
+# πŸ“š Conversation history
 history_df = pd.DataFrame(columns=['Timestamp', 'Model', 'Input Size', 'Output Size', 'Request', 'Response'])
 
-# πŸ’Ύ Save history (or "How to preserve evidence of your AI addiction")
 def save_history():
     history_df_copy = history_df.copy()
     history_df_copy['Timestamp'] = history_df_copy['Timestamp'].astype(str)
     history_df_copy.to_json('chat_history.json', orient='records')
 
-# πŸ“‚ Load history (aka "Remind yourself of all the weird questions you've asked")
 def load_history():
     global history_df
     if os.path.exists('chat_history.json'):
@@ -74,7 +67,6 @@ def load_history():
         history_df = pd.DataFrame(columns=['Timestamp', 'Model', 'Input Size', 'Output Size', 'Request', 'Response'])
     return history_df
 
-# 🧠 The magic happens here (or does it?)
def models(text, model="Llama 3 8B", seed=42):
     global history_df
 
@@ -95,7 +87,7 @@ def models(text, model="Llama 3 8B", seed=42):
         if not response.token.text == "</s>":
             output += response.token.text
 
-    # πŸ“ Log the conversation (for science, of course)
+    # Log the conversation
     new_row = pd.DataFrame({
         'Timestamp': [datetime.now()],
         'Model': [model],
@@ -109,7 +101,7 @@ def models(text, model="Llama 3 8B", seed=42):
 
     return output
 
-# 🎭 Voice actors for your AI (because why settle for one personality?)
+# 🎭 Available voices
 VOICES = [
     "en-US-AriaNeural",
     "en-US-GuyNeural",
@@ -118,7 +110,6 @@ VOICES = [
     "en-CA-ClaraNeural",
 ]
 
-# 🎬 Lights, camera, AI-ction!
 async def respond(audio, model, seed, voice):
     user = transcribe(audio)
     reply = models(user, model, seed)
@@ -128,13 +119,11 @@ async def respond(audio, model, seed, voice):
     await communicate.save(tmp_path)
     return tmp_path
 
-# πŸ“Š Display history (or "How to prove you're not talking to yourself")
 def display_history():
     df = load_history()
     df['Timestamp'] = df['Timestamp'].astype(str)
     return df
 
-# πŸ“₯ Download history (for those who like to relive their AI conversations)
 def download_history():
     csv_buffer = io.StringIO()
     history_df_copy = history_df.copy()
@@ -145,10 +134,9 @@ def download_history():
     href = f'data:text/csv;base64,{b64}'
     return gr.HTML(f'<a href="{href}" download="chat_history.csv">Download Chat History</a>')
 
-# πŸš€ Welcome to the future (or at least a fancy UI)
 DESCRIPTION = """# <center>Dr. Nova Quantum⚑ - Your Personal Guide to the Frontiers of Science and Technology</center>"""
 
-# 🎨 Building the UI (warning: may cause extreme excitement)
+# 🎨 Gradio interface
 with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
     with gr.Row():
@@ -175,7 +163,7 @@ with gr.Blocks(css="style.css") as demo:
         label="Dr. Nova Quantum's Voice"
     )
 
-    # πŸ”„ The "oops, let's start over" button
+    # πŸ”„ New Chat button
     new_chat_button = gr.Button("πŸ”„ New Chat", variant="primary")
 
     input_audio = gr.Audio(label="User", sources="microphone", type="filepath")
@@ -189,15 +177,14 @@ with gr.Blocks(css="style.css") as demo:
     download_button = gr.Button("Download Conversation History")
     download_link = gr.HTML()
 
-    # 🎭 Where the magic happens (or where we pretend it happens)
     def process_audio(audio, model, seed, voice):
         response = asyncio.run(respond(audio, model, seed, voice))
         text = transcribe(audio)
         return response, display_history(), text, models(text, model, seed)
 
-    # 🧹 Clean slate function (for when things get weird)
+    # 🧹 Reset function for New Chat
     def reset_interface():
-        return gr.Audio.update(value=None), gr.Audio.update(value=None), gr.Markdown(""), gr.Markdown("")
+        return None, None, "", ""
 
     input_audio.change(
         fn=process_audio,
@@ -205,7 +192,6 @@ with gr.Blocks(css="style.css") as demo:
         outputs=[output_audio, history_display, request_md, response_md]
     )
 
-    # πŸ”„ New chat button magic
     new_chat_button.click(
        fn=reset_interface,
         outputs=[input_audio, output_audio, request_md, response_md]
@@ -215,7 +201,7 @@ with gr.Blocks(css="style.css") as demo:
 
     demo.load(fn=display_history, outputs=[history_display])
 
-# πŸš€ Houston, we have liftoff!
+# πŸš€ Launch the app
 if __name__ == "__main__":
     load_history()
     demo.queue(max_size=200).launch()
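
Note: apart from trimming the inline comments, the one functional change in this diff is reset_interface, which now returns plain values (None, None, "", "") instead of gr.Audio.update(...) / gr.Markdown(...) calls. This presumably tracks Gradio 4.x, where the per-component .update() constructors were removed and an event handler simply returns the new value for each of its output components. Below is a minimal standalone sketch of that pattern, assuming Gradio 4.x; it reuses the app's output names but simplifies the layout, so it is an illustration rather than the app's exact code.

import gradio as gr

with gr.Blocks() as demo:
    # Stand-ins for the app's input/output widgets.
    input_audio = gr.Audio(sources=["microphone"], type="filepath", label="User")
    output_audio = gr.Audio(label="Dr. Nova Quantum")
    request_md = gr.Markdown()
    response_md = gr.Markdown()

    def reset_interface():
        # Returning None clears each Audio component; "" clears the Markdown text.
        return None, None, "", ""

    gr.Button("πŸ”„ New Chat", variant="primary").click(
        fn=reset_interface,
        outputs=[input_audio, output_audio, request_md, response_md],
    )

if __name__ == "__main__":
    demo.launch()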