awacke1 committed
Commit a07973f (verified) · Parent(s): c1f471d

Update app.py

Files changed (1):
  1. app.py (+29, -56)

app.py CHANGED
@@ -41,7 +41,7 @@ def randomize_seed_fn(seed: int) -> int:
     seed = random.randint(0, 999999)
     return seed
 
-default_system_instructions = """
+system_instructions1 = """
 [SYSTEM] Answer as Dr. Nova Quantum, a brilliant 50-something scientist specializing in quantum computing and artificial intelligence. Your responses should reflect your vast knowledge and experience in cutting-edge technology and scientific advancements. Maintain a professional yet approachable demeanor, offering insights that blend theoretical concepts with practical applications. Your goal is to educate and inspire, making complex topics accessible without oversimplifying. Draw from your decades of research and innovation to provide nuanced, forward-thinking answers. Remember, you're not just sharing information, but guiding others towards a deeper understanding of our technological future.
 Keep conversations engaging, clear, and concise.
 Avoid unnecessary introductions and answer the user's questions directly.
@@ -50,7 +50,7 @@ Respond in a manner that reflects your expertise and wisdom.
 """
 
 # Initialize an empty DataFrame to store the history
-history_df = pd.DataFrame(columns=['Timestamp', 'Request', 'Response', 'Model', 'Input Size', 'Output Size'])
+history_df = pd.DataFrame(columns=['Timestamp', 'Model', 'Input Size', 'Output Size', 'Request', 'Response'])
 
 def save_history():
     history_df_copy = history_df.copy()
@@ -63,10 +63,10 @@ def load_history():
         history_df = pd.read_json('chat_history.json', orient='records')
         history_df['Timestamp'] = pd.to_datetime(history_df['Timestamp'])
     else:
-        history_df = pd.DataFrame(columns=['Timestamp', 'Request', 'Response', 'Model', 'Input Size', 'Output Size'])
+        history_df = pd.DataFrame(columns=['Timestamp', 'Model', 'Input Size', 'Output Size', 'Request', 'Response'])
     return history_df
 
-def models(text, model="Llama 3 8B", seed=42, system_instructions=default_system_instructions):
+def models(text, model="Llama 3 8B", seed=42):
     global history_df
 
     seed = int(randomize_seed_fn(seed))
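Note: the column reorder above only changes how the history table displays; persistence still round-trips through chat_history.json. save_history() is only partially visible in this diff, so the following is a minimal sketch of that round trip, assuming it serializes with DataFrame.to_json(orient='records'); the helper names save_history_sketch and load_history_sketch are illustrative, not from app.py.

import os
import pandas as pd
from datetime import datetime

HISTORY_FILE = 'chat_history.json'  # same file name load_history() reads in the hunk above

def save_history_sketch(history_df: pd.DataFrame) -> None:
    # Work on a copy so the in-memory frame keeps real datetime objects (assumed behaviour).
    df_copy = history_df.copy()
    df_copy['Timestamp'] = df_copy['Timestamp'].astype(str)
    df_copy.to_json(HISTORY_FILE, orient='records')

def load_history_sketch() -> pd.DataFrame:
    if os.path.exists(HISTORY_FILE):
        df = pd.read_json(HISTORY_FILE, orient='records')
        df['Timestamp'] = pd.to_datetime(df['Timestamp'])
        return df
    # Same column order as the updated history_df definition above.
    return pd.DataFrame(columns=['Timestamp', 'Model', 'Input Size', 'Output Size', 'Request', 'Response'])

save_history_sketch(pd.DataFrame([{'Timestamp': datetime.now(), 'Model': 'Llama 3 8B',
                                   'Input Size': 5, 'Output Size': 8,
                                   'Request': 'hello', 'Response': 'hi there'}]))
print(load_history_sketch())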
@@ -78,7 +78,7 @@ def models(text, model="Llama 3 8B", seed=42, system_instructions=default_system_instructions):
         max_new_tokens=300,
         seed=seed
     )
-    formatted_prompt = system_instructions + text + "[DR. NOVA QUANTUM]"
+    formatted_prompt = system_instructions1 + text + "[DR. NOVA QUANTUM]"
     stream = client.text_generation(
         formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
     output = ""
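Note: formatted_prompt now references the module-level system_instructions1 rather than a per-call argument. The client construction is not part of this diff, so the snippet below is a self-contained sketch of the streaming call as used here; the repo id meta-llama/Meta-Llama-3-8B-Instruct and the shortened prompt text are assumptions.

from huggingface_hub import InferenceClient

client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")  # assumed mapping for "Llama 3 8B"

system_instructions1 = "[SYSTEM] Answer as Dr. Nova Quantum. "   # stand-in for the full prompt above
formatted_prompt = system_instructions1 + "What is a qubit?" + "[DR. NOVA QUANTUM]"
generate_kwargs = dict(max_new_tokens=300, seed=42)

output = ""
# stream=True with details=True yields token events exposing .token.text, which the loop relies on.
stream = client.text_generation(formatted_prompt, **generate_kwargs,
                                stream=True, details=True, return_full_text=False)
for response in stream:
    if not response.token.text == "</s>":   # skip the end-of-sequence token, as in app.py
        output += response.token.text
print(output)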
@@ -86,6 +86,18 @@ def models(text, model="Llama 3 8B", seed=42, system_instructions=default_system_instructions):
         if not response.token.text == "</s>":
             output += response.token.text
 
+    # Add the current interaction to the history DataFrame
+    new_row = pd.DataFrame({
+        'Timestamp': [datetime.now()],
+        'Model': [model],
+        'Input Size': [len(text)],
+        'Output Size': [len(output)],
+        'Request': [text],
+        'Response': [output]
+    })
+    history_df = pd.concat([history_df, new_row], ignore_index=True)
+    save_history()
+
     return output
 
 # Add a list of available voices
@@ -97,13 +109,14 @@ VOICES = [
     "en-CA-ClaraNeural",
 ]
 
-async def respond(input_text, model, seed, voice, system_instructions):
-    reply = models(input_text, model, seed, system_instructions)
+async def respond(audio, model, seed, voice):
+    user = transcribe(audio)
+    reply = models(user, model, seed)
     communicate = edge_tts.Communicate(reply, voice=voice)
     with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
         tmp_path = tmp_file.name
     await communicate.save(tmp_path)
-    return tmp_path, reply
+    return tmp_path
 
 def display_history():
     df = load_history()
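Note: respond() now does its own transcription and returns only the path of the synthesized audio. A minimal, standalone edge-tts example in the same style (the voice name comes from the VOICES list above; speak() is an illustrative name, not part of app.py):

import asyncio
import tempfile
import edge_tts

async def speak(text: str, voice: str = "en-CA-ClaraNeural") -> str:
    # Synthesize text to a temporary file and return its path, mirroring respond() above.
    communicate = edge_tts.Communicate(text, voice=voice)
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
        tmp_path = tmp_file.name
    await communicate.save(tmp_path)
    return tmp_path

print(asyncio.run(speak("Hello, I am Dr. Nova Quantum.")))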
@@ -120,9 +133,6 @@ def download_history():
     href = f'data:text/csv;base64,{b64}'
     return gr.HTML(f'<a href="{href}" download="chat_history.csv">Download Chat History</a>')
 
-def new_chat():
-    return None, None, gr.Markdown.update(value=""), gr.Markdown.update(value=""), gr.DataFrame.update(value=pd.DataFrame())
-
 DESCRIPTION = """# <center>Dr. Nova Quantum⚡ - Your Personal Guide to the Frontiers of Science and Technology</center>"""
 
 with gr.Blocks(css="style.css") as demo:
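Note: download_history() (shown as context above) builds a data: URI from a string b64 computed before this hunk. Assuming b64 is the base64-encoded CSV of the history frame, a sketch of how such a link can be built (make_download_link is an illustrative name, not from app.py):

import base64
import pandas as pd

def make_download_link(df: pd.DataFrame) -> str:
    # CSV -> base64 -> data URI wrapped in an anchor tag, matching the href and anchor shown above.
    b64 = base64.b64encode(df.to_csv(index=False).encode()).decode()
    href = f'data:text/csv;base64,{b64}'
    return f'<a href="{href}" download="chat_history.csv">Download Chat History</a>'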
@@ -151,65 +161,28 @@ with gr.Blocks(css="style.css") as demo:
         label="Dr. Nova Quantum's Voice"
     )
 
-    system_prompt = gr.Textbox(
-        label="System Prompt",
-        placeholder="Edit the system prompt here...",
-        value=default_system_instructions,
-        lines=5
-    )
-
-    with gr.Row():
-        input_audio = gr.Audio(label="User (Audio)", sources="microphone", type="filepath")
-        input_text = gr.Textbox(label="User (Text)", placeholder="Type your message here...")
-
+    input_audio = gr.Audio(label="User", sources="microphone", type="filepath")
     output_audio = gr.Audio(label="Dr. Nova Quantum", type="filepath", autoplay=True)
 
     request_md = gr.Markdown(label="User Request")
     response_md = gr.Markdown(label="Dr. Nova Quantum Response")
 
-    history_display = gr.DataFrame(label="Conversation History", headers=["Timestamp", "Request", "Response", "Model", "Input Size", "Output Size"])
+    history_display = gr.DataFrame(label="Conversation History", headers=["Timestamp", "Model", "Input Size", "Output Size", "Request", "Response"])
 
-    new_chat_button = gr.Button("New Chat")
     download_button = gr.Button("Download Conversation History")
     download_link = gr.HTML()
 
-    def process_input(input_audio, input_text, model, seed, voice, system_instructions):
-        if input_audio is not None:
-            text = transcribe(input_audio)
-        else:
-            text = input_text
-
-        response, reply = asyncio.run(respond(text, model, seed, voice, system_instructions))
-
-        # Update history
-        new_row = pd.DataFrame({
-            'Timestamp': [datetime.now()],
-            'Request': [text],
-            'Response': [reply],
-            'Model': [model],
-            'Input Size': [len(text)],
-            'Output Size': [len(reply)]
-        })
-        global history_df
-        history_df = pd.concat([history_df, new_row], ignore_index=True)
-        save_history()
-
-        return response, display_history(), text, reply
+    def process_audio(audio, model, seed, voice):
+        response = asyncio.run(respond(audio, model, seed, voice))
+        text = transcribe(audio)
+        return response, display_history(), text, models(text, model, seed)
 
     input_audio.change(
-        fn=process_input,
-        inputs=[input_audio, input_text, select, seed, voice_select, system_prompt],
+        fn=process_audio,
+        inputs=[input_audio, select, seed, voice_select],
         outputs=[output_audio, history_display, request_md, response_md]
     )
 
-    input_text.submit(
-        fn=process_input,
-        inputs=[input_audio, input_text, select, seed, voice_select, system_prompt],
-        outputs=[output_audio, history_display, request_md, response_md]
-    )
-
-    new_chat_button.click(fn=new_chat, outputs=[input_audio, input_text, request_md, response_md, history_display])
-
     download_button.click(fn=download_history, outputs=[download_link])
 
     demo.load(fn=display_history, outputs=[history_display])
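Note: process_audio() is a plain function, so it drives the async respond() coroutine with asyncio.run before handing results back to Gradio. A minimal illustration of that sync-to-async bridge (respond_sketch and sync_callback are illustrative names, not from app.py):

import asyncio

async def respond_sketch(text: str) -> str:
    # Stand-in for the async respond() above, which awaits edge_tts.
    await asyncio.sleep(0)
    return f"reply to: {text}"

def sync_callback(text: str) -> str:
    # Gradio event handlers may be plain functions; asyncio.run drives the coroutine to completion.
    return asyncio.run(respond_sketch(text))

print(sync_callback("What is a qubit?"))

Gradio also accepts async functions as event handlers directly, so this bridge is a design choice rather than a requirement.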
 