Mr-Geo committed on
Commit
3a4341d
·
verified ·
1 Parent(s): 60dc823

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +67 -13
app.py CHANGED
@@ -162,7 +162,7 @@ def get_context(message):
162
  print(f"\nFinal context length: {total_chars} characters")
163
  return context
164
 
165
- def chat_response(message, history):
166
  """Chat response function for Gradio interface"""
167
  try:
168
  # Get context
@@ -215,20 +215,51 @@ Context: {context}"""
215
  # Get response
216
  response = ""
217
  completion = client.chat.completions.create(
218
- model="llama-3.3-70b-versatile",
219
  messages=messages,
220
  temperature=0.7,
221
- max_tokens=2000,
222
  top_p=0.95,
223
  stream=True
224
  )
225
 
226
  print("\n=== LLM Response Start ===")
 
 
 
 
227
  for chunk in completion:
228
  if chunk.choices[0].delta.content:
229
- response += chunk.choices[0].delta.content
230
- print(chunk.choices[0].delta.content, end='', flush=True)
231
- yield response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
232
  print("\n=== LLM Response End ===\n")
233
 
234
  except Exception as e:
@@ -251,24 +282,47 @@ if __name__ == "__main__":
251
  demo = gr.Blocks()
252
 
253
  with demo:
254
- gr.Markdown("# Website Chat Assistant")
255
- gr.Markdown("Ask questions about the website.")
 
 
 
 
 
 
 
 
 
 
 
 
256
 
257
  chatbot = gr.Chatbot(height=600)
258
- msg = gr.Textbox(placeholder="Ask a question...", label="Your question")
259
- clear = gr.Button("Clear")
 
 
 
 
 
 
 
 
 
 
 
260
 
261
  def user(user_message, history):
262
  return "", history + [[user_message, None]]
263
 
264
- def bot(history):
265
  if history and history[-1][1] is None:
266
- for response in chat_response(history[-1][0], history[:-1]):
267
  history[-1][1] = response
268
  yield history
269
 
270
  msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
271
- bot, chatbot, chatbot
272
  )
273
 
274
  clear.click(lambda: None, None, chatbot, queue=False)
 
162
  print(f"\nFinal context length: {total_chars} characters")
163
  return context
164
 
165
+ def chat_response(message, history, model_name):
166
  """Chat response function for Gradio interface"""
167
  try:
168
  # Get context
 
215
  # Get response
216
  response = ""
217
  completion = client.chat.completions.create(
218
+ model=model_name,
219
  messages=messages,
220
  temperature=0.7,
221
+ max_tokens=2500,
222
  top_p=0.95,
223
  stream=True
224
  )
225
 
226
  print("\n=== LLM Response Start ===")
227
+ thinking_process = ""
228
+ final_response = ""
229
+ is_thinking = False
230
+
231
  for chunk in completion:
232
  if chunk.choices[0].delta.content:
233
+ content = chunk.choices[0].delta.content
234
+ print(content, end='', flush=True)
235
+
236
+ # Check for thinking tags
237
+ if "<think>" in content:
238
+ is_thinking = True
239
+ continue
240
+ elif "</think>" in content:
241
+ is_thinking = False
242
+ # Create collapsible thinking section
243
+ if thinking_process:
244
+ final_response = f"""<details>
245
+ <summary>πŸ€” Click here to see the 'thinking' process</summary>
246
+ <hr>
247
+ <div style="font-size: 0.9em;">
248
+ <i>πŸ’­{thinking_process}</i>
249
+ </div>
250
+ <hr>
251
+ </details>
252
+
253
+ {final_response}"""
254
+ continue
255
+
256
+ # Append content to appropriate section
257
+ if is_thinking:
258
+ thinking_process += content
259
+ else:
260
+ final_response += content
261
+ yield final_response
262
+
263
  print("\n=== LLM Response End ===\n")
264
 
265
  except Exception as e:
 
282
  demo = gr.Blocks()
283
 
284
  with demo:
285
+ gr.Markdown("# British Antarctic Survey Website Chat Assistant 🌍")
286
+ gr.Markdown("Ask questions about the BAS website. This system accesses text data from 11,982 unique BAS URLs (6GB vector database) πŸ“š")
287
+ # Add model selector
288
+ model_selector = gr.Dropdown(
289
+ choices=[
290
+ "llama-3.3-70b-versatile",
291
+ "llama-3.1-8b-instant",
292
+ "mixtral-8x7b-32768",
293
+ "deepseek-r1-distill-llama-70b"
294
+ ],
295
+ value="llama-3.3-70b-versatile",
296
+ label="Select AI Model πŸ€–",
297
+ info="Choose which AI model to use for responses - select 'deepseek-r1-distill-llama-70b' for the 'thinking' AI search"
298
+ )
299
 
300
  chatbot = gr.Chatbot(height=600)
301
+ msg = gr.Textbox(placeholder="Ask a question or select an example question... πŸ€”", label="Your question")
302
+ clear = gr.Button("Clear 🧹")
303
+
304
+ # Add example questions
305
+ gr.Examples(
306
+ examples=[
307
+ "What research stations does BAS operate in Antarctica? πŸ”οΈ",
308
+ "Tell me about the RRS Sir David Attenborough 🚒",
309
+ "What kind of science and research does BAS do? πŸ”¬",
310
+ "What is BAS doing about climate change? 🌑️",
311
+ ],
312
+ inputs=msg,
313
+ )
314
 
315
  def user(user_message, history):
316
  return "", history + [[user_message, None]]
317
 
318
+ def bot(history, model_name):
319
  if history and history[-1][1] is None:
320
+ for response in chat_response(history[-1][0], history[:-1], model_name):
321
  history[-1][1] = response
322
  yield history
323
 
324
  msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
325
+ bot, [chatbot, model_selector], chatbot
326
  )
327
 
328
  clear.click(lambda: None, None, chatbot, queue=False)