iisadia committed
Commit a76773e · verified · 1 Parent(s): 3159d0b

Update app.py

Files changed (1)
  1. app.py  +25 -306
app.py CHANGED
@@ -4,102 +4,27 @@ import requests
 from streamlit.components.v1 import html
 import os
 
-# Import transformers and cache the help agent for performance
 @st.cache_resource
 def get_help_agent():
     from transformers import pipeline
-    # Using BlenderBot 400M Distill as the public conversational model (used elsewhere)
     return pipeline("conversational", model="facebook/blenderbot-400M-distill")
 
-# Custom CSS for professional look (fixed text color) with speech recognition
 def inject_custom_css():
     st.markdown("""
     <style>
     @import url('https://fonts.googleapis.com/css2?family=Poppins:wght@400;600;700&display=swap');
-
-    * {
-        font-family: 'Poppins', sans-serif;
-    }
-
-    .title {
-        font-size: 3rem !important;
-        font-weight: 700 !important;
-        color: #6C63FF !important;
-        text-align: center;
-        margin-bottom: 0.5rem;
-    }
-
-    .subtitle {
-        font-size: 1.2rem !important;
-        text-align: center;
-        color: #666 !important;
-        margin-bottom: 2rem;
-    }
-
-    .question-box {
-        background: #F8F9FA;
-        border-radius: 15px;
-        padding: 2rem;
-        margin: 1.5rem 0;
-        box-shadow: 0 4px 6px rgba(0,0,0,0.1);
-        color: black !important;
-    }
-
-    .answer-btn {
-        border-radius: 12px !important;
-        padding: 0.5rem 1.5rem !important;
-        font-weight: 600 !important;
-        margin: 0.5rem !important;
-    }
-
-    .yes-btn {
-        background: #6C63FF !important;
-        color: white !important;
-    }
-
-    .no-btn {
-        background: #FF6B6B !important;
-        color: white !important;
-    }
-
-    .final-reveal {
-        animation: fadeIn 2s;
-        font-size: 2.5rem;
-        color: #6C63FF;
-        text-align: center;
-        margin: 2rem 0;
-    }
-
-    @keyframes fadeIn {
-        from { opacity: 0; }
-        to { opacity: 1; }
-    }
-
-    .confetti {
-        position: fixed;
-        top: 0;
-        left: 0;
-        width: 100%;
-        height: 100%;
-        pointer-events: none;
-        z-index: 1000;
-    }
-
-    .confidence-meter {
-        height: 10px;
-        background: linear-gradient(90deg, #FF6B6B 0%, #6C63FF 100%);
-        border-radius: 5px;
-        margin: 10px 0;
-    }
-
-    .mic-btn {
-        margin-top: 29px;
-        border: none;
-        background: none;
-        cursor: pointer;
-        font-size: 1.5em;
-        padding: 0;
-    }
+    * { font-family: 'Poppins', sans-serif; }
+    .title { font-size: 3rem !important; font-weight: 700 !important; color: #6C63FF !important; text-align: center; margin-bottom: 0.5rem; }
+    .subtitle { font-size: 1.2rem !important; text-align: center; color: #666 !important; margin-bottom: 2rem; }
+    .question-box { background: #F8F9FA; border-radius: 15px; padding: 2rem; margin: 1.5rem 0; box-shadow: 0 4px 6px rgba(0,0,0,0.1); color: black !important; }
+    .answer-btn { border-radius: 12px !important; padding: 0.5rem 1.5rem !important; font-weight: 600 !important; margin: 0.5rem !important; }
+    .yes-btn { background: #6C63FF !important; color: white !important; }
+    .no-btn { background: #FF6B6B !important; color: white !important; }
+    .final-reveal { animation: fadeIn 2s; font-size: 2.5rem; color: #6C63FF; text-align: center; margin: 2rem 0; }
+    @keyframes fadeIn { from { opacity: 0; } to { opacity: 1; } }
+    .confetti { position: fixed; top: 0; left: 0; width: 100%; height: 100%; pointer-events: none; z-index: 1000; }
+    .confidence-meter { height: 10px; background: linear-gradient(90deg, #FF6B6B 0%, #6C63FF 100%); border-radius: 5px; margin: 10px 0; }
+    .mic-btn { margin-top: 29px; border: none; background: none; cursor: pointer; font-size: 1.5em; padding: 0; }
     </style>
     <script>
     function startSpeechRecognition(inputId) {
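Note on the `get_help_agent` helper kept as context above: the "conversational" pipeline task only exists in older transformers releases (it was deprecated and later dropped), and the cached agent is not called anywhere in the hunks shown in this commit. A minimal usage sketch, assuming a transformers version that still ships the task:

# Sketch only: how the cached BlenderBot agent would typically be invoked,
# assuming transformers < 4.42 (the "conversational" task was removed in newer releases).
from transformers import Conversation

agent = get_help_agent()                    # cached pipeline from the code above
conv = Conversation("Give me a hint about famous landmarks.")
conv = agent(conv)                          # runs facebook/blenderbot-400M-distill
print(conv.generated_responses[-1])         # latest model reply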
@@ -107,28 +32,23 @@ def inject_custom_css():
         recognition.lang = 'en-US';
         recognition.interimResults = false;
         recognition.maxAlternatives = 1;
-
         recognition.onresult = function(event) {
             const transcript = event.results[0][0].transcript.toLowerCase();
             const inputElement = document.getElementById(inputId);
             if (inputElement) {
                 inputElement.value = transcript;
-                // Trigger Streamlit's input change detection
                 const event = new Event('input', { bubbles: true });
                 inputElement.dispatchEvent(event);
             }
         };
-
         recognition.onerror = function(event) {
             console.error('Speech recognition error', event.error);
         };
-
         recognition.start();
     }
     </script>
     """, unsafe_allow_html=True)
 
-# Confetti animation
 def show_confetti():
     html("""
     <canvas id="confetti-canvas" class="confetti"></canvas>
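The handlers above use a `recognition` object whose construction sits just outside this hunk, so it is not shown in the diff. For reference, a Web Speech API setup of this shape usually opens the function as in the sketch below; the exact line in app.py is an assumption, not part of the commit:

# Sketch of the assumed constructor that precedes the handlers shown in the hunk.
# Chrome exposes the API under the webkit prefix, hence the fallback.
SPEECH_JS_PREAMBLE = """
function startSpeechRecognition(inputId) {
    const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
    const recognition = new SpeechRecognition();
    // ...lang, interimResults, maxAlternatives and the handlers from the diff follow here...
}
"""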
@@ -136,16 +56,11 @@ def show_confetti():
     <script>
     const canvas = document.getElementById('confetti-canvas');
     const confetti = confetti.create(canvas, { resize: true });
-    confetti({
-        particleCount: 150,
-        spread: 70,
-        origin: { y: 0.6 }
-    });
+    confetti({ particleCount: 150, spread: 70, origin: { y: 0.6 } });
     setTimeout(() => { canvas.remove(); }, 5000);
     </script>
     """)
 
-# Enhanced AI question generation for guessing game using Llama model
 def ask_llama(conversation_history, category, is_final_guess=False):
     api_url = "https://api.groq.com/openai/v1/chat/completions"
     headers = {
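Two things stand out in the `show_confetti` block above: nothing in it loads the canvas-confetti library that provides `confetti.create`, and `const confetti = confetti.create(...)` shadows the library global before it is initialised, which will typically throw a ReferenceError in the browser. A working variant might look like the sketch below (the CDN URL and pinned version are assumptions, not part of the commit):

# Sketch: load canvas-confetti explicitly and avoid shadowing the library global.
from streamlit.components.v1 import html

def show_confetti_sketch():
    html("""
    <canvas id="confetti-canvas" class="confetti"></canvas>
    <script src="https://cdn.jsdelivr.net/npm/canvas-confetti@1.6.0/dist/confetti.browser.min.js"></script>
    <script>
    const canvas = document.getElementById('confetti-canvas');
    const fire = confetti.create(canvas, { resize: true });   // local name, no shadowing
    fire({ particleCount: 150, spread: 70, origin: { y: 0.6 } });
    setTimeout(() => { canvas.remove(); }, 5000);
    </script>
    """)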
@@ -189,45 +104,28 @@ def ask_llama(conversation_history, category, is_final_guess=False):
         st.error(f"Error calling Llama API: {str(e)}")
         return "Could not generate question"
 
-# New function for the help AI assistant using the Hugging Face InferenceClient
 def ask_help_agent(query):
     try:
         from huggingface_hub import InferenceClient
-        # Initialize the client with the provided model
         client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=os.environ.get("HF_HUB_TOKEN"))
         system_message = "You are a friendly Chatbot."
-
-        # Build history from session state (if any)
         history = []
         if "help_conversation" in st.session_state:
             for msg in st.session_state.help_conversation:
-                # Each history entry is a tuple: (user query, assistant response)
                 history.append((msg.get("query", ""), msg.get("response", "")))
-
         messages = [{"role": "system", "content": system_message}]
         for user_msg, bot_msg in history:
-            if user_msg:
-                messages.append({"role": "user", "content": user_msg})
-            if bot_msg:
-                messages.append({"role": "assistant", "content": bot_msg})
+            if user_msg: messages.append({"role": "user", "content": user_msg})
+            if bot_msg: messages.append({"role": "assistant", "content": bot_msg})
         messages.append({"role": "user", "content": query})
-
         response_text = ""
-        # Using streaming to collect the entire response from the model
-        for message in client.chat_completion(
-            messages,
-            max_tokens=150,
-            stream=True,
-            temperature=0.7,
-            top_p=0.95,
-        ):
+        for message in client.chat_completion(messages, max_tokens=150, stream=True, temperature=0.7, top_p=0.95):
             token = message.choices[0].delta.content
             response_text += token
         return response_text
     except Exception as e:
         return f"Error in help agent: {str(e)}"
 
-# Main game logic
 def main():
     inject_custom_css()
 
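One fragile spot in the condensed streaming loop above: `message.choices[0].delta.content` can be empty or None for some stream chunks (depending on the serving backend), in which case `response_text += token` fails. A defensive variant of just that part, as a sketch rather than the committed code:

# Sketch: the streaming tail of ask_help_agent with a guard for empty/None delta chunks.
import os
from huggingface_hub import InferenceClient

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=os.environ.get("HF_HUB_TOKEN"))
messages = [{"role": "user", "content": "Give me a hint!"}]   # stands in for the list built above
response_text = ""
for message in client.chat_completion(messages, max_tokens=150, stream=True,
                                      temperature=0.7, top_p=0.95):
    token = message.choices[0].delta.content
    if token:                      # skip role-only or empty chunks instead of concatenating None
        response_text += token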
@@ -242,9 +140,8 @@ def main():
         st.session_state.conversation_history = []
         st.session_state.category = None
         st.session_state.final_guess = None
-        st.session_state.help_conversation = []  # separate history for help agent
+        st.session_state.help_conversation = []
 
-    # Start screen
     if st.session_state.game_state == "start":
         st.markdown("""
         <div class="question-box">
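The `st.session_state` assignments above start at line 140 of the new file, so the guard that keeps them from resetting on every rerun is not visible in this hunk. Presumably it looks something like the sketch below; this is an assumption about the elided lines just above the hunk, not content from the diff:

# Assumed initialisation guard (not shown in the diff) so game state survives Streamlit reruns.
import streamlit as st

if "game_state" not in st.session_state:
    st.session_state.game_state = "start"
    st.session_state.questions = []
    st.session_state.answers = []
    st.session_state.current_q = 0
    st.session_state.conversation_history = []
    st.session_state.category = None
    st.session_state.final_guess = None
    st.session_state.help_conversation = []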
@@ -261,25 +158,13 @@ def main():
         """, unsafe_allow_html=True)
 
         with st.form("start_form"):
-            # Create columns for input and microphone button
             col1, col2 = st.columns([4, 1])
             with col1:
-                category_input = st.text_input(
-                    "Enter category (person/place/object):",
-                    key="category_input"
-                ).strip().lower()
-
+                category_input = st.text_input("Enter category (person/place/object):", key="category_input").strip().lower()
             with col2:
-                st.markdown(
-                    """
-                    <button type="button" onclick="startSpeechRecognition('text_input-category_input')"
-                        class="mic-btn">
-                        🎤
-                    </button>
-                    """,
-                    unsafe_allow_html=True
-                )
-
+                st.markdown("""
+                <button type="button" onclick="startSpeechRecognition('text_input-category_input')" class="mic-btn">🎤</button>
+                """, unsafe_allow_html=True)
             if st.form_submit_button("Start Game"):
                 if not category_input:
                     st.error("Please enter a category!")
@@ -287,176 +172,10 @@ def main():
                     st.error("Please enter either 'person', 'place', or 'object'!")
                 else:
                     st.session_state.category = category_input
-                    first_question = ask_llama([
-                        {"role": "user", "content": "Ask your first strategic yes/no question."}
-                    ], category_input)
+                    first_question = ask_llama([], category_input)
                     st.session_state.questions = [first_question]
-                    st.session_state.conversation_history = [
-                        {"role": "assistant", "content": first_question}
-                    ]
-                    st.session_state.game_state = "gameplay"
-                    st.experimental_rerun()
-
-    # Gameplay screen
-    elif st.session_state.game_state == "gameplay":
-        current_question = st.session_state.questions[st.session_state.current_q]
-
-        # Check if AI made a guess
-        if "Final Guess:" in current_question:
-            st.session_state.final_guess = current_question.split("Final Guess:")[1].strip()
-            st.session_state.game_state = "confirm_guess"
-            st.experimental_rerun()
-
-        st.markdown(f'<div class="question-box">Question {st.session_state.current_q + 1}/20:<br><br>'
-                    f'<strong>{current_question}</strong></div>',
-                    unsafe_allow_html=True)
-
-        with st.form("answer_form"):
-            answer_key = f"answer_{st.session_state.current_q}"
-
-            # Create columns for input and microphone button
-            col1, col2 = st.columns([4, 1])
-            with col1:
-                answer_input = st.text_input(
-                    "Your answer (yes/no/both) - speak or type:",
-                    key=answer_key
-                ).strip().lower()
-
-            with col2:
-                st.markdown(
-                    f"""
-                    <button type="button" onclick="startSpeechRecognition('text_input-{answer_key}')"
-                        class="mic-btn">
-                        🎤
-                    </button>
-                    """,
-                    unsafe_allow_html=True
-                )
-
-            if st.form_submit_button("Submit"):
-                if answer_input not in ["yes", "no", "both"]:
-                    st.error("Please answer with 'yes', 'no', or 'both'!")
-                else:
-                    st.session_state.answers.append(answer_input)
-                    st.session_state.conversation_history.append(
-                        {"role": "user", "content": answer_input}
-                    )
-
-                    # Generate next response
-                    next_response = ask_llama(
-                        st.session_state.conversation_history,
-                        st.session_state.category
-                    )
-
-                    # Check if AI made a guess
-                    if "Final Guess:" in next_response:
-                        st.session_state.final_guess = next_response.split("Final Guess:")[1].strip()
-                        st.session_state.game_state = "confirm_guess"
-                    else:
-                        st.session_state.questions.append(next_response)
-                        st.session_state.conversation_history.append(
-                            {"role": "assistant", "content": next_response}
-                        )
-                        st.session_state.current_q += 1
-
-                        # Stop after 20 questions max
-                        if st.session_state.current_q >= 20:
-                            st.session_state.game_state = "result"
-
-                    st.experimental_rerun()
-
-        # Side Help Option: independent chat with an AI help assistant using Hugging Face model
-        with st.expander("Need Help? Chat with AI Assistant"):
-            help_query = st.text_input("Enter your help query:", key="help_query")
-            if st.button("Send", key="send_help"):
-                if help_query:
-                    help_response = ask_help_agent(help_query)
-                    st.session_state.help_conversation.append({"query": help_query, "response": help_response})
-                else:
-                    st.error("Please enter a query!")
-            if st.session_state.help_conversation:
-                for msg in st.session_state.help_conversation:
-                    st.markdown(f"**You:** {msg['query']}")
-                    st.markdown(f"**Help Assistant:** {msg['response']}")
-
-    # Guess confirmation screen using text input response
-    elif st.session_state.game_state == "confirm_guess":
-        st.markdown(f'<div class="question-box">🤖 My Final Guess:<br><br>'
-                    f'<strong>Is it {st.session_state.final_guess}?</strong></div>',
-                    unsafe_allow_html=True)
-
-        with st.form("confirm_form"):
-            # Create columns for input and microphone button
-            col1, col2 = st.columns([4, 1])
-            with col1:
-                confirm_input = st.text_input(
-                    "Type or speak your answer (yes/no/both):",
-                    key="confirm_input"
-                ).strip().lower()
-
-            with col2:
-                st.markdown(
-                    """
-                    <button type="button" onclick="startSpeechRecognition('text_input-confirm_input')"
-                        class="mic-btn">
-                        🎤
-                    </button>
-                    """,
-                    unsafe_allow_html=True
-                )
-
-            if st.form_submit_button("Submit"):
-                if confirm_input not in ["yes", "no", "both"]:
-                    st.error("Please answer with 'yes', 'no', or 'both'!")
-                else:
-                    if confirm_input == "yes":
-                        st.session_state.game_state = "result"
-                        st.experimental_rerun()
-                        st.stop()  # Immediately halt further execution
-                    else:
-                        # Add negative response to history and continue gameplay
-                        st.session_state.conversation_history.append(
-                            {"role": "user", "content": "no"}
-                        )
-                        st.session_state.game_state = "gameplay"
-                        next_response = ask_llama(
-                            st.session_state.conversation_history,
-                            st.session_state.category
-                        )
-                        st.session_state.questions.append(next_response)
-                        st.session_state.conversation_history.append(
-                            {"role": "assistant", "content": next_response}
-                        )
-                        st.session_state.current_q += 1
-                        st.experimental_rerun()
-
-    # Result screen
-    elif st.session_state.game_state == "result":
-        if not st.session_state.final_guess:
-            # Generate final guess if not already made
-            qa_history = "\n".join(
-                [f"Q{i+1}: {q}\nA: {a}"
-                 for i, (q, a) in enumerate(zip(st.session_state.questions, st.session_state.answers))]
-            )
-
-            final_guess = ask_llama(
-                [{"role": "user", "content": qa_history}],
-                st.session_state.category,
-                is_final_guess=True
-            )
-            st.session_state.final_guess = final_guess.split("Final Guess:")[-1].strip()
-
-        show_confetti()
-        st.markdown(f'<div class="final-reveal">🎉 It\'s...</div>', unsafe_allow_html=True)
-        time.sleep(1)
-        st.markdown(f'<div class="final-reveal" style="font-size:3.5rem;color:#6C63FF;">{st.session_state.final_guess}</div>',
-                    unsafe_allow_html=True)
-        st.markdown(f"<p style='text-align:center'>Guessed in {len(st.session_state.questions)} questions</p>",
-                    unsafe_allow_html=True)
-
-        if st.button("Play Again", key="play_again"):
-            st.session_state.clear()
-            st.experimental_rerun()
+                    st.session_state.conversation_history = [{"role": "assistant", "content": first_question}]
+                    st.session_state.game_state = "playing"
 
 if __name__ == "__main__":
-    main()
+    main()
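After this commit the start screen requests the opening question with an empty history (`ask_llama([], category_input)`) and appends the assistant turn itself. For reference, the call pattern this file uses looks roughly like the sketch below; the prompt and request construction inside ask_llama stay elided in the diff:

# Sketch of the conversation flow implied by the committed start-screen code:
# empty history on the first call, then alternating assistant/user turns.
history = []
first_question = ask_llama(history, "person")
history.append({"role": "assistant", "content": first_question})

# after the player answers, the answer is appended and the next turn is requested
history.append({"role": "user", "content": "yes"})
next_question = ask_llama(history, "person")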
 