iisadia committed
Commit 8627d53 · verified · 1 Parent(s): 1194971

Update app.py

Files changed (1)
  1. app.py +146 -139
app.py CHANGED
@@ -4,6 +4,13 @@ import requests
 from streamlit.components.v1 import html
 import os
 
+# Import transformers and cache the help agent for performance
+@st.cache_resource
+def get_help_agent():
+    from transformers import pipeline
+    # Using BlenderBot 400M Distill as the public conversational model (used elsewhere)
+    return pipeline("conversational", model="facebook/blenderbot-400M-distill")
+
 # Custom CSS for professional look (fixed text color)
 def inject_custom_css():
     st.markdown("""
@@ -38,71 +45,73 @@ def inject_custom_css():
         color: black !important;
     }
 
-    .mic-button {
-        margin-left: 10px;
-        padding: 5px 10px !important;
-        border-radius: 5px;
+    .answer-btn {
+        border-radius: 12px !important;
+        padding: 0.5rem 1.5rem !important;
+        font-weight: 600 !important;
+        margin: 0.5rem !important;
+    }
+
+    .yes-btn {
         background: #6C63FF !important;
         color: white !important;
-        border: none;
-        cursor: pointer;
     }
 
-    .language-selector {
-        position: absolute;
-        top: 10px;
-        right: 10px;
+    .no-btn {
+        background: #FF6B6B !important;
+        color: white !important;
+    }
+
+    .final-reveal {
+        animation: fadeIn 2s;
+        font-size: 2.5rem;
+        color: #6C63FF;
+        text-align: center;
+        margin: 2rem 0;
+    }
+
+    @keyframes fadeIn {
+        from { opacity: 0; }
+        to { opacity: 1; }
+    }
+
+    .confetti {
+        position: fixed;
+        top: 0;
+        left: 0;
+        width: 100%;
+        height: 100%;
+        pointer-events: none;
        z-index: 1000;
    }
+
+    .confidence-meter {
+        height: 10px;
+        background: linear-gradient(90deg, #FF6B6B 0%, #6C63FF 100%);
+        border-radius: 5px;
+        margin: 10px 0;
+    }
     </style>
     """, unsafe_allow_html=True)
 
-# Speech-to-text component
-def speech_to_text(language, key):
-    component = f"""
+# Confetti animation
+def show_confetti():
+    html("""
+    <canvas id="confetti-canvas" class="confetti"></canvas>
+    <script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/confetti.browser.min.js"></script>
     <script>
-    function startRecording{key}() {{
-        const recognition = new (window.SpeechRecognition || window.webkitSpeechRecognition)();
-        recognition.lang = '{language}';
-        recognition.interimResults = false;
-        recognition.maxAlternatives = 1;
-
-        recognition.start();
-
-        recognition.onresult = function(event) {{
-            const transcript = event.results[0][0].transcript;
-            window.location.href = window.location.origin + window.location.pathname + '?speech_input=' +
-                encodeURIComponent(transcript) + '&speech_key={key}';
-        }};
-
-        recognition.onerror = function(event) {{
-            console.error('Speech recognition error:', event.error);
-        }};
-    }}
+    const canvas = document.getElementById('confetti-canvas');
+    const confetti = confetti.create(canvas, { resize: true });
+    confetti({
+        particleCount: 150,
+        spread: 70,
+        origin: { y: 0.6 }
+    });
+    setTimeout(() => { canvas.remove(); }, 5000);
     </script>
-    <button class="mic-button" onclick="startRecording{key}()">
-        🎤
-    </button>
-    """
-    html(component, height=50)
-
-# Handle speech input
-def handle_speech_input():
-    params = st.query_params
-    if 'speech_input' in params and 'speech_key' in params:
-        speech_input = params['speech_input']
-        speech_key = params['speech_key']
-        st.session_state[speech_key] = speech_input
-        st.query_params.clear()
-        st.experimental_rerun()
+    """)
 
-# Import transformers and cache the help agent for performance
-@st.cache_resource
-def get_help_agent():
-    from transformers import pipeline
-    return pipeline("conversational", model="facebook/blenderbot-400M-distill")
-
-# Enhanced AI question generation
+# Enhanced AI question generation for guessing game using Llama model
 def ask_llama(conversation_history, category, is_final_guess=False):
     api_url = "https://api.groq.com/openai/v1/chat/completions"
     headers = {
@@ -119,9 +128,16 @@ def ask_llama(conversation_history, category, is_final_guess=False):
     6. For objects: ask about size, color, usage, material, or where it's found
     7. Never repeat questions and always make progress toward guessing"""
 
+    if is_final_guess:
+        prompt = f"""Based on these answers about a {category}, provide ONLY your final guess with no extra text:
+        {conversation_history}"""
+    else:
+        prompt = "Ask your next strategic yes/no question that will best narrow down the possibilities."
+
     messages = [
         {"role": "system", "content": system_prompt},
-        *conversation_history
+        *conversation_history,
+        {"role": "user", "content": prompt}
     ]
 
     data = {
@@ -139,18 +155,47 @@ def ask_llama(conversation_history, category, is_final_guess=False):
         st.error(f"Error calling Llama API: {str(e)}")
         return "Could not generate question"
 
+# New function for the help AI assistant using the Hugging Face InferenceClient
+def ask_help_agent(query):
+    try:
+        from huggingface_hub import InferenceClient
+        # Initialize the client with the provided model
+        client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=os.environ.get("HF_HUB_TOKEN"))
+        system_message = "You are a friendly Chatbot."
+
+        # Build history from session state (if any)
+        history = []
+        if "help_conversation" in st.session_state:
+            for msg in st.session_state.help_conversation:
+                # Each history entry is a tuple: (user query, assistant response)
+                history.append((msg.get("query", ""), msg.get("response", "")))
+
+        messages = [{"role": "system", "content": system_message}]
+        for user_msg, bot_msg in history:
+            if user_msg:
+                messages.append({"role": "user", "content": user_msg})
+            if bot_msg:
+                messages.append({"role": "assistant", "content": bot_msg})
+        messages.append({"role": "user", "content": query})
+
+        response_text = ""
+        # Using streaming to collect the entire response from the model
+        for message in client.chat_completion(
+            messages,
+            max_tokens=150,
+            stream=True,
+            temperature=0.7,
+            top_p=0.95,
+        ):
+            token = message.choices[0].delta.content
+            response_text += token
+        return response_text
+    except Exception as e:
+        return f"Error in help agent: {str(e)}"
+
 # Main game logic
 def main():
     inject_custom_css()
-    handle_speech_input()
-
-    # Language selector
-    lang_options = {"English": "en-US", "Urdu": "ur-PK"}
-    with st.container():
-        st.markdown('<div class="language-selector">', unsafe_allow_html=True)
-        selected_lang = st.selectbox("Voice Language", options=list(lang_options.keys()), label_visibility="collapsed")
-        st.markdown('</div>', unsafe_allow_html=True)
-    selected_lang_code = lang_options[selected_lang]
 
     st.markdown('<div class="title">KASOTI</div>', unsafe_allow_html=True)
     st.markdown('<div class="subtitle">The Smart Guessing Game</div>', unsafe_allow_html=True)
@@ -163,7 +208,7 @@ def main():
         st.session_state.conversation_history = []
         st.session_state.category = None
         st.session_state.final_guess = None
-        st.session_state.help_conversation = []
+        st.session_state.help_conversation = []  # separate history for help agent
 
     # Start screen
     if st.session_state.game_state == "start":
@@ -177,37 +222,22 @@ def main():
             <li><strong>Place</strong> - city, country, landmark, geographical location</li>
             <li><strong>Object</strong> - everyday item, tool, vehicle, etc.</li>
             </ul>
-            <p>Type or speak your category below to begin:</p>
+            <p>Type your category below to begin:</p>
             </div>
             """, unsafe_allow_html=True)
 
         with st.form("start_form"):
-            col1, col2 = st.columns([4, 1])
-            with col1:
-                category_input = st.text_input("Enter category (person/place/object):",
-                                               key="category_input").strip().lower()
-            with col2:
-                st.write("")
-                st.write("")
-                speech_to_text(language=selected_lang_code, key="category_input")
-
+            category_input = st.text_input("Enter category (person/place/object):").strip().lower()
             if st.form_submit_button("Start Game"):
-                urdu_to_english = {
-                    'شخص': 'person',
-                    'جگہ': 'place',
-                    'چیز': 'object'
-                }
-                category = urdu_to_english.get(category_input, category_input)
-
-                if not category:
+                if not category_input:
                     st.error("Please enter a category!")
-                elif category not in ["person", "place", "object"]:
+                elif category_input not in ["person", "place", "object"]:
                     st.error("Please enter either 'person', 'place', or 'object'!")
                 else:
-                    st.session_state.category = category
+                    st.session_state.category = category_input
                     first_question = ask_llama([
                         {"role": "user", "content": "Ask your first strategic yes/no question."}
-                    ], category)
+                    ], category_input)
                     st.session_state.questions = [first_question]
                     st.session_state.conversation_history = [
                         {"role": "assistant", "content": first_question}
@@ -219,6 +249,7 @@ def main():
     elif st.session_state.game_state == "gameplay":
         current_question = st.session_state.questions[st.session_state.current_q]
 
+        # Check if AI made a guess
         if "Final Guess:" in current_question:
             st.session_state.final_guess = current_question.split("Final Guess:")[1].strip()
             st.session_state.game_state = "confirm_guess"
@@ -229,30 +260,15 @@ def main():
                     unsafe_allow_html=True)
 
         with st.form("answer_form"):
-            col1, col2 = st.columns([4, 1])
-            with col1:
-                answer_input = st.text_input("Your answer (yes/no/both):",
-                                             key=f"answer_{st.session_state.current_q}").strip().lower()
-            with col2:
-                st.write("")
-                st.write("")
-                speech_to_text(language=selected_lang_code, key=f"answer_{st.session_state.current_q}")
-
+            answer_input = st.text_input("Your answer (yes/no/both):",
+                                         key=f"answer_{st.session_state.current_q}").strip().lower()
             if st.form_submit_button("Submit"):
-                # Map Urdu responses to English
-                urdu_to_english = {
-                    'ہاں': 'yes',
-                    'نہیں': 'no',
-                    'دونوں': 'both'
-                }
-                processed_answer = urdu_to_english.get(answer_input, answer_input)
-
-                if processed_answer not in ["yes", "no", "both"]:
+                if answer_input not in ["yes", "no", "both"]:
                     st.error("Please answer with 'yes', 'no', or 'both'!")
                 else:
-                    st.session_state.answers.append(processed_answer)
+                    st.session_state.answers.append(answer_input)
                     st.session_state.conversation_history.append(
-                        {"role": "user", "content": processed_answer}
+                        {"role": "user", "content": answer_input}
                     )
 
                     # Generate next response
@@ -261,6 +277,7 @@ def main():
                         st.session_state.category
                     )
 
+                    # Check if AI made a guess
                    if "Final Guess:" in next_response:
                        st.session_state.final_guess = next_response.split("Final Guess:")[1].strip()
                        st.session_state.game_state = "confirm_guess"
@@ -271,41 +288,44 @@ def main():
                         )
                     st.session_state.current_q += 1
 
+                    # Stop after 20 questions max
                     if st.session_state.current_q >= 20:
                         st.session_state.game_state = "result"
 
                     st.experimental_rerun()
 
-    # Guess confirmation screen
+        # Side Help Option: independent chat with an AI help assistant using Hugging Face model
+        with st.expander("Need Help? Chat with AI Assistant"):
+            help_query = st.text_input("Enter your help query:", key="help_query")
+            if st.button("Send", key="send_help"):
+                if help_query:
+                    help_response = ask_help_agent(help_query)
+                    st.session_state.help_conversation.append({"query": help_query, "response": help_response})
+                else:
+                    st.error("Please enter a query!")
+            if st.session_state.help_conversation:
+                for msg in st.session_state.help_conversation:
+                    st.markdown(f"**You:** {msg['query']}")
+                    st.markdown(f"**Help Assistant:** {msg['response']}")
+
+    # Guess confirmation screen using text input response
     elif st.session_state.game_state == "confirm_guess":
         st.markdown(f'<div class="question-box">🤖 My Final Guess:<br><br>'
                     f'<strong>Is it {st.session_state.final_guess}?</strong></div>',
                     unsafe_allow_html=True)
 
         with st.form("confirm_form"):
-            col1, col2 = st.columns([4, 1])
-            with col1:
-                confirm_input = st.text_input("Type your answer (yes/no/both):", key="confirm_input").strip().lower()
-            with col2:
-                st.write("")
-                st.write("")
-                speech_to_text(language=selected_lang_code, key="confirm_input")
-
+            confirm_input = st.text_input("Type your answer (yes/no/both):", key="confirm_input").strip().lower()
             if st.form_submit_button("Submit"):
-                urdu_to_english = {
-                    'ہاں': 'yes',
-                    'نہیں': 'no',
-                    'دونوں': 'both'
-                }
-                processed_confirm = urdu_to_english.get(confirm_input, confirm_input)
-
-                if processed_confirm not in ["yes", "no", "both"]:
+                if confirm_input not in ["yes", "no", "both"]:
                     st.error("Please answer with 'yes', 'no', or 'both'!")
                 else:
-                    if processed_confirm == "yes":
+                    if confirm_input == "yes":
                         st.session_state.game_state = "result"
                         st.experimental_rerun()
+                        st.stop()  # Immediately halt further execution
                     else:
+                        # Add negative response to history and continue gameplay
                         st.session_state.conversation_history.append(
                             {"role": "user", "content": "no"}
                         )
@@ -324,6 +344,7 @@ def main():
     # Result screen
     elif st.session_state.game_state == "result":
         if not st.session_state.final_guess:
+            # Generate final guess if not already made
             qa_history = "\n".join(
                 [f"Q{i+1}: {q}\nA: {a}"
                 for i, (q, a) in enumerate(zip(st.session_state.questions, st.session_state.answers))]
@@ -336,21 +357,7 @@ def main():
             )
             st.session_state.final_guess = final_guess.split("Final Guess:")[-1].strip()
 
-        html("""
-        <canvas id="confetti-canvas"></canvas>
-        <script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/confetti.browser.min.js"></script>
-        <script>
-        const canvas = document.getElementById('confetti-canvas');
-        const confetti = confetti.create(canvas, { resize: true });
-        confetti({
-            particleCount: 150,
-            spread: 70,
-            origin: { y: 0.6 }
-        });
-        setTimeout(() => { canvas.remove(); }, 5000);
-        </script>
-        """)
-
+        show_confetti()
         st.markdown(f'<div class="final-reveal">🎉 It\'s...</div>', unsafe_allow_html=True)
         time.sleep(1)
         st.markdown(f'<div class="final-reveal" style="font-size:3.5rem;color:#6C63FF;">{st.session_state.final_guess}</div>',
@@ -363,4 +370,4 @@ def main():
         st.experimental_rerun()
 
 if __name__ == "__main__":
-    main()
+    main()
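
For reference, a minimal sketch of how the streaming help-assistant call introduced in ask_help_agent can be exercised on its own, outside Streamlit. This assumes huggingface_hub is installed and an HF_HUB_TOKEN environment variable is set; the example query is illustrative only and not part of the commit.

import os
from huggingface_hub import InferenceClient

# Sketch: same streaming chat_completion pattern as ask_help_agent,
# but without the Streamlit session-state history.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=os.environ.get("HF_HUB_TOKEN"))

messages = [
    {"role": "system", "content": "You are a friendly Chatbot."},
    {"role": "user", "content": "How should I answer if the guess is only partly true?"},  # hypothetical query
]

response_text = ""
for chunk in client.chat_completion(messages, max_tokens=150, stream=True, temperature=0.7, top_p=0.95):
    token = chunk.choices[0].delta.content
    if token:  # the final streamed chunk may carry no content
        response_text += token

print(response_text)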