iisadia committed
Commit d18e982 · verified · 1 Parent(s): 5e13a93

Update app.py

Files changed (1):
  1. app.py +361 -76

app.py CHANGED
@@ -5,7 +5,7 @@ from streamlit.components.v1 import html
 import os
 from dotenv import load_dotenv
 
-# Voice input dependencies
 import torchaudio
 import numpy as np
 import torch
@@ -20,27 +20,30 @@ from transformers import pipeline
 
 @st.cache_resource
 def load_voice_model():
     return pipeline("automatic-speech-recognition", model="openai/whisper-base")
 
 def process_audio(audio_bytes):
     waveform, sample_rate = torchaudio.load(BytesIO(audio_bytes))
-    if waveform.shape[0] > 1:
         waveform = torch.mean(waveform, dim=0, keepdim=True)
-    if sample_rate != 16000:
         resampler = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)
         waveform = resampler(waveform)
     return {"raw": waveform.numpy().squeeze(), "sampling_rate": 16000}
 
 def get_voice_transcription(state_key):
     if state_key not in st.session_state:
         st.session_state[state_key] = ""
-    audio_bytes = audio_recorder(
-        key=state_key + "_audio",
-        pause_threshold=0.8,
-        text="🎙️ Speak your message",
-        recording_color="#e8b62c",
-        neutral_color="#6aa36f"
-    )
     if audio_bytes:
         current_hash = hashlib.md5(audio_bytes).hexdigest()
         last_hash_key = state_key + "_last_hash"
@@ -51,6 +54,7 @@ def get_voice_transcription(state_key):
                 whisper = load_voice_model()
                 transcribed_text = whisper(audio_input)["text"]
                 st.info(f"📝 Transcribed: {transcribed_text}")
                 st.session_state[state_key] += (" " + transcribed_text).strip()
                 st.experimental_rerun()
             except Exception as e:
@@ -58,36 +62,71 @@ def get_voice_transcription(state_key):
     return st.session_state[state_key]
 
 ######################################
-# Game Functions & Styling
 ######################################
 
 @st.cache_resource
 def get_help_agent():
     return pipeline("conversational", model="facebook/blenderbot-400M-distill")
 
 def inject_custom_css():
     st.markdown("""
     <style>
     @import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&display=swap');
     * { font-family: 'Inter', sans-serif; }
     .title { font-size: 2.8rem !important; font-weight: 800 !important;
         background: linear-gradient(45deg, #6C63FF, #3B82F6);
         -webkit-background-clip: text; -webkit-text-fill-color: transparent;
-        text-align: center; margin: 1rem 0; }
-    .subtitle { font-size: 1.1rem; text-align: center; color: #64748B; margin-bottom: 2.5rem; }
     .question-box { background: white; border-radius: 20px; padding: 2rem; margin: 1.5rem 0;
-        box-shadow: 0 10px 25px rgba(0,0,0,0.08); border: 1px solid #e2e8f0; color: black; }
     .input-box { background: white; border-radius: 12px; padding: 1.5rem; margin: 1rem 0;
         box-shadow: 0 4px 6px rgba(0,0,0,0.05); }
     .stTextInput input { border: 2px solid #e2e8f0 !important; border-radius: 10px !important;
-        padding: 12px 16px !important; }
     button { background: linear-gradient(45deg, #6C63FF, #3B82F6) !important;
-        color: white !important; border-radius: 10px !important;
-        padding: 12px 24px !important; font-weight: 600; }
-    .final-reveal { font-size: 2.8rem;
         background: linear-gradient(45deg, #6C63FF, #3B82F6);
         -webkit-background-clip: text; -webkit-text-fill-color: transparent;
         text-align: center; margin: 2rem 0; font-weight: 800; }
     </style>
     """, unsafe_allow_html=True)
 
@@ -97,7 +136,10 @@ def show_confetti():
     <script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/confetti.browser.min.js"></script>
     <script>
     const count = 200;
-    const defaults = { origin: { y: 0.7 }, zIndex: 1050 };
     function fire(particleRatio, opts) {
         confetti(Object.assign({}, defaults, opts, {
             particleCount: Math.floor(count * particleRatio)
@@ -114,82 +156,325 @@ def show_confetti():
 def ask_llama(conversation_history, category, is_final_guess=False):
     api_url = "https://api.groq.com/openai/v1/chat/completions"
     headers = {
-        "Authorization": f"Bearer {os.getenv('GROQ_API_KEY')}",
         "Content-Type": "application/json"
     }
-    system_prompt = f"""You're playing 20 questions to guess a {category}. Rules:
-    1. Ask strategic, non-repeating yes/no questions to narrow down.
-    2. Use all previous answers smartly.
-    3. If you're 80%+ sure, say: Final Guess: [your guess]
-    4. For places: ask about continent, country, landmarks, etc.
-    5. For people: ask if real, profession, gender, etc.
-    6. For objects: ask about use, size, material, etc."""
-
-    prompt = f"""Based on these answers about a {category}, provide ONLY your final guess with no extra text:
-    {conversation_history}""" if is_final_guess else "Ask your next smart yes/no question."
-
-    messages = [{"role": "system", "content": system_prompt}]
-    messages += conversation_history
-    messages.append({"role": "user", "content": prompt})
-
     data = {
-        "model": "llama-3-70b-8192",
         "messages": messages,
-        "temperature": 0.8,
         "max_tokens": 100
     }
 
     try:
-        res = requests.post(api_url, headers=headers, json=data)
-        res.raise_for_status()
-        return res.json()["choices"][0]["message"]["content"]
     except Exception as e:
-        st.error(f" LLaMA API error: {e}")
-        return "..."
 
 ######################################
-# Main App Logic Here (UI, Game Loop)
 ######################################
 
 def main():
-    load_dotenv()
     inject_custom_css()
 
-    st.title("🎮 Guess It! - 20 Questions Game")
-    st.markdown("<div class='subtitle'>Think of a person, place, or object. LLaMA will try to guess it!</div>", unsafe_allow_html=True)
-
-    category = st.selectbox("Category of your secret:", ["Person", "Place", "Object"])
-
-    if "conversation" not in st.session_state:
-        st.session_state.conversation = []
-        st.session_state.last_bot_msg = ""
-
-    if st.button("🔄 Restart Game"):
-        st.session_state.conversation = []
-        st.session_state.last_bot_msg = ""
-        st.rerun()
-
-    if not st.session_state.conversation:
-        st.session_state.last_bot_msg = ask_llama([], category)
-        st.session_state.conversation.append({"role": "assistant", "content": st.session_state.last_bot_msg})
 
-    st.markdown(f"<div class='question-box'><strong>LLaMA:</strong> {st.session_state.last_bot_msg}</div>", unsafe_allow_html=True)
 
-    user_input = get_voice_transcription("voice_input") or st.text_input("💬 Your answer (yes/no/sometimes):")
 
-    if st.button("Submit Answer") and user_input:
-        st.session_state.conversation.append({"role": "user", "content": user_input})
-        with st.spinner("Thinking..."):
-            response = ask_llama(st.session_state.conversation, category)
-            st.session_state.last_bot_msg = response
-            st.session_state.conversation.append({"role": "assistant", "content": response})
-        st.rerun()
 
-    if st.button("🤔 Make Final Guess"):
-        with st.spinner("Making final guess..."):
-            final_guess = ask_llama(st.session_state.conversation, category, is_final_guess=True)
-            st.markdown(f"<div class='final-reveal'>🤯 Final Guess: <strong>{final_guess}</strong></div>", unsafe_allow_html=True)
-            show_confetti()
 
 if __name__ == "__main__":
-    main()

 import os
 from dotenv import load_dotenv
 
+# New imports for voice input
 import torchaudio
 import numpy as np
 import torch

 
 @st.cache_resource
 def load_voice_model():
+    # Loading the Whisper model (which automatically detects both English and Urdu)
     return pipeline("automatic-speech-recognition", model="openai/whisper-base")
 
 def process_audio(audio_bytes):
     waveform, sample_rate = torchaudio.load(BytesIO(audio_bytes))
+    if waveform.shape[0] > 1:  # Convert stereo to mono
         waveform = torch.mean(waveform, dim=0, keepdim=True)
+    if sample_rate != 16000:  # Resample to 16kHz if needed
         resampler = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)
         waveform = resampler(waveform)
     return {"raw": waveform.numpy().squeeze(), "sampling_rate": 16000}
 
 def get_voice_transcription(state_key):
+    """Display audio recorder for a given key.
+    If new audio is recorded, transcribe it and update the session state.
+    """
     if state_key not in st.session_state:
         st.session_state[state_key] = ""
+    # Use a unique key for the recorder widget
+    audio_bytes = audio_recorder(key=state_key + "_audio",
+                                 pause_threshold=0.8,
+                                 text="",
+                                 recording_color="#e8b62c",
+                                 neutral_color="#6aa36f")
     if audio_bytes:
         current_hash = hashlib.md5(audio_bytes).hexdigest()
         last_hash_key = state_key + "_last_hash"

                 whisper = load_voice_model()
                 transcribed_text = whisper(audio_input)["text"]
                 st.info(f"📝 Transcribed: {transcribed_text}")
+                # Append (or set) new transcription
                 st.session_state[state_key] += (" " + transcribed_text).strip()
                 st.experimental_rerun()
             except Exception as e:

     return st.session_state[state_key]
 
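The preprocessing above returns exactly the {"raw": ndarray, "sampling_rate": 16000} dict that the transformers ASR pipeline accepts, so it can be exercised outside Streamlit. A minimal standalone sketch (not part of this commit), assuming a local sample.wav and the same torchaudio/transformers stack:

```python
# Standalone sketch: run the commit's audio preprocessing on a local file and
# transcribe it with the same Whisper pipeline. "sample.wav" is a hypothetical path.
from io import BytesIO

import torch
import torchaudio
from transformers import pipeline

def process_audio(audio_bytes):
    waveform, sample_rate = torchaudio.load(BytesIO(audio_bytes))
    if waveform.shape[0] > 1:  # stereo -> mono
        waveform = torch.mean(waveform, dim=0, keepdim=True)
    if sample_rate != 16000:   # Whisper expects 16 kHz input
        waveform = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)(waveform)
    return {"raw": waveform.numpy().squeeze(), "sampling_rate": 16000}

if __name__ == "__main__":
    asr = pipeline("automatic-speech-recognition", model="openai/whisper-base")
    with open("sample.wav", "rb") as f:  # hypothetical input file
        print(asr(process_audio(f.read()))["text"])
```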
 ######################################
+# Existing Game Helper Functions
 ######################################
 
 @st.cache_resource
 def get_help_agent():
+    from transformers import pipeline
+    # Using BlenderBot 400M Distill as the public conversational model (used elsewhere)
     return pipeline("conversational", model="facebook/blenderbot-400M-distill")
 
 def inject_custom_css():
     st.markdown("""
     <style>
     @import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&display=swap');
+    @import url('https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0/css/all.min.css');
+
     * { font-family: 'Inter', sans-serif; }
+    body { background: linear-gradient(135deg, #f8f9fa 0%, #e9ecef 100%); }
     .title { font-size: 2.8rem !important; font-weight: 800 !important;
         background: linear-gradient(45deg, #6C63FF, #3B82F6);
         -webkit-background-clip: text; -webkit-text-fill-color: transparent;
+        text-align: center; margin: 1rem 0; letter-spacing: -1px; }
+    .subtitle { font-size: 1.1rem !important; text-align: center;
+        color: #64748B !important; margin-bottom: 2.5rem; animation: fadeInSlide 1s ease; }
     .question-box { background: white; border-radius: 20px; padding: 2rem; margin: 1.5rem 0;
+        box-shadow: 0 10px 25px rgba(0,0,0,0.08); border: 1px solid #e2e8f0;
+        position: relative; transition: transform 0.2s ease; color: black; }
+    .question-box:hover { transform: translateY(-3px); }
+    .question-box::before { content: "🕹️"; position: absolute; left: -15px; top: -15px;
+        background: white; border-radius: 50%; padding: 8px;
+        box-shadow: 0 4px 6px rgba(0,0,0,0.1); font-size: 1.2rem; }
     .input-box { background: white; border-radius: 12px; padding: 1.5rem; margin: 1rem 0;
         box-shadow: 0 4px 6px rgba(0,0,0,0.05); }
     .stTextInput input { border: 2px solid #e2e8f0 !important; border-radius: 10px !important;
+        padding: 12px 16px !important; transition: all 0.3s ease !important; }
+    .stTextInput input:focus { border-color: #6C63FF !important;
+        box-shadow: 0 0 0 3px rgba(108, 99, 255, 0.2) !important; }
     button { background: linear-gradient(45deg, #6C63FF, #3B82F6) !important;
+        color: white !important; border: none !important; border-radius: 10px !important;
+        padding: 12px 24px !important; font-weight: 600 !important;
+        transition: all 0.3s ease !important; }
+    button:hover { transform: translateY(-2px); box-shadow: 0 5px 15px rgba(108, 99, 255, 0.3) !important; }
+    .final-reveal { animation: fadeInUp 1s ease; font-size: 2.8rem;
         background: linear-gradient(45deg, #6C63FF, #3B82F6);
         -webkit-background-clip: text; -webkit-text-fill-color: transparent;
         text-align: center; margin: 2rem 0; font-weight: 800; }
+    .help-chat { background: rgba(255,255,255,0.9); backdrop-filter: blur(10px);
+        border-radius: 15px; padding: 1rem; margin: 1rem 0;
+        box-shadow: 0 8px 30px rgba(0,0,0,0.12); }
+    @keyframes fadeInSlide { 0% { opacity: 0; transform: translateY(20px); }
+        100% { opacity: 1; transform: translateY(0); } }
+    @keyframes fadeInUp { 0% { opacity: 0; transform: translateY(30px); }
+        100% { opacity: 1; transform: translateY(0); } }
+    .progress-bar { height: 6px; background: #e2e8f0; border-radius: 3px;
+        margin: 1.5rem 0; overflow: hidden; }
+    .progress-fill { height: 100%; background: linear-gradient(90deg, #6C63FF, #3B82F6);
+        transition: width 0.5s ease; }
+    .question-count { color: #6C63FF; font-weight: 600; font-size: 0.9rem; margin-bottom: 0.5rem; }
+    /* New styles for input with microphone */
+    .input-with-mic { display: flex; align-items: center; gap: 10px; }
+    .input-with-mic input { flex: 1; }
+    .mic-button { background: #6C63FF !important; color: white !important;
+        border-radius: 50% !important; width: 40px !important;
+        height: 40px !important; padding: 0 !important;
+        min-width: 0 !important; }
+    .mic-button:hover { transform: scale(1.1) !important; }
     </style>
     """, unsafe_allow_html=True)
 
     <script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/confetti.browser.min.js"></script>
     <script>
     const count = 200;
+    const defaults = {
+        origin: { y: 0.7 },
+        zIndex: 1050
+    };
     function fire(particleRatio, opts) {
         confetti(Object.assign({}, defaults, opts, {
             particleCount: Math.floor(count * particleRatio)
 
 def ask_llama(conversation_history, category, is_final_guess=False):
     api_url = "https://api.groq.com/openai/v1/chat/completions"
     headers = {
+        "Authorization": "Bearer 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",
         "Content-Type": "application/json"
     }
+    system_prompt = f"""You're playing 20 questions to guess a {category}. Follow these rules:
+    1. Ask strategic, non-repeating yes/no questions that narrow down possibilities
+    2. Consider all previous answers carefully before asking next question
+    3. If you're very confident (80%+ sure), respond with "Final Guess: [your guess]"
+    4. For places: ask about continent, climate, famous landmarks, country, city or population
+    5. For people: ask about fictional or real, profession, gender, alive/dead, nationality, or fame
+    6. For objects: ask about size, color, usage, material, or where it's found
+    7. Never repeat questions and always make progress toward guessing"""
+    if is_final_guess:
+        prompt = f"""Based on these answers about a {category}, provide ONLY your final guess with no extra text:
+        {conversation_history}"""
+    else:
+        prompt = "Ask your next strategic yes/no question that will best narrow down the possibilities."
+    messages = [
+        {"role": "system", "content": system_prompt},
+        *conversation_history,
+        {"role": "user", "content": prompt}
+    ]
     data = {
+        "model": "llama-3.3-70b-versatile",
         "messages": messages,
+        "temperature": 0.7 if is_final_guess else 0.8,
         "max_tokens": 100
     }
+    try:
+        response = requests.post(api_url, headers=headers, json=data)
+        response.raise_for_status()
+        return response.json()["choices"][0]["message"]["content"]
+    except Exception as e:
+        st.error(f"Error calling Llama API: {str(e)}")
+        return "Could not generate question"
 
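The request body above follows Groq's OpenAI-compatible chat-completions format. Note that this revision hard-codes the bearer token, while the previous version read it from the GROQ_API_KEY environment variable via python-dotenv; a hedged sketch of keeping that pattern (the helper name groq_chat is illustrative only, not part of the commit):

```python
# Sketch: same endpoint and payload shape as ask_llama above, but with the token
# read from the environment, as the earlier version of app.py did.
import os

import requests
from dotenv import load_dotenv

load_dotenv()  # picks up GROQ_API_KEY from a local .env file, if present

def groq_chat(messages, model="llama-3.3-70b-versatile", temperature=0.8, max_tokens=100):
    response = requests.post(
        "https://api.groq.com/openai/v1/chat/completions",
        headers={
            "Authorization": f"Bearer {os.getenv('GROQ_API_KEY')}",
            "Content-Type": "application/json",
        },
        json={
            "model": model,
            "messages": messages,
            "temperature": temperature,
            "max_tokens": max_tokens,
        },
    )
    response.raise_for_status()
    return response.json()["choices"][0]["message"]["content"]
```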
+MISTRAL_API_KEY = "wm5eLl09b9I9cOxR3E9n5rrRr1CRQQjn"
+def ask_help_agent(query):
     try:
+        url = "https://api.mistral.ai/v1/chat/completions"
+        headers = {
+            "Authorization": f"Bearer {MISTRAL_API_KEY}",
+            "Content-Type": "application/json"
+        }
+        system_message = "You are a friendly Chatbot."
+        messages = [{"role": "system", "content": system_message}]
+        if "help_conversation" in st.session_state:
+            for msg in st.session_state.help_conversation:
+                if msg.get("query"):
+                    messages.append({"role": "user", "content": msg["query"]})
+                if msg.get("response"):
+                    messages.append({"role": "assistant", "content": msg["response"]})
+        messages.append({"role": "user", "content": query})
+        payload = {
+            "model": "mistral-tiny",
+            "messages": messages,
+            "temperature": 0.7,
+            "top_p": 0.95
+        }
+        response = requests.post(url, headers=headers, json=payload)
+        if response.status_code == 200:
+            result = response.json()
+            return result["choices"][0]["message"]["content"]
+        else:
+            return f"API Error {response.status_code}: {response.text}"
     except Exception as e:
+        return f"Error in help agent: {str(e)}"
 
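ask_help_agent rebuilds the full Mistral message list from st.session_state.help_conversation on every call, replaying each stored {"query", "response"} pair as a user/assistant turn before appending the new query. An illustration of that round-trip with a plain list (the helper name, history variable, and sample strings below are hypothetical, mirroring the code above):

```python
# Illustration of the message replay used by ask_help_agent: each stored help
# exchange becomes one user turn and one assistant turn, then the new query is appended.
help_conversation = [
    {"query": "What does 'both' mean as an answer?", "response": "Use it when yes and no both apply."},
]

def build_messages(history, new_query, system_message="You are a friendly Chatbot."):
    messages = [{"role": "system", "content": system_message}]
    for msg in history:
        if msg.get("query"):
            messages.append({"role": "user", "content": msg["query"]})
        if msg.get("response"):
            messages.append({"role": "assistant", "content": msg["response"]})
    messages.append({"role": "user", "content": new_query})
    return messages

print(build_messages(help_conversation, "Can I change my category mid-game?"))
```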
 ######################################
+# Main Game Logic with Voice Integration
 ######################################
 
 def main():
     inject_custom_css()
+    st.markdown('<div class="title">KASOTI</div>', unsafe_allow_html=True)
+    st.markdown('<div class="subtitle">AI-Powered Guessing Game Challenge</div>', unsafe_allow_html=True)
+    if 'game_state' not in st.session_state:
+        st.session_state.game_state = "start"
+        st.session_state.questions = []
+        st.session_state.current_q = 0
+        st.session_state.answers = []
+        st.session_state.conversation_history = []
+        st.session_state.category = None
+        st.session_state.final_guess = None
+        st.session_state.help_conversation = []  # separate history for help agent
 
+    # Start screen with enhanced layout
+    if st.session_state.game_state == "start":
+        with st.container():
+            st.markdown("""
+            <div class="question-box">
+                <h3 style="color: #6C63FF; margin-bottom: 1.5rem;">🎮 Welcome to KASOTI</h3>
+                <p style="line-height: 1.6; color: #64748B;">
+                    Think of something and I'll try to guess it in 20 questions or less!<br>
+                    Choose from these categories:
+                </p>
+                <div style="display: grid; gap: 1rem; margin: 2rem 0;">
+                    <div style="padding: 1.5rem; background: #f8f9fa; border-radius: 12px;">
+                        <h4 style="margin: 0; color: #6C63FF;">🧑 Person</h4>
+                        <p style="margin: 0.5rem 0 0; color: #64748B;">Celebrity, fictional character, historical figure</p>
+                    </div>
+                    <div style="padding: 1.5rem; background: #f8f9fa; border-radius: 12px;">
+                        <h4 style="margin: 0; color: #6C63FF;">🌍 Place</h4>
+                        <p style="margin: 0.5rem 0 0; color: #64748B;">City, country, landmark, geographical location</p>
+                    </div>
+                    <div style="padding: 1.5rem; background: #f8f9fa; border-radius: 12px;">
+                        <h4 style="margin: 0; color: #6C63FF;">🎯 Object</h4>
+                        <p style="margin: 0.5rem 0 0; color: #64748B;">Everyday item, tool, vehicle, or concept</p>
+                    </div>
+                </div>
+            </div>
+            """, unsafe_allow_html=True)
 
+        with st.form("start_form"):
+            # --- Voice Input for Category ---
+            st.markdown("#### Use Voice (English/Urdu) for Category Input")
+            col1, col2 = st.columns([0.85, 0.15])
+            with col1:
+                voice_category = st.text_input("Enter category (person/place/object):",
+                                               value=get_voice_transcription("voice_category").strip(),
+                                               key="category_input")
+            with col2:
+                st.markdown("<div style='height: 52px; display: flex; align-items: center;'>", unsafe_allow_html=True)
+                audio_bytes = audio_recorder(text="",
+                                             pause_threshold=0.8,
+                                             key="voice_category_audio",
+                                             recording_color="#e8b62c",
+                                             neutral_color="#6aa36f")
+                st.markdown("</div>", unsafe_allow_html=True)
+
+            if st.form_submit_button("Start Game"):
+                category_input = voice_category.strip().lower()
+                if not category_input:
+                    st.error("Please enter a category!")
+                elif category_input not in ["person", "place", "object"]:
+                    st.error("Please enter either 'person', 'place', or 'object'!")
+                else:
+                    st.session_state.category = category_input
+                    first_question = ask_llama([
+                        {"role": "user", "content": "Ask your first strategic yes/no question."}
+                    ], category_input)
+                    st.session_state.questions = [first_question]
+                    st.session_state.conversation_history = [
+                        {"role": "assistant", "content": first_question}
+                    ]
+                    st.session_state.game_state = "gameplay"
+                    st.experimental_rerun()
 
+    # Gameplay screen with progress bar
+    elif st.session_state.game_state == "gameplay":
+        with st.container():
+            progress = (st.session_state.current_q + 1) / 20
+            st.markdown(f"""
+            <div class="question-count">QUESTION {st.session_state.current_q + 1} OF 20</div>
+            <div class="progress-bar">
+                <div class="progress-fill" style="width: {progress * 100}%"></div>
+            </div>
+            """, unsafe_allow_html=True)
+            current_question = st.session_state.questions[st.session_state.current_q]
+            st.markdown(f'''
+            <div class="question-box">
+                <div style="display: flex; align-items: center; gap: 1rem; margin-bottom: 1.5rem;">
+                    <div style="background: #6C63FF; width: 40px; height: 40px; border-radius: 50%;
+                                display: flex; align-items: center; justify-content: center; color: white;">
+                        <i class="fas fa-robot"></i>
+                    </div>
+                    <h3 style="margin: 0; color: #1E293B;">AI Question</h3>
+                </div>
+                <p style="font-size: 1.1rem; line-height: 1.6; color: #1E293B;">{current_question}</p>
+            </div>
+            ''', unsafe_allow_html=True)
+            if "Final Guess:" in current_question:
+                st.session_state.final_guess = current_question.split("Final Guess:")[1].strip()
+                st.session_state.game_state = "confirm_guess"
+                st.experimental_rerun()
+            with st.form("answer_form"):
+                # --- Voice Input for Answer ---
+                st.markdown("#### Use Voice (English/Urdu) for Your Answer")
+                col1, col2 = st.columns([0.85, 0.15])
+                with col1:
+                    voice_answer = st.text_input("Your answer (yes/no/both):",
+                                                 value=get_voice_transcription("voice_answer").strip(),
+                                                 key=f"answer_{st.session_state.current_q}")
+                with col2:
+                    st.markdown("<div style='height: 52px; display: flex; align-items: center;'>", unsafe_allow_html=True)
+                    audio_bytes = audio_recorder(text="",
+                                                 pause_threshold=0.8,
+                                                 key="voice_answer_audio",
+                                                 recording_color="#e8b62c",
+                                                 neutral_color="#6aa36f")
+                    st.markdown("</div>", unsafe_allow_html=True)
+
+                if st.form_submit_button("Submit"):
+                    answer_input = voice_answer.strip().lower()
+                    if answer_input not in ["yes", "no", "both"]:
+                        st.error("Please answer with 'yes', 'no', or 'both'!")
+                    else:
+                        st.session_state.answers.append(answer_input)
+                        st.session_state.conversation_history.append(
+                            {"role": "user", "content": answer_input}
+                        )
+                        next_response = ask_llama(
+                            st.session_state.conversation_history,
+                            st.session_state.category
+                        )
+                        if "Final Guess:" in next_response:
+                            st.session_state.final_guess = next_response.split("Final Guess:")[1].strip()
+                            st.session_state.game_state = "confirm_guess"
+                        else:
+                            st.session_state.questions.append(next_response)
+                            st.session_state.conversation_history.append(
+                                {"role": "assistant", "content": next_response}
+                            )
+                            st.session_state.current_q += 1
+                            if st.session_state.current_q >= 20:
+                                st.session_state.game_state = "result"
+                        st.experimental_rerun()
+            with st.expander("Need Help? Chat with AI Assistant"):
+                # --- Voice Input for Help Query ---
+                st.markdown("#### Use Voice (English/Urdu) for Help Query")
+                col1, col2 = st.columns([0.85, 0.15])
+                with col1:
+                    voice_help = st.text_input("Enter your help query:",
+                                               value=get_voice_transcription("voice_help").strip(),
+                                               key="help_query")
+                with col2:
+                    st.markdown("<div style='height: 52px; display: flex; align-items: center;'>", unsafe_allow_html=True)
+                    audio_bytes = audio_recorder(text="",
+                                                 pause_threshold=0.8,
+                                                 key="voice_help_audio",
+                                                 recording_color="#e8b62c",
+                                                 neutral_color="#6aa36f")
+                    st.markdown("</div>", unsafe_allow_html=True)
+
+                if st.button("Send", key="send_help"):
+                    if voice_help:
+                        help_response = ask_help_agent(voice_help)
+                        st.session_state.help_conversation.append({"query": voice_help, "response": help_response})
+                    else:
+                        st.error("Please enter a query!")
+                if st.session_state.help_conversation:
+                    for msg in st.session_state.help_conversation:
+                        st.markdown(f"**You:** {msg['query']}")
+                        st.markdown(f"**Help Assistant:** {msg['response']}")
 
+    elif st.session_state.game_state == "confirm_guess":
+        st.markdown(f'''
+        <div class="question-box">
+            <div style="display: flex; align-items: center; gap: 1rem; margin-bottom: 1.5rem;">
+                <div style="background: #6C63FF; width: 40px; height: 40px; border-radius: 50%;
+                            display: flex; align-items: center; justify-content: center; color: white;">
+                    <i class="fas fa-lightbulb"></i>
+                </div>
+                <h3 style="margin: 0; color: #1E293B;">AI's Final Guess</h3>
+            </div>
+            <p style="font-size: 1.2rem; line-height: 1.6; color: #1E293B;">
+                Is it <strong style="color: #6C63FF;">{st.session_state.final_guess}</strong>?
+            </p>
+        </div>
+        ''', unsafe_allow_html=True)
+        with st.form("confirm_form"):
+            col1, col2 = st.columns([0.85, 0.15])
+            with col1:
+                confirm_input = st.text_input("Type your answer (yes/no/both):", key="confirm_input")
+            with col2:
+                st.markdown("<div style='height: 52px; display: flex; align-items: center;'>", unsafe_allow_html=True)
+                audio_bytes = audio_recorder(text="",
+                                             pause_threshold=0.8,
+                                             key="voice_confirm_audio",
+                                             recording_color="#e8b62c",
+                                             neutral_color="#6aa36f")
+                st.markdown("</div>", unsafe_allow_html=True)
+
+            if st.form_submit_button("Submit"):
+                confirm_input = confirm_input.strip().lower()
+                if confirm_input not in ["yes", "no", "both"]:
+                    st.error("Please answer with 'yes', 'no', or 'both'!")
+                else:
+                    if confirm_input == "yes":
+                        st.session_state.game_state = "result"
+                        st.experimental_rerun()
+                        st.stop()
+                    else:
+                        st.session_state.conversation_history.append(
+                            {"role": "user", "content": "no"}
+                        )
+                        st.session_state.game_state = "gameplay"
+                        next_response = ask_llama(
+                            st.session_state.conversation_history,
+                            st.session_state.category
+                        )
+                        st.session_state.questions.append(next_response)
+                        st.session_state.conversation_history.append(
+                            {"role": "assistant", "content": next_response}
+                        )
+                        st.session_state.current_q += 1
+                        st.experimental_rerun()
 
+    elif st.session_state.game_state == "result":
+        if not st.session_state.final_guess:
+            qa_history = "\n".join(
+                [f"Q{i+1}: {q}\nA: {a}"
+                 for i, (q, a) in enumerate(zip(st.session_state.questions, st.session_state.answers))]
+            )
+            final_guess = ask_llama(
+                [{"role": "user", "content": qa_history}],
+                st.session_state.category,
+                is_final_guess=True
+            )
+            st.session_state.final_guess = final_guess.split("Final Guess:")[-1].strip()
+        show_confetti()
+        st.markdown(f'<div class="final-reveal">🎉 It\'s...</div>', unsafe_allow_html=True)
+        time.sleep(1)
+        st.markdown(f'<div class="final-reveal" style="font-size:3.5rem;color:#6C63FF;">{st.session_state.final_guess}</div>',
+                    unsafe_allow_html=True)
+        st.markdown(f"<p style='text-align:center; color:#64748B;'>Guessed in {len(st.session_state.questions)} questions</p>",
+                    unsafe_allow_html=True)
+        if st.button("Play Again", key="play_again"):
+            st.session_state.clear()
+            st.experimental_rerun()
 
 if __name__ == "__main__":
+    main()