iisadia committed
Commit 12fc990 · verified · 1 Parent(s): a502850

Update app.py

Files changed (1):
  1. app.py +120 -213

app.py CHANGED
@@ -3,93 +3,27 @@ import time
 import requests
 from streamlit.components.v1 import html
 
-# Import transformers and cache the help agent for performance
 @st.cache_resource
 def get_help_agent():
     from transformers import pipeline
-    # Using BlenderBot 400M Distill as the public conversational model
     return pipeline("conversational", model="facebook/blenderbot-400M-distill")
 
-# Custom CSS for professional look
 def inject_custom_css():
     st.markdown("""
     <style>
     @import url('https://fonts.googleapis.com/css2?family=Poppins:wght@400;600;700&display=swap');
-
-    * {
-        font-family: 'Poppins', sans-serif;
-    }
-
-    .title {
-        font-size: 3rem !important;
-        font-weight: 700 !important;
-        color: #6C63FF !important;
-        text-align: center;
-        margin-bottom: 0.5rem;
-    }
-
-    .subtitle {
-        font-size: 1.2rem !important;
-        text-align: center;
-        color: #666 !important;
-        margin-bottom: 2rem;
-    }
-
-    .question-box {
-        background: #F8F9FA;
-        border-radius: 15px;
-        padding: 2rem;
-        margin: 1.5rem 0;
-        box-shadow: 0 4px 6px rgba(0,0,0,0.1);
-        color: black;
-    }
-
-    .answer-btn {
-        border-radius: 12px !important;
-        padding: 0.5rem 1.5rem !important;
-        font-weight: 600 !important;
-        margin: 0.5rem !important;
-    }
-
-    .yes-btn {
-        background: #6C63FF !important;
-        color: white !important;
-    }
-
-    .no-btn {
-        background: #FF6B6B !important;
-        color: white !important;
-    }
-
-    .final-reveal {
-        animation: fadeIn 2s;
-        font-size: 2.5rem;
-        color: #6C63FF;
-        text-align: center;
-        margin: 2rem 0;
-    }
-
-    @keyframes fadeIn {
-        from { opacity: 0; }
-        to { opacity: 1; }
-    }
-
-    .confetti {
-        position: fixed;
-        top: 0;
-        left: 0;
-        width: 100%;
-        height: 100%;
-        pointer-events: none;
-        z-index: 1000;
-    }
-
-    .confidence-meter {
-        height: 10px;
-        background: linear-gradient(90deg, #FF6B6B 0%, #6C63FF 100%);
-        border-radius: 5px;
-        margin: 10px 0;
-    }
     </style>
     """, unsafe_allow_html=True)
 
@@ -101,16 +35,12 @@ def show_confetti():
     <script>
     const canvas = document.getElementById('confetti-canvas');
    const confetti = confetti.create(canvas, { resize: true });
-    confetti({
-        particleCount: 150,
-        spread: 70,
-        origin: { y: 0.6 }
-    });
     setTimeout(() => { canvas.remove(); }, 5000);
     </script>
     """)
 
-# Enhanced AI question generation for guessing game using Llama model
 def ask_llama(conversation_history, category, is_final_guess=False):
     api_url = "https://api.groq.com/openai/v1/chat/completions"
     headers = {
@@ -118,194 +48,171 @@ def ask_llama(conversation_history, category, is_final_guess=False):
         "Content-Type": "application/json"
     }
 
-    system_prompt = f"""You're playing 20 questions to guess a {category}. Follow these rules:
-    1. Ask strategic, non-repeating yes/no questions that narrow down possibilities
-    2. Consider all previous answers carefully before asking next question
-    3. If you're very confident (80%+ sure), respond with "Final Guess: [your guess]"
-    4. For places: ask about continent, climate, famous landmarks, or population
-    5. For people: ask about profession, gender, alive/dead, nationality, or fame
-    6. For objects: ask about size, color, usage, material, or where it's found
-    7. Never repeat questions and always make progress toward guessing"""
-
-    if is_final_guess:
-        prompt = f"""Based on these answers about a {category}, provide ONLY your final guess with no extra text:
-        {conversation_history}"""
-    else:
-        prompt = "Ask your next strategic yes/no question that will best narrow down the possibilities."
 
     messages = [
         {"role": "system", "content": system_prompt},
         *conversation_history,
-        {"role": "user", "content": prompt}
     ]
 
     data = {
         "model": "llama-3.3-70b-versatile",
         "messages": messages,
-        "temperature": 0.7 if is_final_guess else 0.8,
-        "max_tokens": 100
     }
 
     try:
         response = requests.post(api_url, headers=headers, json=data)
-        response.raise_for_status()
         return response.json()["choices"][0]["message"]["content"]
     except Exception as e:
-        st.error(f"Error calling Llama API: {str(e)}")
         return "Could not generate question"
 
-# New function for the help AI assistant using a Hugging Face chatbot model
 def ask_help_agent(query):
     from transformers import Conversation
-    # Get the cached help agent (BlenderBot)
-    help_agent = get_help_agent()
-    conversation = Conversation(query)
-    result = help_agent(conversation)
-    # The generated response is stored in generated_responses list
-    return result.generated_responses[-1]
 
-# Main game logic
 def main():
     inject_custom_css()
-
-    st.markdown('<div class="title">KASOTI</div>', unsafe_allow_html=True)
-    st.markdown('<div class="subtitle">The Smart Guessing Game</div>', unsafe_allow_html=True)
 
     if 'game_state' not in st.session_state:
-        st.session_state.game_state = "start"
-        st.session_state.questions = []
-        st.session_state.current_q = 0
-        st.session_state.answers = []
-        st.session_state.conversation_history = []
-        st.session_state.category = None
-        st.session_state.final_guess = None
-        st.session_state.help_conversation = []  # separate history for help agent
 
     # Start screen
     if st.session_state.game_state == "start":
         st.markdown("""
         <div class="question-box">
-            <h3>Welcome to <span style='color:#6C63FF;'>KASOTI 🎯</span></h3>
-            <p>Think of something and I'll try to guess it in 20 questions or less!</p>
-            <p>Choose a category:</p>
             <ul>
-                <li><strong>Person</strong> - celebrity, fictional character, historical figure</li>
-                <li><strong>Place</strong> - city, country, landmark, geographical location</li>
-                <li><strong>Object</strong> - everyday item, tool, vehicle, etc.</li>
             </ul>
-            <p>Type your category below to begin:</p>
         </div>
         """, unsafe_allow_html=True)
 
         with st.form("start_form"):
-            category_input = st.text_input("Enter category (person/place/object):").strip().lower()
             if st.form_submit_button("Start Game"):
-                if not category_input:
-                    st.error("Please enter a category!")
-                elif category_input not in ["person", "place", "object"]:
-                    st.error("Please enter either 'person', 'place', or 'object'!")
-                else:
-                    st.session_state.category = category_input
-                    first_question = ask_llama([
-                        {"role": "user", "content": "Ask your first strategic yes/no question."}
-                    ], category_input)
-                    st.session_state.questions = [first_question]
-                    st.session_state.conversation_history = [
-                        {"role": "assistant", "content": first_question}
-                    ]
-                    st.session_state.game_state = "gameplay"
-                    st.rerun()
 
     # Gameplay screen
     elif st.session_state.game_state == "gameplay":
-        current_question = st.session_state.questions[st.session_state.current_q]
 
-        # Check if AI made a guess
         if "Final Guess:" in current_question:
             st.session_state.final_guess = current_question.split("Final Guess:")[1].strip()
-            st.session_state.game_state = "result"
             st.rerun()
-
-        st.markdown(f'<div class="question-box">Question {st.session_state.current_q + 1}/20:<br><br>'
-                    f'<strong>{current_question}</strong></div>',
-                    unsafe_allow_html=True)
-
-        with st.form("answer_form"):
-            answer_input = st.text_input("Your answer (yes/no/both):",
-                                         key=f"answer_{st.session_state.current_q}").strip().lower()
-            if st.form_submit_button("Submit"):
-                if answer_input not in ["yes", "no", "both"]:
-                    st.error("Please answer with 'yes', 'no', or 'both'!")
-                else:
-                    st.session_state.answers.append(answer_input)
-                    st.session_state.conversation_history.append(
-                        {"role": "user", "content": answer_input}
-                    )
-
-                    # Generate next response
-                    next_response = ask_llama(
-                        st.session_state.conversation_history,
-                        st.session_state.category
-                    )
-
-                    # Check if AI made a guess
-                    if "Final Guess:" in next_response:
-                        st.session_state.final_guess = next_response.split("Final Guess:")[1].strip()
-                        st.session_state.game_state = "result"
-                    else:
-                        st.session_state.questions.append(next_response)
-                        st.session_state.conversation_history.append(
-                            {"role": "assistant", "content": next_response}
-                        )
-                        st.session_state.current_q += 1
 
-                    # Stop after 8 questions if no guess yet
-                    if st.session_state.current_q >= 8:
-                        st.session_state.game_state = "result"
-
-                    st.rerun()
 
-        # Side Help Option: independent chat with an AI help assistant (Hugging Face model)
-        with st.expander("Need Help? Chat with AI Assistant"):
-            help_query = st.text_input("Enter your help query:", key="help_query")
-            if st.button("Send", key="send_help"):
                 if help_query:
                     help_response = ask_help_agent(help_query)
-                    st.session_state.help_conversation.append({"query": help_query, "response": help_response})
-                else:
-                    st.error("Please enter a query!")
-            if st.session_state.help_conversation:
-                for msg in st.session_state.help_conversation:
-                    st.markdown(f"**You:** {msg['query']}")
-                    st.markdown(f"**Help Assistant:** {msg['response']}")
 
     # Result screen
     elif st.session_state.game_state == "result":
-        if st.session_state.final_guess is None:
-            # Generate final guess if not already made
-            qa_history = "\n".join(
-                [f"Q{i+1}: {q}\nA: {a}"
-                 for i, (q, a) in enumerate(zip(st.session_state.questions, st.session_state.answers))]
-            )
-
-            st.session_state.final_guess = ask_llama(
-                [{"role": "user", "content": qa_history}],
                 st.session_state.category,
                 is_final_guess=True
             )
 
         show_confetti()
-        st.markdown('<div class="final-reveal">🎉 My guess is...</div>', unsafe_allow_html=True)
         time.sleep(1)
-        st.markdown(f'<div class="final-reveal" style="font-size:3.5rem;color:#6C63FF;">{st.session_state.final_guess}</div>',
-                    unsafe_allow_html=True)
-
-        st.markdown(f"<p style='text-align:center'>Guessed in {len(st.session_state.questions)} questions</p>",
-                    unsafe_allow_html=True)
 
         if st.button("Play Again", key="play_again"):
             st.session_state.clear()
             st.rerun()
 
 if __name__ == "__main__":
-    main()
 
 import requests
 from streamlit.components.v1 import html
 
+# Cache the help agent
 @st.cache_resource
 def get_help_agent():
     from transformers import pipeline
     return pipeline("conversational", model="facebook/blenderbot-400M-distill")
 
+# Custom CSS
 def inject_custom_css():
     st.markdown("""
     <style>
     @import url('https://fonts.googleapis.com/css2?family=Poppins:wght@400;600;700&display=swap');
+    * { font-family: 'Poppins', sans-serif; }
+    .title { font-size: 3rem !important; color: #6C63FF !important; text-align: center; }
+    .subtitle { font-size: 1.2rem !important; text-align: center; color: #666 !important; }
+    .question-box { background: #F8F9FA; border-radius: 15px; padding: 2rem; margin: 1.5rem 0; box-shadow: 0 4px 6px rgba(0,0,0,0.1); }
+    .answer-btn { border-radius: 12px !important; padding: 0.5rem 1.5rem !important; }
+    .yes-btn { background: #6C63FF !important; color: white !important; }
+    .no-btn { background: #FF6B6B !important; color: white !important; }
+    .final-reveal { animation: fadeIn 2s; font-size: 2.5rem; color: #6C63FF; text-align: center; margin: 2rem 0; }
+    @keyframes fadeIn { from { opacity: 0; } to { opacity: 1; } }
+    .confetti { position: fixed; top: 0; left: 0; width: 100%; height: 100%; pointer-events: none; z-index: 1000; }
     </style>
     """, unsafe_allow_html=True)
 
     <script>
     const canvas = document.getElementById('confetti-canvas');
     const confetti = confetti.create(canvas, { resize: true });
+    confetti({ particleCount: 150, spread: 70, origin: { y: 0.6 } });
     setTimeout(() => { canvas.remove(); }, 5000);
     </script>
     """)
 
+# Improved question generation
 def ask_llama(conversation_history, category, is_final_guess=False):
     api_url = "https://api.groq.com/openai/v1/chat/completions"
     headers = {

         "Content-Type": "application/json"
     }
 
+    system_prompt = f"""You're playing 20 questions to guess a {category}. Follow these STRICT rules:
+    1. FIRST determine category specifics:
+       - Person: Start with real/fictional, alive/dead, gender, profession
+       - Place: Start with continent, urban/rural, famous landmarks
+       - Object: Start with purpose, size, material, location used
+    2. Ask SPECIFIC yes/no questions that eliminate possibilities
+    3. Only guess when 95% confident using "Final Guess: [answer]"
+    4. After guessing, immediately stop and verify
+    5. Never repeat questions
+    6. For objects: ask about usage, parts, location, materials
+    7. For people: clarify fictional status early
+    8. For places: ask about geography early"""
 
     messages = [
         {"role": "system", "content": system_prompt},
         *conversation_history,
+        {"role": "user", "content": "Ask your next question:" if not is_final_guess else "Make your final guess:"}
     ]
 
     data = {
         "model": "llama-3.3-70b-versatile",
         "messages": messages,
+        "temperature": 0.3 if is_final_guess else 0.7,
+        "max_tokens": 100,
+        "top_p": 0.9,
+        "frequency_penalty": 0.5
     }
 
     try:
         response = requests.post(api_url, headers=headers, json=data)
         return response.json()["choices"][0]["message"]["content"]
     except Exception as e:
+        st.error(f"API Error: {str(e)}")
         return "Could not generate question"
 
+# Help assistant
 def ask_help_agent(query):
     from transformers import Conversation
+    return get_help_agent()(Conversation(query)).generated_responses[-1]
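For reference, the condensed `ask_help_agent` above does the same work as the multi-line version it replaces: fetch the cached BlenderBot pipeline, wrap the query in a `Conversation`, run it, and return the last generated response. An expanded sketch of that logic (assuming a transformers 4.x release that still ships the "conversational" pipeline and `Conversation` class, which newer releases have removed; the function name is hypothetical):

from transformers import Conversation

def ask_help_agent_expanded(query):
    # Expanded form of the one-liner above, for readability only
    help_agent = get_help_agent()           # cached BlenderBot pipeline from @st.cache_resource
    conversation = Conversation(query)      # wrap the user's question
    result = help_agent(conversation)       # run the chatbot
    return result.generated_responses[-1]   # the model's latest reply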
 
 
 
 
 
 
+# Main app
 def main():
     inject_custom_css()
+    st.markdown('<div class="title">KASOTI</div><div class="subtitle">The Smart Guessing Game</div>', unsafe_allow_html=True)
 
     if 'game_state' not in st.session_state:
+        st.session_state.update({
+            'game_state': "start",
+            'questions': [],
+            'current_q': 0,
+            'answers': [],
+            'conversation_history': [],
+            'category': None,
+            'final_guess': None,
+            'help_conversation': []
+        })
 
     # Start screen
     if st.session_state.game_state == "start":
         st.markdown("""
         <div class="question-box">
+            <h3>Welcome to KASOTI 🎯</h3>
+            <p>Think of something and I'll guess it in 20 questions or less!</p>
+            <p>Choose category:</p>
             <ul>
+                <li><strong>Person</strong> - real or fictional</li>
+                <li><strong>Place</strong> - any geographical location</li>
+                <li><strong>Object</strong> - any physical item</li>
             </ul>
         </div>
         """, unsafe_allow_html=True)
 
         with st.form("start_form"):
+            category = st.selectbox("Select category:", ["person", "place", "object"])
             if st.form_submit_button("Start Game"):
+                st.session_state.category = category
+                first_question = ask_llama([], category)
+                st.session_state.questions = [first_question]
+                st.session_state.conversation_history = [{"role": "assistant", "content": first_question}]
+                st.session_state.game_state = "gameplay"
+                st.rerun()
 
     # Gameplay screen
     elif st.session_state.game_state == "gameplay":
+        current_question = st.session_state.questions[-1]
 
+        # Check for final guess
         if "Final Guess:" in current_question:
             st.session_state.final_guess = current_question.split("Final Guess:")[1].strip()
+            st.session_state.game_state = "confirm_guess"
             st.rerun()
 
+        st.markdown(f'<div class="question-box">Question {st.session_state.current_q + 1}/20:<br><br><strong>{current_question}</strong></div>', unsafe_allow_html=True)
 
+        with st.form("answer_form"):
+            answer = st.radio("Your answer:", ["Yes", "No", "Both"], horizontal=True)
+            if st.form_submit_button("Submit"):
+                detailed_answer = {
+                    "Yes": "Yes, that's correct.",
+                    "No": "No, that's not correct.",
+                    "Both": "It can be both depending on context."
+                }[answer]
+
+                st.session_state.answers.append(detailed_answer)
+                st.session_state.conversation_history.append({"role": "user", "content": detailed_answer})
+
+                # Generate next question
+                next_response = ask_llama(st.session_state.conversation_history, st.session_state.category)
+                st.session_state.questions.append(next_response)
+                st.session_state.conversation_history.append({"role": "assistant", "content": next_response})
+                st.session_state.current_q += 1
+
+                # Max 20 questions
+                if st.session_state.current_q >= 19:
+                    st.session_state.game_state = "result"
+                st.rerun()
+
+        # Help section
+        with st.expander("💡 Need Help?"):
+            help_query = st.text_input("Ask the AI Assistant:")
+            if st.button("Send"):
                 if help_query:
                     help_response = ask_help_agent(help_query)
+                    st.session_state.help_conversation.append(f"You: {help_query}\nAssistant: {help_response}")
+            for msg in st.session_state.help_conversation[-3:]:
+                st.markdown(f"`{msg}`")
+
+    # Guess confirmation screen
+    elif st.session_state.game_state == "confirm_guess":
+        st.markdown(f'<div class="question-box">🎯 My Final Guess:<br><br><strong>Is it {st.session_state.final_guess}?</strong></div>', unsafe_allow_html=True)
+
+        col1, col2 = st.columns(2)
+        with col1:
+            if st.button("Yes! Correct!", key="correct", use_container_width=True):
+                st.session_state.game_state = "result"
+                st.rerun()
+        with col2:
+            if st.button("No, Keep Trying", key="wrong", use_container_width=True):
+                st.session_state.conversation_history.append({"role": "user", "content": "No, that's incorrect."})
+                next_response = ask_llama(st.session_state.conversation_history, st.session_state.category)
+                st.session_state.questions.append(next_response)
+                st.session_state.conversation_history.append({"role": "assistant", "content": next_response})
+                st.session_state.current_q += 1
+                st.session_state.game_state = "gameplay"
+                st.rerun()
 
     # Result screen
     elif st.session_state.game_state == "result":
+        if not st.session_state.final_guess:
+            final_guess = ask_llama(
+                [{"role": "user", "content": "\n".join([f"Q{i+1}: {q}\nA: {a}" for i, (q, a) in enumerate(zip(st.session_state.questions, st.session_state.answers))])}],
                 st.session_state.category,
                 is_final_guess=True
             )
+            st.session_state.final_guess = final_guess.split("Final Guess:")[-1].strip()
 
         show_confetti()
+        st.markdown(f'<div class="final-reveal">🎉 It\'s...</div>', unsafe_allow_html=True)
         time.sleep(1)
+        st.markdown(f'<div class="final-reveal" style="font-size:3.5rem;">{st.session_state.final_guess}</div>', unsafe_allow_html=True)
+        st.markdown(f"<p style='text-align:center'>Guessed in {len(st.session_state.questions)} questions</p>", unsafe_allow_html=True)
 
         if st.button("Play Again", key="play_again"):
             st.session_state.clear()
             st.rerun()
 
 if __name__ == "__main__":
+    main()
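One readability note on the result screen above: the inline `"\n".join(...)` packs the entire Q/A transcript into a single expression. The previous version built it in a local `qa_history` variable first, which is easier to verify; a sketch in that style, using the same session-state fields:

        qa_history = "\n".join(
            f"Q{i+1}: {q}\nA: {a}"
            for i, (q, a) in enumerate(zip(st.session_state.questions, st.session_state.answers))
        )
        final_guess = ask_llama(
            [{"role": "user", "content": qa_history}],
            st.session_state.category,
            is_final_guess=True,
        )
        st.session_state.final_guess = final_guess.split("Final Guess:")[-1].strip()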