Update app.py
app.py CHANGED
@@ -1,7 +1,6 @@
import streamlit as st
import time
import requests
-import json
from streamlit.components.v1 import html

# Custom CSS for professional look
@@ -77,10 +76,17 @@ def inject_custom_css():
        pointer-events: none;
        z-index: 1000;
    }
+
+    .confidence-meter {
+        height: 10px;
+        background: linear-gradient(90deg, #FF6B6B 0%, #6C63FF 100%);
+        border-radius: 5px;
+        margin: 10px 0;
+    }
    </style>
    """, unsafe_allow_html=True)

-# Confetti animation
+# Confetti animation
def show_confetti():
    html("""
    <canvas id="confetti-canvas" class="confetti"></canvas>
@@ -88,38 +94,48 @@ def show_confetti():
    <script>
    const canvas = document.getElementById('confetti-canvas');
    const confetti = confetti.create(canvas, { resize: true });
-
    confetti({
        particleCount: 150,
        spread: 70,
        origin: { y: 0.6 }
    });
-
-    setTimeout(() => {
-        canvas.remove();
-    }, 5000);
+    setTimeout(() => { canvas.remove(); }, 5000);
    </script>
    """)

-#
-def ask_llama(conversation_history, category):
-    api_url = "https://api.groq.com/openai/v1/chat/completions"
+# Enhanced AI question generation
+def ask_llama(conversation_history, category, is_final_guess=False):
+    api_url = "https://api.groq.com/openai/v1/chat/completions"
    headers = {
-        "Authorization": "Bearer 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",
+        "Authorization": "Bearer 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",
        "Content-Type": "application/json"
    }

+    system_prompt = f"""You're playing 20 questions to guess a {category}. Follow these rules:
+    1. Ask strategic, non-repeating yes/no questions that narrow down possibilities
+    2. Consider all previous answers carefully before asking next question
+    3. If you're very confident (80%+ sure), respond with "Final Guess: [your guess]"
+    4. For places: ask about continent, climate, famous landmarks, or population
+    5. For people: ask about profession, gender, alive/dead, nationality, or fame
+    6. For objects: ask about size, color, usage, material, or where it's found
+    7. Never repeat questions and always make progress toward guessing"""
+
+    if is_final_guess:
+        prompt = f"""Based on these answers about a {category}, provide ONLY your final guess with no extra text:
+{conversation_history}"""
+    else:
+        prompt = "Ask your next strategic yes/no question that will best narrow down the possibilities."
+
    messages = [
-        {
-
-
-
-    ] + conversation_history
+        {"role": "system", "content": system_prompt},
+        *conversation_history,
+        {"role": "user", "content": prompt}
+    ]

    data = {
-        "model": "llama-3.3-70b-versatile",
+        "model": "llama-3.3-70b-versatile",
        "messages": messages,
-        "temperature": 0.7,
+        "temperature": 0.7 if is_final_guess else 0.8,
        "max_tokens": 100
    }

@@ -131,12 +147,12 @@ def ask_llama(conversation_history, category):
        st.error(f"Error calling Llama API: {str(e)}")
        return "Could not generate question"

-#
+# Main game logic
def main():
    inject_custom_css()

    st.markdown('<div class="title">KASOTI</div>', unsafe_allow_html=True)
-    st.markdown('<div class="subtitle">The
+    st.markdown('<div class="subtitle">The Smart Guessing Game</div>', unsafe_allow_html=True)

    if 'game_state' not in st.session_state:
        st.session_state.game_state = "start"
@@ -145,26 +161,27 @@ def main():
        st.session_state.answers = []
        st.session_state.conversation_history = []
        st.session_state.category = None
+        st.session_state.final_guess = None

    # Start screen
    if st.session_state.game_state == "start":
        st.markdown("""
        <div class="question-box">
            <h3>Welcome to <span style='color:#6C63FF;'>KASOTI 🎯</span></h3>
-            <p>
-            <p>
+            <p>Think of something and I'll try to guess it in 20 questions or less!</p>
+            <p>Choose a category:</p>
            <ul>
-                <li><strong>
-                <li><strong>
-                <li><strong>
+                <li><strong>Person</strong> - celebrity, fictional character, historical figure</li>
+                <li><strong>Place</strong> - city, country, landmark, geographical location</li>
+                <li><strong>Object</strong> - everyday item, tool, vehicle, etc.</li>
            </ul>
-            <p
+            <p>Type your category below to begin:</p>
        </div>
        """, unsafe_allow_html=True)

        with st.form("start_form"):
-            category_input = st.text_input("Enter category (person
-
+            category_input = st.text_input("Enter category (person/place/object):").strip().lower()
+
            if st.form_submit_button("Start Game"):
                if not category_input:
                    st.error("Please enter a category!")
@@ -172,11 +189,9 @@ def main():
                    st.error("Please enter either 'person', 'place', or 'object'!")
                else:
                    st.session_state.category = category_input
-
-
-
-                        "content": "Ask your first yes/no question."
-                    }], category_input)
+                    first_question = ask_llama([
+                        {"role": "user", "content": "Ask your first strategic yes/no question."}
+                    ], category_input)
                    st.session_state.questions = [first_question]
                    st.session_state.conversation_history = [
                        {"role": "assistant", "content": first_question}
@@ -187,59 +202,80 @@ def main():
    # Gameplay screen
    elif st.session_state.game_state == "gameplay":
        current_question = st.session_state.questions[st.session_state.current_q]
+
+        # Check if AI made a guess
+        if "Final Guess:" in current_question:
+            st.session_state.final_guess = current_question.split("Final Guess:")[1].strip()
+            st.session_state.game_state = "result"
+            st.rerun()
+
        st.markdown(f'<div class="question-box">Question {st.session_state.current_q + 1}/20:<br><br>'
                    f'<strong>{current_question}</strong></div>',
                    unsafe_allow_html=True)
-
+
        with st.form("answer_form"):
-            answer_input = st.text_input("Your answer (yes/no):",
-
+            answer_input = st.text_input("Your answer (yes/no):",
+                                         key=f"answer_{st.session_state.current_q}").strip().lower()
+
            if st.form_submit_button("Submit"):
                if answer_input not in ["yes", "no"]:
                    st.error("Please answer with 'yes' or 'no'!")
                else:
-                    # Record answer
                    st.session_state.answers.append(answer_input)
                    st.session_state.conversation_history.append(
                        {"role": "user", "content": answer_input}
                    )

-                    #
-
+                    # Generate next response
+                    next_response = ask_llama(
+                        st.session_state.conversation_history,
+                        st.session_state.category
+                    )
+
+                    # Check if AI made a guess
+                    if "Final Guess:" in next_response:
+                        st.session_state.final_guess = next_response.split("Final Guess:")[1].strip()
                        st.session_state.game_state = "result"
                    else:
-
-                        next_question = ask_llama(
-                            st.session_state.conversation_history,
-                            st.session_state.category
-                        )
-                        st.session_state.questions.append(next_question)
+                        st.session_state.questions.append(next_response)
                        st.session_state.conversation_history.append(
-                            {"role": "assistant", "content":
+                            {"role": "assistant", "content": next_response}
                        )
                        st.session_state.current_q += 1

+                    # Stop after 8 questions if no guess yet
+                    if st.session_state.current_q >= 8:
+                        st.session_state.game_state = "result"
+
                    st.rerun()

    # Result screen
    elif st.session_state.game_state == "result":
-
-
-
-        {
-
-
-
+        if st.session_state.final_guess is None:
+            # Generate final guess if not already made
+            qa_history = "\n".join(
+                [f"Q{i+1}: {q}\nA: {a}"
+                 for i, (q, a) in enumerate(zip(st.session_state.questions, st.session_state.answers))]
+            )
+
+            st.session_state.final_guess = ask_llama(
+                [{"role": "user", "content": qa_history}],
+                st.session_state.category,
+                is_final_guess=True
+            )

        show_confetti()
-        st.markdown('<div class="final-reveal">🎉
+        st.markdown('<div class="final-reveal">🎉 My guess is...</div>', unsafe_allow_html=True)
        time.sleep(1)
-        st.markdown(f'<div class="final-reveal" style="font-size:3.5rem;color:#6C63FF;">{final_guess}</div>',
+        st.markdown(f'<div class="final-reveal" style="font-size:3.5rem;color:#6C63FF;">{st.session_state.final_guess}</div>',
                    unsafe_allow_html=True)
-
+
+        st.markdown(f"<p style='text-align:center'>Guessed in {len(st.session_state.questions)} questions</p>",
+                    unsafe_allow_html=True)
+
        if st.button("Play Again", key="play_again"):
            st.session_state.clear()
            st.rerun()

if __name__ == "__main__":
-    main()
+    main()