File size: 10,023 Bytes
1292ed1
74a6ec1
8622a36
74a6ec1
b922996
12fc990
d55d4bd
 
 
 
61d1238
12fc990
74a6ec1
 
 
bad3bd8
12fc990
 
 
 
 
 
 
 
 
 
74a6ec1
 
1292ed1
cd87f3e
74a6ec1
 
bad3bd8
 
74a6ec1
bad3bd8
 
12fc990
cd87f3e
74a6ec1
 
1292ed1
12fc990
cd87f3e
 
8622a36
cd87f3e
8622a36
 
3ff9487
12fc990
 
 
 
 
 
 
 
 
 
 
 
cd87f3e
3ff9487
cd87f3e
 
12fc990
cd87f3e
6f37b53
8622a36
cd87f3e
3ff9487
12fc990
 
 
 
8622a36
6f37b53
8622a36
3ff9487
 
8622a36
12fc990
3ff9487
8622a36
12fc990
61d1238
d55d4bd
12fc990
61d1238
12fc990
1292ed1
74a6ec1
12fc990
4db3cf5
3ff9487
12fc990
 
 
 
 
 
 
 
 
 
6f37b53
 
3ff9487
12df84a
 
12fc990
 
 
6f37b53
12fc990
 
 
6f37b53
12df84a
 
 
3ff9487
12fc990
3ff9487
12fc990
 
 
 
 
 
6f37b53
3ff9487
 
12fc990
cd87f3e
12fc990
cd87f3e
 
12fc990
cd87f3e
6f37b53
12fc990
6f37b53
12fc990
 
 
 
 
 
 
 
 
a083a98
12fc990
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61d1238
 
12fc990
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61d1238
3ff9487
 
12fc990
a083a98
 
 
 
 
 
12fc990
a083a98
cd87f3e
 
 
12fc990
3ff9487
 
12fc990
3ff9487
12fc990
 
cd87f3e
3ff9487
1292ed1
 
 
 
12fc990
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
import streamlit as st
import time
import requests
from streamlit.components.v1 import html

# Cache the help agent
@st.cache_resource
def get_help_agent():
    """Build and cache the conversational help-assistant pipeline.

    Decorated with st.cache_resource so the BlenderBot model is loaded once
    per server process instead of on every Streamlit rerun.
    """
    from transformers import pipeline  # deferred: heavy import, only needed here
    chat_agent = pipeline(
        "conversational",
        model="facebook/blenderbot-400M-distill",
    )
    return chat_agent

# Custom CSS
def inject_custom_css():
    """Inject the app-wide stylesheet (fonts, layout boxes, animations)."""
    # Keep the stylesheet in a named local so the markdown call stays short.
    page_css = """
    <style>
        @import url('https://fonts.googleapis.com/css2?family=Poppins:wght@400;600;700&display=swap');
        * { font-family: 'Poppins', sans-serif; }
        .title { font-size: 3rem !important; color: #6C63FF !important; text-align: center; }
        .subtitle { font-size: 1.2rem !important; text-align: center; color: #666 !important; }
        .question-box { background: #F8F9FA; border-radius: 15px; padding: 2rem; margin: 1.5rem 0; box-shadow: 0 4px 6px rgba(0,0,0,0.1); }
        .answer-btn { border-radius: 12px !important; padding: 0.5rem 1.5rem !important; }
        .yes-btn { background: #6C63FF !important; color: white !important; }
        .no-btn { background: #FF6B6B !important; color: white !important; }
        .final-reveal { animation: fadeIn 2s; font-size: 2.5rem; color: #6C63FF; text-align: center; margin: 2rem 0; }
        @keyframes fadeIn { from { opacity: 0; } to { opacity: 1; } }
        .confetti { position: fixed; top: 0; left: 0; width: 100%; height: 100%; pointer-events: none; z-index: 1000; }
    </style>
    """
    st.markdown(page_css, unsafe_allow_html=True)

# Confetti animation
def show_confetti():
    """Render a one-shot confetti burst overlay via the canvas-confetti CDN.

    Injects a full-screen <canvas>, fires a single burst, and removes the
    canvas after 5 seconds.
    """
    html("""
    <canvas id="confetti-canvas" class="confetti"></canvas>
    <script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/confetti.browser.min.js"></script>
    <script>
    const canvas = document.getElementById('confetti-canvas');
    // BUG FIX: the original `const confetti = confetti.create(...)` shadowed the
    // global `confetti` from the CDN script and hit the temporal dead zone
    // (ReferenceError), so no confetti ever fired. Use a distinct local name.
    const burst = confetti.create(canvas, { resize: true });
    burst({ particleCount: 150, spread: 70, origin: { y: 0.6 } });
    setTimeout(() => { canvas.remove(); }, 5000);
    </script>
    """)

# Improved question generation
def ask_llama(conversation_history, category, is_final_guess=False):
    """Query the Groq chat-completions API for the next 20-questions move.

    Args:
        conversation_history: list of {"role": ..., "content": ...} message
            dicts exchanged so far (may be empty for the opening question).
        category: "person", "place" or "object" — steers the system prompt.
        is_final_guess: when True, prompt for the final guess and use a lower
            temperature for a more deterministic answer.

    Returns:
        The model's reply text, or the fallback string
        "Could not generate question" on any request/parse error
        (the error is also surfaced via st.error).
    """
    import os  # local import, consistent with the file's deferred-import style

    api_url = "https://api.groq.com/openai/v1/chat/completions"
    # SECURITY: the key should come from the environment. The hard-coded
    # fallback below is the original committed key — it is already exposed in
    # source control and should be revoked; kept only for backward compatibility.
    api_key = os.environ.get(
        "GROQ_API_KEY",
        "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",
    )
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json"
    }

    system_prompt = f"""You're playing 20 questions to guess a {category}. Follow these STRICT rules:
1. FIRST determine category specifics:
   - Person: Start with real/fictional, alive/dead, gender, profession
   - Place: Start with continent, urban/rural, famous landmarks
   - Object: Start with purpose, size, material, location used
2. Ask SPECIFIC yes/no questions that eliminate possibilities
3. Only guess when 95% confident using "Final Guess: [answer]"
4. After guessing, immediately stop and verify
5. Never repeat questions
6. For objects: ask about usage, parts, location, materials
7. For people: clarify fictional status early
8. For places: ask about geography early"""

    messages = [
        {"role": "system", "content": system_prompt},
        *conversation_history,
        {"role": "user", "content": "Ask your next question:" if not is_final_guess else "Make your final guess:"}
    ]

    data = {
        "model": "llama-3.3-70b-versatile",
        "messages": messages,
        # Lower temperature for the final guess: we want the most likely answer.
        "temperature": 0.3 if is_final_guess else 0.7,
        "max_tokens": 100,
        "top_p": 0.9,
        "frequency_penalty": 0.5
    }

    try:
        # timeout keeps a stalled request from hanging the Streamlit run forever
        response = requests.post(api_url, headers=headers, json=data, timeout=30)
        # Surface HTTP errors (401/429/5xx) clearly instead of a KeyError below.
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]
    except Exception as e:
        st.error(f"API Error: {str(e)}")
        return "Could not generate question"

# Help assistant
def ask_help_agent(query):
    """Send *query* to the cached BlenderBot pipeline; return its latest reply."""
    from transformers import Conversation  # deferred, matching get_help_agent
    agent = get_help_agent()
    conversation = agent(Conversation(query))
    return conversation.generated_responses[-1]

# Main app
def main():
    """Run the KASOTI guessing game.

    A four-state Streamlit state machine driven by st.session_state.game_state:
    "start" -> "gameplay" -> ("confirm_guess" ->) "result". Each branch renders
    its screen and transitions by mutating session state and calling st.rerun().
    """
    inject_custom_css()
    st.markdown('<div class="title">KASOTI</div><div class="subtitle">The Smart Guessing Game</div>', unsafe_allow_html=True)

    # One-time session initialization: all game state lives in st.session_state
    # so it survives Streamlit's top-to-bottom reruns.
    if 'game_state' not in st.session_state:
        st.session_state.update({
            'game_state': "start",
            'questions': [],
            'current_q': 0,
            'answers': [],
            'conversation_history': [],
            'category': None,
            'final_guess': None,
            'help_conversation': []
        })

    # Start screen
    if st.session_state.game_state == "start":
        st.markdown("""
        <div class="question-box">
            <h3>Welcome to KASOTI ๐ŸŽฏ</h3>
            <p>Think of something and I'll guess it in 20 questions or less!</p>
            <p>Choose category:</p>
            <ul>
                <li><strong>Person</strong> - real or fictional</li>
                <li><strong>Place</strong> - any geographical location</li>
                <li><strong>Object</strong> - any physical item</li>
            </ul>
        </div>
        """, unsafe_allow_html=True)

        with st.form("start_form"):
            category = st.selectbox("Select category:", ["person", "place", "object"])
            if st.form_submit_button("Start Game"):
                st.session_state.category = category
                # Seed the game with the model's opening question (empty history).
                first_question = ask_llama([], category)
                st.session_state.questions = [first_question]
                st.session_state.conversation_history = [{"role": "assistant", "content": first_question}]
                st.session_state.game_state = "gameplay"
                st.rerun()

    # Gameplay screen
    elif st.session_state.game_state == "gameplay":
        current_question = st.session_state.questions[-1]
        
        # Check for final guess
        # If the model emitted "Final Guess: X", divert to the confirmation
        # screen. st.rerun() aborts this run, so the code below is skipped.
        if "Final Guess:" in current_question:
            st.session_state.final_guess = current_question.split("Final Guess:")[1].strip()
            st.session_state.game_state = "confirm_guess"
            st.rerun()

        st.markdown(f'<div class="question-box">Question {st.session_state.current_q + 1}/20:<br><br><strong>{current_question}</strong></div>', unsafe_allow_html=True)

        with st.form("answer_form"):
            answer = st.radio("Your answer:", ["Yes", "No", "Both"], horizontal=True)
            if st.form_submit_button("Submit"):
                # Expand the terse radio choice into a full sentence so the
                # model gets unambiguous context.
                detailed_answer = {
                    "Yes": "Yes, that's correct.",
                    "No": "No, that's not correct.",
                    "Both": "It can be both depending on context."
                }[answer]
                
                st.session_state.answers.append(detailed_answer)
                st.session_state.conversation_history.append({"role": "user", "content": detailed_answer})

                # Generate next question
                next_response = ask_llama(st.session_state.conversation_history, st.session_state.category)
                st.session_state.questions.append(next_response)
                st.session_state.conversation_history.append({"role": "assistant", "content": next_response})
                st.session_state.current_q += 1

                # Max 20 questions
                # NOTE(review): this triggers after the 19th answer, so the
                # 20th generated question is never shown — looks like an
                # off-by-one vs. the advertised 20; confirm intended limit.
                if st.session_state.current_q >= 19:
                    st.session_state.game_state = "result"
                st.rerun()

        # Help section
        with st.expander("๐Ÿ’ก Need Help?"):
            help_query = st.text_input("Ask the AI Assistant:")
            if st.button("Send"):
                if help_query:
                    help_response = ask_help_agent(help_query)
                    st.session_state.help_conversation.append(f"You: {help_query}\nAssistant: {help_response}")
                # NOTE(review): history renders only inside the Send branch,
                # i.e. only on the rerun where the button was clicked — verify
                # whether it should display persistently.
                for msg in st.session_state.help_conversation[-3:]:
                    st.markdown(f"`{msg}`")

    # Guess confirmation screen
    elif st.session_state.game_state == "confirm_guess":
        st.markdown(f'<div class="question-box">๐ŸŽฏ My Final Guess:<br><br><strong>Is it {st.session_state.final_guess}?</strong></div>', unsafe_allow_html=True)
        
        col1, col2 = st.columns(2)
        with col1:
            if st.button("Yes! Correct!", key="correct", use_container_width=True):
                st.session_state.game_state = "result"
                st.rerun()
        with col2:
            # Wrong guess: feed the rejection back and resume questioning.
            if st.button("No, Keep Trying", key="wrong", use_container_width=True):
                st.session_state.conversation_history.append({"role": "user", "content": "No, that's incorrect."})
                next_response = ask_llama(st.session_state.conversation_history, st.session_state.category)
                st.session_state.questions.append(next_response)
                st.session_state.conversation_history.append({"role": "assistant", "content": next_response})
                st.session_state.current_q += 1
                st.session_state.game_state = "gameplay"
                st.rerun()

    # Result screen
    elif st.session_state.game_state == "result":
        # If we ran out of questions without a confirmed guess, force one now
        # from a transcript of the full Q/A history.
        if not st.session_state.final_guess:
            # Fixed list comprehension with proper bracket closure
            qa_history = "\n".join(
                [f"Q{i+1}: {q}\nA: {a}" 
                 for i, (q, a) in enumerate(zip(st.session_state.questions, st.session_state.answers))]
            )
            
            final_guess = ask_llama(
                [{"role": "user", "content": qa_history}],
                st.session_state.category,
                is_final_guess=True
            )
            # split(...)[-1] works whether or not the reply contains the
            # "Final Guess:" prefix.
            st.session_state.final_guess = final_guess.split("Final Guess:")[-1].strip()

        show_confetti()
        st.markdown(f'<div class="final-reveal">๐ŸŽ‰ It\'s...</div>', unsafe_allow_html=True)
        # NOTE(review): time.sleep blocks the server thread for this session;
        # it delays the reveal but is not a client-side animation.
        time.sleep(1)
        st.markdown(f'<div class="final-reveal" style="font-size:3.5rem;">{st.session_state.final_guess}</div>', unsafe_allow_html=True)
        st.markdown(f"<p style='text-align:center'>Guessed in {len(st.session_state.questions)} questions</p>", unsafe_allow_html=True)
        
        if st.button("Play Again", key="play_again"):
            # Wipe all session state so the next run re-initializes from scratch.
            st.session_state.clear()
            st.rerun()

# Script entry point: launch the Streamlit app when run directly.
if __name__ == "__main__":
    main()