import os
import time

import requests
import streamlit as st
from dotenv import load_dotenv
from streamlit.components.v1 import html

# Load API keys from a local .env file (assumed variable names: GROQ_API_KEY, MISTRAL_API_KEY)
load_dotenv()


# Speech recognition component: renders a mic button that dictates into a Streamlit text input
def add_mic_button(target_input_key, is_select=False):
    html(f"""
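        <!--
          Minimal dictation sketch, assuming the browser Web Speech API
          (SpeechRecognition / webkitSpeechRecognition). Because Streamlit does not
          expose widget keys in the DOM, the transcript is written into the last
          text input on the host page; adjust that selector for your layout.
        -->
        <button id="mic-{target_input_key}"
                style="border:none;background:none;cursor:pointer;font-size:1.4rem;">🎤</button>
        <script>
          const btn = document.getElementById("mic-{target_input_key}");
          btn.addEventListener("click", () => {{
            const SpeechRec = window.SpeechRecognition || window.webkitSpeechRecognition;
            if (!SpeechRec) {{ alert("Speech recognition is not supported in this browser."); return; }}
            const rec = new SpeechRec();
            rec.lang = "en-US";
            rec.onresult = (event) => {{
              const text = event.results[0][0].transcript;
              // Assumption: target the most recently rendered text input on the parent page.
              const inputs = window.parent.document.querySelectorAll('input[type="text"]');
              const target = inputs[inputs.length - 1];
              if (target) {{
                // Use the native value setter so React (Streamlit's frontend) registers the change.
                const setter = Object.getOwnPropertyDescriptor(
                  window.parent.HTMLInputElement.prototype, "value").set;
                setter.call(target, text);
                target.dispatchEvent(new Event("input", {{ bubbles: true }}));
              }}
            }};
            rec.start();
          }});
        </script>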
""") # Import transformers and cache the help agent for performance @st.cache_resource def get_help_agent(): from transformers import pipeline # Using BlenderBot 400M Distill as the public conversational model (used elsewhere) return pipeline("conversational", model="facebook/blenderbot-400M-distill") # Enhanced Custom CSS with modern design def inject_custom_css(): st.markdown(""" """, unsafe_allow_html=True) # Confetti animation (enhanced) def show_confetti(): html(""" """) # Enhanced AI question generation for guessing game using Llama model def ask_llama(conversation_history, category, is_final_guess=False): api_url = "https://api.groq.com/openai/v1/chat/completions" headers = { "Authorization": "Bearer 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", "Content-Type": "application/json" } system_prompt = f"""You're playing 20 questions to guess a {category}. Follow these rules: 1. Ask strategic, non-repeating yes/no questions that narrow down possibilities 2. Consider all previous answers carefully before asking next question 3. If you're very confident (80%+ sure), respond with "Final Guess: [your guess]" 4. For places: ask about continent, climate, famous landmarks, country, city or population 5. For people: ask about fictional or real, profession, gender, alive/dead, nationality, or fame 6. For objects: ask about size, color, usage, material, or where it's found 7. Never repeat questions and always make progress toward guessing""" if is_final_guess: prompt = f"""Based on these answers about a {category}, provide ONLY your final guess with no extra text: {conversation_history}""" else: prompt = "Ask your next strategic yes/no question that will best narrow down the possibilities." messages = [ {"role": "system", "content": system_prompt}, *conversation_history, {"role": "user", "content": prompt} ] data = { "model": "llama-3.3-70b-versatile", "messages": messages, "temperature": 0.7 if is_final_guess else 0.8, "max_tokens": 100 } try: response = requests.post(api_url, headers=headers, json=data) response.raise_for_status() return response.json()["choices"][0]["message"]["content"] except Exception as e: st.error(f"Error calling Llama API: {str(e)}") return "Could not generate question" # New function for the help AI assistant using the Hugging Face InferenceClient MISTRAL_API_KEY = "wm5eLl09b9I9cOxR3E9n5rrRr1CRQQjn" def ask_help_agent(query): try: # Prepare Mistral API request url = "https://api.mistral.ai/v1/chat/completions" headers = { "Authorization": f"Bearer {MISTRAL_API_KEY}", "Content-Type": "application/json" } system_message = "You are a friendly Chatbot." 
        # Build the message history
        messages = [{"role": "system", "content": system_message}]
        if "help_conversation" in st.session_state:
            for msg in st.session_state.help_conversation:
                if msg.get("query"):
                    messages.append({"role": "user", "content": msg["query"]})
                if msg.get("response"):
                    messages.append({"role": "assistant", "content": msg["response"]})
        # Add the current user query
        messages.append({"role": "user", "content": query})

        # API payload
        payload = {
            "model": "mistral-tiny",
            "messages": messages,
            "temperature": 0.7,
            "top_p": 0.95
        }

        # Send the POST request
        response = requests.post(url, headers=headers, json=payload)
        if response.status_code == 200:
            result = response.json()
            return result["choices"][0]["message"]["content"]
        else:
            return f"API Error {response.status_code}: {response.text}"
    except Exception as e:
        return f"Error in help agent: {str(e)}"


# Main game logic with enhanced UI
def main():
    inject_custom_css()
    st.markdown('<div class="title">KASOTI</div>', unsafe_allow_html=True)
    st.markdown('<div class="subtitle">AI-Powered Guessing Game Challenge</div>', unsafe_allow_html=True)

    if 'game_state' not in st.session_state:
        st.session_state.game_state = "start"
        st.session_state.questions = []
        st.session_state.current_q = 0
        st.session_state.answers = []
        st.session_state.conversation_history = []
        st.session_state.category = None
        st.session_state.final_guess = None
        st.session_state.help_conversation = []  # separate history for the help agent

    # Start screen with enhanced layout
    if st.session_state.game_state == "start":
        with st.container():
            st.markdown("""

                <div class="welcome-box">
                    <h3>🎮 Welcome to KASOTI</h3>
                    <p>Think of something and I'll try to guess it in 20 questions or less!<br>
                       Choose from these categories:</p>
                    <div class="category"><strong>🧑 Person</strong><br>
                        Celebrity, fictional character, historical figure</div>
                    <div class="category"><strong>🌍 Place</strong><br>
                        City, country, landmark, geographical location</div>
                    <div class="category"><strong>🎯 Object</strong><br>
                        Everyday item, tool, vehicle, or concept</div>
                </div>
""", unsafe_allow_html=True) with st.form("start_form"): col1, col2 = st.columns([4, 1]) with col1: category_input = st.text_input("Enter category (person/place/object):", key="category_input").strip().lower() with col2: st.markdown("
") add_mic_button("category_input") if st.form_submit_button("Start Game"): if not category_input: st.error("Please enter a category!") elif category_input not in ["person", "place", "object"]: st.error("Please enter either 'person', 'place', or 'object'!") else: st.session_state.category = category_input first_question = ask_llama([ {"role": "user", "content": "Ask your first strategic yes/no question."} ], category_input) st.session_state.questions = [first_question] st.session_state.conversation_history = [ {"role": "assistant", "content": first_question} ] st.session_state.game_state = "gameplay" st.experimental_rerun() # Gameplay screen with progress bar elif st.session_state.game_state == "gameplay": with st.container(): # Add progress bar progress = (st.session_state.current_q + 1) / 20 st.markdown(f"""
QUESTION {st.session_state.current_q + 1} OF 20
""", unsafe_allow_html=True) current_question = st.session_state.questions[st.session_state.current_q] # Enhanced question box st.markdown(f'''

                <div class="question-box">
                    <div class="question-label">AI Question</div>
                    <div class="question-text">{current_question}</div>
                </div>
            ''', unsafe_allow_html=True)

            # Check whether the AI has already made a guess
            if "Final Guess:" in current_question:
                st.session_state.final_guess = current_question.split("Final Guess:")[1].strip()
                st.session_state.game_state = "confirm_guess"
                st.experimental_rerun()

            with st.form("answer_form"):
                col1, col2 = st.columns([4, 1])
                with col1:
                    answer_input = st.text_input("Your answer (yes/no/both):",
                                                 key=f"answer_{st.session_state.current_q}").strip().lower()
                with col2:
                    st.markdown("")  # spacer so the mic button lines up with the input
                    add_mic_button(f"answer_{st.session_state.current_q}")
                if st.form_submit_button("Submit"):
                    if answer_input not in ["yes", "no", "both"]:
                        st.error("Please answer with 'yes', 'no', or 'both'!")
                    else:
                        st.session_state.answers.append(answer_input)
                        st.session_state.conversation_history.append(
                            {"role": "user", "content": answer_input}
                        )

                        # Generate the next question or guess
                        next_response = ask_llama(
                            st.session_state.conversation_history,
                            st.session_state.category
                        )

                        # Check whether the AI made a guess
                        if "Final Guess:" in next_response:
                            st.session_state.final_guess = next_response.split("Final Guess:")[1].strip()
                            st.session_state.game_state = "confirm_guess"
                        else:
                            st.session_state.questions.append(next_response)
                            st.session_state.conversation_history.append(
                                {"role": "assistant", "content": next_response}
                            )
                            st.session_state.current_q += 1

                        # Stop after 20 questions max
                        if st.session_state.current_q >= 20:
                            st.session_state.game_state = "result"

                        st.experimental_rerun()

            # Side help option: independent chat with the Mistral-backed help assistant
            with st.expander("Need Help? Chat with AI Assistant"):
                help_query = st.text_input("Enter your help query:", key="help_query")
                if st.button("Send", key="send_help"):
                    if help_query:
                        help_response = ask_help_agent(help_query)
                        st.session_state.help_conversation.append({"query": help_query, "response": help_response})
                    else:
                        st.error("Please enter a query!")
                if st.session_state.help_conversation:
                    for msg in st.session_state.help_conversation:
                        st.markdown(f"**You:** {msg['query']}")
                        st.markdown(f"**Help Assistant:** {msg['response']}")

    # Guess confirmation screen using a text input response
    elif st.session_state.game_state == "confirm_guess":
        st.markdown(f'''

            <div class="guess-box">
                <div class="question-label">AI's Final Guess</div>
                <div class="question-text">Is it {st.session_state.final_guess}?</div>
            </div>
        ''', unsafe_allow_html=True)

        with st.form("confirm_form"):
            col1, col2 = st.columns([4, 1])
            with col1:
                confirm_input = st.text_input("Type your answer (yes/no/both):",
                                              key="confirm_input").strip().lower()
            with col2:
                st.markdown("")  # spacer so the mic button lines up with the input
                add_mic_button("confirm_input")
            if st.form_submit_button("Submit"):
                if confirm_input not in ["yes", "no", "both"]:
                    st.error("Please answer with 'yes', 'no', or 'both'!")
                else:
                    if confirm_input == "yes":
                        st.session_state.game_state = "result"
                        st.experimental_rerun()
                        st.stop()  # immediately halt further execution
                    else:
                        # Record the negative response and continue gameplay
                        st.session_state.conversation_history.append(
                            {"role": "user", "content": "no"}
                        )
                        st.session_state.game_state = "gameplay"
                        next_response = ask_llama(
                            st.session_state.conversation_history,
                            st.session_state.category
                        )
                        st.session_state.questions.append(next_response)
                        st.session_state.conversation_history.append(
                            {"role": "assistant", "content": next_response}
                        )
                        st.session_state.current_q += 1
                        st.experimental_rerun()

    # Result screen with enhanced celebration
    elif st.session_state.game_state == "result":
        if not st.session_state.final_guess:
            # Generate the final guess if one has not been made yet
            qa_history = "\n".join(
                [f"Q{i+1}: {q}\nA: {a}"
                 for i, (q, a) in enumerate(zip(st.session_state.questions, st.session_state.answers))]
            )
            final_guess = ask_llama(
                [{"role": "user", "content": qa_history}],
                st.session_state.category,
                is_final_guess=True
            )
            st.session_state.final_guess = final_guess.split("Final Guess:")[-1].strip()

        show_confetti()
        st.markdown('<div class="result-title">🎉 It\'s...</div>', unsafe_allow_html=True)
        time.sleep(1)
        st.markdown(f'<div class="result-guess">{st.session_state.final_guess}</div>', unsafe_allow_html=True)
        st.markdown(
            f'<div class="result-summary">Guessed in {len(st.session_state.questions)} questions</div>',
            unsafe_allow_html=True
        )

        if st.button("Play Again", key="play_again"):
            st.session_state.clear()
            st.experimental_rerun()


if __name__ == "__main__":
    main()