Update app.py
app.py CHANGED
@@ -3,6 +3,9 @@ import time
 import requests
 from streamlit.components.v1 import html
 
+# Your Gemini API key for the help agent
+gemini_api_key = "AIzaSyC1IoNgTzAZ52tx11K-2_9F5ruaCptiArE"
+
 # Custom CSS for professional look
 def inject_custom_css():
     st.markdown("""
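One caveat worth flagging on this hunk: the key is committed in plain source, so anyone who can read the file (or a public Space) can use it. A minimal sketch of the usual alternative, assuming a secret named GEMINI_API_KEY has been configured for the app (the name is illustrative, not from the commit):

import streamlit as st

# Read the key from the app's secrets store instead of hardcoding it.
# Assumes a GEMINI_API_KEY entry in .streamlit/secrets.toml or in the
# hosting platform's secrets settings; the name is hypothetical.
gemini_api_key = st.secrets["GEMINI_API_KEY"]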
@@ -103,7 +106,7 @@ def show_confetti():
     </script>
     """)
 
-# Enhanced AI question generation
+# Enhanced AI question generation for guessing game
 def ask_llama(conversation_history, category, is_final_guess=False):
     api_url = "https://api.groq.com/openai/v1/chat/completions"
     headers = {
@@ -147,6 +150,32 @@ def ask_llama(conversation_history, category, is_final_guess=False):
         st.error(f"Error calling Llama API: {str(e)}")
         return "Could not generate question"
 
+# New function for the help AI assistant using Gemini (or any similar model)
+def ask_help_agent(query):
+    gemini_api_url = "https://api.gemini.com/v1/chat/completions"
+    headers = {
+        "Authorization": f"Bearer {gemini_api_key}",
+        "Content-Type": "application/json"
+    }
+    system_prompt = "You're an AI help assistant. Provide concise, friendly advice for clarifying questions in a guessing game. Do not refer to the guessing game details."
+    messages = [
+        {"role": "system", "content": system_prompt},
+        {"role": "user", "content": query}
+    ]
+    data = {
+        "model": "gemini-1",  # or another suitable Gemini model identifier
+        "messages": messages,
+        "temperature": 0.7,
+        "max_tokens": 150
+    }
+    try:
+        response = requests.post(gemini_api_url, headers=headers, json=data)
+        response.raise_for_status()
+        return response.json()["choices"][0]["message"]["content"]
+    except Exception as e:
+        st.error(f"Error calling Gemini API: {str(e)}")
+        return "Could not get help at the moment."
+
 # Main game logic
 def main():
     inject_custom_css()
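A note on the new function: https://api.gemini.com is the domain of the Gemini cryptocurrency exchange, not Google's Gemini API, and "gemini-1" is not a published model id, so as written every call will raise and fall through to the except branch. Below is a hedged sketch of the same function pointed at Google's OpenAI-compatible endpoint; the URL and the gemini-1.5-flash model id match Google's documentation at the time of writing, but verify both before relying on this:

import requests
import streamlit as st

# Sketch only, not the committed code: same request shape as ask_help_agent,
# but against Google's documented OpenAI-compatible Gemini endpoint.
GEMINI_URL = "https://generativelanguage.googleapis.com/v1beta/openai/chat/completions"

def ask_help_agent(query):
    headers = {
        "Authorization": f"Bearer {gemini_api_key}",  # module-level key from this commit
        "Content-Type": "application/json",
    }
    data = {
        "model": "gemini-1.5-flash",  # a published model id; adjust as needed
        "messages": [
            {"role": "system", "content": "You're an AI help assistant for a guessing game."},
            {"role": "user", "content": query},
        ],
        "temperature": 0.7,
        "max_tokens": 150,
    }
    try:
        response = requests.post(GEMINI_URL, headers=headers, json=data, timeout=30)
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]
    except Exception as e:
        st.error(f"Error calling Gemini API: {str(e)}")
        return "Could not get help at the moment."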
@@ -162,6 +191,7 @@ def main():
         st.session_state.conversation_history = []
         st.session_state.category = None
         st.session_state.final_guess = None
+        st.session_state.help_conversation = []  # separate history for help agent
 
     # Start screen
     if st.session_state.game_state == "start":
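help_conversation is initialized alongside the rest of the first-run state; if that block runs only when the state keys are first created (the guard sits outside this hunk), sessions that were already live before this update would miss the new key and hit an AttributeError in the help UI. A defensive guard, placed anywhere before the expander renders, avoids that:

# Create the help history if this (possibly pre-update) session lacks it.
if "help_conversation" not in st.session_state:
    st.session_state.help_conversation = []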
@@ -214,12 +244,12 @@
                     unsafe_allow_html=True)
 
         with st.form("answer_form"):
-            answer_input = st.text_input("Your answer (yes/no):",
+            answer_input = st.text_input("Your answer (yes/no/both):",
                 key=f"answer_{st.session_state.current_q}").strip().lower()
 
             if st.form_submit_button("Submit"):
-                if answer_input not in ["yes", "no"]:
-                    st.error("Please answer with 'yes' or 'no'!")
+                if answer_input not in ["yes", "no", "both"]:
+                    st.error("Please answer with 'yes', 'no', or 'both'!")
                 else:
                     st.session_state.answers.append(answer_input)
                     st.session_state.conversation_history.append(
@@ -249,6 +279,20 @@
 
             st.rerun()
 
+        # Side Help Option: independent chat with an AI help assistant
+        with st.expander("Need Help? Chat with AI Assistant"):
+            help_query = st.text_input("Enter your help query:", key="help_query")
+            if st.button("Send", key="send_help"):
+                if help_query:
+                    help_response = ask_help_agent(help_query)
+                    st.session_state.help_conversation.append({"query": help_query, "response": help_response})
+                else:
+                    st.error("Please enter a query!")
+            if st.session_state.help_conversation:
+                for msg in st.session_state.help_conversation:
+                    st.markdown(f"**You:** {msg['query']}")
+                    st.markdown(f"**Help Assistant:** {msg['response']}")
+
     # Result screen
     elif st.session_state.game_state == "result":
         if st.session_state.final_guess is None:
@@ -278,4 +322,4 @@
         st.rerun()
 
 if __name__ == "__main__":
-    main()
+    main()
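One behavioral quirk of the help chat as written: st.text_input keeps its value under key="help_query" across reruns, so the box is not cleared after Send. A hedged sketch of the common workaround, moving the send logic into an on_click callback (Streamlit lets a callback rewrite a widget's session-state value because callbacks run before widgets are re-created); _send_help is an illustrative helper, not part of the commit:

def _send_help():
    # Runs before the script reruns, so resetting the widget key is legal here.
    query = st.session_state.help_query.strip()
    if query:
        st.session_state.help_conversation.append(
            {"query": query, "response": ask_help_agent(query)}
        )
        st.session_state.help_query = ""  # clear the box for the next question

st.button("Send", key="send_help", on_click=_send_help)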