Update app.py
app.py

@@ -101,11 +101,11 @@ def show_confetti():
         </script>
     """)
 
-# …
-def ask_mistral(conversation_history, category):
-    api_url = "https://api.…
+# Groq Llama AI API call
+def ask_llama(conversation_history, category):
+    api_url = "https://api.groq.ai/v1/chat/completions"
     headers = {
-        "Authorization": "Bearer …
+        "Authorization": "Bearer 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",
         "Content-Type": "application/json"
     }
 
@@ -117,7 +117,7 @@ def ask_mistral(conversation_history, category):
     ] + conversation_history
 
     data = {
-        "model": "…
+        "model": "llama-3.3-70b-versatile",
         "messages": messages,
         "temperature": 0.7,
         "max_tokens": 100
@@ -128,7 +128,7 @@ def ask_mistral(conversation_history, category):
         response.raise_for_status()
         return response.json()["choices"][0]["message"]["content"]
     except Exception as e:
-        st.error(f"Error calling …
+        st.error(f"Error calling Llama API: {str(e)}")
         return "Could not generate question"
 
 # Game logic
@@ -173,9 +173,10 @@ def main():
         else:
             st.session_state.category = category_input
             # Generate first question
-            first_question = …
-            …
-            …
+            first_question = ask_llama([{
+                "role": "user",
+                "content": "Ask your first yes/no question."
+            }], category_input)
             st.session_state.questions = [first_question]
             st.session_state.conversation_history = [
                 {"role": "assistant", "content": first_question}
@@ -208,7 +209,7 @@ def main():
                 st.session_state.game_state = "result"
             else:
                 # Generate next question
-                next_question = ask_mistral(
+                next_question = ask_llama(
                     st.session_state.conversation_history,
                     st.session_state.category
                 )
@@ -223,7 +224,7 @@ def main():
     # Result screen
     elif st.session_state.game_state == "result":
         # Generate final guess
-        final_guess = ask_mistral(
+        final_guess = ask_llama(
             st.session_state.conversation_history + [
                 {"role": "user", "content": "Based on all my answers, what is your final guess? Just state the guess directly."}
             ],
@@ -241,4 +242,4 @@ def main():
         st.rerun()
 
 if __name__ == "__main__":
-    main()
+    main()
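For reference, a minimal standalone sketch of the new ask_llama helper. Two details are assumptions that differ from the committed code: it targets Groq's documented OpenAI-compatible endpoint (https://api.groq.com/openai/v1/chat/completions, where the diff uses api.groq.ai), and it reads the key from a GROQ_API_KEY environment variable rather than the token hardcoded above. The system message is also an assumption, since the diff only shows the tail of the messages list ("] + conversation_history").

    import os
    import requests
    import streamlit as st

    def ask_llama(conversation_history, category):
        # Assumed endpoint: Groq's OpenAI-compatible API (the commit uses api.groq.ai).
        api_url = "https://api.groq.com/openai/v1/chat/completions"
        headers = {
            # Assumed: key from the environment instead of a hardcoded token.
            "Authorization": f"Bearer {os.environ['GROQ_API_KEY']}",
            "Content-Type": "application/json",
        }
        # Assumed system prompt; the diff does not show the messages that
        # precede "] + conversation_history".
        messages = [
            {"role": "system",
             "content": f"You are playing 20 questions. The category is {category}. "
                        "Ask one yes/no question at a time."}
        ] + conversation_history
        data = {
            "model": "llama-3.3-70b-versatile",
            "messages": messages,
            "temperature": 0.7,
            "max_tokens": 100,
        }
        try:
            response = requests.post(api_url, headers=headers, json=data, timeout=30)
            response.raise_for_status()
            return response.json()["choices"][0]["message"]["content"]
        except Exception as e:
            st.error(f"Error calling Llama API: {str(e)}")
            return "Could not generate question"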
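The three call sites in the diff share one pattern: each assistant question and each yes/no answer are appended to st.session_state.conversation_history, and every request replays the full history. A rough illustration of one round of that flow outside Streamlit, reusing the ask_llama sketch above (the "animals" category and the "Yes" answer are hypothetical):

    # Hypothetical transcript showing how conversation_history grows per turn.
    history = []

    # First question: seeded exactly as in the diff.
    first_question = ask_llama([{
        "role": "user",
        "content": "Ask your first yes/no question."
    }], "animals")
    history.append({"role": "assistant", "content": first_question})

    # One answer/question round: the player's reply is appended as a user
    # turn, then the whole history is replayed to get the next question.
    history.append({"role": "user", "content": "Yes"})
    next_question = ask_llama(history, "animals")
    history.append({"role": "assistant", "content": next_question})

    # Final guess: the same prompt the result screen sends in the diff.
    final_guess = ask_llama(history + [
        {"role": "user", "content": "Based on all my answers, what is your final guess? "
                                    "Just state the guess directly."}
    ], "animals")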