Update app.py
Browse files
app.py
CHANGED
@@ -3,8 +3,12 @@ import time
|
|
3 |
import requests
|
4 |
from streamlit.components.v1 import html
|
5 |
|
6 |
-
#
|
7 |
-
|
|
|
|
|
|
|
|
|
8 |
|
9 |
# Custom CSS for professional look
|
10 |
def inject_custom_css():
|
@@ -106,7 +110,7 @@ def show_confetti():
|
|
106 |
</script>
|
107 |
""")
|
108 |
|
109 |
-
# Enhanced AI question generation for guessing game
|
110 |
def ask_llama(conversation_history, category, is_final_guess=False):
|
111 |
api_url = "https://api.groq.com/openai/v1/chat/completions"
|
112 |
headers = {
|
@@ -150,31 +154,15 @@ def ask_llama(conversation_history, category, is_final_guess=False):
|
|
150 |
st.error(f"Error calling Llama API: {str(e)}")
|
151 |
return "Could not generate question"
|
152 |
|
153 |
-
# New function for the help AI assistant using the Gemini API
|
154 |
def ask_help_agent(query):
|
155 |
-
|
156 |
-
|
157 |
-
|
158 |
-
|
159 |
-
|
160 |
-
|
161 |
-
|
162 |
-
{"role": "system", "content": system_prompt},
|
163 |
-
{"role": "user", "content": query}
|
164 |
-
]
|
165 |
-
data = {
|
166 |
-
"model": "gemini-1", # or another suitable Gemini model identifier
|
167 |
-
"messages": messages,
|
168 |
-
"temperature": 0.7,
|
169 |
-
"max_tokens": 150
|
170 |
-
}
|
171 |
-
try:
|
172 |
-
response = requests.post(gemini_api_url, headers=headers, json=data)
|
173 |
-
response.raise_for_status()
|
174 |
-
return response.json()["choices"][0]["message"]["content"]
|
175 |
-
except Exception as e:
|
176 |
-
st.error(f"Error calling Gemini API: {str(e)}")
|
177 |
-
return "Could not get help at the moment."
|
178 |
|
179 |
# Main game logic
|
180 |
def main():
|
@@ -211,7 +199,6 @@ def main():
|
|
211 |
|
212 |
with st.form("start_form"):
|
213 |
category_input = st.text_input("Enter category (person/place/object):").strip().lower()
|
214 |
-
|
215 |
if st.form_submit_button("Start Game"):
|
216 |
if not category_input:
|
217 |
st.error("Please enter a category!")
|
@@ -246,7 +233,6 @@ def main():
|
|
246 |
with st.form("answer_form"):
|
247 |
answer_input = st.text_input("Your answer (yes/no/both):",
|
248 |
key=f"answer_{st.session_state.current_q}").strip().lower()
|
249 |
-
|
250 |
if st.form_submit_button("Submit"):
|
251 |
if answer_input not in ["yes", "no", "both"]:
|
252 |
st.error("Please answer with 'yes', 'no', or 'both'!")
|
@@ -279,7 +265,7 @@ def main():
|
|
279 |
|
280 |
st.rerun()
|
281 |
|
282 |
-
# Side Help Option: independent chat with an AI help assistant
|
283 |
with st.expander("Need Help? Chat with AI Assistant"):
|
284 |
help_query = st.text_input("Enter your help query:", key="help_query")
|
285 |
if st.button("Send", key="send_help"):
|
|
|
3 |
import requests
|
4 |
from streamlit.components.v1 import html
|
5 |
|
6 |
+
# Lazily construct the help-chat model once per server process; Streamlit's
# cache_resource decorator reuses the pipeline across reruns and sessions.
@st.cache_resource
def get_help_agent():
    """Return a cached Hugging Face conversational pipeline for the help chat.

    The import is deferred to call time so the (heavy) transformers package
    is only loaded when the assistant is actually used.
    """
    from transformers import pipeline

    # BlenderBot 400M Distill: a small, publicly available chatbot model.
    # NOTE(review): the "conversational" pipeline task is deprecated/removed
    # in recent transformers releases — confirm the pinned version supports it.
    agent = pipeline("conversational", model="facebook/blenderbot-400M-distill")
    return agent
|
12 |
|
13 |
# Custom CSS for professional look
|
14 |
def inject_custom_css():
|
|
|
110 |
</script>
|
111 |
""")
|
112 |
|
113 |
+
# Enhanced AI question generation for guessing game using Llama model
|
114 |
def ask_llama(conversation_history, category, is_final_guess=False):
|
115 |
api_url = "https://api.groq.com/openai/v1/chat/completions"
|
116 |
headers = {
|
|
|
154 |
st.error(f"Error calling Llama API: {str(e)}")
|
155 |
return "Could not generate question"
|
156 |
|
157 |
+
# Help-assistant entry point backed by a Hugging Face chatbot model.
def ask_help_agent(query):
    """Send *query* to the cached BlenderBot help agent and return its reply.

    Wraps the raw text in a transformers Conversation object, runs it through
    the cached pipeline, and returns the model's latest generated response.
    """
    from transformers import Conversation

    # Reuse the process-wide cached pipeline instead of reloading the model.
    agent = get_help_agent()
    convo = Conversation(query)
    convo = agent(convo)
    # The pipeline appends each model reply to generated_responses; the last
    # entry is the answer to the query just submitted.
    return convo.generated_responses[-1]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
166 |
|
167 |
# Main game logic
|
168 |
def main():
|
|
|
199 |
|
200 |
with st.form("start_form"):
|
201 |
category_input = st.text_input("Enter category (person/place/object):").strip().lower()
|
|
|
202 |
if st.form_submit_button("Start Game"):
|
203 |
if not category_input:
|
204 |
st.error("Please enter a category!")
|
|
|
233 |
with st.form("answer_form"):
|
234 |
answer_input = st.text_input("Your answer (yes/no/both):",
|
235 |
key=f"answer_{st.session_state.current_q}").strip().lower()
|
|
|
236 |
if st.form_submit_button("Submit"):
|
237 |
if answer_input not in ["yes", "no", "both"]:
|
238 |
st.error("Please answer with 'yes', 'no', or 'both'!")
|
|
|
265 |
|
266 |
st.rerun()
|
267 |
|
268 |
+
# Side Help Option: independent chat with an AI help assistant (Hugging Face model)
|
269 |
with st.expander("Need Help? Chat with AI Assistant"):
|
270 |
help_query = st.text_input("Enter your help query:", key="help_query")
|
271 |
if st.button("Send", key="send_help"):
|