iisadia commited on
Commit
d55d4bd
·
verified ·
1 Parent(s): 61d1238

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -30
app.py CHANGED
@@ -3,8 +3,12 @@ import time
3
  import requests
4
  from streamlit.components.v1 import html
5
 
6
- # Your Gemini API key for the help agent
7
- gemini_api_key = "<REDACTED-GOOGLE-API-KEY>"  # SECURITY: a live key was committed here; it remains in git history — revoke/rotate it
 
 
 
 
8
 
9
  # Custom CSS for professional look
10
  def inject_custom_css():
@@ -106,7 +110,7 @@ def show_confetti():
106
  </script>
107
  """)
108
 
109
- # Enhanced AI question generation for guessing game
110
  def ask_llama(conversation_history, category, is_final_guess=False):
111
  api_url = "https://api.groq.com/openai/v1/chat/completions"
112
  headers = {
@@ -150,31 +154,15 @@ def ask_llama(conversation_history, category, is_final_guess=False):
150
  st.error(f"Error calling Llama API: {str(e)}")
151
  return "Could not generate question"
152
 
153
- # New function for the help AI assistant using Gemini (or any similar model)
154
  def ask_help_agent(query):
155
- gemini_api_url = "https://api.gemini.com/v1/chat/completions"
156
- headers = {
157
- "Authorization": f"Bearer {gemini_api_key}",
158
- "Content-Type": "application/json"
159
- }
160
- system_prompt = "You're an AI help assistant. Provide concise, friendly advice for clarifying questions in a guessing game. Do not refer to the guessing game details."
161
- messages = [
162
- {"role": "system", "content": system_prompt},
163
- {"role": "user", "content": query}
164
- ]
165
- data = {
166
- "model": "gemini-1", # or another suitable Gemini model identifier
167
- "messages": messages,
168
- "temperature": 0.7,
169
- "max_tokens": 150
170
- }
171
- try:
172
- response = requests.post(gemini_api_url, headers=headers, json=data)
173
- response.raise_for_status()
174
- return response.json()["choices"][0]["message"]["content"]
175
- except Exception as e:
176
- st.error(f"Error calling Gemini API: {str(e)}")
177
- return "Could not get help at the moment."
178
 
179
  # Main game logic
180
  def main():
@@ -211,7 +199,6 @@ def main():
211
 
212
  with st.form("start_form"):
213
  category_input = st.text_input("Enter category (person/place/object):").strip().lower()
214
-
215
  if st.form_submit_button("Start Game"):
216
  if not category_input:
217
  st.error("Please enter a category!")
@@ -246,7 +233,6 @@ def main():
246
  with st.form("answer_form"):
247
  answer_input = st.text_input("Your answer (yes/no/both):",
248
  key=f"answer_{st.session_state.current_q}").strip().lower()
249
-
250
  if st.form_submit_button("Submit"):
251
  if answer_input not in ["yes", "no", "both"]:
252
  st.error("Please answer with 'yes', 'no', or 'both'!")
@@ -279,7 +265,7 @@ def main():
279
 
280
  st.rerun()
281
 
282
- # Side Help Option: independent chat with an AI help assistant
283
  with st.expander("Need Help? Chat with AI Assistant"):
284
  help_query = st.text_input("Enter your help query:", key="help_query")
285
  if st.button("Send", key="send_help"):
 
3
  import requests
4
  from streamlit.components.v1 import html
5
 
6
# Build the help-chat model lazily and keep exactly one copy per Streamlit
# session — @st.cache_resource ensures the weights are loaded only once.
@st.cache_resource
def get_help_agent():
    """Return a cached conversational pipeline (BlenderBot 400M distill).

    The import happens inside the function so `transformers` is only pulled
    in when the help feature is actually used.

    NOTE(review): the "conversational" pipeline task is deprecated and was
    removed in recent transformers releases — confirm the installed version
    still supports it.
    """
    from transformers import pipeline
    # Public conversational model; downloaded from the Hub on first use.
    return pipeline("conversational", model="facebook/blenderbot-400M-distill")
12
 
13
  # Custom CSS for professional look
14
  def inject_custom_css():
 
110
  </script>
111
  """)
112
 
113
+ # Enhanced AI question generation for guessing game using Llama model
114
  def ask_llama(conversation_history, category, is_final_guess=False):
115
  api_url = "https://api.groq.com/openai/v1/chat/completions"
116
  headers = {
 
154
  st.error(f"Error calling Llama API: {str(e)}")
155
  return "Could not generate question"
156
 
157
# Help assistant for the side chat, backed by a Hugging Face chatbot model.
def ask_help_agent(query):
    """Send *query* to the cached BlenderBot agent and return its reply.

    Parameters
    ----------
    query : str
        The user's free-form help question.

    Returns
    -------
    str
        The most recent response generated by the model.
    """
    from transformers import Conversation
    # Fetch the session-cached pipeline built by get_help_agent().
    agent = get_help_agent()
    convo = agent(Conversation(query))
    # The pipeline appends the bot's reply to generated_responses.
    return convo.generated_responses[-1]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
166
 
167
  # Main game logic
168
  def main():
 
199
 
200
  with st.form("start_form"):
201
  category_input = st.text_input("Enter category (person/place/object):").strip().lower()
 
202
  if st.form_submit_button("Start Game"):
203
  if not category_input:
204
  st.error("Please enter a category!")
 
233
  with st.form("answer_form"):
234
  answer_input = st.text_input("Your answer (yes/no/both):",
235
  key=f"answer_{st.session_state.current_q}").strip().lower()
 
236
  if st.form_submit_button("Submit"):
237
  if answer_input not in ["yes", "no", "both"]:
238
  st.error("Please answer with 'yes', 'no', or 'both'!")
 
265
 
266
  st.rerun()
267
 
268
+ # Side Help Option: independent chat with an AI help assistant (Hugging Face model)
269
  with st.expander("Need Help? Chat with AI Assistant"):
270
  help_query = st.text_input("Enter your help query:", key="help_query")
271
  if st.button("Send", key="send_help"):