iisadia committed on
Commit
5e0ec0a
·
verified ·
1 Parent(s): 4dc1ab3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -13
app.py CHANGED
@@ -154,23 +154,44 @@ def ask_llama(conversation_history, category, is_final_guess=False):
154
  st.error(f"Error calling Llama API: {str(e)}")
155
  return "Could not generate question"
156
 
157
# Help-chat assistant built on a free Hugging Face conversational model
# (DialoGPT-medium via the transformers "conversational" pipeline).
def ask_help_agent(query):
    """Return a conversational reply to *query* from DialoGPT-medium.

    Any failure (missing dependency, model download error, inference error)
    is converted into an error string so the Streamlit UI never crashes.
    """
    try:
        from transformers import pipeline, Conversation

        # The conversational pipeline tracks dialogue context through a
        # Conversation object; running it appends the model's reply to
        # generated_responses.
        help_chat = pipeline("conversational", model="microsoft/DialoGPT-medium")
        conversation = Conversation(query)
        help_chat(conversation)
        return conversation.generated_responses[-1]
    except Exception as e:
        return f"Error in help agent: {str(e)}"
172
 
173
-
174
  # Main game logic
175
  def main():
176
  inject_custom_css()
@@ -272,7 +293,7 @@ def main():
272
 
273
  st.experimental_rerun()
274
 
275
- # Side Help Option: independent chat with an AI help assistant (free Hugging Face model)
276
  with st.expander("Need Help? Chat with AI Assistant"):
277
  help_query = st.text_input("Enter your help query:", key="help_query")
278
  if st.button("Send", key="send_help"):
 
154
  st.error(f"Error calling Llama API: {str(e)}")
155
  return "Could not generate question"
156
 
157
# Help AI assistant backed by the Hugging Face InferenceClient (zephyr-7b-beta).
def ask_help_agent(query):
    """Return the assistant's reply to *query*.

    Prior turns are read from st.session_state.help_conversation (a list of
    dicts with "query"/"response" keys) and replayed as chat history so the
    model keeps context across messages. On any failure (network, missing
    dependency, API error) an error string is returned instead of raising,
    so the Streamlit UI stays responsive.
    """
    try:
        from huggingface_hub import InferenceClient

        client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
        system_message = "You are a friendly Chatbot."

        # Rebuild (user, assistant) turn pairs from session state, if any.
        history = []
        if "help_conversation" in st.session_state:
            for msg in st.session_state.help_conversation:
                # Each history entry is a tuple: (user query, assistant response)
                history.append((msg.get("query", ""), msg.get("response", "")))

        messages = [{"role": "system", "content": system_message}]
        for user_msg, bot_msg in history:
            if user_msg:
                messages.append({"role": "user", "content": user_msg})
            if bot_msg:
                messages.append({"role": "assistant", "content": bot_msg})
        messages.append({"role": "user", "content": query})

        response_text = ""
        # Stream the completion and concatenate the token deltas.
        for message in client.chat_completion(
            messages,
            max_tokens=150,
            stream=True,
            temperature=0.7,
            top_p=0.95,
        ):
            token = message.choices[0].delta.content
            # BUG FIX: streamed chunks may carry delta.content == None
            # (role-only first chunk / finish chunk); the original
            # `response_text += token` raised TypeError on those, which the
            # broad except then masked as an "Error in help agent" string.
            if token:
                response_text += token
        return response_text
    except Exception as e:
        return f"Error in help agent: {str(e)}"
194
 
 
195
  # Main game logic
196
  def main():
197
  inject_custom_css()
 
293
 
294
  st.experimental_rerun()
295
 
296
+ # Side Help Option: independent chat with an AI help assistant using Hugging Face model
297
  with st.expander("Need Help? Chat with AI Assistant"):
298
  help_query = st.text_input("Enter your help query:", key="help_query")
299
  if st.button("Send", key="send_help"):