iisadia committed (verified)
Commit 97ed4c6 · Parent: 2370981

Update app.py

Files changed (1): app.py (+19 −28)
app.py CHANGED

@@ -3,7 +3,7 @@ import time
 import requests
 from streamlit.components.v1 import html
 import os
-
+from dotenv import load_dotenv
 
 # Import transformers and cache the help agent for performance
 @st.cache_resource
@@ -236,37 +236,28 @@ def ask_llama(conversation_history, category, is_final_guess=False):
 
 def ask_help_agent(query):
     try:
-        import ollama
-        import requests
-
-        # (1) Check if the Ollama server is running
-        try:
-            requests.get("http://localhost:11434", timeout=5)
-        except:
-            return "🛑 **Ollama is not running!**\n\nPlease:\n1. [Download Ollama](https://ollama.com)\n2. Run `ollama serve` in terminal\n3. Pull a model (`ollama pull llama3`)"
-
-        # (2) Build chat history
-        messages = [{"role": "system", "content": "You are a helpful AI assistant."}]
+        API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.1"
+        headers = {"Authorization": "Bearer hf_..."}  # ⚠️ Replace with your token
 
-        if "help_conversation" in st.session_state:
-            for msg in st.session_state.help_conversation:
-                if msg.get("query"):
-                    messages.append({"role": "user", "content": msg["query"]})
-                if msg.get("response"):
-                    messages.append({"role": "assistant", "content": msg["response"]})
+        prompt = f"""<s>[INST] <<SYS>>
+        You are a helpful assistant. Keep answers short.
+        <</SYS>>
+        {query} [/INST]"""
 
-        messages.append({"role": "user", "content": query})
-
-        # (3) Get the response
-        response = ollama.chat(
-            model="llama3",  # or "mistral" for a lighter model
-            messages=messages,
-            options={"temperature": 0.7}
+        response = requests.post(
+            API_URL,
+            headers=headers,
+            json={"inputs": prompt},
+            timeout=10
         )
-        return response['message']['content']
-
+
+        if response.status_code == 200:
+            return response.json()[0]['generated_text'].split("[/INST]")[-1].strip()
+        else:
+            return f"Error: {response.text}"
+
     except Exception as e:
-        return f"⚠️ **Assistant Error**\n\n{str(e)}\n\nPlease ensure Ollama is installed and running."
+        return f"Assistant busy. Try later. ({str(e)})"
 
 # Main game logic with enhanced UI
 def main():
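
The first hunk adds `from dotenv import load_dotenv`, yet the visible hunks never call it and the token stays hardcoded in `ask_help_agent`. A minimal sketch of wiring the two together, assuming a `.env` file next to app.py; the variable name `HF_API_TOKEN` is illustrative, not taken from the commit:

```python
import os
from dotenv import load_dotenv

load_dotenv()  # read key=value pairs from .env into the process environment

# .env would contain a line like: HF_API_TOKEN=hf_your_token_here
headers = {"Authorization": f"Bearer {os.getenv('HF_API_TOKEN')}"}
```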
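One caveat on the prompt: the `<<SYS>> ... <</SYS>>` wrapper comes from the Llama-2 chat template, while Mistral-7B-Instruct-v0.1's documented template is plain `<s>[INST] ... [/INST]` with no system block. A closer-to-spec prompt might fold the system text into the instruction (sketch; the helper name is illustrative):

```python
def build_prompt(query: str) -> str:
    # Mistral-7B-Instruct-v0.1 expects <s>[INST] ... [/INST] with no <<SYS>> block,
    # so the "keep answers short" instruction is folded into the user turn.
    return f"<s>[INST] You are a helpful assistant. Keep answers short.\n{query} [/INST]"
```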
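Also worth noting: the free Inference API answers HTTP 503 while a cold model loads, which the committed code surfaces as a generic `Error: ...` string. A small retry wrapper could smooth that over (sketch; the function name, attempt count, and wait time are illustrative):

```python
import time
import requests

def post_with_retry(url, headers, payload, attempts=3, wait=5):
    """POST to the Inference API, retrying while the model is still loading (503)."""
    resp = None
    for _ in range(attempts):
        resp = requests.post(url, headers=headers, json=payload, timeout=10)
        if resp.status_code != 503:  # 503 = model still loading on the free tier
            break
        time.sleep(wait)
    return resp
```

`ask_help_agent` could then call `post_with_retry(API_URL, headers, {"inputs": prompt})` in place of the bare `requests.post`.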