jjvelezo committed on
Commit
54b69c4
·
verified ·
1 Parent(s): 49b6791

Update agent.py

Browse files
Files changed (1) hide show
  1. agent.py +90 -98
agent.py CHANGED
@@ -3,101 +3,45 @@ import requests
3
  import urllib.parse
4
  from bs4 import BeautifulSoup
5
 
6
class BaseModel:
    """Common interface that every model backend implements."""

    def answer(self, prompt: str) -> str:
        """Return a text answer for *prompt*. Subclasses must override."""
        raise NotImplementedError("Model must implement the answer method.")
9
-
10
class HfApiModel(BaseModel):
    """Model backend that queries the Hugging Face Inference API."""

    def __init__(self, model_name: str, api_token: str):
        # Repository id on the Hub, e.g. "mistralai/Mistral-7B-Instruct-v0.1".
        self.model_name = model_name
        # Bearer token placed in the Authorization header.
        self.api_token = api_token

    def answer(self, prompt: str) -> str:
        """Send *prompt* to the inference endpoint and return up to 200 chars.

        Never raises: any failure is folded into a human-readable error
        string so callers can always display the result.
        """
        url = f"https://api-inference.huggingface.co/models/{self.model_name}"
        headers = {
            "Authorization": f"Bearer {self.api_token}",
            "Content-Type": "application/json",
        }
        payload = {
            "inputs": prompt,
            # Deterministic, bounded generation for short factual answers.
            "parameters": {
                "max_new_tokens": 200,
                "temperature": 0.0,
            },
        }

        try:
            response = requests.post(url, headers=headers, json=payload, timeout=30)
            response.raise_for_status()
            output = response.json()
            # FIX: also require a non-empty list — previously output[0] on an
            # empty list raised IndexError, which the broad except below
            # misreported as "Error from Hugging Face API: list index out of range".
            if isinstance(output, list) and output and "generated_text" in output[0]:
                return output[0]["generated_text"].strip()[:200]
            return "No response generated."
        except Exception as e:
            return f"Error from Hugging Face API: {e}"
38
-
39
class LiteLLMModel(BaseModel):
    """Backend that forwards prompts to a LiteLLM-compatible HTTP endpoint."""

    def __init__(self, endpoint_url: str):
        # Full URL of the inference endpoint to POST prompts to.
        self.url = endpoint_url

    def answer(self, prompt: str) -> str:
        """POST *prompt* to the endpoint and return its "output" field."""
        try:
            reply = requests.post(self.url, json={"input": prompt}, timeout=30)
            reply.raise_for_status()
            body = reply.json()
            return body.get("output", "No output.")
        except Exception as e:
            return f"LiteLLM error: {e}"
50
-
51
class OpenAIServerModel(BaseModel):
    """Backend that calls the OpenAI chat-completions REST API directly."""

    def __init__(self, api_key: str, model: str = "gpt-3.5-turbo"):
        self.api_key = api_key  # secret key for the Authorization header
        self.model = model      # chat model identifier

    def answer(self, prompt: str) -> str:
        """Ask the chat API for a completion; truncate the reply to 200 chars."""
        request_headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }
        request_body = {
            "model": self.model,
            "messages": [{"role": "user", "content": prompt}],
            "max_tokens": 200,
            "temperature": 0.0,
        }
        try:
            reply = requests.post(
                "https://api.openai.com/v1/chat/completions",
                headers=request_headers,
                json=request_body,
                timeout=30,
            )
            reply.raise_for_status()
            payload = reply.json()
            return payload["choices"][0]["message"]["content"].strip()[:200]
        except Exception as e:
            return f"OpenAI error: {e}"
77
-
78
class DuckDuckGoAgent:
    """Question-answering agent: DuckDuckGo first, LLM backend as fallback."""

    def __init__(self):
        print("DuckDuckGoAgent initialized.")
        # Browser-like header so DuckDuckGo serves normal HTML responses.
        self.headers = {"User-Agent": "Mozilla/5.0"}
        # Backend selection is driven entirely by environment variables.
        self.hf_api_key = os.getenv("HF_API_TOKEN")
        self.model_type = os.getenv("MODEL_TYPE", "huggingface")
        self.model_name = os.getenv("MODEL_NAME", "mistralai/Mistral-7B-Instruct-v0.1")
        self.model_url = os.getenv("MODEL_URL")  # For LiteLLM
        self.openai_key = os.getenv("OPENAI_API_KEY")

        # Fail fast during construction if no usable backend is configured.
        self.llm = self._init_model()
89
 
90
- def _init_model(self) -> BaseModel:
91
- if self.model_type == "openai" and self.openai_key:
92
- return OpenAIServerModel(api_key=self.openai_key)
93
- elif self.model_type == "litellm" and self.model_url:
94
- return LiteLLMModel(endpoint_url=self.model_url)
95
- elif self.model_type == "huggingface" and self.hf_api_key:
96
- return HfApiModel(model_name=self.model_name, api_token=self.hf_api_key)
97
- else:
98
- raise ValueError("No valid model configuration found.")
 
 
 
 
 
 
 
 
99
 
100
  def get_duckduckgo_answer(self, query: str) -> str:
 
 
 
 
101
  search_query = urllib.parse.quote(query)
102
  url = f"https://api.duckduckgo.com/?q={search_query}&format=json&no_html=1&skip_disambig=1"
103
 
@@ -107,13 +51,20 @@ class DuckDuckGoAgent:
107
  data = response.json()
108
  if 'AbstractText' in data and data['AbstractText']:
109
  return data['AbstractText'][:200]
 
 
 
 
 
110
  return self.scrape_duckduckgo(query)
111
- return self.scrape_duckduckgo(query)
112
  except Exception as e:
113
- print(f"Error with DuckDuckGo API: {e}")
114
  return self.scrape_duckduckgo(query)
115
 
116
  def scrape_duckduckgo(self, query: str) -> str:
 
 
 
117
  print("Using fallback: scraping HTML results.")
118
  try:
119
  response = requests.post(
@@ -128,13 +79,54 @@ class DuckDuckGoAgent:
128
  text = s.get_text().strip()
129
  if text:
130
  return text[:200]
131
- return self.llm.answer(query)
 
132
  except Exception as e:
133
- print(f"Scraping error: {e}")
134
- return self.llm.answer(query)
135
 
136
- def __call__(self, question: str) -> str:
137
- print(f"Agent received question: {question[:50]}...")
138
- answer = self.get_duckduckgo_answer(question)
139
- print(f"Agent returning answer: {answer}")
140
- return answer
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  import urllib.parse
4
  from bs4 import BeautifulSoup
5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
class DuckDuckGoAgent:
    """Answers questions via DuckDuckGo, falling back to an LLM backend."""

    def __init__(self):
        print("DuckDuckGoAgent initialized.")
        # Browser-like header so DuckDuckGo returns regular HTML.
        self.headers = {"User-Agent": "Mozilla/5.0"}

        # Registry of available LLM backends, keyed by backend name.
        # Extend by mapping a new name (e.g. "openai", "lite_llm",
        # "custom_server") to a bound method with the same signature.
        self.supported_models = {"huggingface": self.call_huggingface_llm}

        # Backend used when every search strategy comes up empty.
        self.default_model = "huggingface"
        self.model_config = {
            "huggingface": {
                "api_key": os.getenv("HF_API_TOKEN"),
                "model_name": "mistralai/Mistral-7B-Instruct-v0.1",
            },
        }
29
+
30
+ def __call__(self, question: str) -> str:
31
+ """
32
+ Main method to process a question. It first tries DuckDuckGo,
33
+ then scraping, and finally uses a language model if needed.
34
+ """
35
+ print(f"Agent received question: {question[:50]}...")
36
+ answer = self.get_duckduckgo_answer(question)
37
+ print(f"Agent returning answer: {answer}")
38
+ return answer.strip()
39
 
40
  def get_duckduckgo_answer(self, query: str) -> str:
41
+ """
42
+ Attempt to get an answer from the DuckDuckGo API.
43
+ If no abstract text is found, fall back to scraping.
44
+ """
45
  search_query = urllib.parse.quote(query)
46
  url = f"https://api.duckduckgo.com/?q={search_query}&format=json&no_html=1&skip_disambig=1"
47
 
 
51
  data = response.json()
52
  if 'AbstractText' in data and data['AbstractText']:
53
  return data['AbstractText'][:200]
54
+ else:
55
+ print("No abstract found, falling back to scraping.")
56
+ return self.scrape_duckduckgo(query)
57
+ else:
58
+ print(f"DuckDuckGo API failed with status: {response.status_code}")
59
  return self.scrape_duckduckgo(query)
 
60
  except Exception as e:
61
+ print(f"Error contacting DuckDuckGo API: {e}")
62
  return self.scrape_duckduckgo(query)
63
 
64
  def scrape_duckduckgo(self, query: str) -> str:
65
+ """
66
+ Fallback to scraping DuckDuckGo search results if API fails or no abstract found.
67
+ """
68
  print("Using fallback: scraping HTML results.")
69
  try:
70
  response = requests.post(
 
79
  text = s.get_text().strip()
80
  if text:
81
  return text[:200]
82
+ print("No useful snippets found, falling back to language model.")
83
+ return self.call_model_backend(query)
84
  except Exception as e:
85
+ print(f"Error scraping DuckDuckGo: {e}")
86
+ return self.call_model_backend(query)
87
 
88
+ def call_model_backend(self, prompt: str) -> str:
89
+ """
90
+ Dispatch to the selected LLM backend.
91
+ """
92
+ if self.default_model in self.supported_models:
93
+ return self.supported_models[self.default_model](prompt)
94
+ return "No valid model backend configured."
95
+
96
+ def call_huggingface_llm(self, prompt: str) -> str:
97
+ """
98
+ Call Hugging Face Inference API as fallback LLM.
99
+ """
100
+ config = self.model_config.get("huggingface", {})
101
+ api_key = config.get("api_key")
102
+ model = config.get("model_name")
103
+
104
+ if not api_key or not model:
105
+ return "Error: Hugging Face API Token or model not configured."
106
+
107
+ url = f"https://api-inference.huggingface.co/models/{model}"
108
+ headers = {
109
+ "Authorization": f"Bearer {api_key}",
110
+ "Content-Type": "application/json"
111
+ }
112
+ payload = {
113
+ "inputs": prompt,
114
+ "parameters": {
115
+ "max_new_tokens": 200,
116
+ "temperature": 0.7
117
+ }
118
+ }
119
+
120
+ try:
121
+ response = requests.post(url, headers=headers, json=payload, timeout=30)
122
+ response.raise_for_status()
123
+ output = response.json()
124
+ if isinstance(output, list) and "generated_text" in output[0]:
125
+ return output[0]["generated_text"].strip()[:200]
126
+ elif isinstance(output, dict) and "error" in output:
127
+ return f"HF LLM error: {output['error']}"
128
+ else:
129
+ return "No response generated from Hugging Face LLM."
130
+ except Exception as e:
131
+ print(f"Error contacting Hugging Face LLM: {e}")
132
+ return "Error contacting Hugging Face model."