jjjulllesss committed
Commit 5226c8c · verified · 1 Parent(s): 480fc0c

Update agent.py

Files changed (1)
  1. agent.py +35 -2
agent.py CHANGED
@@ -4,6 +4,7 @@ import json
 import requests
 import pandas as pd
 from huggingface_hub import InferenceClient
+from openai import OpenAI
 
 class GetFileTool(Tool):
     name = "get_file_tool"
@@ -126,6 +127,38 @@ class ImageAnalysisTool(Tool):
 
         return completion.choices[0].message.content
 
+class WebSearchPerplexityTool(Tool):
+    name = "web_search_perplexity_tool"
+    description = """This tool searches the web for information using Perplexity, try first to forward the entire question"""
+    inputs = {
+        "question": {"type": "string", "description": "The question to search the web for"}
+    }
+    output_type = "string"
+
+    def forward(self, question: str) -> str:
+        messages = [
+            {
+                "role": "system",
+                "content": (
+                    "Answer the question based on the information provided and following the instructions"
+                ),
+            },
+            {
+                "role": "user",
+                "content": question,
+            },
+        ]
+
+        client = OpenAI(api_key=os.getenv("PERPLEXITY_API_KEY"), base_url="https://api.perplexity.ai")
+
+        # chat completion without streaming
+        response = client.chat.completions.create(
+            model="sonar-pro",
+            messages=messages,
+        )
+
+        return response.choices[0].message.content
+
 
 def final_answer_formatting(answer, question):
     model = LiteLLMModel(
@@ -187,8 +220,8 @@ manager_agent = CodeAgent(
         model_id="gemini/gemini-2.5-flash-preview-04-17",
         api_key=os.getenv("GOOGLE_API_KEY"),
     ),
-    tools=[GetFileTool(), LoadXlsxFileTool(), LoadTextFileTool(), ImageAnalysisTool()],
-    managed_agents=[web_agent, audio_agent],
+    tools=[GetFileTool(), LoadXlsxFileTool(), LoadTextFileTool(), ImageAnalysisTool(), WebSearchPerplexityTool()],
+    managed_agents=[audio_agent],
     additional_authorized_imports=[
         "pandas"
     ],
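For reference, a minimal sketch of exercising the newly added tool on its own, outside the manager agent. This is not part of the commit: it assumes agent.py is importable from the working directory, that smolagents and openai are installed, and that a real PERPLEXITY_API_KEY is available in the environment; the placeholder key and the question string are illustrative only.

# Minimal usage sketch (assumptions noted above), not committed code.
import os

from agent import WebSearchPerplexityTool  # assumed import path for illustration

os.environ.setdefault("PERPLEXITY_API_KEY", "pplx-...")  # placeholder; use a real key

tool = WebSearchPerplexityTool()
# forward() is the method defined in the diff; it sends the question to Perplexity's
# sonar-pro model via the OpenAI-compatible endpoint and returns the answer text.
answer = tool.forward("What is the capital of Australia?")  # illustrative question
print(answer)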