rahulnamdev committed on
Commit
ff800fe
·
verified ·
1 Parent(s): b1c5894

Update core_agent.py

Browse files
Files changed (1) hide show
  1. core_agent.py +140 -0
core_agent.py CHANGED
@@ -49,3 +49,143 @@ class GAIAAgent:
49
  provider: Provider for InferenceClientModel (e.g., "hf-inference")
50
  timeout: Timeout in seconds for API calls
51
  """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
  provider: Provider for InferenceClientModel (e.g., "hf-inference")
50
  timeout: Timeout in seconds for API calls
51
  """
52
+ # Set verbosity
53
+ self.verbose = verbose
54
+ self.system_prompt = system_prompt # Store for potential future use
55
+
56
+ # Initialize model based on configuration
57
+ if model_type == "HfApiModel":
58
+ if api_key is None:
59
+ api_key = os.getenv("HUGGINGFACEHUB_API_TOKEN")
60
+ if not api_key:
61
+ raise ValueError("No Hugging Face token provided. Please set HUGGINGFACEHUB_API_TOKEN environment variable or pass api_key parameter.")
62
+
63
+ if self.verbose:
64
+ print(f"Using Hugging Face token: {api_key[:5]}...")
65
+
66
+ self.model = HfApiModel(
67
+ model_id=model_id or "meta-llama/Llama-3-70B-Instruct",
68
+ token=api_key,
69
+ temperature=temperature
70
+ )
71
+ elif model_type == "InferenceClientModel":
72
+ if api_key is None:
73
+ api_key = os.getenv("HUGGINGFACEHUB_API_TOKEN")
74
+ if not api_key:
75
+ raise ValueError("No Hugging Face token provided. Please set HUGGINGFACEHUB_API_TOKEN environment variable or pass api_key parameter.")
76
+
77
+ if self.verbose:
78
+ print(f"Using Hugging Face token: {api_key[:5]}...")
79
+
80
+ self.model = InferenceClientModel(
81
+ model_id=model_id or "meta-llama/Llama-3-70B-Instruct",
82
+ provider=provider or "hf-inference",
83
+ token=api_key,
84
+ timeout=timeout or 120,
85
+ temperature=temperature
86
+ )
87
+ elif model_type == "LiteLLMModel":
88
+ from smolagents import LiteLLMModel
89
+ self.model = LiteLLMModel(
90
+ model_id=model_id or "gpt-4o",
91
+ api_key=api_key or os.getenv("OPENAI_API_KEY"),
92
+ temperature=temperature
93
+ )
94
+ elif model_type == "OpenAIServerModel":
95
+ # Check for xAI API key and base URL first
96
+ xai_api_key = os.getenv("XAI_API_KEY")
97
+ xai_api_base = os.getenv("XAI_API_BASE")
98
+
99
+ # If xAI credentials are available, use them
100
+ if xai_api_key and api_key is None:
101
+ api_key = xai_api_key
102
+ if self.verbose:
103
+ print(f"Using xAI API key: {api_key[:5]}...")
104
+
105
+ # If no API key specified, fall back to OPENAI_API_KEY
106
+ if api_key is None:
107
+ api_key = os.getenv("OPENAI_API_KEY")
108
+ if not api_key:
109
+ raise ValueError("No OpenAI API key provided. Please set OPENAI_API_KEY or XAI_API_KEY environment variable or pass api_key parameter.")
110
+
111
+ # If xAI API base is available and no api_base is provided, use it
112
+ if xai_api_base and api_base is None:
113
+ api_base = xai_api_base
114
+ if self.verbose:
115
+ print(f"Using xAI API base URL: {api_base}")
116
+
117
+ # If no API base specified but environment variable available, use it
118
+ if api_base is None:
119
+ api_base = os.getenv("AGENT_API_BASE")
120
+ if api_base and self.verbose:
121
+ print(f"Using API base from AGENT_API_BASE: {api_base}")
122
+
123
+ self.model = OpenAIServerModel(
124
+ model_id=model_id or "gpt-4o",
125
+ api_key=api_key,
126
+ api_base=api_base,
127
+ temperature=temperature
128
+ )
129
+ else:
130
+ raise ValueError(f"Unknown model type: {model_type}")
131
+
132
+ if self.verbose:
133
+ print(f"Initialized model: {model_type} - {model_id}")
134
+
135
+ # Initialize default tools
136
+ self.tools = [
137
+ DuckDuckGoSearchTool(),
138
+ PythonInterpreterTool(),
139
+ save_and_read_file,
140
+ download_file_from_url,
141
+ analyze_csv_file,
142
+ analyze_excel_file
143
+ ]
144
+
145
+ # Add extract_text_from_image if PIL and pytesseract are available
146
+ try:
147
+ import pytesseract
148
+ from PIL import Image
149
+ self.tools.append(extract_text_from_image)
150
+ if self.verbose:
151
+ print("Added image processing tool")
152
+ except ImportError:
153
+ if self.verbose:
154
+ print("Image processing libraries not available")
155
+
156
+ # Add any additional tools
157
+ if additional_tools:
158
+ self.tools.extend(additional_tools)
159
+
160
+ if self.verbose:
161
+ print(f"Initialized with {len(self.tools)} tools")
162
+
163
+ # Setup imports allowed
164
+ self.imports = ["pandas", "numpy", "datetime", "json", "re", "math", "os", "requests", "csv", "urllib"]
165
+ if additional_imports:
166
+ self.imports.extend(additional_imports)
167
+
168
+ # Initialize the CodeAgent
169
+ executor_kwargs = {}
170
+ if executor_type == "e2b":
171
+ try:
172
+ # Try to import e2b dependencies to check if they're available
173
+ from e2b_code_interpreter import Sandbox
174
+ if self.verbose:
175
+ print("Using e2b executor")
176
+ except ImportError:
177
+ if self.verbose:
178
+ print("e2b dependencies not found, falling back to local executor")
179
+ executor_type = "local" # Fallback to local if e2b is not available
180
+
181
+ self.agent = CodeAgent(
182
+ tools=self.tools,
183
+ model=self.model,
184
+ additional_authorized_imports=self.imports,
185
+ executor_type=executor_type,
186
+ executor_kwargs=executor_kwargs,
187
+ verbosity_level=2 if self.verbose else 0
188
+ )
189
+
190
+ if self.verbose:
191
+ print("Agent initialized and ready")