from smolagents import (
    CodeAgent,
    DuckDuckGoSearchTool,
    HfApiModel,
    LiteLLMModel,
    OpenAIServerModel,
    PythonInterpreterTool,
    tool,
    InferenceClientModel
)
from typing import List, Dict, Any, Optional
import os
import tempfile
import re
import json
import requests
from urllib.parse import urlparse


class GAIAAgent:
    def __init__(
        self,
        model_type: str = "HfApiModel",
        model_id: Optional[str] = None,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
        temperature: float = 0.2,
        executor_type: str = "local",  # 'local' or 'e2b'
        additional_imports: Optional[List[str]] = None,
        additional_tools: Optional[List[Any]] = None,
        system_prompt: Optional[str] = None,  # Accepted for backward compatibility but not used directly
        verbose: bool = False,
        provider: Optional[str] = None,  # Provider for InferenceClientModel
        timeout: Optional[int] = None  # Timeout in seconds for InferenceClientModel
    ):
        """
        Initialize a GAIAAgent with the specified configuration.

        Args:
            model_type: Type of model to use (HfApiModel, LiteLLMModel, OpenAIServerModel, InferenceClientModel)
            model_id: ID of the model to use
            api_key: API key for the model provider
            api_base: Base URL for API calls
            temperature: Temperature for text generation
            executor_type: Type of executor for code execution ('local' or 'e2b')
            additional_imports: Additional Python modules the agent is allowed to import
            additional_tools: Additional tools to provide to the agent
            system_prompt: Custom system prompt (not used directly; kept for backward compatibility)
            verbose: Enable verbose logging
            provider: Provider for InferenceClientModel (e.g., "hf-inference")
            timeout: Timeout in seconds for API calls
        """
        # Set verbosity
        self.verbose = verbose
        self.system_prompt = system_prompt  # Stored for potential future use

        # Initialize the model based on the requested configuration
        if model_type == "HfApiModel":
            if api_key is None:
                api_key = os.getenv("HUGGINGFACEHUB_API_TOKEN")
                if not api_key:
                    raise ValueError("No Hugging Face token provided. Please set the HUGGINGFACEHUB_API_TOKEN environment variable or pass the api_key parameter.")
            if self.verbose:
                print(f"Using Hugging Face token: {api_key[:5]}...")
            self.model = HfApiModel(
                model_id=model_id or "meta-llama/Meta-Llama-3-70B-Instruct",
                token=api_key,
                temperature=temperature
            )
elif model_type == "InferenceClientModel": | |
if api_key is None: | |
api_key = os.getenv("HUGGINGFACEHUB_API_TOKEN") | |
if not api_key: | |
raise ValueError("No Hugging Face token provided. Please set HUGGINGFACEHUB_API_TOKEN environment variable or pass api_key parameter.") | |
if self.verbose: | |
print(f"Using Hugging Face token: {api_key[:5]}...") | |
self.model = InferenceClientModel( | |
model_id=model_id or "meta-llama/Llama-3-70B-Instruct", | |
provider=provider or "hf-inference", | |
token=api_key, | |
timeout=timeout or 120, | |
temperature=temperature | |
) | |
elif model_type == "LiteLLMModel": | |
from smolagents import LiteLLMModel | |
self.model = LiteLLMModel( | |
model_id=model_id or "gpt-4o", | |
api_key=api_key or os.getenv("OPENAI_API_KEY"), | |
temperature=temperature | |
) | |
elif model_type == "OpenAIServerModel": | |
# Check for xAI API key and base URL first | |
xai_api_key = os.getenv("XAI_API_KEY") | |
xai_api_base = os.getenv("XAI_API_BASE") | |
# If xAI credentials are available, use them | |
if xai_api_key and api_key is None: | |
api_key = xai_api_key | |
if self.verbose: | |
print(f"Using xAI API key: {api_key[:5]}...") | |
# If no API key specified, fall back to OPENAI_API_KEY | |
if api_key is None: | |
api_key = os.getenv("OPENAI_API_KEY") | |
if not api_key: | |
raise ValueError("No OpenAI API key provided. Please set OPENAI_API_KEY or XAI_API_KEY environment variable or pass api_key parameter.") | |
# If xAI API base is available and no api_base is provided, use it | |
if xai_api_base and api_base is None: | |
api_base = xai_api_base | |
if self.verbose: | |
print(f"Using xAI API base URL: {api_base}") | |
# If no API base specified but environment variable available, use it | |
if api_base is None: | |
api_base = os.getenv("AGENT_API_BASE") | |
if api_base and self.verbose: | |
print(f"Using API base from AGENT_API_BASE: {api_base}") | |
self.model = OpenAIServerModel( | |
model_id=model_id or "gpt-4o", | |
api_key=api_key, | |
api_base=api_base, | |
temperature=temperature | |
) | |
        else:
            raise ValueError(f"Unknown model type: {model_type}")

        if self.verbose:
            print(f"Initialized model: {model_type} - {model_id}")
        # Initialize default tools
        self.tools = [
            DuckDuckGoSearchTool(),
            PythonInterpreterTool(),
            save_and_read_file,
            download_file_from_url,
            analyze_csv_file,
            analyze_excel_file
        ]

        # Add extract_text_from_image if PIL and pytesseract are available
        try:
            import pytesseract
            from PIL import Image
            self.tools.append(extract_text_from_image)
            if self.verbose:
                print("Added image processing tool")
        except ImportError:
            if self.verbose:
                print("Image processing libraries not available")

        # Add any additional tools
        if additional_tools:
            self.tools.extend(additional_tools)

        if self.verbose:
            print(f"Initialized with {len(self.tools)} tools")

        # Set up the allowed imports
        self.imports = ["pandas", "numpy", "datetime", "json", "re", "math", "os", "requests", "csv", "urllib"]
        if additional_imports:
            self.imports.extend(additional_imports)
        # Initialize the CodeAgent
        executor_kwargs = {}
        if executor_type == "e2b":
            try:
                # Try to import e2b dependencies to check if they're available
                from e2b_code_interpreter import Sandbox
                if self.verbose:
                    print("Using e2b executor")
            except ImportError:
                if self.verbose:
                    print("e2b dependencies not found, falling back to local executor")
                executor_type = "local"  # Fallback to local if e2b is not available

        self.agent = CodeAgent(
            tools=self.tools,
            model=self.model,
            additional_authorized_imports=self.imports,
            executor_type=executor_type,
            executor_kwargs=executor_kwargs,
            verbosity_level=2 if self.verbose else 0
        )

        if self.verbose:
            print("Agent initialized and ready")