susmitsil committed on
Commit
af67476
·
verified ·
1 Parent(s): 4598e7a
Files changed (1) hide show
  1. gemini_agent.py +612 -11
gemini_agent.py CHANGED
@@ -1,5 +1,282 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
  from langchain_google_genai import ChatGoogleGenerativeAI
2
- from langchain_core.messages import SystemMessage
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
 
4
  class GeminiAgent:
5
  def __init__(self, api_key: str, model_name: str = "gemini-2.0-flash"):
@@ -12,25 +289,285 @@ class GeminiAgent:
12
 
13
  self.api_key = api_key
14
  self.model_name = model_name
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
  self.agent = self._setup_agent()
16
-
17
- def _setup_agent(self):
18
- # Initialize model with system message
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  return ChatGoogleGenerativeAI(
20
  model=self.model_name,
21
  google_api_key=self.api_key,
22
- temperature=0, # Lower temperature for focused responses
23
- max_output_tokens=2000, # Increased for more detailed responses
24
- convert_system_message_to_human=True,
25
- system_message=SystemMessage(content="You are a helpful AI assistant. For the Wikipedia question, use the latest 2022 English Wikipedia version as your knowledge source. For the YouTube video question, analyze the video content carefully and count the maximum number of different bird species visible simultaneously in any frame.")
 
 
 
 
 
 
 
26
  )
 
 
 
 
 
 
 
 
 
 
27
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
  def run(self, query: str) -> str:
 
29
  try:
30
- response = self.agent.invoke(query)
31
- return response.content
32
  except Exception as e:
33
- return f"Error: {e}"
 
 
 
 
 
 
 
34
 
35
  def run_interactive(self):
36
  print("AI Assistant Ready! (Type 'exit' to quit)")
@@ -42,3 +579,67 @@ class GeminiAgent:
42
  break
43
 
44
  print("Assistant:", self.run(query))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import tempfile
3
+ import time
4
+ import re
5
+ import json
6
+ from typing import List, Optional, Dict, Any
7
+ from urllib.parse import urlparse
8
+ import requests
9
+ import yt_dlp
10
+ from bs4 import BeautifulSoup
11
+
12
+ from langchain_core.messages import HumanMessage, SystemMessage
13
  from langchain_google_genai import ChatGoogleGenerativeAI
14
+ from langchain_community.utilities import DuckDuckGoSearchAPIWrapper, WikipediaAPIWrapper
15
+ from langchain.agents import Tool, AgentExecutor, ConversationalAgent, initialize_agent, AgentType
16
+ from langchain.memory import ConversationBufferMemory
17
+ from langchain.prompts import MessagesPlaceholder
18
+ from langchain.tools import BaseTool, Tool, tool
19
+ from google.generativeai.types import HarmCategory, HarmBlockThreshold
20
+ from PIL import Image
21
+ import google.generativeai as genai
22
+ from pydantic import Field
23
+
24
+ from smolagents import WikipediaSearchTool
25
+
26
class SmolagentToolWrapper(BaseTool):
    """Adapter exposing a smolagents tool through the LangChain BaseTool interface."""

    wrapped_tool: object = Field(description="The wrapped smolagents tool")

    def __init__(self, tool):
        """Wrap *tool*, copying its name and description onto this LangChain tool."""
        super().__init__(
            name=tool.name,
            description=tool.description,
            return_direct=False,
            wrapped_tool=tool,
        )

    def _run(self, query: str) -> str:
        """Execute the wrapped tool, preferring a dedicated ``search`` method."""
        try:
            search = getattr(self.wrapped_tool, 'search', None)
            if search is not None:
                # WikipediaSearchTool exposes .search(); call it directly.
                return search(query)
            # Other smolagents tools (e.g. DuckDuckGoSearchTool) are plain callables.
            return self.wrapped_tool(query)
        except Exception as e:
            return f"Error using tool: {str(e)}"

    def _arun(self, query: str) -> str:
        """Async entry point; smolagents tools are sync-only, so delegate to _run."""
        return self._run(query)
54
+
55
class WebSearchTool:
    """DuckDuckGo HTML search with client-side rate limiting and retry/backoff."""

    def __init__(self):
        self.last_request_time = 0
        self.min_request_interval = 1.0  # Minimum time between requests in seconds
        self.max_retries = 5

    def search(self, query: str, domain: Optional[str] = None) -> str:
        """Perform web search with rate limiting and retries."""
        for attempt in range(self.max_retries):
            # Throttle: wait out the remainder of the minimum interval.
            elapsed = time.time() - self.last_request_time
            if elapsed < self.min_request_interval:
                time.sleep(self.min_request_interval - elapsed)

            try:
                results = self._do_search(query, domain)
                self.last_request_time = time.time()
                return results
            except Exception as e:
                retryable = "202 Ratelimit" in str(e) and attempt < self.max_retries - 1
                if retryable:
                    # Exponential backoff before the next attempt.
                    time.sleep((2 ** attempt) * self.min_request_interval)
                    continue
                return f"Search failed after {self.max_retries} attempts: {str(e)}"

        return "Search failed due to rate limiting"

    def _do_search(self, query: str, domain: Optional[str] = None) -> str:
        """Issue one DuckDuckGo HTML request and format the parsed results."""
        try:
            # Scope the query to a single site when a domain is given.
            effective_query = f"{query} site:{domain}" if domain else query

            # Make request with increased timeout.
            response = requests.get(
                "https://html.duckduckgo.com/html",
                params={"q": effective_query},
                timeout=10,
            )
            response.raise_for_status()

            # DuckDuckGo signals throttling with HTTP 202.
            if response.status_code == 202:
                raise Exception("202 Ratelimit")

            # Parse out (title, snippet, url) triples from the result markup.
            soup = BeautifulSoup(response.text, 'html.parser')
            hits = []
            for block in soup.find_all('div', {'class': 'result'}):
                anchor = block.find('a', {'class': 'result__a'})
                snippet = block.find('a', {'class': 'result__snippet'})
                if anchor and snippet:
                    hits.append((anchor.get_text(), snippet.get_text(), anchor.get('href')))

            # Format the top 10 hits as markdown links.
            lines = [f"[{title}]({url})\n{text}\n" for title, text, url in hits[:10]]
            return "## Search Results\n\n" + "\n".join(lines)

        except requests.RequestException as e:
            raise Exception(f"Search request failed: {str(e)}")
124
+
125
def save_and_read_file(content: str, filename: Optional[str] = None) -> str:
    """
    Save content to a temporary file and return the path.
    Useful for processing files from the GAIA API.

    Args:
        content: The content to save to the file
        filename: Optional filename, will generate a random name if not provided

    Returns:
        Message containing the path to the saved file
    """
    temp_dir = tempfile.gettempdir()
    if filename is None:
        # mkstemp returns an open OS-level descriptor; close it immediately so
        # the path can be reopened below without leaking a file handle
        # (the previous NamedTemporaryFile(delete=False) left its handle open).
        fd, filepath = tempfile.mkstemp()
        os.close(fd)
    else:
        filepath = os.path.join(temp_dir, filename)

    # Write content to the file; pin UTF-8 so behavior doesn't depend on locale.
    with open(filepath, 'w', encoding='utf-8') as f:
        f.write(content)

    return f"File saved to {filepath}. You can read this file to process its contents."
149
+
150
+
151
def download_file_from_url(url: str, filename: Optional[str] = None) -> str:
    """
    Download a file from a URL and save it to a temporary location.

    Args:
        url: The URL to download from
        filename: Optional filename, will generate one based on URL if not provided

    Returns:
        Message containing the path to the downloaded file, or an error message
    """
    try:
        # Derive a filename from the URL path when none was supplied.
        if not filename:
            filename = os.path.basename(urlparse(url).path)
            if not filename:
                # URL had no usable path component; generate a random name.
                import uuid
                filename = f"downloaded_{uuid.uuid4().hex[:8]}"

        # Save into the system temp directory.
        filepath = os.path.join(tempfile.gettempdir(), filename)

        # Stream the download so large files are not buffered in memory.
        # Fix: a timeout is required — without one a stalled server hangs forever.
        response = requests.get(url, stream=True, timeout=30)
        response.raise_for_status()

        with open(filepath, 'wb') as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)

        return f"File downloaded to {filepath}. You can now process this file."
    except Exception as e:
        return f"Error downloading file: {str(e)}"
188
+
189
+
190
def extract_text_from_image(image_path: str) -> str:
    """
    Extract text from an image using pytesseract (if available).

    Args:
        image_path: Path to the image file

    Returns:
        Extracted text or error message
    """
    try:
        # OCR dependencies are optional; import lazily.
        import pytesseract
        from PIL import Image

        # Open the image and run OCR over it.
        extracted = pytesseract.image_to_string(Image.open(image_path))
        return f"Extracted text from image:\n\n{extracted}"
    except ImportError:
        return "Error: pytesseract is not installed. Please install it with 'pip install pytesseract' and ensure Tesseract OCR is installed on your system."
    except Exception as e:
        return f"Error extracting text from image: {str(e)}"
216
+
217
+
218
def analyze_csv_file(file_path: str, query: str) -> str:
    """
    Analyze a CSV file using pandas and answer a question about it.

    Args:
        file_path: Path to the CSV file
        query: Question about the data

    Returns:
        Analysis result or error message
    """
    try:
        # pandas is optional; import lazily so the module loads without it.
        import pandas as pd

        frame = pd.read_csv(file_path)

        # Report shape, column names, and summary statistics.
        lines = [
            f"CSV file loaded with {len(frame)} rows and {len(frame.columns)} columns.",
            f"Columns: {', '.join(frame.columns)}",
            "",
            "Summary statistics:",
            str(frame.describe()),
        ]
        return "\n".join(lines)
    except ImportError:
        return "Error: pandas is not installed. Please install it with 'pip install pandas'."
    except Exception as e:
        return f"Error analyzing CSV file: {str(e)}"
248
+
249
@tool
def analyze_excel_file(file_path: str, query: str) -> str:
    """
    Analyze an Excel file using pandas and answer a question about it.

    Args:
        file_path: Path to the Excel file
        query: Question about the data

    Returns:
        Analysis result or error message
    """
    try:
        # pandas (plus openpyxl for .xlsx) is optional; import lazily.
        import pandas as pd

        frame = pd.read_excel(file_path)

        # Report shape, column names, and summary statistics.
        lines = [
            f"Excel file loaded with {len(frame)} rows and {len(frame.columns)} columns.",
            f"Columns: {', '.join(frame.columns)}",
            "",
            "Summary statistics:",
            str(frame.describe()),
        ]
        return "\n".join(lines)
    except ImportError:
        return "Error: pandas and openpyxl are not installed. Please install them with 'pip install pandas openpyxl'."
    except Exception as e:
        return f"Error analyzing Excel file: {str(e)}"
280
 
281
  class GeminiAgent:
282
  def __init__(self, api_key: str, model_name: str = "gemini-2.0-flash"):
 
289
 
290
  self.api_key = api_key
291
  self.model_name = model_name
292
+
293
+ # Configure Gemini
294
+ genai.configure(api_key=api_key)
295
+
296
+ # Initialize the LLM
297
+ self.llm = self._setup_llm()
298
+
299
+ # Setup tools
300
+ self.tools = [
301
+ SmolagentToolWrapper(WikipediaSearchTool()),
302
+ Tool(
303
+ name="analyze_video",
304
+ func=self._analyze_video,
305
+ description="Analyze YouTube video content directly"
306
+ ),
307
+ Tool(
308
+ name="analyze_image",
309
+ func=self._analyze_image,
310
+ description="Analyze image content"
311
+ ),
312
+ Tool(
313
+ name="analyze_table",
314
+ func=self._analyze_table,
315
+ description="Analyze table or matrix data"
316
+ ),
317
+ Tool(
318
+ name="analyze_list",
319
+ func=self._analyze_list,
320
+ description="Analyze and categorize list items"
321
+ ),
322
+ Tool(
323
+ name="web_search",
324
+ func=self._web_search,
325
+ description="Search the web for information"
326
+ )
327
+ ]
328
+
329
+ # Setup memory
330
+ self.memory = ConversationBufferMemory(
331
+ memory_key="chat_history",
332
+ return_messages=True
333
+ )
334
+
335
+ # Initialize agent
336
  self.agent = self._setup_agent()
337
+
338
+
339
    def _setup_llm(self):
        """Set up the language model.

        Builds a ChatGoogleGenerativeAI client with deterministic sampling
        (temperature 0) and medium-and-above safety blocking.
        """
        # Set up model with video capabilities
        # NOTE(review): temperature and max_output_tokens are duplicated — they
        # appear both here and as top-level kwargs below. Confirm which copy the
        # wrapper actually honors and remove the other.
        generation_config = {
            "temperature": 0.0,
            "max_output_tokens": 2000,
            "candidate_count": 1,
        }

        # Block medium-and-above content across all four harm categories.
        safety_settings = {
            HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
            HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
            HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
            HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
        }

        # NOTE(review): `system_message` and `generation_config` are not
        # documented constructor parameters of ChatGoogleGenerativeAI — verify
        # against the installed langchain-google-genai version that they are
        # accepted and not silently ignored.
        return ChatGoogleGenerativeAI(
            model=self.model_name,
            google_api_key=self.api_key,
            temperature=0,
            max_output_tokens=2000,
            generation_config=generation_config,
            safety_settings=safety_settings,
            system_message=SystemMessage(content=(
                "You are a precise AI assistant that helps users find information and analyze content. "
                "You can directly understand and analyze YouTube videos, images, and other content. "
                "When analyzing videos, focus on relevant details like dialogue, text, and key visual elements. "
                "For lists, tables, and structured data, ensure proper formatting and organization. "
                "If you need additional context, clearly explain what is needed."
            ))
        )
370
+
371
    def _setup_agent(self) -> AgentExecutor:
        """Set up the agent with tools and system message.

        Builds a ReAct-style ConversationalAgent from PREFIX /
        FORMAT_INSTRUCTIONS / SUFFIX prompt sections and wraps it in an
        AgentExecutor with conversation memory, capped at 5 tool iterations.
        """

        # Define the system message template (prompt text is part of the
        # agent's behavior — the exact wording drives the output parser).
        PREFIX = """You are a helpful AI assistant that can use various tools to answer questions and analyze content. You have access to tools for web search, Wikipedia lookup, and multimedia analysis.

TOOLS:
------
You have access to the following tools:"""

        FORMAT_INSTRUCTIONS = """To use a tool, use the following format:

Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action

When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:

Thought: Do I need to use a tool? No
Final Answer: [your response here]

Begin! Remember to ALWAYS include 'Thought:', 'Action:', 'Action Input:', and 'Final Answer:' in your responses."""

        SUFFIX = """Previous conversation history:
{chat_history}

New question: {input}
{agent_scratchpad}"""

        # Create the base agent from the LLM, tools, and prompt sections.
        agent = ConversationalAgent.from_llm_and_tools(
            llm=self.llm,
            tools=self.tools,
            prefix=PREFIX,
            format_instructions=FORMAT_INSTRUCTIONS,
            suffix=SUFFIX,
            input_variables=["input", "chat_history", "agent_scratchpad", "tool_names"],
            handle_parsing_errors=True
        )

        # Initialize agent executor with custom output handling.
        # max_iterations=5 bounds tool-use loops; verbose=True prints the chain
        # trace (see _clean_response, which strips those banners).
        return AgentExecutor.from_agent_and_tools(
            agent=agent,
            tools=self.tools,
            memory=self.memory,
            max_iterations=5,
            verbose=True,
            handle_parsing_errors=True,
            return_only_outputs=True  # This ensures we only get the final output
        )
422
+
423
+ def _web_search(self, query: str, domain: Optional[str] = None) -> str:
424
+ """Perform web search with rate limiting and retries."""
425
+ try:
426
+ # Use DuckDuckGo API wrapper for more reliable results
427
+ search = DuckDuckGoSearchAPIWrapper(max_results=5)
428
+ results = search.run(f"{query} {f'site:{domain}' if domain else ''}")
429
+
430
+ if not results or results.strip() == "":
431
+ return "No search results found."
432
+
433
+ return results
434
+
435
+ except Exception as e:
436
+ return f"Search error: {str(e)}"
437
+
438
+ def _analyze_video(self, url: str) -> str:
439
+ """Analyze video content using Gemini's video understanding capabilities."""
440
+ try:
441
+ # Validate URL
442
+ parsed_url = urlparse(url)
443
+ if not all([parsed_url.scheme, parsed_url.netloc]):
444
+ return "Please provide a valid video URL with http:// or https:// prefix."
445
+
446
+ # Check if it's a YouTube URL
447
+ if 'youtube.com' not in url and 'youtu.be' not in url:
448
+ return "Only YouTube videos are supported at this time."
449
+
450
+ try:
451
+ # Configure yt-dlp with minimal extraction
452
+ ydl_opts = {
453
+ 'quiet': True,
454
+ 'no_warnings': True,
455
+ 'extract_flat': True,
456
+ 'no_playlist': True,
457
+ 'youtube_include_dash_manifest': False
458
+ }
459
+
460
+ with yt_dlp.YoutubeDL(ydl_opts) as ydl:
461
+ try:
462
+ # Try basic info extraction
463
+ info = ydl.extract_info(url, download=False, process=False)
464
+ if not info:
465
+ return "Could not extract video information."
466
+
467
+ title = info.get('title', 'Unknown')
468
+ description = info.get('description', '')
469
+
470
+ # Create a detailed prompt with available metadata
471
+ prompt = f"""Please analyze this YouTube video:
472
+ Title: {title}
473
+ URL: {url}
474
+ Description: {description}
475
+
476
+ Please provide a detailed analysis focusing on:
477
+ 1. Main topic and key points from the title and description
478
+ 2. Expected visual elements and scenes
479
+ 3. Overall message or purpose
480
+ 4. Target audience"""
481
+
482
+ # Use the LLM with proper message format
483
+ messages = [HumanMessage(content=prompt)]
484
+ response = self.llm.invoke(messages)
485
+ return response.content if hasattr(response, 'content') else str(response)
486
+
487
+ except Exception as e:
488
+ if 'Sign in to confirm' in str(e):
489
+ return "This video requires age verification or sign-in. Please provide a different video URL."
490
+ return f"Error accessing video: {str(e)}"
491
+
492
+ except Exception as e:
493
+ return f"Error extracting video info: {str(e)}"
494
+
495
+ except Exception as e:
496
+ return f"Error analyzing video: {str(e)}"
497
+
498
+ def _analyze_table(self, table_data: str) -> str:
499
+ """Analyze table or matrix data."""
500
+ try:
501
+ if not table_data or not isinstance(table_data, str):
502
+ return "Please provide valid table data for analysis."
503
+
504
+ prompt = f"""Please analyze this table:
505
+
506
+ {table_data}
507
+
508
+ Provide a detailed analysis including:
509
+ 1. Structure and format
510
+ 2. Key patterns or relationships
511
+ 3. Notable findings
512
+ 4. Any mathematical properties (if applicable)"""
513
+
514
+ messages = [HumanMessage(content=prompt)]
515
+ response = self.llm.invoke(messages)
516
+ return response.content if hasattr(response, 'content') else str(response)
517
+
518
+ except Exception as e:
519
+ return f"Error analyzing table: {str(e)}"
520
+
521
+ def _analyze_image(self, image_data: str) -> str:
522
+ """Analyze image content."""
523
+ try:
524
+ if not image_data or not isinstance(image_data, str):
525
+ return "Please provide a valid image for analysis."
526
+
527
+ prompt = f"""Please analyze this image:
528
+
529
+ {image_data}
530
+
531
+ Focus on:
532
+ 1. Visual elements and objects
533
+ 2. Colors and composition
534
+ 3. Text or numbers (if present)
535
+ 4. Overall context and meaning"""
536
+
537
+ messages = [HumanMessage(content=prompt)]
538
+ response = self.llm.invoke(messages)
539
+ return response.content if hasattr(response, 'content') else str(response)
540
+
541
+ except Exception as e:
542
+ return f"Error analyzing image: {str(e)}"
543
+
544
+ def _analyze_list(self, list_data: str) -> str:
545
+ """Analyze and categorize list items."""
546
+ if not list_data:
547
+ return "No list data provided."
548
+ try:
549
+ items = [x.strip() for x in list_data.split(',')]
550
+ if not items:
551
+ return "Please provide a comma-separated list of items."
552
+ # Add list analysis logic here
553
+ return "Please provide the list items for analysis."
554
+ except Exception as e:
555
+ return f"Error analyzing list: {str(e)}"
556
+
557
  def run(self, query: str) -> str:
558
+ """Run the agent on a query."""
559
  try:
560
+ response = self.agent.run(query)
561
+ return response
562
  except Exception as e:
563
+ return f"Error processing query: {str(e)}"
564
+
565
+ def _clean_response(self, response: str) -> str:
566
+ """Clean up the response from the agent."""
567
+ # Remove any tool invocation artifacts
568
+ cleaned = re.sub(r'> Entering new AgentExecutor chain...|> Finished chain.', '', response)
569
+ cleaned = re.sub(r'Thought:.*?Action:.*?Action Input:.*?Observation:.*?\n', '', cleaned, flags=re.DOTALL)
570
+ return cleaned.strip()
571
 
572
  def run_interactive(self):
573
  print("AI Assistant Ready! (Type 'exit' to quit)")
 
579
  break
580
 
581
  print("Assistant:", self.run(query))
582
+
583
# NOTE(review): this duplicates the analyze_csv_file defined earlier in this
# file (there without @tool); this later definition rebinds the name. Consider
# removing one copy — TODO confirm which one the agent actually uses.
@tool
def analyze_csv_file(file_path: str, query: str) -> str:
    """
    Analyze a CSV file using pandas and answer a question about it.

    Args:
        file_path: Path to the CSV file
        query: Question about the data

    Returns:
        Analysis result or error message
    """
    try:
        import pandas as pd

        # Read the CSV file
        df = pd.read_csv(file_path)

        # Run various analyses based on the query
        # NOTE(review): `query` is currently unused — the same summary is
        # returned regardless of the question.
        result = f"CSV file loaded with {len(df)} rows and {len(df.columns)} columns.\n"
        result += f"Columns: {', '.join(df.columns)}\n\n"

        # Add summary statistics
        result += "Summary statistics:\n"
        result += str(df.describe())

        return result
    except ImportError:
        return "Error: pandas is not installed. Please install it with 'pip install pandas'."
    except Exception as e:
        return f"Error analyzing CSV file: {str(e)}"
614
+
615
# NOTE(review): this duplicates the analyze_excel_file defined earlier in this
# file; this later definition rebinds the name. Consider removing one copy —
# TODO confirm which one the agent actually uses.
@tool
def analyze_excel_file(file_path: str, query: str) -> str:
    """
    Analyze an Excel file using pandas and answer a question about it.

    Args:
        file_path: Path to the Excel file
        query: Question about the data

    Returns:
        Analysis result or error message
    """
    try:
        import pandas as pd

        # Read the Excel file
        df = pd.read_excel(file_path)

        # Run various analyses based on the query
        # NOTE(review): `query` is currently unused — the same summary is
        # returned regardless of the question.
        result = f"Excel file loaded with {len(df)} rows and {len(df.columns)} columns.\n"
        result += f"Columns: {', '.join(df.columns)}\n\n"

        # Add summary statistics
        result += "Summary statistics:\n"
        result += str(df.describe())

        return result
    except ImportError:
        return "Error: pandas and openpyxl are not installed. Please install them with 'pip install pandas openpyxl'."
    except Exception as e:
        return f"Error analyzing Excel file: {str(e)}"