IamRulo committed on
Commit bc82208 · verified · 1 Parent(s): 78aec6e

Upload agent_old.py

Files changed (1)
  1. agent_old.py +775 -0
agent_old.py ADDED
@@ -0,0 +1,775 @@
"""LangGraph: agent graph w/ tools"""
import os
from dotenv import load_dotenv
from typing import List, Dict, Any, Optional
import tempfile
import re
import json
import requests
from urllib.parse import urlparse
import pytesseract
from PIL import Image, ImageDraw, ImageFont, ImageEnhance, ImageFilter
import cmath
import pandas as pd
import uuid
import numpy as np


""" Langchain imports"""
from langgraph.graph import START, StateGraph, MessagesState
from langchain_core.messages import SystemMessage, HumanMessage
from langgraph.prebuilt import ToolNode, tools_condition
from langchain_core.tools import tool
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFaceEmbeddings
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.document_loaders import WikipediaLoader
from langchain_community.document_loaders import ArxivLoader
#from langchain_community.vectorstores import SupabaseVectorStore
from langchain_google_genai import ChatGoogleGenerativeAI
#from langchain.tools.retriever import create_retriever_tool
#from supabase.client import Client, create_client
#from code_interpreter import CodeInterpreter
#interpreter_instance = CodeInterpreter()
#from image_processing import *

"""
import getpass
import os

if "GOOGLE_API_KEY" not in os.environ:
    os.environ["GOOGLE_API_KEY"] = getpass.getpass("Enter your Google AI API key: ")
"""

load_dotenv()

@tool
def multiply(a: int, b: int) -> int:
    """Multiply two numbers.

    Args:
        a: first int
        b: second int
    """
    return a * b

@tool
def add(a: int, b: int) -> int:
    """Add two numbers.

    Args:
        a: first int
        b: second int
    """
    return a + b

@tool
def subtract(a: int, b: int) -> int:
    """Subtract two numbers.

    Args:
        a: first int
        b: second int
    """
    return a - b

@tool
def divide(a: int, b: int) -> float:
    """Divide two numbers.

    Args:
        a: first int
        b: second int
    """
    if b == 0:
        raise ValueError("Cannot divide by zero.")
    return a / b

@tool
def modulus(a: int, b: int) -> int:
    """Get the modulus of two numbers.

    Args:
        a: first int
        b: second int
    """
    return a % b

@tool
def power(a: float, b: float) -> float:
    """
    Get the power of two numbers.
    Args:
        a (float): the first number
        b (float): the second number
    """
    return a**b

@tool
def square_root(a: float) -> float | complex:
    """
    Get the square root of a number.
    Args:
        a (float): the number to get the square root of
    """
    if a >= 0:
        return a**0.5
    return cmath.sqrt(a)


@tool
def wiki_search(query: str) -> dict:
    """Search Wikipedia for a query and return maximum 2 results.

    Args:
        query: The search query."""
    search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
            for doc in search_docs
        ])
    return {"wiki_results": formatted_search_docs}

@tool
def web_search(query: str) -> dict:
    """Search Tavily for a query and return maximum 3 results.

    Args:
        query: The search query."""
    # TavilySearchResults.invoke takes the query directly and returns plain dicts
    # with "url" and "content" keys, not Document objects.
    search_docs = TavilySearchResults(max_results=3).invoke(query)
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document source="{doc.get("url", "")}"/>\n{doc.get("content", "")}\n</Document>'
            for doc in search_docs
        ])
    return {"web_results": formatted_search_docs}

@tool
def arvix_search(query: str) -> dict:
    """Search Arxiv for a query and return maximum 3 results.

    Args:
        query: The search query."""
    search_docs = ArxivLoader(query=query, load_max_docs=3).load()
    formatted_search_docs = "\n\n---\n\n".join(
        [
            # ArxivLoader metadata may not include "source"; fall back to the paper title.
            f'<Document source="{doc.metadata.get("source", doc.metadata.get("Title", ""))}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content[:1000]}\n</Document>'
            for doc in search_docs
        ])
    return {"arvix_results": formatted_search_docs}

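# Hedged sketch: the CodeInterpreter import at the top of this file is commented
# out, so treat it as an optional dependency instead of assuming it is present.
# Only the module/class names from the original commented import are assumed here.
try:
    from code_interpreter import CodeInterpreter
    interpreter_instance = CodeInterpreter()
except ImportError:
    interpreter_instance = None
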
@tool
def execute_code_multilang(code: str, language: str = "python") -> str:
    """Execute code in multiple languages (Python, Bash, SQL, C, Java) and return results.
    Args:
        code (str): The source code to execute.
        language (str): The language of the code. Supported: "python", "bash", "sql", "c", "java".
    Returns:
        A string summarizing the execution results (stdout, stderr, errors, plots, dataframes if any).
    """
    supported_languages = ["python", "bash", "sql", "c", "java"]
    language = language.lower()

    if language not in supported_languages:
        return f"❌ Unsupported language: {language}. Supported languages are: {', '.join(supported_languages)}"

    # Guard the optional CodeInterpreter dependency (see the try/except above).
    if interpreter_instance is None:
        return "❌ Code execution is unavailable: the optional CodeInterpreter dependency is not loaded."

    result = interpreter_instance.execute_code(code, language=language)

    response = []

    if result["status"] == "success":
        response.append(f"✅ Code executed successfully in **{language.upper()}**")

        if result.get("stdout"):
            response.append(
                "\n**Standard Output:**\n```\n" + result["stdout"].strip() + "\n```"
            )

        if result.get("stderr"):
            response.append(
                "\n**Standard Error (if any):**\n```\n"
                + result["stderr"].strip()
                + "\n```"
            )

        if result.get("result") is not None:
            response.append(
                "\n**Execution Result:**\n```\n"
                + str(result["result"]).strip()
                + "\n```"
            )

        if result.get("dataframes"):
            for df_info in result["dataframes"]:
                response.append(
                    f"\n**DataFrame `{df_info['name']}` (Shape: {df_info['shape']})**"
                )
                df_preview = pd.DataFrame(df_info["head"])
                response.append("First 5 rows:\n```\n" + str(df_preview) + "\n```")

        if result.get("plots"):
            response.append(
                f"\n**Generated {len(result['plots'])} plot(s)** (Image data returned separately)"
            )

    else:
        response.append(f"❌ Code execution failed in **{language.upper()}**")
        if result.get("stderr"):
            response.append(
                "\n**Error Log:**\n```\n" + result["stderr"].strip() + "\n```"
            )

    return "\n".join(response)

@tool
def save_and_read_file(content: str, filename: Optional[str] = None) -> str:
    """
    Save content to a file and return the path.
    Args:
        content (str): the content to save to the file
        filename (str, optional): the name of the file. If not provided, a random filename will be generated.
    """
    temp_dir = tempfile.gettempdir()
    if filename is None:
        temp_file = tempfile.NamedTemporaryFile(delete=False, dir=temp_dir)
        filepath = temp_file.name
    else:
        filepath = os.path.join(temp_dir, filename)

    with open(filepath, "w") as f:
        f.write(content)

    return f"File saved to {filepath}. You can read this file to process its contents."

@tool
def download_file_from_url(url: str, filename: Optional[str] = None) -> str:
    """
    Download a file from a URL and save it to a temporary location.
    Args:
        url (str): the URL of the file to download.
        filename (str, optional): the name of the file. If not provided, a random filename will be generated.
    """
    try:
        # Parse URL to get filename if not provided
        if not filename:
            path = urlparse(url).path
            filename = os.path.basename(path)
            if not filename:
                filename = f"downloaded_{uuid.uuid4().hex[:8]}"

        # Create temporary file
        temp_dir = tempfile.gettempdir()
        filepath = os.path.join(temp_dir, filename)

        # Download the file
        response = requests.get(url, stream=True)
        response.raise_for_status()

        # Save the file
        with open(filepath, "wb") as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)

        return f"File downloaded to {filepath}. You can read this file to process its contents."
    except Exception as e:
        return f"Error downloading file: {str(e)}"

@tool
def extract_text_from_image(image_path: str) -> str:
    """
    Extract text from an image using the OCR library pytesseract (if available).
    Args:
        image_path (str): the path to the image file.
    """
    try:
        # Open the image
        image = Image.open(image_path)

        # Extract text from the image
        text = pytesseract.image_to_string(image)

        return f"Extracted text from image:\n\n{text}"
    except Exception as e:
        return f"Error extracting text from image: {str(e)}"

@tool
def analyze_csv_file(file_path: str, query: str) -> str:
    """
    Analyze a CSV file using pandas and answer a question about it.
    Args:
        file_path (str): the path to the CSV file.
        query (str): Question about the data
    """
    try:
        # Read the CSV file
        df = pd.read_csv(file_path)

        # Note: `query` is not interpreted yet; a generic summary is returned.
        result = f"CSV file loaded with {len(df)} rows and {len(df.columns)} columns.\n"
        result += f"Columns: {', '.join(df.columns)}\n\n"

        # Add summary statistics
        result += "Summary statistics:\n"
        result += str(df.describe())

        return result

    except Exception as e:
        return f"Error analyzing CSV file: {str(e)}"

@tool
def analyze_excel_file(file_path: str, query: str) -> str:
    """
    Analyze an Excel file using pandas and answer a question about it.
    Args:
        file_path (str): the path to the Excel file.
        query (str): Question about the data
    """
    try:
        # Read the Excel file
        df = pd.read_excel(file_path)

        # Note: `query` is not interpreted yet; a generic summary is returned.
        result = (
            f"Excel file loaded with {len(df)} rows and {len(df.columns)} columns.\n"
        )
        result += f"Columns: {', '.join(df.columns)}\n\n"

        # Add summary statistics
        result += "Summary statistics:\n"
        result += str(df.describe())

        return result

    except Exception as e:
        return f"Error analyzing Excel file: {str(e)}"

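# Hedged sketch: the image tools below call decode_image / encode_image / save_image
# from the commented-out `image_processing` module. These minimal stand-ins
# (base64 <-> PIL round-trip, temp-file storage) are assumptions, not that module's
# actual implementation.
import base64
import io


def decode_image(image_base64: str) -> Image.Image:
    """Decode a base64 string into a PIL image."""
    return Image.open(io.BytesIO(base64.b64decode(image_base64)))


def save_image(img: Image.Image, subdir: str = "images") -> str:
    """Save a PIL image under the system temp dir and return its path."""
    out_dir = os.path.join(tempfile.gettempdir(), subdir)
    os.makedirs(out_dir, exist_ok=True)
    path = os.path.join(out_dir, f"{uuid.uuid4().hex[:8]}.png")
    img.save(path, format="PNG")
    return path


def encode_image(image_path: str) -> str:
    """Encode an image file as a base64 string."""
    with open(image_path, "rb") as f:
        return base64.b64encode(f.read()).decode("utf-8")
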
@tool
def analyze_image(image_base64: str) -> Dict[str, Any]:
    """
    Analyze basic properties of an image (size, mode, color analysis, thumbnail preview).
    Args:
        image_base64 (str): Base64 encoded image string
    Returns:
        Dictionary with analysis result
    """
    try:
        img = decode_image(image_base64)
        width, height = img.size
        mode = img.mode

        if mode in ("RGB", "RGBA"):
            arr = np.array(img)
            avg_colors = arr.mean(axis=(0, 1))
            dominant = ["Red", "Green", "Blue"][np.argmax(avg_colors[:3])]
            brightness = avg_colors.mean()
            color_analysis = {
                "average_rgb": avg_colors.tolist(),
                "brightness": brightness,
                "dominant_color": dominant,
            }
        else:
            color_analysis = {"note": f"No color analysis for mode {mode}"}

        thumbnail = img.copy()
        thumbnail.thumbnail((100, 100))
        thumb_path = save_image(thumbnail, "thumbnails")
        thumbnail_base64 = encode_image(thumb_path)

        return {
            "dimensions": (width, height),
            "mode": mode,
            "color_analysis": color_analysis,
            "thumbnail": thumbnail_base64,
        }
    except Exception as e:
        return {"error": str(e)}

@tool
def transform_image(
    image_base64: str, operation: str, params: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
    """
    Apply transformations: resize, rotate, crop, flip, brightness, contrast, blur, sharpen, grayscale.
    Args:
        image_base64 (str): Base64 encoded input image
        operation (str): Transformation operation
        params (Dict[str, Any], optional): Parameters for the operation
    Returns:
        Dictionary with transformed image (base64)
    """
    try:
        img = decode_image(image_base64)
        params = params or {}

        if operation == "resize":
            img = img.resize(
                (
                    params.get("width", img.width // 2),
                    params.get("height", img.height // 2),
                )
            )
        elif operation == "rotate":
            img = img.rotate(params.get("angle", 90), expand=True)
        elif operation == "crop":
            img = img.crop(
                (
                    params.get("left", 0),
                    params.get("top", 0),
                    params.get("right", img.width),
                    params.get("bottom", img.height),
                )
            )
        elif operation == "flip":
            if params.get("direction", "horizontal") == "horizontal":
                img = img.transpose(Image.FLIP_LEFT_RIGHT)
            else:
                img = img.transpose(Image.FLIP_TOP_BOTTOM)
        elif operation == "adjust_brightness":
            img = ImageEnhance.Brightness(img).enhance(params.get("factor", 1.5))
        elif operation == "adjust_contrast":
            img = ImageEnhance.Contrast(img).enhance(params.get("factor", 1.5))
        elif operation == "blur":
            img = img.filter(ImageFilter.GaussianBlur(params.get("radius", 2)))
        elif operation == "sharpen":
            img = img.filter(ImageFilter.SHARPEN)
        elif operation == "grayscale":
            img = img.convert("L")
        else:
            return {"error": f"Unknown operation: {operation}"}

        result_path = save_image(img)
        result_base64 = encode_image(result_path)
        return {"transformed_image": result_base64}

    except Exception as e:
        return {"error": str(e)}

@tool
def draw_on_image(
    image_base64: str, drawing_type: str, params: Dict[str, Any]
) -> Dict[str, Any]:
    """
    Draw shapes (rectangle, circle, line) or text onto an image.
    Args:
        image_base64 (str): Base64 encoded input image
        drawing_type (str): Drawing type
        params (Dict[str, Any]): Drawing parameters
    Returns:
        Dictionary with result image (base64)
    """
    try:
        img = decode_image(image_base64)
        draw = ImageDraw.Draw(img)
        color = params.get("color", "red")

        if drawing_type == "rectangle":
            draw.rectangle(
                [params["left"], params["top"], params["right"], params["bottom"]],
                outline=color,
                width=params.get("width", 2),
            )
        elif drawing_type == "circle":
            x, y, r = params["x"], params["y"], params["radius"]
            draw.ellipse(
                (x - r, y - r, x + r, y + r),
                outline=color,
                width=params.get("width", 2),
            )
        elif drawing_type == "line":
            draw.line(
                (
                    params["start_x"],
                    params["start_y"],
                    params["end_x"],
                    params["end_y"],
                ),
                fill=color,
                width=params.get("width", 2),
            )
        elif drawing_type == "text":
            font_size = params.get("font_size", 20)
            try:
                font = ImageFont.truetype("arial.ttf", font_size)
            except IOError:
                font = ImageFont.load_default()
            draw.text(
                (params["x"], params["y"]),
                params.get("text", "Text"),
                fill=color,
                font=font,
            )
        else:
            return {"error": f"Unknown drawing type: {drawing_type}"}

        result_path = save_image(img)
        result_base64 = encode_image(result_path)
        return {"result_image": result_base64}

    except Exception as e:
        return {"error": str(e)}

@tool
def generate_simple_image(
    image_type: str,
    width: int = 500,
    height: int = 500,
    params: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """
    Generate a simple image (currently gradient or noise).
    Args:
        image_type (str): Type of image
        width (int): Image width in pixels
        height (int): Image height in pixels
        params (Dict[str, Any], optional): Specific parameters
    Returns:
        Dictionary with generated image (base64)
    """
    try:
        params = params or {}

        if image_type == "gradient":
            direction = params.get("direction", "horizontal")
            start_color = params.get("start_color", (255, 0, 0))
            end_color = params.get("end_color", (0, 0, 255))

            img = Image.new("RGB", (width, height))
            draw = ImageDraw.Draw(img)

            if direction == "horizontal":
                for x in range(width):
                    r = int(
                        start_color[0] + (end_color[0] - start_color[0]) * x / width
                    )
                    g = int(
                        start_color[1] + (end_color[1] - start_color[1]) * x / width
                    )
                    b = int(
                        start_color[2] + (end_color[2] - start_color[2]) * x / width
                    )
                    draw.line([(x, 0), (x, height)], fill=(r, g, b))
            else:
                for y in range(height):
                    r = int(
                        start_color[0] + (end_color[0] - start_color[0]) * y / height
                    )
                    g = int(
                        start_color[1] + (end_color[1] - start_color[1]) * y / height
                    )
                    b = int(
                        start_color[2] + (end_color[2] - start_color[2]) * y / height
                    )
                    draw.line([(0, y), (width, y)], fill=(r, g, b))

        elif image_type == "noise":
            noise_array = np.random.randint(0, 256, (height, width, 3), dtype=np.uint8)
            img = Image.fromarray(noise_array, "RGB")

        else:
            return {"error": f"Unsupported image_type {image_type}"}

        result_path = save_image(img)
        result_base64 = encode_image(result_path)
        return {"generated_image": result_base64}

    except Exception as e:
        return {"error": str(e)}

@tool
def combine_images(
    images_base64: List[str], operation: str, params: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
    """
    Combine multiple images (currently stacking).
    Args:
        images_base64 (List[str]): List of base64 images
        operation (str): Combination type
        params (Dict[str, Any], optional): Parameters for the combination
    Returns:
        Dictionary with combined image (base64)
    """
    try:
        images = [decode_image(b64) for b64 in images_base64]
        params = params or {}

        if operation == "stack":
            direction = params.get("direction", "horizontal")
            if direction == "horizontal":
                total_width = sum(img.width for img in images)
                max_height = max(img.height for img in images)
                new_img = Image.new("RGB", (total_width, max_height))
                x = 0
                for img in images:
                    new_img.paste(img, (x, 0))
                    x += img.width
            else:
                max_width = max(img.width for img in images)
                total_height = sum(img.height for img in images)
                new_img = Image.new("RGB", (max_width, total_height))
                y = 0
                for img in images:
                    new_img.paste(img, (0, y))
                    y += img.height
        else:
            return {"error": f"Unsupported combination operation {operation}"}

        result_path = save_image(new_img)
        result_base64 = encode_image(result_path)
        return {"combined_image": result_base64}

    except Exception as e:
        return {"error": str(e)}

# load the system prompt from the file
#with open("system_prompt.txt", "r", encoding="utf-8") as f:
#    system_prompt = f.read()

system_prompt = """
You are a helpful assistant tasked with answering questions using a set of tools.
Now, I will ask you a question. Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER].
YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings.
If you are asked for a number, don't use commas to write your number, and don't use units such as $ or percent signs unless specified otherwise.
If you are asked for a string, don't use articles or abbreviations (e.g. for cities), and write digits in plain text unless specified otherwise.
If you are asked for a comma separated list, apply the above rules depending on whether the element to be put in the list is a number or a string.
Your answer should only start with "FINAL ANSWER: ", followed by the answer.""".strip()
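
# Optional override (an assumption that mirrors the commented-out loader above):
# if a system_prompt.txt file is present, prefer it over the inline default.
if os.path.exists("system_prompt.txt"):
    with open("system_prompt.txt", "r", encoding="utf-8") as f:
        system_prompt = f.read().strip()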

# System message
sys_msg = SystemMessage(content=system_prompt)

"""
# build a retriever
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")  # dim=768
supabase: Client = create_client(
    os.environ.get("SUPABASE_URL"),
    os.environ.get("SUPABASE_SERVICE_KEY"))
vector_store = SupabaseVectorStore(
    client=supabase,
    embedding=embeddings,
    table_name="documents",
    query_name="match_documents_langchain",
)
create_retriever_tool = create_retriever_tool(
    retriever=vector_store.as_retriever(),
    name="Question Search",
    description="A tool to retrieve similar questions from a vector store.",
)
"""


tools = [
    multiply,
    add,
    subtract,
    divide,
    modulus,
    power,
    square_root,
    wiki_search,
    web_search,
    arvix_search,
]
# Tools defined above but not registered for now:
#save_and_read_file,
#download_file_from_url,
#extract_text_from_image,
#analyze_csv_file,
#analyze_excel_file,
#execute_code_multilang,
#analyze_image,
#transform_image,
#draw_on_image,
#generate_simple_image,
#combine_images,


# Build graph function
def build_graph(provider: str = "huggingface"):
    """Build the graph"""
    if provider == "huggingface":
        # Huggingface endpoint
        """
        llm = ChatHuggingFace(
            llm=HuggingFaceEndpoint(
                #endpoint_url="https://api-inference.huggingface.co/models/Meta-DeepLearning/llama-2-7b-chat-hf",
                #endpoint_url="https://api-inference.huggingface.co/models/Qwen/Qwen3-30B-A3B",
                endpoint_url="https://api-inference.huggingface.co/models/Qwen/Qwen2.5-Coder-32B.Instruct",
                #endpoint_url="https://api-inference.huggingface.co/models/Qwen/Qwen3-4B",
                temperature=0,
            ),
        )
        """
        llm = ChatHuggingFace(
            llm=HuggingFaceEndpoint(
                repo_id="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
                #endpoint_url="https://api-inference.huggingface.co/models/Meta-DeepLearning/llama-2-7b-chat-hf",
                #endpoint_url="https://api-inference.huggingface.co/models/microsoft/phi-4",
                #endpoint_url="https://api-inference.huggingface.co/models/TinyLlama/TinyLlama-1.1B-Chat-v1.0",
                task="text-generation",  # for chat-style use, "text-generation" is the right task
                #max_new_tokens=1024,
                #do_sample=False,
                #repetition_penalty=1.03,
                temperature=0,
            ),
            #verbose=True,
        )

    elif provider == "google":
        # Google Gemini
        #llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
        llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash", temperature=0)

    else:
        raise ValueError("Invalid provider. Choose 'huggingface' or 'google'.")

    # Bind tools to LLM
    llm_with_tools = llm.bind_tools(tools)

    # Node
    def assistant(state: MessagesState):
        """Assistant node"""
        return {"messages": [llm_with_tools.invoke([sys_msg] + state["messages"])]}

    """
    def retriever(state: MessagesState):
        #Retriever node
        similar_question = vector_store.similarity_search(state["messages"][0].content)
        example_msg = HumanMessage(
            content=f"Here I provide a similar question and answer for reference: \n\n{similar_question[0].page_content}",
        )
        return {"messages": [sys_msg] + state["messages"] + [example_msg]}

    """
    def retriever(state: MessagesState):
        """Retriever node"""
        return {"messages": [sys_msg] + state["messages"]}

    builder = StateGraph(MessagesState)
    #builder.add_node("retriever", retriever)
    builder.add_node("assistant", assistant)
    builder.add_node("tools", ToolNode(tools))
    #builder.add_edge(START, "retriever")
    builder.add_edge(START, "assistant")
    #builder.add_edge("retriever", "assistant")
    builder.add_conditional_edges(
        "assistant",
        tools_condition,
    )
    #builder.add_edge("tools", "retriever")
    builder.add_edge("tools", "assistant")

    # Compile graph
    return builder.compile()

# test
if __name__ == "__main__":
    question = "When was a picture of St. Thomas Aquinas first added to the Wikipedia page on the Principle of double effect?"
    # Build the graph
    graph = build_graph(provider="huggingface")
    # Run the graph
    messages = [HumanMessage(content=question)]
    messages = graph.invoke({"messages": messages})
    for m in messages["messages"]:
        m.pretty_print()