Update agent.py
agent.py CHANGED
@@ -1,17 +1,34 @@
-"""LangGraph
+"""LangGraph: agent graph w/ tools"""
 import os
 from dotenv import load_dotenv
+from typing import List, Dict, Any, Optional
+import tempfile
+import re
+import json
+import requests
+from urllib.parse import urlparse
+import pytesseract
+from PIL import Image, ImageDraw, ImageFont, ImageEnhance, ImageFilter
+import cmath
+import pandas as pd
+import uuid
+import numpy as np
+from code_interpreter import CodeInterpreter
+from image_processing import *
+
+interpreter_instance = CodeInterpreter()
+
+""" Langchain imports"""
 from langgraph.graph import START, StateGraph, MessagesState
-from
-from langgraph.prebuilt import ToolNode
-from
+from langchain_core.messages import SystemMessage, HumanMessage
+from langgraph.prebuilt import ToolNode, tools_condition
+from langchain_core.tools import tool
 from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFaceEmbeddings
 from langchain_community.tools.tavily_search import TavilySearchResults
 from langchain_community.document_loaders import WikipediaLoader
 from langchain_community.document_loaders import ArxivLoader
 #from langchain_community.vectorstores import SupabaseVectorStore
-from
-from langchain_core.tools import tool
+#from langchain_google_genai import ChatGoogleGenerativeAI
 #from langchain.tools.retriever import create_retriever_tool
 #from supabase.client import Client, create_client
 
@@ -69,6 +86,28 @@ def modulus(a: int, b: int) -> int:
     """
     return a % b
 
+@tool
+def power(a: float, b: float) -> float:
+    """
+    Get the power of two numbers.
+    Args:
+        a (float): the first number
+        b (float): the second number
+    """
+    return a**b
+
+@tool
+def square_root(a: float) -> float | complex:
+    """
+    Get the square root of a number.
+    Args:
+        a (float): the number to get the square root of
+    """
+    if a >= 0:
+        return a**0.5
+    return cmath.sqrt(a)
+
+
 @tool
 def wiki_search(query: str) -> str:
     """Search Wikipedia for a query and return maximum 2 results.
@@ -83,10 +122,8 @@ def wiki_search(query: str) -> str:
         ])
     return {"wiki_results": formatted_search_docs}
 
-
 @tool
 def web_search(query: str) -> str:
-
     """Search Tavily for a query and return maximum 3 results.
 
     Args:
@@ -100,7 +137,6 @@ def web_search(query: str) -> str:
         ])
     return {"web_results": formatted_search_docs}
 
-
 @tool
 def arvix_search(query: str) -> str:
     """Search Arxiv for a query and return maximum 3 result.
@@ -115,7 +151,467 @@ def arvix_search(query: str) -> str:
         ])
     return {"arvix_results": formatted_search_docs}
 
+@tool
+def execute_code_multilang(code: str, language: str = "python") -> str:
+    """Execute code in multiple languages (Python, Bash, SQL, C, Java) and return results.
+    Args:
+        code (str): The source code to execute.
+        language (str): The language of the code. Supported: "python", "bash", "sql", "c", "java".
+    Returns:
+        A string summarizing the execution results (stdout, stderr, errors, plots, dataframes if any).
+    """
+    supported_languages = ["python", "bash", "sql", "c", "java"]
+    language = language.lower()
+
+    if language not in supported_languages:
+        return f"❌ Unsupported language: {language}. Supported languages are: {', '.join(supported_languages)}"
+
+    result = interpreter_instance.execute_code(code, language=language)
+
+    response = []
+
+    if result["status"] == "success":
+        response.append(f"✅ Code executed successfully in **{language.upper()}**")
+
+        if result.get("stdout"):
+            response.append(
+                "\n**Standard Output:**\n```\n" + result["stdout"].strip() + "\n```"
+            )
+
+        if result.get("stderr"):
+            response.append(
+                "\n**Standard Error (if any):**\n```\n"
+                + result["stderr"].strip()
+                + "\n```"
+            )
+
+        if result.get("result") is not None:
+            response.append(
+                "\n**Execution Result:**\n```\n"
+                + str(result["result"]).strip()
+                + "\n```"
+            )
+
+        if result.get("dataframes"):
+            for df_info in result["dataframes"]:
+                response.append(
+                    f"\n**DataFrame `{df_info['name']}` (Shape: {df_info['shape']})**"
+                )
+                df_preview = pd.DataFrame(df_info["head"])
+                response.append("First 5 rows:\n```\n" + str(df_preview) + "\n```")
+
+        if result.get("plots"):
+            response.append(
+                f"\n**Generated {len(result['plots'])} plot(s)** (Image data returned separately)"
+            )
+
+    else:
+        response.append(f"❌ Code execution failed in **{language.upper()}**")
+        if result.get("stderr"):
+            response.append(
+                "\n**Error Log:**\n```\n" + result["stderr"].strip() + "\n```"
+            )
+
+    return "\n".join(response)
+
+@tool
+def save_and_read_file(content: str, filename: Optional[str] = None) -> str:
+    """
+    Save content to a file and return the path.
+    Args:
+        content (str): the content to save to the file
+        filename (str, optional): the name of the file. If not provided, a random name file will be created.
+    """
+    temp_dir = tempfile.gettempdir()
+    if filename is None:
+        temp_file = tempfile.NamedTemporaryFile(delete=False, dir=temp_dir)
+        filepath = temp_file.name
+    else:
+        filepath = os.path.join(temp_dir, filename)
+
+    with open(filepath, "w") as f:
+        f.write(content)
+
+    return f"File saved to {filepath}. You can read this file to process its contents."
+
+@tool
+def download_file_from_url(url: str, filename: Optional[str] = None) -> str:
+    """
+    Download a file from a URL and save it to a temporary location.
+    Args:
+        url (str): the URL of the file to download.
+        filename (str, optional): the name of the file. If not provided, a random name file will be created.
+    """
+    try:
+        # Parse URL to get filename if not provided
+        if not filename:
+            path = urlparse(url).path
+            filename = os.path.basename(path)
+            if not filename:
+                filename = f"downloaded_{uuid.uuid4().hex[:8]}"
+
+        # Create temporary file
+        temp_dir = tempfile.gettempdir()
+        filepath = os.path.join(temp_dir, filename)
+
+        # Download the file
+        response = requests.get(url, stream=True)
+        response.raise_for_status()
+
+        # Save the file
+        with open(filepath, "wb") as f:
+            for chunk in response.iter_content(chunk_size=8192):
+                f.write(chunk)
+
+        return f"File downloaded to {filepath}. You can read this file to process its contents."
+    except Exception as e:
+        return f"Error downloading file: {str(e)}"
+
+@tool
+def extract_text_from_image(image_path: str) -> str:
+    """
+    Extract text from an image using OCR library pytesseract (if available).
+    Args:
+        image_path (str): the path to the image file.
+    """
+    try:
+        # Open the image
+        image = Image.open(image_path)
+
+        # Extract text from the image
+        text = pytesseract.image_to_string(image)
+
+        return f"Extracted text from image:\n\n{text}"
+    except Exception as e:
+        return f"Error extracting text from image: {str(e)}"
+
+@tool
+def analyze_csv_file(file_path: str, query: str) -> str:
+    """
+    Analyze a CSV file using pandas and answer a question about it.
+    Args:
+        file_path (str): the path to the CSV file.
+        query (str): Question about the data
+    """
+    try:
+        # Read the CSV file
+        df = pd.read_csv(file_path)
+
+        # Run various analyses based on the query
+        result = f"CSV file loaded with {len(df)} rows and {len(df.columns)} columns.\n"
+        result += f"Columns: {', '.join(df.columns)}\n\n"
+
+        # Add summary statistics
+        result += "Summary statistics:\n"
+        result += str(df.describe())
+
+        return result
+
+    except Exception as e:
+        return f"Error analyzing CSV file: {str(e)}"
+
+@tool
+def analyze_excel_file(file_path: str, query: str) -> str:
+    """
+    Analyze an Excel file using pandas and answer a question about it.
+    Args:
+        file_path (str): the path to the Excel file.
+        query (str): Question about the data
+    """
+    try:
+        # Read the Excel file
+        df = pd.read_excel(file_path)
+
+        # Run various analyses based on the query
+        result = (
+            f"Excel file loaded with {len(df)} rows and {len(df.columns)} columns.\n"
+        )
+        result += f"Columns: {', '.join(df.columns)}\n\n"
+
+        # Add summary statistics
+        result += "Summary statistics:\n"
+        result += str(df.describe())
+
+        return result
+
+    except Exception as e:
+        return f"Error analyzing Excel file: {str(e)}"
+
+@tool
+def analyze_image(image_base64: str) -> Dict[str, Any]:
+    """
+    Analyze basic properties of an image (size, mode, color analysis, thumbnail preview).
+    Args:
+        image_base64 (str): Base64 encoded image string
+    Returns:
+        Dictionary with analysis result
+    """
+    try:
+        img = decode_image(image_base64)
+        width, height = img.size
+        mode = img.mode
+
+        if mode in ("RGB", "RGBA"):
+            arr = np.array(img)
+            avg_colors = arr.mean(axis=(0, 1))
+            dominant = ["Red", "Green", "Blue"][np.argmax(avg_colors[:3])]
+            brightness = avg_colors.mean()
+            color_analysis = {
+                "average_rgb": avg_colors.tolist(),
+                "brightness": brightness,
+                "dominant_color": dominant,
+            }
+        else:
+            color_analysis = {"note": f"No color analysis for mode {mode}"}
+
+        thumbnail = img.copy()
+        thumbnail.thumbnail((100, 100))
+        thumb_path = save_image(thumbnail, "thumbnails")
+        thumbnail_base64 = encode_image(thumb_path)
+
+        return {
+            "dimensions": (width, height),
+            "mode": mode,
+            "color_analysis": color_analysis,
+            "thumbnail": thumbnail_base64,
+        }
+    except Exception as e:
+        return {"error": str(e)}
+
+@tool
+def transform_image(
+    image_base64: str, operation: str, params: Optional[Dict[str, Any]] = None
+) -> Dict[str, Any]:
+    """
+    Apply transformations: resize, rotate, crop, flip, brightness, contrast, blur, sharpen, grayscale.
+    Args:
+        image_base64 (str): Base64 encoded input image
+        operation (str): Transformation operation
+        params (Dict[str, Any], optional): Parameters for the operation
+    Returns:
+        Dictionary with transformed image (base64)
+    """
+    try:
+        img = decode_image(image_base64)
+        params = params or {}
+
+        if operation == "resize":
+            img = img.resize(
+                (
+                    params.get("width", img.width // 2),
+                    params.get("height", img.height // 2),
+                )
+            )
+        elif operation == "rotate":
+            img = img.rotate(params.get("angle", 90), expand=True)
+        elif operation == "crop":
+            img = img.crop(
+                (
+                    params.get("left", 0),
+                    params.get("top", 0),
+                    params.get("right", img.width),
+                    params.get("bottom", img.height),
+                )
+            )
+        elif operation == "flip":
+            if params.get("direction", "horizontal") == "horizontal":
+                img = img.transpose(Image.FLIP_LEFT_RIGHT)
+            else:
+                img = img.transpose(Image.FLIP_TOP_BOTTOM)
+        elif operation == "adjust_brightness":
+            img = ImageEnhance.Brightness(img).enhance(params.get("factor", 1.5))
+        elif operation == "adjust_contrast":
+            img = ImageEnhance.Contrast(img).enhance(params.get("factor", 1.5))
+        elif operation == "blur":
+            img = img.filter(ImageFilter.GaussianBlur(params.get("radius", 2)))
+        elif operation == "sharpen":
+            img = img.filter(ImageFilter.SHARPEN)
+        elif operation == "grayscale":
+            img = img.convert("L")
+        else:
+            return {"error": f"Unknown operation: {operation}"}
+
+        result_path = save_image(img)
+        result_base64 = encode_image(result_path)
+        return {"transformed_image": result_base64}
+
+    except Exception as e:
+        return {"error": str(e)}
+
+@tool
+def draw_on_image(
+    image_base64: str, drawing_type: str, params: Dict[str, Any]
+) -> Dict[str, Any]:
+    """
+    Draw shapes (rectangle, circle, line) or text onto an image.
+    Args:
+        image_base64 (str): Base64 encoded input image
+        drawing_type (str): Drawing type
+        params (Dict[str, Any]): Drawing parameters
+    Returns:
+        Dictionary with result image (base64)
+    """
+    try:
+        img = decode_image(image_base64)
+        draw = ImageDraw.Draw(img)
+        color = params.get("color", "red")
+
+        if drawing_type == "rectangle":
+            draw.rectangle(
+                [params["left"], params["top"], params["right"], params["bottom"]],
+                outline=color,
+                width=params.get("width", 2),
+            )
+        elif drawing_type == "circle":
+            x, y, r = params["x"], params["y"], params["radius"]
+            draw.ellipse(
+                (x - r, y - r, x + r, y + r),
+                outline=color,
+                width=params.get("width", 2),
+            )
+        elif drawing_type == "line":
+            draw.line(
+                (
+                    params["start_x"],
+                    params["start_y"],
+                    params["end_x"],
+                    params["end_y"],
+                ),
+                fill=color,
+                width=params.get("width", 2),
+            )
+        elif drawing_type == "text":
+            font_size = params.get("font_size", 20)
+            try:
+                font = ImageFont.truetype("arial.ttf", font_size)
+            except IOError:
+                font = ImageFont.load_default()
+            draw.text(
+                (params["x"], params["y"]),
+                params.get("text", "Text"),
+                fill=color,
+                font=font,
+            )
+        else:
+            return {"error": f"Unknown drawing type: {drawing_type}"}
+
+        result_path = save_image(img)
+        result_base64 = encode_image(result_path)
+        return {"result_image": result_base64}
+
+    except Exception as e:
+        return {"error": str(e)}
+
+@tool
+def generate_simple_image(
+    image_type: str,
+    width: int = 500,
+    height: int = 500,
+    params: Optional[Dict[str, Any]] = None,
+) -> Dict[str, Any]:
+    """
+    Generate a simple image (gradient, noise, pattern, chart).
+    Args:
+        image_type (str): Type of image
+        width (int), height (int)
+        params (Dict[str, Any], optional): Specific parameters
+    Returns:
+        Dictionary with generated image (base64)
+    """
+    try:
+        params = params or {}
+
+        if image_type == "gradient":
+            direction = params.get("direction", "horizontal")
+            start_color = params.get("start_color", (255, 0, 0))
+            end_color = params.get("end_color", (0, 0, 255))
+
+            img = Image.new("RGB", (width, height))
+            draw = ImageDraw.Draw(img)
+
+            if direction == "horizontal":
+                for x in range(width):
+                    r = int(
+                        start_color[0] + (end_color[0] - start_color[0]) * x / width
+                    )
+                    g = int(
+                        start_color[1] + (end_color[1] - start_color[1]) * x / width
+                    )
+                    b = int(
+                        start_color[2] + (end_color[2] - start_color[2]) * x / width
+                    )
+                    draw.line([(x, 0), (x, height)], fill=(r, g, b))
+            else:
+                for y in range(height):
+                    r = int(
+                        start_color[0] + (end_color[0] - start_color[0]) * y / height
+                    )
+                    g = int(
+                        start_color[1] + (end_color[1] - start_color[1]) * y / height
+                    )
+                    b = int(
+                        start_color[2] + (end_color[2] - start_color[2]) * y / height
+                    )
+                    draw.line([(0, y), (width, y)], fill=(r, g, b))
+
+        elif image_type == "noise":
+            noise_array = np.random.randint(0, 256, (height, width, 3), dtype=np.uint8)
+            img = Image.fromarray(noise_array, "RGB")
+
+        else:
+            return {"error": f"Unsupported image_type {image_type}"}
+
+        result_path = save_image(img)
+        result_base64 = encode_image(result_path)
+        return {"generated_image": result_base64}
+
+    except Exception as e:
+        return {"error": str(e)}
+
+@tool
+def combine_images(
+    images_base64: List[str], operation: str, params: Optional[Dict[str, Any]] = None
+) -> Dict[str, Any]:
+    """
+    Combine multiple images (collage, stack, blend).
+    Args:
+        images_base64 (List[str]): List of base64 images
+        operation (str): Combination type
+        params (Dict[str, Any], optional)
+    Returns:
+        Dictionary with combined image (base64)
+    """
+    try:
+        images = [decode_image(b64) for b64 in images_base64]
+        params = params or {}
+
+        if operation == "stack":
+            direction = params.get("direction", "horizontal")
+            if direction == "horizontal":
+                total_width = sum(img.width for img in images)
+                max_height = max(img.height for img in images)
+                new_img = Image.new("RGB", (total_width, max_height))
+                x = 0
+                for img in images:
+                    new_img.paste(img, (x, 0))
+                    x += img.width
+            else:
+                max_width = max(img.width for img in images)
+                total_height = sum(img.height for img in images)
+                new_img = Image.new("RGB", (max_width, total_height))
+                y = 0
+                for img in images:
+                    new_img.paste(img, (0, y))
+                    y += img.height
+        else:
+            return {"error": f"Unsupported combination operation {operation}"}
+
+        result_path = save_image(new_img)
+        result_base64 = encode_image(result_path)
+        return {"combined_image": result_base64}
 
+    except Exception as e:
+        return {"error": str(e)}
 
 # load the system prompt from the file
 #with open("system_prompt.txt", "r", encoding="utf-8") as f:
@@ -158,9 +654,22 @@ tools = [
     subtract,
     divide,
     modulus,
+    power,
+    square_root,
     wiki_search,
     web_search,
     arvix_search,
+    save_and_read_file,
+    download_file_from_url,
+    extract_text_from_image,
+    analyze_csv_file,
+    analyze_excel_file,
+    execute_code_multilang,
+    analyze_image,
+    transform_image,
+    draw_on_image,
+    generate_simple_image,
+    combine_images,
 ]
 
 # Build graph function
@@ -168,7 +677,7 @@ def build_graph(provider: str = "huggingface"):
     """Build the graph"""
     # Load environment variables from .env file
     if provider == "huggingface":
-        #
+        # Huggingface endpoint
        """
        llm = ChatHuggingFace(
            llm=HuggingFaceEndpoint(
@@ -202,7 +711,7 @@ def build_graph(provider: str = "huggingface"):
        """Assistant node"""
 
        #return {"messages": [llm_with_tools.invoke([sys_msg] + state["messages"])]}
-        return {"messages": [llm_with_tools.invoke([sys_msg])]}
+        return {"messages": [llm_with_tools.invoke([sys_msg] + state["messages"])]}
 
    """
    def retriever(state: MessagesState):
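For reference, a minimal usage sketch of the updated module (hypothetical, not part of this commit): it assumes build_graph() returns the compiled StateGraph and that the .env file provides the Hugging Face and Tavily credentials, plus the local code_interpreter and image_processing modules the imports above expect.

# Usage sketch (hypothetical): build the graph and run a single question.
from langchain_core.messages import HumanMessage
from agent import build_graph

graph = build_graph(provider="huggingface")
result = graph.invoke({"messages": [HumanMessage(content="What is 2 to the power of 10?")]})
print(result["messages"][-1].content)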