Spaces:
Runtime error
Runtime error
Commit
·
ac0f9ba
1
Parent(s):
81917a3
feat: initial working agent
Browse files- .gitignore +1 -0
- .python-version +1 -0
- app.py +36 -5
- pyproject.toml +24 -0
- src/final_assignment_template/__init__.py +2 -0
- src/final_assignment_template/__pycache__/__init__.cpython-311.pyc +0 -0
- src/final_assignment_template/__pycache__/agent.cpython-311.pyc +0 -0
- src/final_assignment_template/__pycache__/models.cpython-311.pyc +0 -0
- src/final_assignment_template/__pycache__/tools.cpython-311.pyc +0 -0
- src/final_assignment_template/agent.py +55 -0
- src/final_assignment_template/models.py +29 -0
- src/final_assignment_template/tools.py +115 -0
- uv.lock +0 -0
.gitignore
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
.env
|
.python-version
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
3.11
|
app.py
CHANGED
@@ -3,20 +3,43 @@ import gradio as gr
|
|
3 |
import requests
|
4 |
import inspect
|
5 |
import pandas as pd
|
|
|
6 |
|
|
|
7 |
# (Keep Constants as is)
|
8 |
# --- Constants ---
|
|
|
9 |
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
10 |
|
11 |
# --- Basic Agent Definition ---
|
12 |
# ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
13 |
class BasicAgent:
|
14 |
def __init__(self):
|
15 |
print("BasicAgent initialized.")
|
16 |
-
def __call__(self,
|
17 |
-
print(
|
18 |
-
|
19 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
20 |
return fixed_answer
|
21 |
|
22 |
def run_and_submit_all( profile: gr.OAuthProfile | None):
|
@@ -73,14 +96,22 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
73 |
results_log = []
|
74 |
answers_payload = []
|
75 |
print(f"Running agent on {len(questions_data)} questions...")
|
|
|
76 |
for item in questions_data:
|
77 |
task_id = item.get("task_id")
|
78 |
question_text = item.get("question")
|
|
|
|
|
|
|
79 |
if not task_id or question_text is None:
|
80 |
print(f"Skipping item with missing task_id or question: {item}")
|
81 |
continue
|
82 |
try:
|
83 |
-
|
|
|
|
|
|
|
|
|
84 |
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
|
85 |
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
|
86 |
except Exception as e:
|
|
|
3 |
import requests
|
4 |
import inspect
|
5 |
import pandas as pd
|
6 |
+
from typing import Any
|
7 |
|
8 |
+
from src.final_assignment_template.agent import manager_agent
|
9 |
# (Keep Constants as is)
|
10 |
# --- Constants ---
|
11 |
+
|
12 |
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
13 |
|
14 |
# --- Basic Agent Definition ---
|
15 |
# ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
|
16 |
+
# class BasicAgent:
|
17 |
+
# def __init__(self):
|
18 |
+
# print("BasicAgent initialized.")
|
19 |
+
# def __call__(self, question: str) -> str:
|
20 |
+
# print(f"Agent received question (first 50 chars): {question[:50]}...")
|
21 |
+
# fixed_answer = "This is a default answer."
|
22 |
+
# print(f"Agent returning fixed answer: {fixed_answer}")
|
23 |
+
# return fixed_answer
|
24 |
+
|
25 |
class BasicAgent:
    """Callable wrapper that forwards one scoring-service task item to `manager_agent`.

    The item is the dict returned by the questions endpoint; the answer is
    whatever the manager agent produces for the wrapped prompt.
    """

    def __init__(self):
        print("BasicAgent initialized.")

    def __call__(self, item: dict[str, Any]) -> str:
        # Log the raw item for debugging, then pull out the fields we use.
        print(item)
        question_text = item.get("question") or ''
        task_id = item.get("task_id") or ''
        file_name = item.get("file_name") or None

        # Only include the task id in the prompt when a file is attached —
        # the agent's get_task_file tool needs the id to fetch it.
        if task_id and file_name:
            print('With task_id')
            print(task_id)
            prompt = f"""<Task>{question_text}</Task>\n<TaskID>{task_id}</TaskID>"""
        else:
            prompt = f'<Task>{question_text}</Task>'

        fixed_answer = manager_agent.run(prompt)
        print(f'---------------------fixed_answer----------------\n{fixed_answer}')

        return fixed_answer
|
44 |
|
45 |
def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
|
96 |
results_log = []
|
97 |
answers_payload = []
|
98 |
print(f"Running agent on {len(questions_data)} questions...")
|
99 |
+
|
100 |
for item in questions_data:
|
101 |
task_id = item.get("task_id")
|
102 |
question_text = item.get("question")
|
103 |
+
file_name = item.get("file_name")
|
104 |
+
file_data = None
|
105 |
+
# or file_name != ''
|
106 |
if not task_id or question_text is None:
|
107 |
print(f"Skipping item with missing task_id or question: {item}")
|
108 |
continue
|
109 |
try:
|
110 |
+
# if file_name != '':
|
111 |
+
# file_data = get_file(task_id)
|
112 |
+
# item['file_data'] = file_data
|
113 |
+
|
114 |
+
submitted_answer = agent(item)
|
115 |
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
|
116 |
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
|
117 |
except Exception as e:
|
pyproject.toml
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[project]
|
2 |
+
name = "final-assignment-template"
|
3 |
+
version = "0.1.0"
|
4 |
+
description = "Add your description here"
|
5 |
+
readme = "README.md"
|
6 |
+
authors = [
|
7 |
+
{ name = "Muhammad Maaz Uddin", email = "[email protected]" }
|
8 |
+
]
|
9 |
+
requires-python = ">=3.11"
|
10 |
+
dependencies = [
|
11 |
+
"gradio",
|
12 |
+
"itsdangerous>=2.2.0",
|
13 |
+
"langchain-community>=0.3.22",
|
14 |
+
"litellm>=1.67.2",
|
15 |
+
"requests",
|
16 |
+
"smolagents>=1.14.0",
|
17 |
+
]
|
18 |
+
|
19 |
+
[project.scripts]
|
20 |
+
final-assignment-template = "final_assignment_template:main"
|
21 |
+
|
22 |
+
[build-system]
|
23 |
+
requires = ["hatchling"]
|
24 |
+
build-backend = "hatchling.build"
|
src/final_assignment_template/__init__.py
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
def main() -> None:
    """Console-script entry point: emit a greeting and return nothing."""
    greeting = "Hello from final-assignment-template!"
    print(greeting)
|
src/final_assignment_template/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (415 Bytes). View file
|
|
src/final_assignment_template/__pycache__/agent.cpython-311.pyc
ADDED
Binary file (2.04 kB). View file
|
|
src/final_assignment_template/__pycache__/models.cpython-311.pyc
ADDED
Binary file (1.02 kB). View file
|
|
src/final_assignment_template/__pycache__/tools.cpython-311.pyc
ADDED
Binary file (3.81 kB). View file
|
|
src/final_assignment_template/agent.py
ADDED
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from smolagents import load_tool, Tool, tool, ToolCallingAgent, CodeAgent, GoogleSearchTool,FinalAnswerTool,PythonInterpreterTool , LiteLLMModel, VisitWebpageTool, DuckDuckGoSearchTool
|
2 |
+
from litellm import completion
|
3 |
+
|
4 |
+
from langchain.agents import load_tools
|
5 |
+
from langchain_community.tools.tavily_search import TavilySearchResults
|
6 |
+
|
7 |
+
import os
|
8 |
+
from src.final_assignment_template.models import openrouter_qwenCoder_model, modelLiteLLm
|
9 |
+
from src.final_assignment_template.tools import travily_tool, Video_understanding_tool, image_understanding_tool, get_task_file
|
10 |
+
# (Keep Constants as is)
|
11 |
+
# --- Constants ---
|
12 |
+
|
13 |
+
|
14 |
+
|
15 |
+
|
16 |
+
|
17 |
+
|
18 |
+
# Research sub-agent: searches the web via Tavily and can open pages to read
# their content. It is attached to `manager_agent` below as a managed agent.
web_agent = CodeAgent(
    model=openrouter_qwenCoder_model,
    tools=[
        # Alternative search providers, currently disabled:
        # GoogleSearchTool(provider="serper"),
        # DuckDuckGoSearchTool(max_results=10),
        travily_tool,
        VisitWebpageTool(),
    ],
    name="web_agent",
    description="""Browses the web to find information""",
    verbosity_level=1,
    max_steps=5,  # keep each research excursion bounded
)
|
31 |
+
|
32 |
+
# Top-level agent: receives the wrapped task prompt, can run Python, inspect
# task files/images/videos, and delegate web research to `web_agent`.
manager_agent = CodeAgent(
    name="Task_Agent",
    description="""You will be provided a task and you need to verify before giving final answer
You can perform tasks which are text and image based, skip all other
""",
    model=modelLiteLLm,
    tools=[PythonInterpreterTool(), Video_understanding_tool, image_understanding_tool, get_task_file],
    managed_agents=[web_agent],
    additional_authorized_imports=[
        "json",
        "pandas",
        "numpy",
        # Bug fix: the comma after "markdown" was missing, so the adjacent
        # string literals silently concatenated to "markdownmath", dropping
        # both "markdown" and "math" from the authorized-import allow-list.
        "markdown",
        "math", "statistics", "re", "unicodedata", "random",
        "datetime", "queue", "time", "collections", "stat", "itertools",
        "PIL", "requests",
    ],
    planning_interval=3,  # re-plan every 3 steps
    verbosity_level=1,
    # final_answer_checks=[check_reasoning_and_plot],
    max_steps=5,
)
|
54 |
+
|
55 |
+
|
src/final_assignment_template/models.py
ADDED
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from smolagents import LiteLLMModel
|
2 |
+
import os
|
3 |
+
|
4 |
+
|
5 |
+
def _openrouter_model(model_id: str) -> LiteLLMModel:
    """Build a LiteLLMModel backed by the OpenRouter API.

    All four models below share the same base URL and API key; only the
    model id differs, so the construction is factored out here.
    """
    return LiteLLMModel(
        model_id=model_id,
        api_base="https://openrouter.ai/api/v1",
        api_key=os.getenv("OPENROUTER_API_KEY"),
    )


# Code-oriented model used by the web research sub-agent.
openrouter_qwenCoder_model = _openrouter_model("openrouter/qwen/qwen-2.5-coder-32b-instruct:free")

# Reasoning model used by the manager agent.
modelLiteLLm = _openrouter_model("openrouter/deepseek/deepseek-r1:free")

# Multimodal model used by the video-understanding tool.
videoLiteLLm = _openrouter_model("openrouter/google/gemini-2.0-flash-exp:free")

# Multimodal model used by the image-understanding tool.
imageLiteLLm = _openrouter_model("openrouter/meta-llama/llama-4-maverick:free")
|
src/final_assignment_template/tools.py
ADDED
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from smolagents import Tool, tool
|
2 |
+
|
3 |
+
from langchain_community.tools.tavily_search import TavilySearchResults
|
4 |
+
|
5 |
+
import requests
|
6 |
+
import inspect
|
7 |
+
import pandas as pd
|
8 |
+
from PIL import Image
|
9 |
+
from io import BytesIO
|
10 |
+
import base64
|
11 |
+
|
12 |
+
|
13 |
+
from src.final_assignment_template.models import videoLiteLLm, imageLiteLLm
|
14 |
+
|
15 |
+
# Base URL of the Agents-course Unit 4 scoring service; task files are
# fetched from {DEFAULT_API_URL}/files/{task_id} by get_task_file below.
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

# Tavily web search (LangChain tool) wrapped as a smolagents Tool,
# returning up to 25 results per query.
travily_tool = Tool.from_langchain(TavilySearchResults(max_results=25,))
|
18 |
+
|
19 |
+
from smolagents import Tool
|
20 |
+
|
21 |
+
# class SearchTool(Tool):
|
22 |
+
# name = "SearchTool"
|
23 |
+
# description = """
|
24 |
+
# This is a tool that returns the most downloaded model of a given task on the Hugging Face Hub.
|
25 |
+
# It returns the name of the checkpoint."""
|
26 |
+
# inputs = {
|
27 |
+
# "task": {
|
28 |
+
# "type": "string",
|
29 |
+
# "description": "the task category (such as text-classification, depth-estimation, etc)",
|
30 |
+
# }
|
31 |
+
# }
|
32 |
+
# output_type = "string"
|
33 |
+
|
34 |
+
# def forward(self, task: str):
|
35 |
+
# from huggingface_hub import list_models
|
36 |
+
|
37 |
+
# model = next(iter(list_models(filter=task, sort="downloads", direction=-1)))
|
38 |
+
# return model.id
|
39 |
+
|
40 |
+
# model_downloads_tool = HFModelDownloadsTool()
|
41 |
+
|
42 |
+
|
43 |
+
@tool
def Video_understanding_tool(query: str) -> str:
    """
    Understand or find something in a video, given its link.

    The query (link plus question) is forwarded as plain text to the
    video-capable model; the model's text reply is returned.

    Args:
        query: link with your query.
    """
    # Fixed garbled log message ("processcing vidoe").
    print("processing video ", query)
    messages = [{"role": "user", "content": [{"type": "text", "text": query}]}]
    resp = videoLiteLLm(messages)
    # Fall back to a non-empty marker so the agent never receives None.
    return resp.content or 'No data'
|
55 |
+
|
56 |
+
|
57 |
+
|
58 |
+
@tool
def get_task_file(task_id: str) -> requests.models.Response:
    """
    Fetch the file attached to a task, identified by its task_id.

    Returns the raw ``requests`` response so other tools (e.g. the image
    tool) can consume ``response.content`` directly.

    Args:
        task_id: Task ID
    """
    url = f"{DEFAULT_API_URL}/files/{task_id}"
    print(url)
    # Browser-like UA: some hosts reject the default python-requests agent.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
    }
    # Robustness fix: a timeout so a stalled download cannot hang the
    # whole agent run (requests has no default timeout).
    response = requests.get(url, headers=headers, timeout=30)
    return response
|
75 |
+
|
76 |
+
@tool
def image_understanding_tool(query: str, response: requests.models.Response) -> str:
    """
    Understand or perform any query on an image fetched for a task.

    Decodes the image from the HTTP response, re-encodes it as PNG, and
    sends it with the query to the image-capable model.

    Args:
        query: Query for the image.
        response: The return value from get_task_file which returns the response.
    """
    print("processing image")

    # Decode whatever format the server sent and normalize to RGB PNG.
    image = Image.open(BytesIO(response.content)).convert("RGB")
    buffered = BytesIO()
    image.save(buffered, format="PNG")
    img_b64 = base64.b64encode(buffered.getvalue()).decode('utf-8')

    # NOTE: no longer printing the full base64 payload — it flooded the logs
    # with megabytes of text; log its size instead.
    print(f"image payload: {len(img_b64)} base64 chars")

    messages = [{
        "role": "user",
        "content": [
            {"type": "text", "text": query},
            {
                "type": "image_url",
                # Bug fix: a bare base64 string is not a valid image URL.
                # LiteLLM / OpenAI-style APIs require a data URI.
                "image_url": {"url": f"data:image/png;base64,{img_b64}"},
            },
        ],
    }]
    resp = imageLiteLLm(messages)
    print(resp.content)
    return resp.content or 'No data'
|
114 |
+
|
115 |
+
|
uv.lock
ADDED
The diff for this file is too large to render.
See raw diff
|
|