baixianger committed on
Commit
a7035da
·
1 Parent(s): 41e1eee

Update app.py and add agent implementation

Browse files
Files changed (2) hide show
  1. agent.py +152 -0
  2. app.py +17 -4
agent.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """LangGraph Agent"""
2
+ import dotenv
3
+ from langgraph.graph import MessagesState
4
+ from langgraph.graph import START, StateGraph
5
+ from langgraph.prebuilt import tools_condition
6
+ from langgraph.prebuilt import ToolNode
7
+ from langchain_google_genai import ChatGoogleGenerativeAI
8
+ from langchain_community.tools.tavily_search import TavilySearchResults
9
+ from langchain_community.document_loaders import WikipediaLoader
10
+ from langchain_community.document_loaders import ArxivLoader
11
+ from langchain_core.tools import tool
12
+ from langchain_core.messages import SystemMessage
13
+
14
@tool
def multiply(a: int, b: int) -> int:
    """Return the product of two integers.

    Args:
        a: first int
        b: second int
    """
    product = a * b
    return product
23
+
24
@tool
def add(a: int, b: int) -> int:
    """Return the sum of two integers.

    Args:
        a: first int
        b: second int
    """
    total = a + b
    return total
33
+
34
@tool
def subtract(a: int, b: int) -> int:
    """Return the difference ``a - b`` of two integers.

    Args:
        a: first int
        b: second int
    """
    difference = a - b
    return difference
43
+
44
@tool
def divide(a: int, b: int) -> float:
    """Divide two numbers.

    Uses true division, so the result is a float even for int inputs
    (the original ``-> int`` annotation did not match the actual value).

    Args:
        a: first int (dividend)
        b: second int (divisor)

    Raises:
        ValueError: if ``b`` is zero.
    """
    if b == 0:
        raise ValueError("Cannot divide by zero.")
    return a / b
55
+
56
@tool
def modulus(a: int, b: int) -> int:
    """Return ``a`` modulo ``b``.

    Args:
        a: first int
        b: second int
    """
    remainder = a % b
    return remainder
65
+
66
@tool
def wiki_search(query: str) -> dict:
    """Search Wikipedia for a query and return maximum 2 results.

    Args:
        query: The search query.

    Returns:
        A dict with the single key ``"wiki_results"`` mapping to the
        matching pages rendered as ``<Document>`` blocks separated by
        ``---``.  (The original ``-> str`` annotation did not match the
        dict that is actually returned.)
    """
    search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
    formatted_search_docs = "\n\n---\n\n".join(
        f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
        for doc in search_docs
    )
    return {"wiki_results": formatted_search_docs}
79
+
80
@tool
def web_search(query: str) -> dict:
    """Search Tavily for a query and return maximum 3 results.

    Args:
        query: The search query.

    Returns:
        A dict with the single key ``"web_results"`` mapping to the
        results rendered as ``<Document>`` blocks separated by ``---``.
        (The original ``-> str`` annotation did not match the dict
        that is actually returned.)
    """
    # BaseTool.invoke takes its input positionally; the original
    # ``invoke(query=query)`` raised TypeError (there is no ``query`` kwarg).
    search_docs = TavilySearchResults(max_results=3).invoke(query)
    # Tavily returns a list of plain dicts with "url"/"content" keys, not
    # Document objects — the original ``doc.metadata`` / ``doc.page_content``
    # access raised AttributeError on every result.
    formatted_search_docs = "\n\n---\n\n".join(
        f'<Document source="{doc["url"]}"/>\n{doc["content"]}\n</Document>'
        for doc in search_docs
    )
    return {"web_results": formatted_search_docs}
93
+
94
@tool
def arvix_search(query: str) -> dict:
    """Search Arxiv for a query and return maximum 3 results.

    The "arvix" spelling is kept because the tool name is part of the
    public interface (it is registered in the ``tools`` list).

    Args:
        query: The search query.

    Returns:
        A dict with the single key ``"arvix_results"`` mapping to up to 3
        papers, each truncated to its first 1000 characters.
        (The original ``-> str`` annotation did not match the dict
        that is actually returned.)
    """
    search_docs = ArxivLoader(query=query, load_max_docs=3).load()
    # NOTE(review): ArxivLoader metadata exposes keys like "Published",
    # "Title", "Authors" — a plain "source" key is not guaranteed, so use
    # .get() instead of the original KeyError-prone ["source"] lookup;
    # confirm against the installed langchain_community version.
    formatted_search_docs = "\n\n---\n\n".join(
        f'<Document source="{doc.metadata.get("source", "")}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content[:1000]}\n</Document>'
        for doc in search_docs
    )
    return {"arvix_results": formatted_search_docs}
107
+
108
# Every tool exposed to the LLM: the five arithmetic helpers plus the
# three search helpers defined above.
tools = [multiply, add, subtract, divide, modulus, wiki_search, web_search, arvix_search]
118
+
119
+
120
# Load environment variables from .env file (API credentials etc.).
# NOTE: everything below runs at import time as a module-level side effect.
dotenv.load_dotenv()
# Gemini chat model with the tool list bound, so the model can emit
# structured tool calls that the graph's ToolNode executes.
llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash")
llm_with_tools = llm.bind_tools(tools)

# load the system prompt from the file
# NOTE(review): "system_prompt.txt" is resolved relative to the current
# working directory, not this file — confirm the app is started from the
# directory containing that file.
with open("system_prompt.txt", "r", encoding="utf-8") as f:
    system_prompt = f.read()

# System message prepended to every conversation in the assistant node.
sys_msg = SystemMessage(content=system_prompt)
131
+
132
# Node
def assistant(state: MessagesState):
    """Assistant node: run the tool-bound LLM over the conversation.

    The system message is prepended to the accumulated messages; the
    model's reply is returned as a state update.
    """
    reply = llm_with_tools.invoke([sys_msg] + state["messages"])
    return {"messages": [reply]}
136
+
137
# Build graph function
def build_graph():
    """Assemble and compile the agent graph.

    Flow: START -> assistant; ``tools_condition`` then routes to the
    tool node whenever the LLM requested tool calls (otherwise the run
    ends), and tool output is fed back into the assistant.
    """
    graph = StateGraph(MessagesState)
    graph.add_node("assistant", assistant)
    graph.add_node("tools", ToolNode(tools))
    graph.add_edge(START, "assistant")
    graph.add_conditional_edges("assistant", tools_condition)
    graph.add_edge("tools", "assistant")
    return graph.compile()
app.py CHANGED
@@ -1,8 +1,13 @@
 
1
  import os
 
2
  import gradio as gr
3
  import requests
4
- import inspect
5
  import pandas as pd
 
 
 
 
6
 
7
  # (Keep Constants as is)
8
  # --- Constants ---
@@ -10,14 +15,22 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
10
 
11
  # --- Basic Agent Definition ---
12
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
 
 
13
  class BasicAgent:
 
14
  def __init__(self):
15
  print("BasicAgent initialized.")
 
 
16
  def __call__(self, question: str) -> str:
17
  print(f"Agent received question (first 50 chars): {question[:50]}...")
18
- fixed_answer = "This is a default answer."
19
- print(f"Agent returning fixed answer: {fixed_answer}")
20
- return fixed_answer
 
 
 
21
 
22
  def run_and_submit_all( profile: gr.OAuthProfile | None):
23
  """
 
1
+ """ Basic Agent Evaluation Runner"""
2
  import os
3
+ import inspect
4
  import gradio as gr
5
  import requests
 
6
  import pandas as pd
7
+ from langchain_core.messages import HumanMessage
8
+ from agent import build_graph
9
+
10
+
11
 
12
  # (Keep Constants as is)
13
  # --- Constants ---
 
15
 
16
  # --- Basic Agent Definition ---
17
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
18
+
19
+
20
class BasicAgent:
    """A langgraph agent."""

    def __init__(self):
        print("BasicAgent initialized.")
        # Compiled LangGraph used to answer questions.
        self.graph = build_graph()

    def __call__(self, question: str) -> str:
        """Run the agent graph on *question* and return the final answer text."""
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        # Wrap the question in a HumanMessage from langchain_core
        messages = [HumanMessage(content=question)]
        # graph.invoke returns the final *state dict*, not a message list;
        # the original code indexed the dict directly with [-1], which
        # raised KeyError — extract the "messages" list from the state first.
        result = self.graph.invoke({"messages": messages})
        answer = result["messages"][-1].content
        return answer
33
+
34
 
35
  def run_and_submit_all( profile: gr.OAuthProfile | None):
36
  """