hieudx7 commited on
Commit
f224484
·
1 Parent(s): 81917a3
Files changed (6) hide show
  1. agent.py +209 -0
  2. metadata.jsonl +0 -0
  3. prompts.py +5 -0
  4. requirements.txt +14 -1
  5. retriever.py +44 -0
  6. tools.py +112 -0
agent.py ADDED
@@ -0,0 +1,209 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Basic Agent Evaluation Runner"""
2
+ import os
3
+ import inspect
4
+ import gradio as gr
5
+ import requests
6
+ import pandas as pd
7
+ from langchain_core.messages import HumanMessage
8
+ from agent import build_graph
9
+
10
+
11
+
12
+ # (Keep Constants as is)
13
+ # --- Constants ---
14
+ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
15
+
16
+ # --- Basic Agent Definition ---
17
+ # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
18
+
19
+
20
class BasicAgent:
    """A LangGraph-backed agent.

    Wraps the compiled graph returned by ``build_graph()``.  The graph's
    reply is expected to end with the template ``FINAL ANSWER: <answer>``
    (see the system prompt in prompts.py); ``__call__`` strips that prefix
    and returns only the answer text.
    """

    def __init__(self):
        print("BasicAgent initialized.")
        self.graph = build_graph()

    def __call__(self, question: str) -> str:
        """Run the graph on *question* and return the bare final answer."""
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        # Wrap the question in a HumanMessage from langchain_core.
        messages = [HumanMessage(content=question)]
        result = self.graph.invoke({"messages": messages})
        answer = result["messages"][-1].content
        # BUG FIX: the original returned answer[14:], silently assuming the
        # reply starts with the 14-character prefix "FINAL ANSWER: " and
        # chopping 14 characters off any reply that does not.  Strip the
        # prefix explicitly so a non-conforming reply is returned intact.
        return answer.removeprefix("FINAL ANSWER:").strip()
33
+
34
+
35
def run_and_submit_all(profile: gr.OAuthProfile | None):
    """Fetch all questions, run the BasicAgent on them, submit all answers,
    and display the results.

    Args:
        profile: OAuth profile of the logged-in Hugging Face user (injected
            by ``gr.LoginButton``), or None when nobody is logged in.

    Returns:
        A ``(status_message, results_dataframe)`` tuple for the Gradio
        outputs; the dataframe is None when the run aborts before any
        question is attempted.
    """
    # --- Determine HF Space runtime URL and repo URL ---
    space_id = os.getenv("SPACE_ID")  # used to link back to the code

    if profile:
        username = f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # 1. Instantiate the agent (modify this part to create your agent).
    try:
        agent = BasicAgent()
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None
    # When this app runs as a Hugging Face Space, this link points toward
    # your codebase (useful for others, so please keep it public).
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)

    # 2. Fetch the questions.
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            print("Fetched questions list is empty.")
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    except requests.exceptions.JSONDecodeError as e:
        print(f"Error decoding JSON response from questions endpoint: {e}")
        print(f"Response text: {response.text[:500]}")
        return f"Error decoding server response for questions: {e}", None
    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")
        return f"An unexpected error occurred fetching questions: {e}", None

    # 3. Run the agent on every question, recording answers and errors.
    results_log = []
    answers_payload = []
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
            submitted_answer = agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            # A failed task is logged but does not abort the whole run.
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # 4. Prepare the submission payload.
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)

    # 5. Submit.  The per-question log is returned alongside the status on
    # both success and every failure path, so build the DataFrame once here
    # instead of repeating it in each exception handler.
    results_df = pd.DataFrame(results_log)
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        print("Submission successful.")
        return final_status, results_df
    except requests.exceptions.HTTPError as e:
        error_detail = f"Server responded with status {e.response.status_code}."
        try:
            error_json = e.response.json()
            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
        except requests.exceptions.JSONDecodeError:
            error_detail += f" Response: {e.response.text[:500]}"
        status_message = f"Submission Failed: {error_detail}"
        print(status_message)
        return status_message, results_df
    except requests.exceptions.Timeout:
        status_message = "Submission Failed: The request timed out."
        print(status_message)
        return status_message, results_df
    except requests.exceptions.RequestException as e:
        status_message = f"Submission Failed: Network error - {e}"
        print(status_message)
        return status_message, results_df
    except Exception as e:
        status_message = f"An unexpected error occurred during submission: {e}"
        print(status_message)
        return status_message, results_df
154
+
155
+
156
# --- Build Gradio Interface using Blocks ---
# Instructions shown at the top of the page (text unchanged from the
# original template).
_INSTRUCTIONS_MD = """
**Instructions:**

1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.

---
**Disclaimers:**
Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
"""

with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown(_INSTRUCTIONS_MD)

    gr.LoginButton()

    submit_btn = gr.Button("Run Evaluation & Submit All Answers")

    status_box = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    # max_rows=10 was removed from the DataFrame constructor (not supported).
    answers_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    # The logged-in profile is injected into run_and_submit_all by the
    # LoginButton; only the outputs need wiring here.
    submit_btn.click(
        fn=run_and_submit_all,
        outputs=[status_box, answers_table],
    )

if __name__ == "__main__":
    print("\n" + "-" * 30 + " App Starting " + "-" * 30)
    # Report SPACE_HOST and SPACE_ID at startup so the logs show where the
    # app is running and where its code lives.
    host_env = os.getenv("SPACE_HOST")
    space_env = os.getenv("SPACE_ID")

    if host_env:
        print(f"✅ SPACE_HOST found: {host_env}")
        print(f"   Runtime URL should be: https://{host_env}.hf.space")
    else:
        print("ℹ️ SPACE_HOST environment variable not found (running locally?).")

    if space_env:
        print(f"✅ SPACE_ID found: {space_env}")
        print(f"   Repo URL: https://huggingface.co/spaces/{space_env}")
        print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_env}/tree/main")
    else:
        print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    print("-" * (60 + len(" App Starting ")) + "\n")

    print("Launching Gradio Interface for Basic Agent Evaluation...")
    demo.launch(debug=True, share=False)
metadata.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
prompts.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
# System prompt for the agent.  The "FINAL ANSWER: " template is load-bearing:
# app-side code strips that exact prefix from the model's reply.
SYS_PROMPT = """You are a helpful assistant tasked with answering questions using a set of tools.
Now, I will ask you a question. Report your thoughts, and finish your answer with the following template:
FINAL ANSWER: [YOUR FINAL ANSWER].
YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.
Your answer should only start with "FINAL ANSWER: ", then follows with the answer."""
requirements.txt CHANGED
@@ -1,2 +1,15 @@
1
  gradio
2
- requests
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  gradio
2
+ requests
3
+ chromadb
4
+ langchain
5
+ langchain-community
6
+ langchain-core
7
+ langchain-chroma
8
+ langchain_openai
9
+ langchain-huggingface
10
+ langgraph
11
+ arxiv
12
+ pymupdf
13
+ wikipedia
14
+ python-dotenv
15
+ duckduckgo-search
retriever.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Build the Q/A vector store from metadata.jsonl.

Reads every record of metadata.jsonl (one JSON object per line), wraps
each question/final-answer pair in a Document and indexes them into a
persistent Chroma collection.
"""
from langchain_chroma import Chroma  # duplicate import removed
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_core.documents import Document
import json

from uuid import uuid4


print("Loading embedding model...")
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")

vector_store = Chroma(
    collection_name="example_collection",
    embedding_function=embeddings,
    persist_directory="./chroma_langchain_db",  # Where to save data locally, remove if not necessary
)

# Load the metadata.jsonl file: parse each line directly instead of first
# materializing the raw lines in a list.
with open('metadata.jsonl', 'r') as jsonl_file:
    json_QA = [json.loads(line) for line in jsonl_file]

# Turn each Q/A record into a Document keyed by its task_id (the original
# loop bound an enumerate() index that was never used).
docs = []
for sample in json_QA:
    content = f"Question: {sample['Question']}\n\nFinal answer: {sample['Final answer']}"
    docs.append(
        Document(
            page_content=content,
            metadata={"source": sample['task_id']},
            id=str(uuid4()),
        )
    )

# Add documents to the vector store.
print("Adding documents to the vector store...")
vector_store.add_documents(documents=docs)
# Free the in-memory copies; the data now lives in the persisted store.
del docs
del json_QA
tools.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_community.tools import DuckDuckGoSearchResults
2
+ from langchain_community.document_loaders import WikipediaLoader
3
+ from langchain_community.document_loaders import ArxivLoader
4
+
5
+ from langchain_core.documents import Document
6
+
7
+
8
+ SEP_CHAR = "\n\n---\n\n"
9
+
10
+
11
def multiply(a: int, b: int) -> int:
    """Return the product of the two given numbers.

    Args:
        a: first int
        b: second int
    """
    product = a * b
    return product
18
+
19
+
20
def add(a: int, b: int) -> int:
    """Return the sum of the two given numbers.

    Args:
        a: first int
        b: second int
    """
    total = a + b
    return total
28
+
29
+
30
def subtract(a: int, b: int) -> int:
    """Return the difference of the two given numbers (a minus b).

    Args:
        a: first int
        b: second int
    """
    difference = a - b
    return difference
38
+
39
+
40
def divide(a: int, b: int) -> float:
    """Divide two numbers using true division.

    Args:
        a: first int (dividend)
        b: second int (divisor)

    Returns:
        The quotient a / b.  The original annotation said ``int``, but
        Python's ``/`` is true division and always yields a float.

    Raises:
        ValueError: If ``b`` is zero.
    """
    if b == 0:
        raise ValueError("Cannot divide by zero.")
    return a / b
50
+
51
+
52
def modulus(a: int, b: int) -> int:
    """Return the remainder of dividing a by b (Python ``%`` semantics).

    Args:
        a: first int
        b: second int
    """
    remainder = a % b
    return remainder
60
+
61
+
62
def wiki_search(query: str) -> str:
    """Search Wikipedia for a query and return maximum 2 results.

    Args:
        query: The search query.

    Returns:
        The matching pages joined into a single string, each wrapped in a
        pseudo-XML ``<Document>`` tag.  (The original annotation claimed
        ``dict``, but ``str.join`` returns a string.)
    """
    search_docs: list[Document] = WikipediaLoader(query=query, load_max_docs=2).load()
    return SEP_CHAR.join(
        f'<Document source="{doc.metadata["source"]}"/>\n{doc.page_content}\n</Document>'
        for doc in search_docs
    )
74
+
75
+
76
def web_search(query: str) -> str:
    """Search Web for a query and return maximum 3 results.

    Args:
        query: The search query.

    Returns:
        The results joined into a single string of ``<Document>`` snippets.
        (The original annotation claimed ``dict``, but ``str.join`` returns
        a string.)
    """
    search_docs: list[dict] = DuckDuckGoSearchResults(num_results=3, output_format='list').invoke(input=query)
    # BUG FIX: the original wrote {doc['snippet']} inside a single-quoted
    # f-string — nested same-type quotes are a SyntaxError on Python < 3.12
    # (only PEP 701 made them legal).  Use double-quoted keys throughout.
    return SEP_CHAR.join(
        f'<Document source="{doc["link"]}" title="{doc.get("title", "")}"/>\n{doc["snippet"]}\n</Document>'
        for doc in search_docs
    )
88
+
89
+
90
def arvix_search(query: str) -> str:
    """Search Arxiv for a query and return its top results.

    Args:
        query: The search query.

    Returns:
        The results joined into a single string of ``<Document>`` tags, each
        containing title, authors and the first 1000 characters of the
        paper.  (The original annotation claimed ``dict``, but ``str.join``
        returns a string.)
    """
    # NOTE(review): the original docstring promised "maximum 3 result" but no
    # limit is passed to ArxivLoader — presumably its default cap applies;
    # confirm and pass an explicit limit if 3 is truly intended.
    search_docs: list[Document] = ArxivLoader(query=query).load()
    return SEP_CHAR.join(
        f'<Document title="{doc.metadata["Title"]}" authors="{doc.metadata.get("Authors", "")}"/>\n{doc.page_content[:1000]}\n</Document>'
        for doc in search_docs
    )
102
+
103
# Registry of every tool exposed by this module — presumably consumed by the
# agent's graph builder when binding tools to the LLM (verify against agent.py).
# "arvix_search" keeps its original (misspelled) name: the function name is
# part of the tool-calling interface.
tools = [
    multiply,
    add,
    subtract,
    divide,
    modulus,
    wiki_search,
    web_search,
    arvix_search,
]