abhinav393 committed on
Commit
7a0b206
·
1 Parent(s): 81917a3

'Added agent'

Files changed (4)
  1. __init__.py +0 -0
  2. agent.py +124 -0
  3. app.py +89 -50
  4. requirements.txt +4 -1
__init__.py ADDED
File without changes
agent.py ADDED
@@ -0,0 +1,124 @@
+ # Imports
+ from langchain_core.tools import tool
+ from langchain_community.tools import DuckDuckGoSearchResults
+ from langchain_openai import ChatOpenAI
+ from langchain_groq import ChatGroq
+ from datetime import datetime
+ from langchain_core.messages import SystemMessage, HumanMessage, AIMessage, AnyMessage
+ from typing import TypedDict, Annotated
+ from langgraph.graph import START, END, StateGraph
+ from langgraph.graph.message import add_messages
+ from langgraph.prebuilt import tools_condition, ToolNode
+ import gradio as gr
+ from dotenv import load_dotenv
+
+ load_dotenv()
+
+ # Fetch from the Space's secrets (previously added); only set the variable if the secret exists
+ import os
+ openai_api_key = os.getenv("OPENAI_API_KEY")
+ if openai_api_key:
+     os.environ["OPENAI_API_KEY"] = openai_api_key
+
+ # LLM setup
+ # llm = ChatOpenAI(model="gpt-4.1")
+ llm = ChatGroq(
+     model="llama3-70b-8192",
+     api_key=os.getenv("GROQ_API_KEY"),
+     temperature=0.0,
+     max_tokens=1000,
+     top_p=1.0,
+     frequency_penalty=0.0,
+     presence_penalty=0.0,
+ )
+
+ # Tools to be used by the LLM
+
+ @tool
+ def add(a: float, b: float) -> float:
+     """Add two numbers."""
+     return a + b
+
+
+ @tool
+ def subtract(a: float, b: float) -> float:
+     """Subtract the second number from the first."""
+     return a - b
+
+
+ @tool
+ def multiply(a: float, b: float) -> float:
+     """Multiply two numbers."""
+     return a * b
+
+
+ @tool
+ def divide(a: float, b: float) -> float:
+     """Divide the first number by the second."""
+     if b == 0:
+         raise ValueError("Division by zero.")
+     return a / b
+
+
+ @tool
+ def get_current_time() -> str:
+     """Get the current date and time."""
+     return datetime.now().isoformat()
+
+
+ search = DuckDuckGoSearchResults()
+
+ # Tool list
+ tools = [add, subtract, multiply, divide, get_current_time, search]
+
+ # Bind the LLM with the tools
+ llm_with_tools = llm.bind_tools(tools, parallel_tool_calls=True)
+
+ # Class to hold the state to be passed through the graph/flow
+ class AgentState(TypedDict):
+     messages: Annotated[list[AnyMessage], add_messages]
+
+ # Define the assistant node
+ def assistant(state: AgentState) -> AgentState:
+     messages = state["messages"]
+     response = llm_with_tools.invoke(messages)
+     return {"messages": messages + [response]}
+
+ # Graph
+ builder = StateGraph(AgentState)
+
+ # Define nodes: these do the work
+ builder.add_node("assistant", assistant)
+ builder.add_node("tools", ToolNode(tools))
+
+ # Define edges: these determine how the control flow moves
+ builder.add_edge(START, "assistant")
+ builder.add_conditional_edges(
+     "assistant",
+     # If the latest message (result) from assistant is a tool call -> tools_condition routes to tools
+     # If the latest message (result) from assistant is not a tool call -> tools_condition routes to END
+     tools_condition,
+ )
+ builder.add_edge("tools", "assistant")
+
+ react_graph = builder.compile()
+
+ # Helper function to find the last LLM message/response
+ def final_ai_message(messages: list[AnyMessage]) -> str | None:
+     for message in reversed(messages):
+         if isinstance(message, AIMessage):
+             return message.content
+     return None
+
+ sys_prompt = (
+     "You are a general AI assistant. I will ask you a question. Report your thoughts, and\n"
+     "finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER].\n"
+     "YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated\n"
+     "list of numbers and/or strings.\n"
+     "If you are asked for a number, don’t use comma to write your number neither use units such as $ or\n"
+     "percent sign unless specified otherwise.\n"
+     "If you are asked for a string, don’t use articles, neither abbreviations (e.g. for cities), and write the\n"
+     "digits in plain text unless specified otherwise.\n"
+     "If you are asked for a comma separated list, apply the above rules depending of whether the element\n"
+     "to be put in the list is a number or a string."
+ )
+
+ # Create a function to interact with the graph
+ def chat_with_agent(user_input):
+     inputs = {
+         "messages": [
+             SystemMessage(content=sys_prompt),
+             HumanMessage(content=user_input),
+         ]
+     }
+
+     # Run the graph
+     state = react_graph.invoke(inputs)
+
+     final_ai_message_text = final_ai_message(state["messages"])
+
+     return final_ai_message_text if final_ai_message_text else "Sorry, I couldn't find a response."
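Taken together, agent.py wires the Groq-hosted Llama 3 model, the math/search/time tools, and a LangGraph ReAct-style loop behind a single `chat_with_agent(question)` entry point. A minimal local smoke test could look like the sketch below; it is illustrative only and not part of the commit, and it assumes `GROQ_API_KEY` is available via the environment or a `.env` file and that the packages agent.py imports (langchain-openai, langchain-community, duckduckgo-search, python-dotenv) are installed alongside those listed in requirements.txt.

```python
# Illustrative sketch (not part of the commit): run the agent once from the command line.
# Assumes GROQ_API_KEY is set and the packages imported by agent.py are installed.
from agent import chat_with_agent

if __name__ == "__main__":
    question = "What is 12 multiplied by 7?"  # example question, not from the scoring server
    answer = chat_with_agent(question)
    # The system prompt asks the model to end with "FINAL ANSWER: ...", e.g. "FINAL ANSWER: 84".
    print(answer)
```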
app.py CHANGED
@@ -3,22 +3,101 @@ import gradio as gr
  import requests
  import inspect
  import pandas as pd
+ # import agent
+ from typing import Any
+ from agent import chat_with_agent  # import the function directly
 
- # (Keep Constants as is)
- # --- Constants ---
+ # Constants
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
+ QUESTIONS_URL = f"{DEFAULT_API_URL}/questions"
+ SUBMIT_URL = f"{DEFAULT_API_URL}/submit"
+
+ # Fetch Questions
+ def fetchQuestions():
+     print(f"Fetching questions from: {QUESTIONS_URL}")
+     print("I am fetching questions from the server...")
+     try:
+         response = requests.get(QUESTIONS_URL, timeout=15)
+         response.raise_for_status()
+         questions_data = response.json()
+         if not questions_data:
+             print("Fetched questions list is empty.")
+             return "Fetched questions list is empty or invalid format.", None
+         return questions_data
+     except requests.exceptions.RequestException as e:
+         print(f"Error fetching questions: {e}")
+         return f"Error fetching questions: {e}", None
+     except requests.exceptions.JSONDecodeError as e:
+         print(f"Error decoding JSON response from questions endpoint: {e}")
+         print(f"Response text: {response.text[:500]}")
+         return f"Error decoding server response for questions: {e}", None
+     except Exception as e:
+         print(f"An unexpected error occurred fetching questions: {e}")
+         return f"An unexpected error occurred fetching questions: {e}", None
 
  # --- Basic Agent Definition ---
  # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
+
  class BasicAgent:
      def __init__(self):
          print("BasicAgent initialized.")
+
      def __call__(self, question: str) -> str:
          print(f"Agent received question (first 50 chars): {question[:50]}...")
-         fixed_answer = "This is a default answer."
-         print(f"Agent returning fixed answer: {fixed_answer}")
+         fixed_answer = chat_with_agent(question)
+         print(f"Agent's answer (last 200 chars): {fixed_answer[-200:]}")
+
+         # --- FORCE FINAL ANSWER Format ---
+         if "FINAL ANSWER:" not in fixed_answer:
+             print("WARNING: FINAL ANSWER not found. Formatting...")
+             fixed_answer = f"FINAL ANSWER: {fixed_answer.strip()}"
+
          return fixed_answer
 
+
+ def runAgent(agent_object: BasicAgent, questions_data: list[Any]):
+     # gets submitted at the end
+     results_log = []
+     # For reference only
+     answers_payload = []
+     print(f"Running agent on {len(questions_data)} questions...")
+     for item in questions_data:
+         task_id = item.get("task_id")
+         print(f"Processing task_id: {task_id}")
+         question_text = item.get("question")
+         print(f"Question text (first 50 chars): {question_text[:50]}...")
+         if not task_id or question_text is None:
+             print(f"Skipping item with missing task_id or question: {item}")
+             continue
+         try:
+             submitted_answer = agent_object(question_text)
+             answers_payload.append(
+                 {"task_id": task_id, "submitted_answer": submitted_answer}
+             )
+             results_log.append(
+                 {
+                     "Task ID": task_id,
+                     "Question": question_text,
+                     "Submitted Answer": submitted_answer,
+                 }
+             )
+         except Exception as e:
+             print(f"Error running agent on task {task_id}: {e}")
+             results_log.append(
+                 {
+                     "Task ID": task_id,
+                     "Question": question_text,
+                     "Submitted Answer": f"AGENT ERROR: {e}",
+                 }
+             )
+         print(results_log)
+
+     if not answers_payload:
+         print("Agent did not produce any answers to submit.")
+         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
+
+     return results_log, answers_payload
+
  def run_and_submit_all( profile: gr.OAuthProfile | None):
      """
      Fetches all questions, runs the BasicAgent on them, submits all answers,
@@ -34,13 +113,9 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
          print("User not logged in.")
          return "Please Login to Hugging Face with the button.", None
 
-     api_url = DEFAULT_API_URL
-     questions_url = f"{api_url}/questions"
-     submit_url = f"{api_url}/submit"
-
      # 1. Instantiate Agent ( modify this part to create your agent)
      try:
-         agent = BasicAgent()
+         agent_temp = BasicAgent()
      except Exception as e:
          print(f"Error instantiating agent: {e}")
          return f"Error initializing agent: {e}", None
@@ -49,47 +124,11 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
      print(agent_code)
 
      # 2. Fetch Questions
-     print(f"Fetching questions from: {questions_url}")
-     try:
-         response = requests.get(questions_url, timeout=15)
-         response.raise_for_status()
-         questions_data = response.json()
-         if not questions_data:
-             print("Fetched questions list is empty.")
-             return "Fetched questions list is empty or invalid format.", None
-         print(f"Fetched {len(questions_data)} questions.")
-     except requests.exceptions.RequestException as e:
-         print(f"Error fetching questions: {e}")
-         return f"Error fetching questions: {e}", None
-     except requests.exceptions.JSONDecodeError as e:
-         print(f"Error decoding JSON response from questions endpoint: {e}")
-         print(f"Response text: {response.text[:500]}")
-         return f"Error decoding server response for questions: {e}", None
-     except Exception as e:
-         print(f"An unexpected error occurred fetching questions: {e}")
-         return f"An unexpected error occurred fetching questions: {e}", None
+     questions_data = fetchQuestions()
+     print(questions_data)
 
      # 3. Run your Agent
-     results_log = []
-     answers_payload = []
-     print(f"Running agent on {len(questions_data)} questions...")
-     for item in questions_data:
-         task_id = item.get("task_id")
-         question_text = item.get("question")
-         if not task_id or question_text is None:
-             print(f"Skipping item with missing task_id or question: {item}")
-             continue
-         try:
-             submitted_answer = agent(question_text)
-             answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
-             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
-         except Exception as e:
-             print(f"Error running agent on task {task_id}: {e}")
-             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
-
-     if not answers_payload:
-         print("Agent did not produce any answers to submit.")
-         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
+     results_log, answers_payload = runAgent(agent_temp, questions_data)
 
      # 4. Prepare Submission
      submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
@@ -97,9 +136,9 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
      print(status_update)
 
      # 5. Submit
-     print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
+     print(f"Submitting {len(answers_payload)} answers to: {SUBMIT_URL}")
      try:
-         response = requests.post(submit_url, json=submission_data, timeout=60)
+         response = requests.post(SUBMIT_URL, json=submission_data, timeout=60)
          response.raise_for_status()
          result_data = response.json()
          final_status = (
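As a hedged illustration of how the new pieces in app.py fit together outside the Gradio UI, the sketch below (not part of the commit) chains fetchQuestions, BasicAgent, and runAgent and prints the payload that run_and_submit_all would POST to SUBMIT_URL; slicing to three questions is only to keep a local dry run cheap.

```python
# Illustrative dry run (not part of the commit): exercise the helpers without Gradio or submission.
from app import fetchQuestions, BasicAgent, runAgent

if __name__ == "__main__":
    questions_data = fetchQuestions()    # on success: list of {"task_id": ..., "question": ...} dicts
    agent = BasicAgent()                 # wraps chat_with_agent from agent.py
    results_log, answers_payload = runAgent(agent, questions_data[:3])  # small slice for a cheap test
    print(answers_payload)               # [{"task_id": ..., "submitted_answer": ...}, ...]
```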
requirements.txt CHANGED
@@ -1,2 +1,5 @@
  gradio
- requests
+ requests
+ langchain
+ langgraph
+ langchain-groq