Spaces:
Sleeping
Sleeping
apple muncy
committed on
Commit
·
88f3c8e
1
Parent(s):
db02cce
ssgrummons changes
Browse files
Signed-off-by: apple muncy <[email protected]>
- app.py +107 -7
- prompts.yaml +131 -0
- requirements.txt +8 -0
- retriever.py +0 -2
- tools.py +42 -13
app.py
CHANGED
@@ -1,19 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
import gradio as gr
|
2 |
-
|
|
|
3 |
from smolagents import GradioUI, CodeAgent, HfApiModel
|
4 |
|
5 |
|
6 |
from retriever import load_guest_dataset
|
7 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
8 |
model = HfApiModel()
|
9 |
guest_info_tool = load_guest_dataset()
|
10 |
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
17 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
18 |
if __name__ == "__main__":
|
19 |
-
|
|
|
from typing import TypedDict, Annotated

from langgraph.graph.message import add_messages
from langchain_core.messages import AnyMessage, HumanMessage, AIMessage, SystemMessage
from langgraph.prebuilt import ToolNode, tools_condition
from langgraph.graph import START, StateGraph
# FIX: ChatOllama was used below but never imported anywhere in this file.
from langchain_ollama import ChatOllama

from smolagents import GradioUI, CodeAgent, HfApiModel
from dotenv import load_dotenv
import gradio as gr
import yaml

# Import our custom tools from their modules
from tools import search_tool, weather_info_tool, hub_stats_tool
from retriever import load_guest_dataset  # was imported three times; once is enough

load_dotenv()

# NOTE(review): `model` is not used after the switch to ChatOllama below;
# kept so any external importers of this module still find it.
model = HfApiModel()
guest_info_tool = load_guest_dataset()

# Load prompts from YAML file
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)

# Get the system prompt from the YAML file
system_prompt = prompt_templates["system_prompt"]

# Initialize the chat model
chat = ChatOllama(model="qwen2:7b",
                  verbose=True)

# Define available tools
# FIX: bind the instantiated guest-info tool, not the loader function itself
# (`load_guest_dataset` is a factory; `guest_info_tool` is its result).
tools = [
    guest_info_tool,
    search_tool,
    weather_info_tool,
    hub_stats_tool
]

# Bind tools to the chat model
chat_with_tools = chat.bind_tools(tools)
|
54 |
+
|
# Generate the AgentState and Agent graph
class AgentState(TypedDict):
    # Conversation history. The `add_messages` reducer appends newly returned
    # messages to the existing list instead of overwriting it.
    messages: Annotated[list[AnyMessage], add_messages]
|
58 |
+
|
59 |
+
|
def assistant(state: AgentState):
    """LLM node: run the tool-bound chat model over the conversation.

    On the very first human turn, the ReAct system prompt is prepended so the
    model sees its instructions exactly once; afterwards the history is passed
    through unchanged.
    """
    history = state["messages"]
    first_turn = len(history) == 1 and isinstance(history[0], HumanMessage)
    if first_turn:
        prompt_msgs = [SystemMessage(content=system_prompt)] + history
    else:
        prompt_msgs = history
    return {"messages": [chat_with_tools.invoke(prompt_msgs)]}
|
## The graph
# Wire the assistant and tool-execution nodes into a LangGraph state machine.
builder = StateGraph(AgentState)

# Nodes: the LLM step and the tool executor.
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))

# Edges: start at the assistant. After each assistant turn, `tools_condition`
# routes to the tools node when the last message carries tool calls and ends
# the graph (direct response) otherwise; tool results loop back to the LLM.
builder.add_edge(START, "assistant")
builder.add_conditional_edges(
    "assistant",
    tools_condition,
)
builder.add_edge("tools", "assistant")
graph_app = builder.compile()

# NOTE(review): unused in this module; kept for any external importers.
graph_state = {}
|
# Gradio expects a function with (chat_history, user_message) -> (updated_chat_history)
def chat_fn(message, history):
    """Run one user turn through the graph and append the reply to history."""
    # Fixed session id. NOTE(review): the thread_id only has an effect if the
    # graph is compiled with a checkpointer — confirm whether persistence is
    # actually intended here.
    thread_cfg = {"configurable": {"thread_id": "session-123"}}

    result = graph_app.invoke(
        {"messages": [HumanMessage(content=message)]},
        config=thread_cfg,
    )

    reply = result["messages"][-1].content
    history.append((message, reply))
    # Second return value clears the input textbox.
    return history, ""
|
# Create Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("### LangGraph Chat with Gradio")
    chat_display = gr.Chatbot()
    user_input = gr.Textbox(label="Type your message")
    submit = gr.Button("Send")

    # Hook the send button: run chat_fn, update the chat pane, clear the box.
    submit.click(
        fn=chat_fn,
        inputs=[user_input, chat_display],
        outputs=[chat_display, user_input],
    )


if __name__ == "__main__":
    demo.launch()
|
prompts.yaml
ADDED
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"system_prompt": |-
|
2 |
+
You are an expert assistant who can solve any task using available tools. You will be given a task to solve as best you can.
|
3 |
+
To do so, you have been given access to a list of tools: these tools are Python functions which you can call directly.
|
4 |
+
To solve the task, you should follow the ReAct framework: Reason, Act, and Observe.
|
5 |
+
|
6 |
+
At each step, you should:
|
7 |
+
1. Reason: Explain your thinking about the task and which tools you want to use
|
8 |
+
2. Act: Call the appropriate tools with the correct parameters
|
9 |
+
3. Observe: Use the results from the tools to inform your next steps
|
10 |
+
|
11 |
+
Here are the tools available to you:
|
12 |
+
{%- for tool in tools.values() %}
|
13 |
+
- {{ tool.name }}: {{ tool.description }}
|
14 |
+
Takes inputs: {{tool.inputs}}
|
15 |
+
Returns an output of type: {{tool.output_type}}
|
16 |
+
{%- endfor %}
|
17 |
+
|
18 |
+
Here are the rules you should always follow:
|
19 |
+
1. Always explain your reasoning before using a tool
|
20 |
+
2. Use only the tools that are available to you
|
21 |
+
3. Always use the right arguments for the tools
|
22 |
+
4. Take care to not chain too many sequential tool calls in the same response
|
23 |
+
5. Call a tool only when needed, and never re-do a tool call that you previously did with the exact same parameters
|
24 |
+
6. Don't give up! You're in charge of solving the task
|
25 |
+
|
26 |
+
Now Begin!
|
27 |
+
"planning":
|
28 |
+
"initial_facts": |-
|
29 |
+
Below I will present you a task.
|
30 |
+
|
31 |
+
You will now build a comprehensive preparatory survey of which facts we have at our disposal and which ones we still need.
|
32 |
+
To do so, you will have to read the task and identify things that must be discovered in order to successfully complete it.
|
33 |
+
Don't make any assumptions. For each item, provide a thorough reasoning. Here is how you will structure this survey:
|
34 |
+
|
35 |
+
---
|
36 |
+
### 1. Facts given in the task
|
37 |
+
List here the specific facts given in the task that could help you (there might be nothing here).
|
38 |
+
|
39 |
+
### 2. Facts to look up
|
40 |
+
List here any facts that we may need to look up.
|
41 |
+
Also list where to find each of these, for instance a website, a file... - maybe the task contains some sources that you should re-use here.
|
42 |
+
|
43 |
+
### 3. Facts to derive
|
44 |
+
List here anything that we want to derive from the above by logical reasoning, for instance computation or simulation.
|
45 |
+
|
46 |
+
Keep in mind that "facts" will typically be specific names, dates, values, etc. Your answer should use the below headings:
|
47 |
+
### 1. Facts given in the task
|
48 |
+
### 2. Facts to look up
|
49 |
+
### 3. Facts to derive
|
50 |
+
Do not add anything else.
|
51 |
+
"initial_plan": |-
|
52 |
+
You are a world expert at making efficient plans to solve any task using a set of carefully crafted tools.
|
53 |
+
|
54 |
+
Now for the given task, develop a step-by-step high-level plan taking into account the above inputs and list of facts.
|
55 |
+
This plan should involve individual tasks based on the available tools, that if executed correctly will yield the correct answer.
|
56 |
+
Do not skip steps, do not add any superfluous steps. Only write the high-level plan, DO NOT DETAIL INDIVIDUAL TOOL CALLS.
|
57 |
+
After writing the final step of the plan, write the '\n<end_plan>' tag and stop there.
|
58 |
+
|
59 |
+
Here is your task:
|
60 |
+
|
61 |
+
Task:
|
62 |
+
```
|
63 |
+
{{task}}
|
64 |
+
```
|
65 |
+
You can leverage these tools:
|
66 |
+
{%- for tool in tools.values() %}
|
67 |
+
- {{ tool.name }}: {{ tool.description }}
|
68 |
+
Takes inputs: {{tool.inputs}}
|
69 |
+
Returns an output of type: {{tool.output_type}}
|
70 |
+
{%- endfor %}
|
71 |
+
|
72 |
+
List of facts that you know:
|
73 |
+
```
|
74 |
+
{{answer_facts}}
|
75 |
+
```
|
76 |
+
|
77 |
+
Now begin! Write your plan below.
|
78 |
+
"update_facts_pre_messages": |-
|
79 |
+
You are a world expert at gathering known and unknown facts based on a conversation.
|
80 |
+
Below you will find a task, and a history of attempts made to solve the task. You will have to produce a list of these:
|
81 |
+
### 1. Facts given in the task
|
82 |
+
### 2. Facts that we have learned
|
83 |
+
### 3. Facts still to look up
|
84 |
+
### 4. Facts still to derive
|
85 |
+
Find the task and history below:
|
86 |
+
"update_facts_post_messages": |-
|
87 |
+
Earlier we've built a list of facts.
|
88 |
+
But in your previous steps you may have learned useful new facts or invalidated some false ones.
|
89 |
+
Please update your list of facts based on the previous history, and provide these headings:
|
90 |
+
### 1. Facts given in the task
|
91 |
+
### 2. Facts that we have learned
|
92 |
+
### 3. Facts still to look up
|
93 |
+
### 4. Facts still to derive
|
94 |
+
|
95 |
+
Now write your new list of facts below.
|
96 |
+
"update_plan_pre_messages": |-
|
97 |
+
You are a world expert at making efficient plans to solve any task using a set of carefully crafted tools.
|
98 |
+
|
99 |
+
You have been given a task:
|
100 |
+
```
|
101 |
+
{{task}}
|
102 |
+
```
|
103 |
+
|
104 |
+
Find below the record of what has been tried so far to solve it. Then you will be asked to make an updated plan to solve the task.
|
105 |
+
If the previous tries so far have met some success, you can make an updated plan based on these actions.
|
106 |
+
If you are stalled, you can make a completely new plan starting from scratch.
|
107 |
+
"update_plan_post_messages": |-
|
108 |
+
You're still working towards solving this task:
|
109 |
+
```
|
110 |
+
{{task}}
|
111 |
+
```
|
112 |
+
|
113 |
+
You can leverage these tools:
|
114 |
+
{%- for tool in tools.values() %}
|
115 |
+
- {{ tool.name }}: {{ tool.description }}
|
116 |
+
Takes inputs: {{tool.inputs}}
|
117 |
+
Returns an output of type: {{tool.output_type}}
|
118 |
+
{%- endfor %}
|
119 |
+
|
120 |
+
Here is the up to date list of facts that you know:
|
121 |
+
```
|
122 |
+
{{facts_update}}
|
123 |
+
```
|
124 |
+
|
125 |
+
Now for the given task, develop a step-by-step high-level plan taking into account the above inputs and list of facts.
|
126 |
+
This plan should involve individual tasks based on the available tools, that if executed correctly will yield the correct answer.
|
127 |
+
Beware that you have {remaining_steps} steps remaining.
|
128 |
+
Do not skip steps, do not add any superfluous steps. Only write the high-level plan, DO NOT DETAIL INDIVIDUAL TOOL CALLS.
|
129 |
+
After writing the final step of the plan, write the '\n<end_plan>' tag and stop there.
|
130 |
+
|
131 |
+
Now write your new plan below.
|
requirements.txt
CHANGED
@@ -2,3 +2,11 @@ datasets
|
|
2 |
smolagents
|
3 |
langchain-community
|
4 |
rank_bm25
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2 |
smolagents
|
3 |
langchain-community
|
4 |
rank_bm25
|
5 |
+
gradio
|
6 |
+
litellm
|
7 |
+
langchain
|
8 |
+
langchain-litellm
|
9 |
+
langgraph
|
10 |
+
langchain-huggingface
|
11 |
+
langchain-ollama
|
12 |
+
python-dotenv
|
retriever.py
CHANGED
@@ -1,7 +1,6 @@
|
|
1 |
from smolagents import Tool
|
2 |
from langchain_community.retrievers import BM25Retriever
|
3 |
from langchain.docstore.document import Document
|
4 |
-
|
5 |
import datasets
|
6 |
|
7 |
class GuestInfoRetrieverTool(Tool):
|
@@ -31,7 +30,6 @@ class GuestInfoRetrieverTool(Tool):
|
|
31 |
def load_guest_dataset():
|
32 |
# Load the dataset
|
33 |
guest_dataset = datasets.load_dataset("agents-course/unit3-invitees", split="train")
|
34 |
-
print (guest_dataset)
|
35 |
# Convert dataset entries into Document objects
|
36 |
docs = [
|
37 |
Document(
|
|
|
1 |
from smolagents import Tool
|
2 |
from langchain_community.retrievers import BM25Retriever
|
3 |
from langchain.docstore.document import Document
|
|
|
4 |
import datasets
|
5 |
|
6 |
class GuestInfoRetrieverTool(Tool):
|
|
|
30 |
def load_guest_dataset():
|
31 |
# Load the dataset
|
32 |
guest_dataset = datasets.load_dataset("agents-course/unit3-invitees", split="train")
|
|
|
33 |
# Convert dataset entries into Document objects
|
34 |
docs = [
|
35 |
Document(
|
tools.py
CHANGED
@@ -1,18 +1,47 @@
|
|
1 |
-
from langchain_community.
|
|
|
|
|
2 |
from langchain.tools import Tool
|
3 |
|
4 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
5 |
|
6 |
-
def
|
7 |
-
"""
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
else:
|
12 |
-
return "No matching guest information found."
|
13 |
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
18 |
)
|
|
|
|
|
|
from langchain_community.tools import DuckDuckGoSearchRun
import random

from langchain.tools import Tool
# FIX: get_hub_stats below calls list_models, which was never imported.
from huggingface_hub import list_models

# FIX: app.py does `from tools import search_tool`, but this module only
# imported DuckDuckGoSearchRun without ever instantiating it.
search_tool = DuckDuckGoSearchRun()
|
5 |
|
def get_weather_info(location: str) -> str:
    """Fetches dummy weather information for a given location."""
    # Canned conditions; list order matters only for reproducibility when the
    # caller seeds the global random generator.
    canned = [
        {"condition": "Rainy", "temp_c": 15},
        {"condition": "Clear", "temp_c": 25},
        {"condition": "Windy", "temp_c": 20},
    ]
    picked = random.choice(canned)
    return f"Weather in {location}: {picked['condition']}, {picked['temp_c']}°C"
|
# Initialize the tool
# Wraps get_weather_info as a LangChain Tool so it can be bound to the model.
weather_info_tool = Tool(
    name="get_weather_info",
    func=get_weather_info,
    description="Fetches dummy weather information for a given location."
)
|
24 |
+
|
25 |
|
26 |
+
def get_hub_stats(author: str) -> str:
|
27 |
+
"""Fetches the most downloaded model from a specific author on the Hugging Face Hub."""
|
28 |
+
try:
|
29 |
+
# List models from the specified author, sorted by downloads
|
30 |
+
models = list(list_models(author=author, sort="downloads", direction=-1, limit=1))
|
|
|
|
|
31 |
|
32 |
+
if models:
|
33 |
+
model = models[0]
|
34 |
+
return f"The most downloaded model by {author} is {model.id} with {model.downloads:,} downloads."
|
35 |
+
else:
|
36 |
+
return f"No models found for author {author}."
|
37 |
+
except Exception as e:
|
38 |
+
return f"Error fetching models for {author}: {str(e)}"
|
# Initialize the tool
# Wraps get_hub_stats as a LangChain Tool for binding to the chat model.
hub_stats_tool = Tool(
    name="get_hub_stats",
    func=get_hub_stats,
    description="Fetches the most downloaded model from a specific author on the Hugging Face Hub."
)
|
46 |
+
|
47 |
+
|