bstraehle committed on
Commit
6dc2383
·
verified ·
1 Parent(s): c65ba01

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -190
app.py CHANGED
@@ -1,194 +1,4 @@
1
  import gradio as gr
2
- import getpass
3
- import os2
4
-
5
- os.environ["LANGCHAIN_TRACING_V2"] = "true"
6
- os.environ["LANGCHAIN_PROJECT"] = "Multi-agent Collaboration"
7
-
8
- from langchain_core.messages import (
9
- BaseMessage,
10
- ToolMessage,
11
- HumanMessage,
12
- )
13
- from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
14
- from langgraph.graph import END, StateGraph
15
-
16
-
17
- def create_agent(llm, tools, system_message: str):
18
- """Create an agent."""
19
- prompt = ChatPromptTemplate.from_messages(
20
- [
21
- (
22
- "system",
23
- "You are a helpful AI assistant, collaborating with other assistants."
24
- " Use the provided tools to progress towards answering the question."
25
- " If you are unable to fully answer, that's OK, another assistant with different tools "
26
- " will help where you left off. Execute what you can to make progress."
27
- " If you or any of the other assistants have the final answer or deliverable,"
28
- " prefix your response with FINAL ANSWER so the team knows to stop."
29
- " You have access to the following tools: {tool_names}.\n{system_message}",
30
- ),
31
- MessagesPlaceholder(variable_name="messages"),
32
- ]
33
- )
34
- prompt = prompt.partial(system_message=system_message)
35
- prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools]))
36
- return prompt | llm.bind_tools(tools)
37
-
38
- from langchain_core.tools import tool
39
- from typing import Annotated
40
- from langchain_experimental.utilities import PythonREPL
41
- from langchain_community.tools.tavily_search import TavilySearchResults
42
-
43
- tavily_tool = TavilySearchResults(max_results=5)
44
-
45
- # Warning: This executes code locally, which can be unsafe when not sandboxed
46
-
47
- repl = PythonREPL()
48
-
49
- @tool
50
- def python_repl(
51
- code: Annotated[str, "The python code to execute to generate your chart."]
52
- ):
53
- """Use this to execute python code. If you want to see the output of a value,
54
- you should print it out with `print(...)`. This is visible to the user."""
55
- try:
56
- result = repl.run(code)
57
- except BaseException as e:
58
- return f"Failed to execute. Error: {repr(e)}"
59
- result_str = f"Successfully executed:\n```python\n{code}\n```\nStdout: {result}"
60
- return (
61
- result_str + "\n\nIf you have completed all tasks, respond with FINAL ANSWER."
62
- )
63
-
64
- import operator
65
- from typing import Annotated, Sequence, TypedDict
66
-
67
- from langchain_openai import ChatOpenAI
68
- from typing_extensions import TypedDict
69
-
70
-
71
- # This defines the object that is passed between each node
72
- # in the graph. We will create different nodes for each agent and tool
73
- class AgentState(TypedDict):
74
- messages: Annotated[Sequence[BaseMessage], operator.add]
75
- sender: str
76
-
77
- import functools
78
- from langchain_core.messages import AIMessage
79
-
80
-
81
- # Helper function to create a node for a given agent
82
- def agent_node(state, agent, name):
83
- result = agent.invoke(state)
84
- # We convert the agent output into a format that is suitable to append to the global state
85
- if isinstance(result, ToolMessage):
86
- pass
87
- else:
88
- result = AIMessage(**result.dict(exclude={"type", "name"}), name=name)
89
- return {
90
- "messages": [result],
91
- # Since we have a strict workflow, we can
92
- # track the sender so we know who to pass to next.
93
- "sender": name,
94
- }
95
-
96
- llm = ChatOpenAI(model="gpt-4-1106-preview")
97
-
98
- # Research agent and node
99
- research_agent = create_agent(
100
- llm,
101
- [tavily_tool],
102
- system_message="You should provide accurate data for the chart_generator to use.",
103
- )
104
- research_node = functools.partial(agent_node, agent=research_agent, name="Researcher")
105
-
106
- # chart_generator
107
- chart_agent = create_agent(
108
- llm,
109
- [python_repl],
110
- system_message="Any charts you display will be visible by the user.",
111
- )
112
- chart_node = functools.partial(agent_node, agent=chart_agent, name="chart_generator")
113
-
114
- from langgraph.prebuilt import ToolNode
115
-
116
- tools = [tavily_tool, python_repl]
117
- tool_node = ToolNode(tools)
118
-
119
- # Either agent can decide to end
120
- from typing import Literal
121
-
122
- def router(state) -> Literal["call_tool", "__end__", "continue"]:
123
- # This is the router
124
- messages = state["messages"]
125
- last_message = messages[-1]
126
- if last_message.tool_calls:
127
- # The previous agent is invoking a tool
128
- return "call_tool"
129
- if "FINAL ANSWER" in last_message.content:
130
- # Any agent decided the work is done
131
- return "__end__"
132
- return "continue"
133
-
134
- workflow = StateGraph(AgentState)
135
-
136
- workflow.add_node("Researcher", research_node)
137
- workflow.add_node("chart_generator", chart_node)
138
- workflow.add_node("call_tool", tool_node)
139
-
140
- workflow.add_conditional_edges(
141
- "Researcher",
142
- router,
143
- {"continue": "chart_generator", "call_tool": "call_tool", "__end__": END},
144
- )
145
- workflow.add_conditional_edges(
146
- "chart_generator",
147
- router,
148
- {"continue": "Researcher", "call_tool": "call_tool", "__end__": END},
149
- )
150
-
151
- workflow.add_conditional_edges(
152
- "call_tool",
153
- # Each agent node updates the 'sender' field
154
- # the tool calling node does not, meaning
155
- # this edge will route back to the original agent
156
- # who invoked the tool
157
- lambda x: x["sender"],
158
- {
159
- "Researcher": "Researcher",
160
- "chart_generator": "chart_generator",
161
- },
162
- )
163
- workflow.set_entry_point("Researcher")
164
- graph = workflow.compile()
165
-
166
- from IPython.display import Image, display
167
-
168
- try:
169
- display(Image(graph.get_graph(xray=True).draw_mermaid_png()))
170
- except:
171
- # This requires some extra dependencies and is optional
172
- pass
173
-
174
- events = graph.stream(
175
- {
176
- "messages": [
177
- HumanMessage(
178
- content="Fetch the UK's GDP over the past 5 years,"
179
- " then draw a line graph of it."
180
- " Once you code it up, finish."
181
- )
182
- ],
183
- },
184
- # Maximum number of steps to take in the graph
185
- {"recursion_limit": 150},
186
- )
187
- for s in events:
188
- print(s)
189
- print("----")
190
-
191
- ###
192
 
193
  def invoke(openai_api_key):
194
  if (openai_api_key == ""):
 
1
  import gradio as gr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
 
3
  def invoke(openai_api_key):
4
  if (openai_api_key == ""):