Eric Botti
committed on
Commit
·
7c01a62
1
Parent(s):
70dce6e
Updated to langchain 0.1.0, restructured agents
Browse files- src/agent_process.py +17 -103
- src/agents.py +34 -0
- src/reasoning_tools.py +22 -6
src/agent_process.py
CHANGED
@@ -45,43 +45,19 @@ Provide:
|
|
45 |
Output:
|
46 |
- Another player's name as a choice of who to vote for e.g. Vote("Lisa")
|
47 |
"""
|
|
|
48 |
import random
|
49 |
import json
|
50 |
import re
|
51 |
import uuid
|
52 |
|
53 |
-
from langchain.agents import initialize_agent
|
54 |
-
from langchain.agents import AgentType
|
55 |
-
from langchain.chat_models import ChatOpenAI
|
56 |
from langchain.prompts import PromptTemplate
|
|
|
|
|
57 |
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
# LLM Configuration for each role
|
62 |
-
# TODO: Agents keep throwing OutputParserExceptions can't parse output, need to look into this
|
63 |
-
# Chameleon
|
64 |
-
chameleon_llm_params = {
|
65 |
-
'model': 'gpt-3.5-turbo',
|
66 |
-
'temperature': 1
|
67 |
-
}
|
68 |
-
chameleon_llm = ChatOpenAI(**chameleon_llm_params)
|
69 |
-
chameleon_agent = initialize_agent(tools, chameleon_llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, return_intermediate_steps=True)
|
70 |
-
# Herd
|
71 |
-
herd_llm_params = {
|
72 |
-
'model': 'gpt-3.5-turbo',
|
73 |
-
'temperature': 1
|
74 |
-
}
|
75 |
-
herd_llm = ChatOpenAI(**herd_llm_params)
|
76 |
-
herd_agent = initialize_agent(tools, chameleon_llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, return_intermediate_steps=True)
|
77 |
-
# Judge
|
78 |
-
judge_llm_params = {
|
79 |
-
'model': 'gpt-3.5-turbo',
|
80 |
-
'temperature': 1
|
81 |
-
}
|
82 |
-
judge_llm = ChatOpenAI(**judge_llm_params)
|
83 |
-
judge_agent = initialize_agent(tools, chameleon_llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
|
84 |
-
|
85 |
|
86 |
# Game Setup
|
87 |
NUM_PLAYERS = 5
|
@@ -190,7 +166,11 @@ judge_response = judge_agent.invoke({"input": JUDGE_PROMPT})
|
|
190 |
|
191 |
|
192 |
# Determine Winner - doesn't work because sometimes the judges final answer will mention multiple players...
|
193 |
-
|
|
|
|
|
|
|
|
|
194 |
if herd_win:
|
195 |
winner = "Herd"
|
196 |
else:
|
@@ -199,84 +179,18 @@ else:
|
|
199 |
print(f"The {winner} has won!")
|
200 |
|
201 |
# Save the experiment
|
202 |
-
game_ruleset = '
|
203 |
experiment_id = f"{game_ruleset}-{uuid.uuid4().hex}"
|
204 |
experiment = {
|
205 |
"experiment_id": experiment_id,
|
206 |
"game_ruleset": game_ruleset,
|
207 |
-
"chameleon_llm_parameters":
|
208 |
-
"herd_llm_parameters":
|
209 |
-
"judge_llm_parameters":
|
210 |
"player_responses": player_responses
|
211 |
}
|
212 |
|
213 |
experiment_path = os.path.join(os.pardir, 'experiments', f"{experiment_id}.json")
|
214 |
with open(experiment_path, "w") as output_file:
|
215 |
-
output_file.write(json.dumps(experiment))
|
216 |
-
|
217 |
-
|
218 |
-
# # This is an LLMChain to write a synopsis given a title of a play and the era it is set in.
|
219 |
-
# llm = OpenAI(temperature=.7)
|
220 |
-
# synopsis_template = """You are a playwright. Given the title of play and the era it is set in, it is your job to write a synopsis for that title.
|
221 |
-
#
|
222 |
-
# Title: {title}
|
223 |
-
# Era: {era}
|
224 |
-
# Playwright: This is a synopsis for the above play:"""
|
225 |
-
# synopsis_prompt_template = PromptTemplate(input_variables=["title", "era"], template=synopsis_template)
|
226 |
-
# synopsis_chain = LLMChain(llm=llm, prompt=synopsis_prompt_template, output_key="synopsis")
|
227 |
-
#
|
228 |
-
# # This is an LLMChain to write a review of a play given a synopsis.
|
229 |
-
# llm = OpenAI(temperature=.7)
|
230 |
-
# template = """You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play.
|
231 |
-
#
|
232 |
-
# Play Synopsis:
|
233 |
-
# {synopsis}
|
234 |
-
# Review from a New York Times play critic of the above play:"""
|
235 |
-
# prompt_template = PromptTemplate(input_variables=["synopsis"], template=template)
|
236 |
-
# review_chain = LLMChain(llm=llm, prompt=prompt_template, output_key="review")
|
237 |
-
#
|
238 |
-
# # This is the overall chain where we run these two chains in sequence.
|
239 |
-
# from langchain.chains import SequentialChain
|
240 |
-
# overall_chain = SequentialChain(
|
241 |
-
# chains=[synopsis_chain, review_chain],
|
242 |
-
# input_variables=["era", "title"],
|
243 |
-
# # Here we return multiple variables
|
244 |
-
# output_variables=["synopsis", "review"],
|
245 |
-
# verbose=True)
|
246 |
-
|
247 |
-
#
|
248 |
-
# #BABYAGI
|
249 |
-
# import os
|
250 |
-
# from collections import deque
|
251 |
-
# from typing import Dict, List, Optional, Any
|
252 |
-
#
|
253 |
-
# from langchain.chains import LLMChain
|
254 |
-
# from langchain.prompts import PromptTemplate
|
255 |
-
# from langchain.embeddings import OpenAIEmbeddings
|
256 |
-
# # from langchain.llms import BaseLLM
|
257 |
-
# # from langchain.schema.vectorstore import VectorStore
|
258 |
-
# # from pydantic import BaseModel, Field
|
259 |
-
# # from langchain.chains.base import Chain
|
260 |
-
# from langchain_experimental.autonomous_agents import BabyAGI
|
261 |
-
#
|
262 |
-
# from langchain.vectorstores import FAISS
|
263 |
-
# from langchain.docstore import InMemoryDocstore
|
264 |
-
# # Define your embedding model
|
265 |
-
# embeddings_model = OpenAIEmbeddings()
|
266 |
-
# # Initialize the vectorstore as empty
|
267 |
-
# import faiss
|
268 |
-
#
|
269 |
-
# embedding_size = 1536
|
270 |
-
# index = faiss.IndexFlatL2(embedding_size)
|
271 |
-
# vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
|
272 |
-
# llm = ChatOpenAI(model='gpt-4', temperature=0)
|
273 |
-
#
|
274 |
-
# # Logging of LLMChains
|
275 |
-
# verbose = False
|
276 |
-
# # If None, will keep going on forever
|
277 |
-
# max_iterations = 10
|
278 |
-
# baby_agi = BabyAGI.from_llm(
|
279 |
-
# llm=llm, vectorstore=vectorstore, verbose=verbose, max_iterations=max_iterations
|
280 |
-
# )
|
281 |
-
#
|
282 |
-
# baby_agi({"objective": VOTING_PROMPT})
|
|
|
45 |
Output:
|
46 |
- Another player's name as a choice of who to vote for e.g. Vote("Lisa")
|
47 |
"""
|
48 |
+
|
49 |
import random
import json
import re
import uuid

from langchain.prompts import PromptTemplate
# Project-local modules: PlayerAgent wraps a role-configured AgentExecutor;
# llm_parameters maps each role name to its ChatOpenAI settings.
from agents import PlayerAgent, llm_parameters
from reasoning_tools import extract_vote

# One agent per game role; the role string selects the model/temperature
# from agents.llm_parameters.
chameleon_agent = PlayerAgent(role="chameleon")
herd_agent = PlayerAgent(role="herd")
judge_agent = PlayerAgent(role="judge")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
61 |
|
62 |
# Game Setup
|
63 |
NUM_PLAYERS = 5
|
|
|
166 |
|
167 |
|
168 |
# Determine Winner - doesn't work because sometimes the judges final answer will mention multiple players...
|
169 |
+
# Pull the voted-for player name out of the judge's free-text answer
# (extract_vote runs a small LLM chain over the statement).
player_vote = extract_vote(judge_response['output'])

print(player_vote)

# The herd wins if the judge's vote names the chameleon's player number.
# NOTE(review): re.match anchors only at the START of player_vote — this
# assumes extract_vote returns a string beginning with "player <n>"; confirm.
herd_win = re.match(f"Player {selected_chameleon+1}".lower(), player_vote.lower())
|
174 |
if herd_win:
|
175 |
winner = "Herd"
|
176 |
else:
|
|
|
179 |
print(f"The {winner} has won!")
|
180 |
|
181 |
# Save the experiment
|
182 |
+
game_ruleset = 'vote'
# Unique id per run so repeated experiments never collide on disk.
experiment_id = f"{game_ruleset}-{uuid.uuid4().hex}"
experiment = {
    "experiment_id": experiment_id,
    "game_ruleset": game_ruleset,
    # Record the per-role LLM settings so runs can be reproduced/compared.
    "chameleon_llm_parameters": llm_parameters['chameleon'],
    "herd_llm_parameters": llm_parameters['herd'],
    "judge_llm_parameters": llm_parameters['judge'],
    "player_responses": player_responses
}

# NOTE(review): `os` is not among this module's visible imports — confirm
# `import os` exists at the top of the file.
experiment_path = os.path.join(os.pardir, 'experiments', f"{experiment_id}.json")
with open(experiment_path, "w") as output_file:
    output_file.write(json.dumps(experiment, indent=4))
|
196 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/agents.py
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain import hub
from langchain.agents import AgentExecutor, create_openai_functions_agent
from langchain_openai import ChatOpenAI

from langchain.prompts import PromptTemplate
from reasoning_tools import animal_tools, extract_vote

# LLM Configuration for each role
llm_parameters = {
    "chameleon": {
        'model': 'gpt-4-turbo-preview',
        'temperature': 1
    },
    "herd": {
        'model': 'gpt-3.5-turbo',
        'temperature': 1
    },
    "judge": {
        'model': 'gpt-3.5-turbo',
        'temperature': 1
    }
}

# Shared base prompt for OpenAI function-calling agents, pulled once at import.
prompt = hub.pull("hwchase17/openai-functions-agent")


class PlayerAgent(AgentExecutor):
    """AgentExecutor pre-configured for one game role.

    The role string ("chameleon", "herd" or "judge") selects the model and
    temperature from llm_parameters above; all roles share animal_tools.
    """

    def __init__(self, role):
        # Each role gets its own ChatOpenAI instance per llm_parameters[role].
        llm = ChatOpenAI(**llm_parameters[role])

        agent = create_openai_functions_agent(llm, animal_tools, prompt)

        # return_intermediate_steps=True so callers can inspect tool usage.
        super().__init__(agent=agent, tools=animal_tools, verbose=True, return_intermediate_steps=True)
|
src/reasoning_tools.py
CHANGED
@@ -1,6 +1,8 @@
|
|
|
|
|
|
1 |
from langchain.prompts import PromptTemplate
|
2 |
from langchain.tools import Tool
|
3 |
-
from
|
4 |
from langchain.chains import LLMChain
|
5 |
|
6 |
tool_llm = ChatOpenAI(model='gpt-3.5-turbo', temperature=0)
|
@@ -22,7 +24,7 @@ likely_animals_chain = LLMChain(llm=tool_llm, prompt=likely_animals_prompt)
|
|
22 |
|
23 |
def get_likely_animals(description: str) -> str:
|
24 |
"""Provides animals from a description"""
|
25 |
-
return likely_animals_chain.
|
26 |
|
27 |
|
28 |
# Animal Match Tool
|
@@ -43,15 +45,29 @@ animal_match_template = PromptTemplate(
|
|
43 |
animal_match_tool = LLMChain(llm=tool_llm, prompt=likely_animals_prompt)
|
44 |
|
45 |
|
46 |
-
def does_animal_match_description(animal: str, description: str) -> str:
|
47 |
"""Given an animal and a description, consider whether the animal matches that description"""
|
48 |
-
return animal_match_tool.
|
49 |
|
50 |
|
51 |
-
|
52 |
Tool(
|
53 |
name='get_likely_animals',
|
54 |
func=get_likely_animals,
|
55 |
description='used to get a list of potential animals corresponding to a description of an animal'
|
56 |
)
|
57 |
-
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import Dict, Any
|
2 |
+
|
3 |
from langchain.prompts import PromptTemplate
|
4 |
from langchain.tools import Tool
|
5 |
+
from langchain_openai import ChatOpenAI
|
6 |
from langchain.chains import LLMChain
|
7 |
|
8 |
tool_llm = ChatOpenAI(model='gpt-3.5-turbo', temperature=0)
|
|
|
24 |
|
25 |
def get_likely_animals(description: str) -> str:
    """Provides animals from a description"""
    # LLMChain.invoke returns a dict; the generated completion is under 'text'.
    return likely_animals_chain.invoke(input={'animal_description': description})['text']
|
28 |
|
29 |
|
30 |
# Animal Match Tool
|
|
|
45 |
animal_match_tool = LLMChain(llm=tool_llm, prompt=likely_animals_prompt)
|
46 |
|
47 |
|
48 |
+
def does_animal_match_description(animal: str, description: str) -> str:
    """Given an animal and a description, consider whether the animal matches that description.

    Returns the chain's text verdict (a string), matching get_likely_animals.
    """
    # Fixed return annotation: invoke() returns a dict, but indexing ['text']
    # yields the generated string, not a dict[str, Any].
    return animal_match_tool.invoke(input={"animal": animal, "description": description})['text']
|
51 |
|
52 |
|
53 |
+
# Tools exposed to the player agents.
# NOTE(review): only get_likely_animals is registered here —
# does_animal_match_description is defined above but never wrapped as a Tool;
# confirm whether that omission is intentional.
animal_tools = [
    Tool(
        name='get_likely_animals',
        func=get_likely_animals,
        description='used to get a list of potential animals corresponding to a description of an animal'
    )
]
|
60 |
+
|
61 |
+
# Prompt/chain used to pull the voted-for player name out of a free-text
# voting statement produced by an agent.
VOTE_EXTRACTION_PROMPT = """Extract the name of the player being voted for, from the following statement:
{statement}"""

vote_extraction_template = PromptTemplate(
    input_variables=['statement'],
    template=VOTE_EXTRACTION_PROMPT
)

vote_extraction_chain = LLMChain(llm=tool_llm, prompt=vote_extraction_template)

def extract_vote(statement: str) -> str:
    """Extract the name of the player being voted for from the statement"""
    # invoke() returns a dict; the extracted name is the chain's 'text' output.
    return vote_extraction_chain.invoke(input={"statement":statement})['text']
|