changed 'k' in memory
utils.py
CHANGED
@@ -16,7 +16,7 @@ from langchain.embeddings import HuggingFaceEmbeddings
 from langchain.document_loaders import PyPDFLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.document_loaders import PyPDFLoader, DirectoryLoader
-from langchain.memory import ConversationBufferMemory
+from langchain.memory import ConversationBufferMemory, ConversationBufferWindowMemory
 from langchain.chains import ConversationalRetrievalChain
 from langchain.prompts.prompt import PromptTemplate
 from langchain.vectorstores import Chroma
@@ -30,9 +30,6 @@ from langchain.agents.agent_toolkits import create_conversational_retrieval_agent
 from langchain.utilities import SerpAPIWrapper
 from langchain.agents import Tool
 from langchain.agents import load_tools
-from langchain.chat_models import ChatOpenAI
-from langchain.retrievers.multi_query import MultiQueryRetriever
-from langchain.chains import RetrievalQA
 
 load_dotenv()
 
@@ -254,70 +251,30 @@ def load_text_chunks(text_chunks_pkl_dir):
 def load_ensemble_retriver(text_chunks, embeddings, chroma_vectorstore):
     """Load ensemble retiriever with BM25 and Chroma as individual retrievers"""
     bm25_retriever = BM25Retriever.from_documents(text_chunks)
-    bm25_retriever.k =
-    chroma_retriever = chroma_vectorstore.as_retriever(search_kwargs={"k":
+    bm25_retriever.k = 2
+    chroma_retriever = chroma_vectorstore.as_retriever(search_kwargs={"k": 2})
     ensemble_retriever = EnsembleRetriever(retrievers=[bm25_retriever, chroma_retriever], weights=[0.3, 0.7])
-
-    )
-    return retriever_from_llm
+    return ensemble_retriever
 
 
 def load_conversational_retrievel_chain(retriever, llm):
-    '''Load Conversational Retrievel
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    # return agent_executor
-    # string_dialogue = "You are a helpful assistant. You do not respond as 'User' or pretend to be 'User'. You only respond once as 'Assistant'."
-    # _template= """
-    # You are a helpful assistant. You do not respond as 'User' or pretend to be 'User'. You only respond once as 'Assistant'.
-    # Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.
-    # Your answer should in English language only.
-    # Chat History:
-    # {chat_history}
-    # Follow Up Input: {question}
-    # Standalone question:"""
-
-    # CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
-    # memory = ConversationBufferMemory(return_messages=True,memory_key="chat_history")
-    # conversation_chain = ConversationalRetrievalChain.from_llm(
-    #     llm=st.session_state["llm"],
-    #     retriever=st.session_state["ensemble_retriver"],
-    #     condense_question_prompt=CONDENSE_QUESTION_PROMPT,
-    #     memory=memory,
-    #     verbose=True,
-    # )
-    template = """You are a helpful assistant. You do not respond as 'User' or pretend to be 'User'. You only respond once as 'Assistant'.
-    Use the following pieces of context to answer the question at the end. If you don't know the answer,\
-    just say that you don't know, don't try to make up an answer.
-
-    {context}
-
-    {history}
-    Question: {question}
-    Helpful Answer:"""
-
-    prompt = PromptTemplate(input_variables=["history", "context", "question"], template=template)
-    memory = ConversationBufferMemory(input_key="question", memory_key="history")
-
-    qa = RetrievalQA.from_chain_type(
-        llm=llm,
-        chain_type="stuff",
-        retriever=retriever,
-        return_source_documents=True,
-        chain_type_kwargs={"prompt": prompt, "memory": memory},
+    '''Load Conversational Retrievel chain,'''
+    _template= """
+    You are a helpful assistant. You do not respond as 'User' or pretend to be 'User'. You only respond once as 'Assistant'.
+    Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.
+    Chat History:
+    {chat_history}
+    Follow Up Input: {question}
+    Standalone question:"""
+
+    CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
+    memory = ConversationBufferWindowMemory(return_messages=True,memory_key="chat_history", k=2)
+    conversation_chain = ConversationalRetrievalChain.from_llm(
+        llm=st.session_state["llm"],
+        retriever=st.session_state["ensemble_retriver"],
+        condense_question_prompt=CONDENSE_QUESTION_PROMPT,
+        memory=memory,
+        verbose=True,
    )
-    return qa
+    return conversation_chain
+
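For context, a minimal usage sketch (not part of the commit) of how the two updated helpers fit together after this change. It assumes `text_chunks`, `embeddings`, `chroma_vectorstore`, and `llm` are already built elsewhere in utils.py / the Streamlit app; since `load_conversational_retrievel_chain` reads the LLM and retriever from session state, both are stored there before the chain is built.

import streamlit as st

# Assumed to exist elsewhere in the app: text_chunks, embeddings, chroma_vectorstore, llm.
# After this commit, BM25 and Chroma each return their top 2 documents (k = 2),
# blended with weights 0.3 / 0.7 by the EnsembleRetriever.
ensemble_retriever = load_ensemble_retriver(text_chunks, embeddings, chroma_vectorstore)

# The chain pulls the LLM and retriever from st.session_state, so store them first.
st.session_state["llm"] = llm
st.session_state["ensemble_retriver"] = ensemble_retriever
conversation_chain = load_conversational_retrievel_chain(ensemble_retriever, llm)

# ConversationBufferWindowMemory(k=2) keeps only the last two exchanges in {chat_history}
# when the follow-up question is condensed into a standalone question.
result = conversation_chain({"question": "What does the document cover?"})
print(result["answer"])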