Ludovicollin committed on
Commit f74e2bf · 1 Parent(s): 0261aeb

Update main.py

Files changed (1):
  1. main.py +19 -10
main.py CHANGED
@@ -15,7 +15,7 @@ from langchain.schema import StrOutputParser
 from langchain.chains.qa_with_sources import load_qa_with_sources_chain
 from langchain.vectorstores import Pinecone
 import pinecone
-from langchain.memory import ChatMessageHistory, ConversationBufferMemory
+from langchain.memory import ChatMessageHistory, ConversationBufferMemory, ConversationSummaryBufferMemory
 import pandas as pd
 import numpy as np
 from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
@@ -95,7 +95,7 @@ def retriever_to_cache():
     vectorstore = Pinecone.from_existing_index(
         index_name=index_name, embedding=embeddings
     )
-    retriever = vectorstore.as_retriever(search_type="similarity_score_threshold", search_kwargs={"score_threshold": .7, "k": 30,"filter": {'categorie': {'$eq': 'OF'}}})
+    retriever = vectorstore.as_retriever(search_type="similarity_score_threshold", search_kwargs={"score_threshold": .7, "k": 30,"filter": {'categorie': {'$eq': 'OF'}}, "include_metadata": True})
     return retriever

 @cl.set_chat_profiles
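Note: the change above only adds "include_metadata": True to the retriever. For context, a minimal standalone sketch of how this kind of thresholded, metadata-filtered retriever is built with the legacy langchain/pinecone clients; the index name, credentials, and embedding model below are placeholders, not this app's real configuration:

# Hypothetical sketch -- index name, keys, and embeddings are assumptions.
import pinecone
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Pinecone

pinecone.init(api_key="...", environment="...")  # credentials elided
vectorstore = Pinecone.from_existing_index(
    index_name="my-index", embedding=OpenAIEmbeddings()
)

# Only chunks scoring >= 0.7 are returned, capped at 30, and only those whose
# 'categorie' metadata field equals 'OF' (a Pinecone metadata filter).
retriever = vectorstore.as_retriever(
    search_type="similarity_score_threshold",
    search_kwargs={
        "score_threshold": 0.7,
        "k": 30,
        "filter": {"categorie": {"$eq": "OF"}},
        "include_metadata": True,
    },
)
docs = retriever.get_relevant_documents("example query")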
@@ -154,14 +154,15 @@ async def start():
     CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)

     ########## Chain with streaming ##########
-    message_history = ChatMessageHistory()
-    memory = ConversationBufferMemory(
-        memory_key="chat_history",
-        output_key="answer",
-        chat_memory=message_history,
-        return_messages=True,
-    )
-    #llm = ChatAnthropic()
+    #message_history = ChatMessageHistory()
+    #memory = ConversationBufferMemory(
+    #    memory_key="chat_history",
+    #    output_key="answer",
+    #    chat_memory=message_history,
+    #    return_messages=True,
+    #)
+    #llm = ChatAnthropic(temperature=0)
+
     streaming_llm = ChatAnthropic(
         streaming=True,
         temperature=1,
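The hunk above comments out the old ConversationBufferMemory setup; the streaming LLM it keeps relies on callback-based token streaming. A minimal sketch of the same pattern outside Chainlit, under the assumption of a stdout callback handler (this app streams through Chainlit instead, and ANTHROPIC_API_KEY must be set):

from langchain.chat_models import ChatAnthropic
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

# Assumption: stream tokens to stdout for illustration only.
streaming_llm = ChatAnthropic(
    streaming=True,
    temperature=1,
    callbacks=[StreamingStdOutCallbackHandler()],
)
streaming_llm.predict("Say hello in one short sentence.")  # tokens print as they arrive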
@@ -170,6 +171,12 @@ async def start():
     #question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
     #doc_chain = load_qa_chain(streaming_llm, chain_type="stuff")
     #relevant=retriever_to_cache()
+    memory = ConversationSummaryBufferMemory(
+        llm=streaming_llm,
+        output_key='answer',
+        memory_key='chat_history',
+        return_messages=True
+    )
     qa = ConversationalRetrievalChain.from_llm(
         streaming_llm,
         chain_type="stuff",
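This is the substance of the commit: ConversationSummaryBufferMemory replaces the plain buffer memory. It keeps recent turns verbatim and asks the LLM to summarize older ones once a token budget is exceeded, which bounds prompt growth over long chats. A minimal sketch under that assumption (max_token_limit is illustrative; the commit does not set it, so the library default applies):

from langchain.chat_models import ChatAnthropic
from langchain.memory import ConversationSummaryBufferMemory

llm = ChatAnthropic(temperature=0)
memory = ConversationSummaryBufferMemory(
    llm=llm,                 # this LLM writes the running summary
    max_token_limit=200,     # illustrative budget; older turns are summarized past it
    memory_key="chat_history",
    output_key="answer",
    return_messages=True,
)
memory.save_context(
    {"question": "Qu'est-ce qu'un OF ?"},
    {"answer": "Un organisme de formation."},
)
print(memory.load_memory_variables({})["chat_history"])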
@@ -178,6 +185,8 @@ async def start():
         #question_generator=question_generator,
         memory=memory,
         return_source_documents=True,
+        get_chat_history=lambda h : h,
+        verbose=False
     )

     cl.user_session.set("conversation_chain", qa)
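Two new kwargs close out the diff: return_source_documents=True makes the chain return the retrieved chunks alongside the answer, and get_chat_history=lambda h : h passes the memory's message list through unchanged instead of flattening it to a string. A hypothetical call site (the question text is a placeholder; 'qa' is the chain built above):

result = qa({"question": "Quelles sont les obligations d'un OF ?"})
print(result["answer"])
for doc in result["source_documents"]:  # present because return_source_documents=True
    print(doc.metadata)                 # Pinecone metadata, e.g. the 'categorie' field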
 