Commit f8af325 · Parent(s): 74631d0
Update main.py

main.py CHANGED
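This commit replaces the hand-assembled ConversationalRetrievalChain (a separate LLMChain question generator plus a load_qa_chain document chain) with the ConversationalRetrievalChain.from_llm convenience constructor, commenting out the imports and objects that are no longer used; a sketch of the equivalence follows the diff.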
@@ -7,11 +7,11 @@ from langchain.schema.runnable.config import RunnableConfig
 from langchain.embeddings import HuggingFaceEmbeddings
 from langchain.chains import ConversationalRetrievalChain
 from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
-from langchain.chains import LLMChain
+#from langchain.chains import LLMChain
 from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain.schema import StrOutputParser
 #from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
-from langchain.chains.question_answering import load_qa_chain
+#from langchain.chains.question_answering import load_qa_chain
 from langchain.chains.qa_with_sources import load_qa_with_sources_chain
 from langchain.vectorstores import Pinecone
 import pinecone
@@ -157,19 +157,21 @@ async def start():
         chat_memory=message_history,
         return_messages=True,
     )
-    llm = ChatAnthropic()
+    #llm = ChatAnthropic()
     streaming_llm = ChatAnthropic(
         streaming=True,
         temperature=1,
         max_tokens=4000
     )
-    question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
-    doc_chain = load_qa_chain(streaming_llm, chain_type="stuff")
+    #question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
+    #doc_chain = load_qa_chain(streaming_llm, chain_type="stuff")
 
-    qa = ConversationalRetrievalChain(
+    qa = ConversationalRetrievalChain.from_llm(
+        streaming_llm,
+        chain_type="stuff",
         retriever=retriever,
-        combine_docs_chain=doc_chain,
-        question_generator=question_generator,
+        #combine_docs_chain=doc_chain,
+        #question_generator=question_generator,
         memory=memory,
         return_source_documents=True,
     )
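For context, the sketch below contrasts the two construction styles under the legacy langchain (0.0.x) API this file targets. It is a minimal illustration of the equivalence, not the library's internals: the build_* function names are hypothetical, and llm, streaming_llm, retriever, and memory stand in for the objects created earlier in main.py.

# A minimal sketch contrasting the old and new wiring; the build_* helpers
# are hypothetical, written only to mirror the two sides of the diff above.
from langchain.chains import ConversationalRetrievalChain, LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT

def build_qa_manually(llm, streaming_llm, retriever, memory):
    # Pre-commit wiring: a separate (non-streaming) LLM condenses the chat
    # history plus the follow-up into a standalone question...
    question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
    # ...and a "stuff" chain answers that question over retrieved documents.
    doc_chain = load_qa_chain(streaming_llm, chain_type="stuff")
    return ConversationalRetrievalChain(
        retriever=retriever,
        combine_docs_chain=doc_chain,
        question_generator=question_generator,
        memory=memory,
        return_source_documents=True,
    )

def build_qa_from_llm(streaming_llm, retriever, memory):
    # Post-commit wiring: from_llm builds both sub-chains internally from the
    # single LLM it receives, so no LLMChain/load_qa_chain imports are needed.
    return ConversationalRetrievalChain.from_llm(
        streaming_llm,
        chain_type="stuff",
        retriever=retriever,
        memory=memory,
        return_source_documents=True,
    )

Either builder is invoked the same way, e.g. result = qa({"question": "..."}), with the chat history supplied by the attached memory. One behavioral difference worth noting: unless a separate condense_question_llm is passed, from_llm reuses the one LLM for both steps, so the streaming temperature=1 model now also condenses questions, where the old code used a plain ChatAnthropic() for that.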