Ramendra committed on
Commit 5c41325 · verified · 1 Parent(s): 5c13bc2

Update app.py

Files changed (1)
  1. app.py +8 -14
app.py CHANGED
@@ -4,9 +4,9 @@ import langchain
 
 from langchain_community.llms import HuggingFaceEndpoint
 from langchain_community.vectorstores import Chroma # Light-weight and in memory
-#from langchain.chains import RetrievalQA
-from langchain.chains import ConversationalRetrievalChain
-from langchain.memory import ConversationBufferMemory
+from langchain.chains import RetrievalQA
+#from langchain.chains import ConversationalRetrievalChain
+#from langchain.memory import ConversationBufferMemory
 
 
 # Authentication for Huggingface API
@@ -56,23 +56,17 @@ vectordb = Chroma(
 
 # Adding memory
 
-memory = ConversationBufferMemory(
-    memory_key="chat_history",
-    return_messages=True
-)
+#memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
 
 
 title = "Q&A on enterprise data"
 description = "Implementation of Open Source RAG on Private Document"
 
 def quena(question):
-    #qa_chain = RetrievalQA.from_chain_type(llm, retriever=vectordb.as_retriever(), return_source_documents=True)
-    qa_chain = ConversationalRetrievalChain.from_llm(llm,
-        retriever=vectordb.as_retriever(search_type="mmr", search_kwargs={"k": 10, "fetch_k": 15}), # "k":2, "fetch_k":3
-        memory=memory
-    )
-    result = qa_chain.invoke({"question": question})
-    return result['answer']
+    qa_chain = RetrievalQA.from_chain_type(llm, retriever=vectordb.as_retriever(search_type="mmr", search_kwargs={"k": 8, "fetch_k": 12}), return_source_documents=True)
+    #qa_chain = ConversationalRetrievalChain.from_llm(llm, retriever=vectordb.as_retriever(search_type="mmr", search_kwargs={"k": 10, "fetch_k": 15}), memory=memory)
+    result = qa_chain.invoke({"query": question}) # was qa_chain.invoke({"question": question})
+    return result["result"] # was result['answer']
 
 demo=gr.Interface(fn=quena,
                   inputs=gr.Textbox(lines=10,placeholder='''Write your question inside double quotation..Type the Sample Question:\n
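
For context on the swap above, here is a minimal, self-contained sketch of the retrieval flow this commit settles on. The chain choice, MMR settings, and input/output keys mirror the + lines of the diff; everything else (the repo_id, the embedding model, the persist directory, and the sample question) is an illustrative assumption, not taken from app.py.

# Minimal sketch of the post-commit flow. Placeholder values are marked;
# only the chain wiring and keys come from this commit.
from langchain_community.llms import HuggingFaceEndpoint
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain.chains import RetrievalQA

llm = HuggingFaceEndpoint(repo_id="mistralai/Mistral-7B-Instruct-v0.2")  # placeholder model
vectordb = Chroma(
    persist_directory="docs/chroma/",            # placeholder path
    embedding_function=HuggingFaceEmbeddings(),  # placeholder embeddings
)

# MMR retrieval as in the new code: fetch 12 candidates, keep the 8 most diverse.
retriever = vectordb.as_retriever(search_type="mmr", search_kwargs={"k": 8, "fetch_k": 12})
qa_chain = RetrievalQA.from_chain_type(llm, retriever=retriever, return_source_documents=True)

# RetrievalQA reads "query" and writes "result" (plus "source_documents");
# ConversationalRetrievalChain used "question" and "answer", which is why
# the invoke call and the return line change together inside quena().
result = qa_chain.invoke({"query": "What is the refund policy?"})
print(result["result"])

The design consequence of dropping ConversationBufferMemory is that quena becomes stateless: each Gradio call is an independent single-turn query. The key renames follow from the chain-class swap, since RetrievalQA and ConversationalRetrievalChain expose different input and output keys.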