Paul-Joshi committed
Commit e92de57 · verified · 1 Parent(s): 8a1a6fd

Update app.py

Files changed (1): app.py +33 -31
app.py CHANGED
@@ -42,51 +42,51 @@ def method_get_vectorstore(document_chunks):
     vector_store = Chroma.from_documents(document_chunks, embeddings)
     return vector_store
 
-def get_context_retriever_chain(vector_store,question):
-    # Initialize the retriever
-    retriever = vector_store.as_retriever()
-
-    # Define the RAG template and RAG prompt template
-    prompt = hub.pull("rlm/rag-prompt")
-
-    # Initialize the Hugging Face language model (LLM)
-    llm = HuggingFaceHub(repo_id="mistralai/Mistral-7B-Instruct-v0.2", model_kwargs={"temperature":0.6, "max_length":512})
-
-    # Construct the RAG pipeline
-    rag_chain = (
-        {"context": retriever, "question": RunnablePassthrough()}
-        | prompt
-        | llm
-        | StrOutputParser()
-    )
-    return rag_chain.invoke(str(question))
-
-
 # def get_context_retriever_chain(vector_store,question):
 #     # Initialize the retriever
 #     retriever = vector_store.as_retriever()
 
-#     # Define the RAG template
-#     after_rag_template = """Answer the question based only on the following context:
-#     {context}
-#     Question: {question}
-#     """
-
-#     # Create the RAG prompt template
-#     after_rag_prompt = ChatPromptTemplate.from_template(after_rag_template)
+#     # Define the RAG template and RAG prompt template
+#     prompt = hub.pull("rlm/rag-prompt")
 
 #     # Initialize the Hugging Face language model (LLM)
 #     llm = HuggingFaceHub(repo_id="mistralai/Mistral-7B-Instruct-v0.2", model_kwargs={"temperature":0.6, "max_length":512})
 
 #     # Construct the RAG pipeline
-#     after_rag_chain = (
+#     rag_chain = (
 #         {"context": retriever, "question": RunnablePassthrough()}
-#         | after_rag_prompt
+#         | prompt
 #         | llm
 #         | StrOutputParser()
 #     )
+#     return rag_chain.invoke(str(question))
+
+
+def get_context_retriever_chain(vector_store,question):
+    # Initialize the retriever
+    retriever = vector_store.as_retriever()
+
+    # Define the RAG template
+    after_rag_template = """Answer the question based only on the following context:
+    {context}
+    Question: {question}
+    """
+
+    # Create the RAG prompt template
+    after_rag_prompt = ChatPromptTemplate.from_template(after_rag_template)
+
+    # Initialize the Hugging Face language model (LLM)
+    llm = HuggingFaceHub(repo_id="mistralai/Mistral-7B-Instruct-v0.2", model_kwargs={"temperature":0.6, "max_length":512})
+
+    # Construct the RAG pipeline
+    after_rag_chain = (
+        {"context": retriever, "question": RunnablePassthrough()}
+        | after_rag_prompt
+        | llm
+        | StrOutputParser()
+    )
 
-#     return after_rag_chain.invoke(question)
+    return after_rag_chain.invoke(question)
 
 def main():
     st.set_page_config(page_title="Chat with websites", page_icon="🤖")
@@ -116,6 +116,8 @@ def main():
     # Generate response using the RAG pipeline
     answer = get_context_retriever_chain(vector_store,question)
     # Display the generated answer
+    st.write(type(answer))
+    st.write(answer)
     st.text_area("Answer", value=answer, height=300, disabled=True)
 
 if __name__ == '__main__':
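
Note for readers: the file's import block sits outside both hunks, so every name used above (st, hub, Chroma, ChatPromptTemplate, HuggingFaceHub, RunnablePassthrough, StrOutputParser) must already be imported elsewhere in app.py. A minimal sketch of what that block plausibly looks like, assuming LangChain 0.1.x-era package layout; these module paths are a guess, not taken from the diff:

# Assumed imports for the code paths shown in this diff. The actual
# import block in app.py is not part of the hunks above; older code may
# import the same names from the monolithic `langchain` package instead.
import streamlit as st
from langchain import hub                                  # hub.pull("rlm/rag-prompt"), now commented out
from langchain_core.prompts import ChatPromptTemplate      # builds after_rag_prompt
from langchain_core.runnables import RunnablePassthrough   # forwards the raw question
from langchain_core.output_parsers import StrOutputParser  # final string-parsing stage
from langchain_community.llms import HuggingFaceHub        # Mistral-7B-Instruct-v0.2 endpoint
from langchain_community.vectorstores import Chroma        # used by method_get_vectorstore

On the chain itself: in LangChain's expression language, the dict {"context": retriever, "question": RunnablePassthrough()} acts as a parallel step, so after_rag_chain.invoke(question) sends the same question string both to the retriever (whose documents fill {context}) and straight through to {question}, before the prompt, the LLM, and the output parser run in sequence. The commit's net effect is to swap the hub-pulled "rlm/rag-prompt" for an inline ChatPromptTemplate and to add the two st.write debug lines in main().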