rishisim committed on
Commit ace9bdd · verified · 1 Parent(s): 3a2e9e6

Update app.py

Files changed (1)
  1. app.py +100 -20
app.py CHANGED
@@ -31,26 +31,7 @@ from langchain_huggingface.embeddings import HuggingFaceEndpointEmbeddings
  model = "BAAI/bge-m3"
  embeddings = HuggingFaceEndpointEmbeddings(model = model)
 
- vectorstore = Chroma.from_documents(documents = data, embedding = embeddings)
- retriever = vectorstore.as_retriever()
 
- # from langchain.prompts import PromptTemplate
-
- from langchain_core.prompts import ChatPromptTemplate
-
- prompt = ChatPromptTemplate.from_template("""Given the following history, context and a question, generate an answer based on the context only.
-
- In the answer try to provide as much text as possible from "response" section in the source document context without making much changes.
- If somebody asks "Who are you?" or a similar phrase, state "I am Rishi's assistant built using a Large Language Model!"
- If the answer is not found in the context, kindly state "I don't know. Please ask Rishi on Discord. Discord Invite Link: https://discord.gg/6ezpZGeCcM. Or email at [email protected]" Don't try to make up an answer.
-
- CONTEXT: {context}
-
- HISTORY: {history}
-
- QUESTION: {question}""")
-
- from langchain_core.runnables import RunnablePassthrough
 
  # Define the chat response function
  def chatresponse(message, history):
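For reference, the names this hunk's context lines assume (data, llm, embeddings, StrOutputParser) are defined earlier in app.py; the commented-out copy added at the bottom of this diff shows the same setup. A minimal sketch of that unchanged top-of-file code, reconstructed from those comments and not part of this commit's changes:

# Sketch of the unchanged top of app.py, reconstructed from the commented-out
# copy later in this diff; not part of this commit's changes.
import os
import gradio as gr
from langchain_huggingface import HuggingFaceEndpoint
from langchain_huggingface.embeddings import HuggingFaceEndpointEmbeddings
from langchain_community.document_loaders.csv_loader import CSVLoader
from langchain_chroma import Chroma
from langchain_core.output_parsers import StrOutputParser

hftoken = os.environ["hftoken"]
llm = HuggingFaceEndpoint(
    repo_id = "mistralai/Mistral-7B-Instruct-v0.3",
    max_new_tokens = 128,
    temperature = 0.7,
    huggingfacehub_api_token = hftoken,
)

loader = CSVLoader(file_path = 'aiotsmartlabs_faq.csv', source_column = 'prompt')
data = loader.load()  # one Document per CSV row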
@@ -60,8 +41,30 @@ def chatresponse(message, history):
  # history_langchain_format.append(AIMessage(content=ai))
  # history_langchain_format.append(HumanMessage(content=message))
 
+ data_vectorstore = Chroma.from_documents(documents = data, embedding = embeddings)
+ history_vectorstore = Chroma.from_documents(documents = history, embedding = embeddings)
+ vectorstore = data_vectorstore + history_vectorstore
+ retriever = vectorstore.as_retriever()
+
+ # from langchain.prompts import PromptTemplate
+
+ from langchain_core.prompts import ChatPromptTemplate
+
+ prompt = ChatPromptTemplate.from_template("""Given the following history, context and a question, generate an answer based on the context only.
+
+ In the answer try to provide as much text as possible from "response" section in the source document context without making much changes.
+ If somebody asks "Who are you?" or a similar phrase, state "I am Rishi's assistant built using a Large Language Model!"
+ If the answer is not found in the context, kindly state "I don't know. Please ask Rishi on Discord. Discord Invite Link: https://discord.gg/6ezpZGeCcM. Or email at [email protected]" Don't try to make up an answer.
+
+ CONTEXT: {context}
+
+ HISTORY: {history}
+
+ QUESTION: {question}""")
+
+ from langchain_core.runnables import RunnablePassthrough
  rag_chain = (
- {"context": retriever, "history": history, "question": RunnablePassthrough()}
+ {"context": retriever, "question": RunnablePassthrough()}
  | prompt
  | llm
  | StrOutputParser()
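Three caveats in the added body of chatresponse: Chroma stores don't implement "+", so data_vectorstore + history_vectorstore raises a TypeError; history arrives from gr.ChatInterface as plain (user, assistant) string pairs, which Chroma.from_documents won't accept; and the prompt still declares {history} while the new input map only supplies context and question, so the template fails with a missing-variable error. A hedged sketch of one way to realize the intent, assuming the names already defined in app.py (the Document wrapping and the lambda feeding {history} are illustrative choices, not the committed code):

# Hedged sketch, not the committed code.
from langchain_core.documents import Document
from langchain_core.runnables import RunnableLambda, RunnablePassthrough

def chatresponse(message, history):
    # Gradio supplies history as (user, assistant) string pairs; wrap each turn
    # as a Document so it can be embedded alongside the FAQ rows.
    history_docs = [
        Document(page_content = f"USER: {user}\nASSISTANT: {assistant}")
        for user, assistant in history
    ]
    # One store for both sources; Chroma instances cannot be combined with "+".
    vectorstore = Chroma.from_documents(documents = data + history_docs, embedding = embeddings)
    retriever = vectorstore.as_retriever()

    # The template declares {history}, so the input map must supply it.
    history_text = "\n".join(f"USER: {u}\nASSISTANT: {a}" for u, a in history)
    rag_chain = (
        {
            "context": retriever,
            "history": RunnableLambda(lambda _: history_text),
            "question": RunnablePassthrough(),
        }
        | prompt
        | llm
        | StrOutputParser()
    )
    return rag_chain.invoke(message)

Note also that rebuilding the store on every message re-embeds the whole FAQ per request; adding only the new history turns to a persistent store would avoid that cost.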
@@ -75,6 +78,83 @@ def chatresponse(message, history):
  # Launch the Gradio chat interface
  gr.ChatInterface(chatresponse).launch()
 
+ # import gradio as gr
+ # from langchain.schema import AIMessage, HumanMessage
+
+
+ # import os
+ # hftoken = os.environ["hftoken"]
+
+ # from langchain_huggingface import HuggingFaceEndpoint
+
+ # repo_id = "mistralai/Mistral-7B-Instruct-v0.3"
+ # llm = HuggingFaceEndpoint(repo_id = repo_id, max_new_tokens = 128, temperature = 0.7, huggingfacehub_api_token = hftoken)
+
+ # from langchain_core.output_parsers import StrOutputParser
+ # from langchain_core.prompts import ChatPromptTemplate
+
+ # # prompt = ChatPromptTemplate.from_template("tell me a joke about {topic}")
+ # # chain = prompt | llm | StrOutputParser()
+
+ # # from langchain.document_loaders.csv_loader import CSVLoader
+ # from langchain_community.document_loaders.csv_loader import CSVLoader
+
+
+ # loader = CSVLoader(file_path='aiotsmartlabs_faq.csv', source_column = 'prompt')
+ # data = loader.load()
+
+ # from langchain_huggingface import HuggingFaceEmbeddings
+ # from langchain_chroma import Chroma
+ # from langchain_huggingface.embeddings import HuggingFaceEndpointEmbeddings
+
+ # # CHECK MTEB LEADERBOARD & FIND BEST EMBEDDING MODEL
+ # model = "BAAI/bge-m3"
+ # embeddings = HuggingFaceEndpointEmbeddings(model = model)
+
+ # vectorstore = Chroma.from_documents(documents = data, embedding = embeddings)
+ # retriever = vectorstore.as_retriever()
+
+ # # from langchain.prompts import PromptTemplate
+
+ # from langchain_core.prompts import ChatPromptTemplate
+
+ # prompt = ChatPromptTemplate.from_template("""Given the following history, context and a question, generate an answer based on the context only.
+
+ # In the answer try to provide as much text as possible from "response" section in the source document context without making much changes.
+ # If somebody asks "Who are you?" or a similar phrase, state "I am Rishi's assistant built using a Large Language Model!"
+ # If the answer is not found in the context, kindly state "I don't know. Please ask Rishi on Discord. Discord Invite Link: https://discord.gg/6ezpZGeCcM. Or email at [email protected]" Don't try to make up an answer.
+
+ # CONTEXT: {context}
+
+ # HISTORY: {history}
+
+ # QUESTION: {question}""")
+
+ # from langchain_core.runnables import RunnablePassthrough
+
+ # # Define the chat response function
+ # def chatresponse(message, history):
+ # # history_langchain_format = []
+ # # for human, ai in history:
+ # # history_langchain_format.append(HumanMessage(content=human))
+ # # history_langchain_format.append(AIMessage(content=ai))
+ # # history_langchain_format.append(HumanMessage(content=message))
+
+ # rag_chain = (
+ # {"context": retriever, "history": history, "question": RunnablePassthrough()}
+ # | prompt
+ # | llm
+ # | StrOutputParser()
+ # )
+
+
+ # output = rag_chain.invoke(message)
+ # response = output.split('ANSWER: ')[-1].strip()
+ # return response
+
+ # # Launch the Gradio chat interface
+ # gr.ChatInterface(chatresponse).launch()
+
  # import gradio as gr
 
  # def chatresponse(message, history):
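The large commented-out block added by the last hunk preserves the previous single-store version of the app, and its tail documents how the model output was post-processed. As a sketch, the response path it describes (assuming rag_chain is built as in the hunks above):

# Sketch of the response path preserved in the commented-out copy above,
# assuming rag_chain is already constructed.
def chatresponse(message, history):
    output = rag_chain.invoke(message)
    # Keep only the text after the last 'ANSWER: ' marker, in case the model
    # echoes the prompt scaffold in its completion.
    response = output.split('ANSWER: ')[-1].strip()
    return response

gr.ChatInterface(chatresponse).launch()  # wire the function to a chat UI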