Kaushik066 committed · Commit 7b38e4b · verified · 1 Parent(s): 61785ea

Update app.py

Files changed (1)
  1. app.py +26 -23
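
In short, judging from the diff below: the commit comments out the BAAI/bge-large-en-v1.5 embedding/retriever/QA branch, re-enables the instruction-prefix stripping and the Question/Helpful Answer regex split for the remaining sentence-transformers/gtr-t5-large branch, reformats the chat reply as a Helpful Answer followed by the Reasoning text, and adds fill_width/fill_height to the Gradio ChatInterface.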
app.py CHANGED
@@ -16,7 +16,7 @@ from langchain_community.llms import HuggingFaceHub
 # define constants
 # Embedding models
 #EMB_MODEL_bge_base = 'BAAI/bge-base-en-v1.5'
-EMB_MODEL_bge_large = 'BAAI/bge-large-en-v1.5'
+#EMB_MODEL_bge_large = 'BAAI/bge-large-en-v1.5'
 #EMB_MODEL_gtr_t5_base = 'sentence-transformers/gtr-t5-base'
 EMB_MODEL_gtr_t5_large = 'sentence-transformers/gtr-t5-large'
 #EMB_MODEL_e5_base = 'intfloat/e5-large-v2'
@@ -25,7 +25,7 @@ MISTRAL_MODEL1 = 'mistralai/Mixtral-8x7B-Instruct-v0.1'
 HF_MODEL1 = 'HuggingFaceH4/zephyr-7b-beta'
 # define paths
 #vector_path_bge_base = 'vectorDB/faiss_index_bge_base'
-vector_path_bge_large = 'vectorDB/faiss_index_bge_large'
+#vector_path_bge_large = 'vectorDB/faiss_index_bge_large'
 #vector_path_gtr_t5_base = 'vectorDB/faiss_index_gtr_t5_base'
 vector_path_gtr_t5_large = 'vectorDB/faiss_index_gtr_t5_large'
 #vector_path_e5_base = 'vectorDB/faiss_index_e5_base'
@@ -35,17 +35,17 @@ hf_token = os.environ["HUGGINGFACEHUB_API_TOKEN"]
 def respond(message, history):
 
     # Initialize your embedding model
-    embedding_model_bge = HuggingFaceEmbeddings(model_name=EMB_MODEL_bge_large)
+    #embedding_model_bge = HuggingFaceEmbeddings(model_name=EMB_MODEL_bge_large)
     embedding_model_gtr_t5 = HuggingFaceEmbeddings(model_name=EMB_MODEL_gtr_t5_large)
     #embedding_model_e5 = HuggingFaceEmbeddings(model_name=EMB_MODEL_e5_base)
 
     # Load FAISS from relative path
-    vectordb_bge = FAISS.load_local(vector_path_bge_large, embedding_model_bge, allow_dangerous_deserialization=True)
+    #vectordb_bge = FAISS.load_local(vector_path_bge_large, embedding_model_bge, allow_dangerous_deserialization=True)
     vectordb_gtr_t5 = FAISS.load_local(vector_path_gtr_t5_large, embedding_model_gtr_t5, allow_dangerous_deserialization=True)
     #vectordb_e5 = FAISS.load_local(vector_path_e5_base, embedding_model_e5, allow_dangerous_deserialization=True)
 
     # define retriever object
-    retriever_bge = vectordb_bge.as_retriever(search_type="similarity", search_kwargs={"k": 5})
+    #retriever_bge = vectordb_bge.as_retriever(search_type="similarity", search_kwargs={"k": 5})
     retriever_gtr_t5 = vectordb_gtr_t5.as_retriever(search_type="similarity", search_kwargs={"k": 5})
     #retriever_e5 = vectordb_e5.as_retriever(search_type="similarity", search_kwargs={"k": 5})
 
@@ -57,41 +57,41 @@ def respond(message, history):
     )
 
     # create a RAG pipeline
-    qa_chain_bge = RetrievalQA.from_chain_type(llm=llm, retriever=retriever_bge)
+    #qa_chain_bge = RetrievalQA.from_chain_type(llm=llm, retriever=retriever_bge)
     qa_chain_gtr_t5 = RetrievalQA.from_chain_type(llm=llm, retriever=retriever_gtr_t5)
     #qa_chain_e5 = RetrievalQA.from_chain_type(llm=llm, retriever=retriever_e5)
 
     #generate results
-    responce_bge = qa_chain_bge.invoke(message)['result']
+    #responce_bge = qa_chain_bge.invoke(message)['result']
     responce_gtr_t5 = qa_chain_gtr_t5.invoke(message)['result']
     #responce_e5 = qa_chain_e5.invoke(message)['result']
 
     # remove the top instructions
-    #instruction_prefix = (
-    #"Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer."
-    #)
+    instruction_prefix = (
+    "Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer."
+    )
     #if responce_bge.strip().startswith(instruction_prefix):
     #    responce_bge = responce_bge.strip()[len(instruction_prefix):].strip()
-    #if responce_gtr_t5.strip().startswith(instruction_prefix):
-    #    responce_gtr_t5 = responce_gtr_t5.strip()[len(instruction_prefix):].strip()
+    if responce_gtr_t5.strip().startswith(instruction_prefix):
+        responce_gtr_t5 = responce_gtr_t5.strip()[len(instruction_prefix):].strip()
     #if responce_e5.strip().startswith(instruction_prefix):
     #    responce_e5 = responce_e5.strip()[len(instruction_prefix):].strip()
-    #
-    ## Split question, Helpful Answer and Reason
+
+    # Split question, Helpful Answer and Reason
     #match_bge = re.search(r"^(.*?)(?:\n+)?Question:\s*(.*?)(?:\n+)?Helpful Answer:\s*(.*)", responce_bge, re.DOTALL)
-    #match_gtr_t5 = re.search(r"^(.*?)(?:\n+)?Question:\s*(.*?)(?:\n+)?Helpful Answer:\s*(.*)", responce_gtr_t5, re.DOTALL)
+    match_gtr_t5 = re.search(r"^(.*?)(?:\n+)?Question:\s*(.*?)(?:\n+)?Helpful Answer:\s*(.*)", responce_gtr_t5, re.DOTALL)
    #match_e5 = re.search(r"^(.*?)(?:\n+)?Question:\s*(.*?)(?:\n+)?Helpful Answer:\s*(.*)", responce_e5, re.DOTALL)
-    #
+
    #if match_bge:
    #    #original_text_bge = match_bge.group(1).strip()
    #    question_bge = match_bge.group(2).strip()
    #    answer_bge = match_bge.group(3).strip()
-    #
-    #if match_gtr_t5:
-    #    #original_text_gtr_t5 = match_gtr_t5.group(1).strip()
-    #    #question_gtr_t5 = match_gtr_t5.group(2).strip()
-    #    answer_gtr_t5 = match_gtr_t5.group(3).strip()
-    #
+
+    if match_gtr_t5:
+        original_text_gtr_t5 = match_gtr_t5.group(1).strip()
+        #question_gtr_t5 = match_gtr_t5.group(2).strip()
+        answer_gtr_t5 = match_gtr_t5.group(3).strip()
+
    #if match_e5:
    #    #original_text_e5 = match_e5.group(1).strip()
    #    #question_e5 = match_e5.group(2).strip()
@@ -99,7 +99,8 @@ def respond(message, history):
     #
     #formatted_responce = f'Question:{question_bge}\nHelpful Answer Type 1:\n{answer_bge}\nHelpful Answer Type 2:\n{answer_gtr_t5}\nHelpful Answer Type 3:\n{answer_e5}'
     #formatted_responce = f'\n************* BAAI/bge-large-en-v1.5 ****************\n{responce_bge}\n************** sentence-transformers/gtr-t5-large ***************\n{responce_gtr_t5}\n************ intfloat/e5-large-v2 **************\n{responce_e5}'
-    formatted_responce = f'\n************* BAAI/bge-large-en-v1.5 ****************\n{responce_bge}\n************** sentence-transformers/gtr-t5-large ***************\n{responce_gtr_t5}'
+    #formatted_responce = f'\n************* BAAI/bge-large-en-v1.5 ****************\n{responce_bge}\n************** sentence-transformers/gtr-t5-large ***************\n{responce_gtr_t5}'
+    formatted_responce = f'\n************* sentence-transformers/gtr-t5-large ****************\n Helpful Answer:{answer_gtr_t5}\n Reasoning:\n{original_text_gtr_t5}'
     yield formatted_responce
 
     # Read the content of the README.md file
@@ -118,6 +119,8 @@ with gr.Blocks() as demo:
     #demo = gr.ChatInterface(
     gr.ChatInterface(
         respond,
+        fill_width=True,
+        fill_height=True,
         type="messages",
         autofocus=False #,
         #additional_inputs=[
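
For readers skimming the diff, here is a minimal, self-contained sketch of what the re-enabled parsing block does to a typical RetrievalQA output. The instruction_prefix text and the regex are copied from the commit; sample_output is a hypothetical chain result invented for illustration.

import re

# Copied from the commit: the boilerplate that the chain's default prompt
# prepends, and the regex used to split the raw output.
instruction_prefix = (
    "Use the following pieces of context to answer the question at the end."
    " If you don't know the answer, just say that you don't know,"
    " don't try to make up an answer."
)

# Hypothetical raw result, standing in for qa_chain_gtr_t5.invoke(message)['result'].
sample_output = (
    instruction_prefix + "\n\n"
    "FAISS is a library for similarity search over dense vectors.\n\n"
    "Question: What is FAISS?\n"
    "Helpful Answer: A library for efficient similarity search of dense vectors."
)

text = sample_output.strip()
if text.startswith(instruction_prefix):
    # Drop the echoed prompt boilerplate, as the commit does.
    text = text[len(instruction_prefix):].strip()

match = re.search(
    r"^(.*?)(?:\n+)?Question:\s*(.*?)(?:\n+)?Helpful Answer:\s*(.*)",
    text,
    re.DOTALL,
)
if match:
    # group(1): everything before "Question:" -- shown as "Reasoning" in the commit;
    # group(2): the echoed question; group(3): the answer shown to the user.
    print("Reasoning:", match.group(1).strip())
    print("Question:", match.group(2).strip())
    print("Answer:", match.group(3).strip())

One caveat visible in the diff: answer_gtr_t5 and original_text_gtr_t5 are assigned only inside the if match_gtr_t5: block, yet the new formatted_responce f-string uses them unconditionally, so a response that doesn't follow the Question/Helpful Answer layout would raise a NameError. The sketch above guards the prints for that reason.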
 
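On the UI side, the two new ChatInterface arguments are standard Gradio layout flags: fill_height=True lets the chat column expand to fill the viewport vertically, and fill_width=True (available in recent Gradio releases) removes the horizontal max-width constraint so the chat spans the page.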