Update app.py
app.py
CHANGED
@@ -71,11 +71,12 @@ vec_cre = create_db(splt, 'data')
 
 
 def initialize_llmchain(temperature, max_tokens, top_k, vector_db):
-    memory = ConversationBufferMemory(
-        memory_key="chat_history",
-        output_key='answer',
-        return_messages=True
-    )
+    #Use memory if you want for the chatbot to be conversational, in this case it is just for answering from the document
+    # memory = ConversationBufferMemory(
+    #     memory_key="chat_history",
+    #     output_key='answer',
+    #     return_messages=True
+    # )
 
     llm = HuggingFaceEndpoint(
         repo_id='mistralai/Mixtral-8x7B-Instruct-v0.1',
@@ -89,7 +90,7 @@ def initialize_llmchain(temperature, max_tokens, top_k, vector_db):
         llm,
         retriever=retriever,
         chain_type="stuff",
-        memory=memory,
+        #memory=memory,
         return_source_documents=True,
         verbose=False,
     )
@@ -100,18 +101,18 @@ qa = initialize_llmchain(0.6, 1024, 40, vec_cre) #The model question answer
 pipe = pipeline("translation", model="Helsinki-NLP/opus-mt-en-fr") # This pipeline translate english to french , it isn't adviced as it add more latency
 
 
-def format_chat_history(message, chat_history):
-    formatted_chat_history = []
-    for user_message, bot_message in chat_history:
-        formatted_chat_history.append(f"User: {user_message}")
-        formatted_chat_history.append(f"Assistant: {bot_message}")
-    return formatted_chat_history
+# def format_chat_history(message, chat_history):
+#     formatted_chat_history = []
+#     for user_message, bot_message in chat_history:
+#         formatted_chat_history.append(f"User: {user_message}")
+#         formatted_chat_history.append(f"Assistant: {bot_message}")
+#     return formatted_chat_history
 
 def conversation(message, history):
-    formatted_chat_history = format_chat_history(message, history)
+    #formatted_chat_history = format_chat_history(message, history)
 
     # Generate response using QA chain
-    response = qa({"question": message + " According to the document", "chat_history": formatted_chat_history})
+    response = qa({"question": message + " According to the document", "chat_history": []})
     response_answer = response["answer"]
     if response_answer.find("Helpful Answer:") != -1:
         response_answer = response_answer.split("Helpful Answer:")[-1]