Update app.py
app.py
CHANGED
@@ -1,358 +1,193 @@
 import gradio as gr
 import os
-
-from
-
-from langchain.
-from
-from
-from langchain.
 from langchain.memory import ConversationBufferMemory
-from
-
-from
-import
-from
-
-from transformers import AutoTokenizer
-import transformers
 import torch
-import
-
-"
-
-# Create vector database
-def create_db(splits, collection_name):
-    embedding = HuggingFaceEmbeddings()
-    new_client = chromadb.EphemeralClient()
-    vectordb = Chroma.from_documents(
-        documents=splits,
-        embedding=embedding,
-        client=new_client,
-        collection_name=collection_name,
-        # persist_directory=default_persist_directory
-    )
-    return vectordb
-
-
-# Load vector database
-def load_db():
-    embedding = HuggingFaceEmbeddings()
-    vectordb = Chroma(
-        # persist_directory=default_persist_directory,
-        embedding_function=embedding)
-    return vectordb
-
-
-# Initialize langchain LLM chain
-def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
-    progress(0.1, desc="Initializing HF tokenizer...")
-    # HuggingFacePipeline uses local model
-    # Note: it will download model locally...
-    # tokenizer=AutoTokenizer.from_pretrained(llm_model)
-    # progress(0.5, desc="Initializing HF pipeline...")
-    # pipeline=transformers.pipeline(
-    #     "text-generation",
-    #     model=llm_model,
-    #     tokenizer=tokenizer,
-    #     torch_dtype=torch.bfloat16,
-    #     trust_remote_code=True,
-    #     device_map="auto",
-    #     # max_length=1024,
-    #     max_new_tokens=max_tokens,
-    #     do_sample=True,
-    #     top_k=top_k,
-    #     num_return_sequences=1,
-    #     eos_token_id=tokenizer.eos_token_id
-    # )
-    # llm = HuggingFacePipeline(pipeline=pipeline, model_kwargs={'temperature': temperature})
 
-
-    # Use of trust_remote_code as model_kwargs
-    # Warning: langchain issue
-    # URL: https://github.com/langchain-ai/langchain/issues/6080
-    if llm_model == "mistralai/Mixtral-8x7B-Instruct-v0.1":
-        llm = HuggingFaceEndpoint(
-            repo_id=llm_model,
-            # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "load_in_8bit": True}
-            temperature = temperature,
-            max_new_tokens = max_tokens,
-            top_k = top_k,
-            load_in_8bit = True,
-        )
-    elif llm_model in ["HuggingFaceH4/zephyr-7b-gemma-v0.1","mosaicml/mpt-7b-instruct"]:
-        raise gr.Error("LLM model is too large to be loaded automatically on free inference endpoint")
-        llm = HuggingFaceEndpoint(
-            repo_id=llm_model,
-            temperature = temperature,
-            max_new_tokens = max_tokens,
-            top_k = top_k,
-        )
-    elif llm_model == "microsoft/phi-2":
-        # raise gr.Error("phi-2 model requires 'trust_remote_code=True', currently not supported by langchain HuggingFaceHub...")
-        llm = HuggingFaceEndpoint(
-            repo_id=llm_model,
-            # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "trust_remote_code": True, "torch_dtype": "auto"}
-            temperature = temperature,
-            max_new_tokens = max_tokens,
-            top_k = top_k,
-            trust_remote_code = True,
-            torch_dtype = "auto",
-        )
-    elif llm_model == "TinyLlama/TinyLlama-1.1B-Chat-v1.0":
-        llm = HuggingFaceEndpoint(
-            repo_id=llm_model,
-            # model_kwargs={"temperature": temperature, "max_new_tokens": 250, "top_k": top_k}
-            temperature = temperature,
-            max_new_tokens = 250,
-            top_k = top_k,
-        )
-    elif llm_model == "meta-llama/Llama-2-7b-chat-hf":
-        raise gr.Error("Llama-2-7b-chat-hf model requires a Pro subscription...")
-        llm = HuggingFaceEndpoint(
-            repo_id=llm_model,
-            # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k}
-            temperature = temperature,
-            max_new_tokens = max_tokens,
-            top_k = top_k,
-        )
     else:
-
-            repo_id=llm_model,
-            # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "trust_remote_code": True, "torch_dtype": "auto"}
-            # model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k}
-            temperature = temperature,
-            max_new_tokens = max_tokens,
-            top_k = top_k,
-        )
 
-
-        retriever=retriever,
-        chain_type="stuff",
-        memory=memory,
-        # combine_docs_chain_kwargs={"prompt": your_prompt})
-        return_source_documents=True,
-        #return_generated_question=False,
-        verbose=False,
     )
-    progress(0
-
-
-# Generate collection name for vector database
-# - Use filepath as input, ensuring unicode text
-def create_collection_name(filepath):
-    # Extract filename without extension
-    collection_name = Path(filepath).stem
-    # Fix potential issues from naming convention
-    ## Remove space
-    collection_name = collection_name.replace(" ","-")
-    ## ASCII transliterations of Unicode text
-    collection_name = unidecode(collection_name)
-    ## Remove special characters
-    #collection_name = re.findall("[\dA-Za-z]*", collection_name)[0]
-    collection_name = re.sub('[^A-Za-z0-9]+', '-', collection_name)
-    ## Limit length to 50 characters
-    collection_name = collection_name[:50]
-    ## Minimum length of 3 characters
-    if len(collection_name) < 3:
-        collection_name = collection_name + 'xyz'
-    ## Enforce start and end as alphanumeric character
-    if not collection_name[0].isalnum():
-        collection_name = 'A' + collection_name[1:]
-    if not collection_name[-1].isalnum():
-        collection_name = collection_name[:-1] + 'Z'
-    print('Filepath: ', filepath)
-    print('Collection name: ', collection_name)
-    return collection_name
-
-
-# Initialize database
-def initialize_database(list_file_obj, chunk_size, chunk_overlap, progress=gr.Progress()):
-    # Create list of documents (when valid)
-    list_file_path = [x.name for x in list_file_obj if x is not None]
-    # Create collection_name for vector database
-    progress(0.1, desc="Creating collection name...")
-    collection_name = create_collection_name(list_file_path[0])
-    progress(0.25, desc="Loading document...")
-    # Load document and create splits
-    doc_splits = load_doc(list_file_path, chunk_size, chunk_overlap)
-    # Create or load vector database
-    progress(0.5, desc="Generating vector database...")
-    # global vector_db
-    vector_db = create_db(doc_splits, collection_name)
-    progress(0.9, desc="Done!")
-    return vector_db, collection_name, "Complete!"
-
-
-def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
-    # print("llm_option",llm_option)
-    llm_name = list_llm[llm_option]
-    print("llm_name: ",llm_name)
-    qa_chain = initialize_llmchain(llm_name, llm_temperature, max_tokens, top_k, vector_db, progress)
     return qa_chain, "Complete!"
 
 
-def
-
-    formatted_chat_history.append(f"Assistant: {bot_message}")
-    return formatted_chat_history
-
 
-def conversation(qa_chain, message, history):
     formatted_chat_history = format_chat_history(message, history)
-    #print("formatted_chat_history",formatted_chat_history)
 
-    # Generate response using QA chain
     response = qa_chain({"question": message, "chat_history": formatted_chat_history})
     response_answer = response["answer"]
-    if response_answer.find("Helpful Answer:")
     response_answer = response_answer.split("Helpful Answer:")[-1]
     response_sources = response["source_documents"]
     response_source1 = response_sources[0].page_content.strip()
     response_source2 = response_sources[1].page_content.strip()
     response_source3 = response_sources[2].page_content.strip()
-    # Langchain sources are zero-based
     response_source1_page = response_sources[0].metadata["page"] + 1
     response_source2_page = response_sources[1].metadata["page"] + 1
     response_source3_page = response_sources[2].metadata["page"] + 1
-    # print ('chat response: ', response_answer)
-    # print('DB source', response_sources)
 
-
-    new_history = history + [(message, response_answer)]
-    # return gr.update(value=""), new_history, response_sources[0], response_sources[1]
     return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page
-
-
-def upload_file(file_obj):
-    list_file_path = []
-    for idx, file in enumerate(file_obj):
-        file_path = file_obj.name
-        list_file_path.append(file_path)
-        # print(file_path)
-    # initialize_database(file_path, progress)
-    return list_file_path
-
 
 def demo():
     with gr.Blocks(theme="base") as demo:
         vector_db = gr.State()
         qa_chain = gr.State()
         collection_name = gr.State()
 
         gr.Markdown(
-        """<center><h2>PDF
-        <h3>
         gr.Markdown(
-        """<b>Note:</b>
-
-        <br><b>
         """)
 
-        with gr.Tab("Step 1 -
             with gr.Row():
-                document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="
-                # upload_btn = gr.UploadButton("Loading document...", height=100, file_count="multiple", file_types=["pdf"], scale=1)
 
-        with gr.Tab("Step 2 -
             with gr.Row():
-                db_btn = gr.Radio(["ChromaDB"], label="
-            with gr.Accordion("
                 with gr.Row():
-                    slider_chunk_size = gr.Slider(minimum = 100, maximum = 1000, value=600, step=20, label="
                 with gr.Row():
-                    slider_chunk_overlap = gr.Slider(minimum = 10, maximum = 200, value=40, step=10, label="
             with gr.Row():
-                db_progress = gr.Textbox(label="
             with gr.Row():
-                db_btn = gr.Button("
 
-        with gr.Tab("Step 3 -
             with gr.Row():
                 llm_btn = gr.Radio(list_llm_simple, \
-                    label="LLM
-            with gr.Accordion("
                 with gr.Row():
-                    slider_temperature = gr.Slider(minimum = 0.01, maximum = 1.0, value=0.7, step=0.1, label="
                 with gr.Row():
-                    slider_maxtokens = gr.Slider(minimum = 224, maximum = 4096, value=1024, step=32, label="
                 with gr.Row():
-                    slider_topk = gr.Slider(minimum = 1, maximum = 10, value=3, step=1, label="top-k
             with gr.Row():
-                llm_progress = gr.Textbox(value="
             with gr.Row():
-                qachain_btn = gr.Button("
 
         with gr.Tab("Step 4 - Chatbot"):
             chatbot = gr.Chatbot(height=300)
-            with gr.Accordion("
                 with gr.Row():
-                    doc_source1 = gr.Textbox(label="
-                    source1_page = gr.Number(label="
                 with gr.Row():
-                    doc_source2 = gr.Textbox(label="
-                    source2_page = gr.Number(label="
                 with gr.Row():
-                    doc_source3 = gr.Textbox(label="
-                    source3_page = gr.Number(label="
             with gr.Row():
-                msg = gr.Textbox(placeholder="
             with gr.Row():
-                submit_btn = gr.Button("
-                clear_btn = gr.ClearButton([msg, chatbot], value="
 
         # Preprocessing events
-        #upload_btn.upload(upload_file, inputs=[upload_btn], outputs=[document])
         db_btn.click(initialize_database, \
             inputs=[document, slider_chunk_size, slider_chunk_overlap], \
             outputs=[vector_db, collection_name, db_progress])
         qachain_btn.click(initialize_LLM, \
-            inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db], \
             outputs=[qa_chain, llm_progress]).then(lambda:[None,"",0,"",0,"",0], \
             inputs=None, \
             outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
@@ -360,15 +195,18 @@ def demo():
 
         # Chatbot events
         msg.submit(conversation, \
-            inputs=[qa_chain, msg, chatbot], \
             outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
             queue=False)
         submit_btn.click(conversation, \
-            inputs=[qa_chain, msg, chatbot], \
             outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
             queue=False)
         clear_btn.click(lambda:[None,"",0,"",0,"",0], \
             inputs=None, \
             outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
             queue=False)
     demo.queue().launch(debug=True)
 import gradio as gr
 import os
+from googletrans import Translator
+import requests
+from dotenv import load_dotenv
+import numpy as np
+from langchain.embeddings import HuggingFaceEmbeddings
+from langchain.vectorstores import Chroma
+from langchain.document_loaders import UnstructuredPDFLoader
+from langchain.text_splitter import CharacterTextSplitter
+from langchain.chains.qa_with_sources import load_qa_with_sources_from_chain_type
+from langchain.schema import Document
 from langchain.memory import ConversationBufferMemory
+from langchain.callbacks.manager import CallbackManager
+from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
+from langchain.llms.base import LLM
+from typing import List, Dict, Any, Optional
+from pydantic import BaseModel
+from tqdm import tqdm
 import torch
+import logging
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+class PDFDocument(Document):
+    def _extract_metadata(self, **kwargs) -> Dict[str, Any]:
+        metadata = super()._extract_metadata(**kwargs)
+        metadata["filename"] = self.page_content
+        return metadata
+
+def initialize_database(document, chunk_size, chunk_overlap, progress=gr.Progress()):
+    logger.info("Initializing database...")
+    # Embedding function for the vector store (HuggingFaceEmbeddings, as in the previous version)
+    embedding_function = HuggingFaceEmbeddings()
+    documents = []
+    for file in document:
+        loader = UnstructuredPDFLoader(file.name)
+        docs = loader.load()
+        splitter = CharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
+        for doc in docs:
+            pages = splitter.split_documents([doc])
+            for page in pages:
+                documents.append(PDFDocument(page_content=page.page_content, metadata={"filename": file.name}))
+    # Build the Chroma index from the document splits
+    vectorstore = Chroma.from_documents(documents=documents, embedding=embedding_function)
+    progress(0.5)
+    logger.info("Database initialized successfully.")
+    # Three return values to match the outputs of db_btn.click (collection_name is unused in this version)
+    return vectorstore, None, "Initialized"
+
+def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress(), language="italian"):
+    logger.info("Initializing LLM chain...")
+    llm_name = list_llm[llm_option]
+    print("llm_name: ",llm_name)
 
+    if language == "italian":
+        default_llm = "google/gemma-7b-it"
     else:
+        default_llm = "mistralai/Mistral-7B-Instruct-v0.2"
 
+    if llm_name != default_llm:
+        print(f"Using default LLM {default_llm} for {language}")
+        llm_name = default_llm
+
+    qa_chain = load_qa_with_sources_from_chain_type(
+        llm=llm_name,
+        chain_type="stuff",
+        retriever=vector_db.as_retriever(),
+        temperature=llm_temperature,
+        top_k_per_token=top_k,
+        max_tokens=max_tokens,
     )
+    progress(1.0)
+    logger.info("LLM chain initialized successfully.")
     return qa_chain, "Complete!"
 
+def format_chat_history(message, history):
+    chat_history = ""
+    for item in history:
+        chat_history += f"\nUser: {item[0]}\nAI: {item[1]}"
+    chat_history += f"\n\nUser: {message}"
+    return chat_history
 
+def translate_text(text, src_lang, dest_lang):
+    translator = Translator()
+    result = translator.translate(text, src=src_lang, dest=dest_lang)
+    return result.text
 
+def conversation(qa_chain, message, history, language):
     formatted_chat_history = format_chat_history(message, history)
 
     response = qa_chain({"question": message, "chat_history": formatted_chat_history})
     response_answer = response["answer"]
+    if response_answer.find("Helpful Answer:") != -1:
         response_answer = response_answer.split("Helpful Answer:")[-1]
+
+    if language != "italian":
+        try:
+            translated_response = translate_text(response_answer, "en", "it")
+        except Exception as e:
+            logger.error(f"Error translating response: {e}")
+            translated_response = response_answer
+    else:
+        translated_response = response_answer
+
     response_sources = response["source_documents"]
     response_source1 = response_sources[0].page_content.strip()
     response_source2 = response_sources[1].page_content.strip()
     response_source3 = response_sources[2].page_content.strip()
     response_source1_page = response_sources[0].metadata["page"] + 1
     response_source2_page = response_sources[1].metadata["page"] + 1
     response_source3_page = response_sources[2].metadata["page"] + 1
 
+    new_history = history + [(message, translated_response)]
     return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page
 
 def demo():
     with gr.Blocks(theme="base") as demo:
         vector_db = gr.State()
         qa_chain = gr.State()
         collection_name = gr.State()
+        language = gr.State(value="italian")
 
         gr.Markdown(
+        """<center><h2>Chatbot basato su PDF</center></h2>
+        <h3>Fai domande sui tuoi documenti PDF</h3>""")
         gr.Markdown(
+        """<b>Note:</b> Questo assistente AI, utilizzando Langchain e LLM open-source, esegue retrieval-augmented generation (RAG) dai tuoi documenti PDF. \
+        L'interfaccia utente mostra esplicitamente più passaggi per aiutare a comprendere il flusso di lavoro RAG.
+        Questo chatbot tiene conto delle domande precedenti quando genera risposte (tramite memoria conversazionale), e include riferimenti al documento per scopi di chiarezza.<br>
+        <br><b>Avviso:</b> Questo spazio utilizza l'hardware CPU Basic gratuito da Hugging Face. Alcuni passaggi e modelli LLM utilizzati qui sotto (endpoint di inferenza gratuiti) possono richiedere del tempo per generare una risposta.
         """)
 
+        with gr.Tab("Step 1 - Carica PDF"):
             with gr.Row():
+                document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Carica i tuoi documenti PDF (singolo o multiplo)")
 
+        with gr.Tab("Step 2 - Processa documento"):
             with gr.Row():
+                db_btn = gr.Radio(["ChromaDB"], label="Tipo di database vettoriale", value = "ChromaDB", type="index", info="Scegli il tuo database vettoriale")
+            with gr.Accordion("Opzioni avanzate - Divisore testo documento", open=False):
                 with gr.Row():
+                    slider_chunk_size = gr.Slider(minimum = 100, maximum = 1000, value=600, step=20, label="Dimensione chunk", info="Dimensione chunk", interactive=True)
                 with gr.Row():
+                    slider_chunk_overlap = gr.Slider(minimum = 10, maximum = 200, value=40, step=10, label="Sovrapposizione chunk", info="Sovrapposizione chunk", interactive=True)
             with gr.Row():
+                db_progress = gr.Textbox(label="Inizializzazione database vettoriale", value="Nessuno")
             with gr.Row():
+                db_btn = gr.Button("Genera database vettoriale")
 
+        with gr.Tab("Step 3 - Inizializza catena QA"):
             with gr.Row():
                 llm_btn = gr.Radio(list_llm_simple, \
+                    label="Modelli LLM", value = list_llm_simple[0], type="index", info="Scegli il tuo modello LLM")
+            with gr.Accordion("Opzioni avanzate - Modello LLM", open=False):
                 with gr.Row():
+                    slider_temperature = gr.Slider(minimum = 0.01, maximum = 1.0, value=0.7, step=0.1, label="Temperatura", info="Temperatura del modello", interactive=True)
                 with gr.Row():
+                    slider_maxtokens = gr.Slider(minimum = 224, maximum = 4096, value=1024, step=32, label="Token massimi", info="Token massimi del modello", interactive=True)
                 with gr.Row():
+                    slider_topk = gr.Slider(minimum = 1, maximum = 10, value=3, step=1, label="Campioni top-k", info="Campioni top-k del modello", interactive=True)
             with gr.Row():
+                llm_progress = gr.Textbox(value="Nessuno", label="Inizializzazione catena QA")
             with gr.Row():
+                qachain_btn = gr.Button("Inizializza catena Question Answering")
 
         with gr.Tab("Step 4 - Chatbot"):
             chatbot = gr.Chatbot(height=300)
+            with gr.Accordion("Avanzate - Riferimenti documento", open=False):
                 with gr.Row():
+                    doc_source1 = gr.Textbox(label="Riferimento 1", lines=2, container=True, scale=20)
+                    source1_page = gr.Number(label="Pagina", scale=1)
                 with gr.Row():
+                    doc_source2 = gr.Textbox(label="Riferimento 2", lines=2, container=True, scale=20)
+                    source2_page = gr.Number(label="Pagina", scale=1)
                 with gr.Row():
+                    doc_source3 = gr.Textbox(label="Riferimento 3", lines=2, container=True, scale=20)
+                    source3_page = gr.Number(label="Pagina", scale=1)
             with gr.Row():
+                msg = gr.Textbox(placeholder="Digita un messaggio (es. 'Di cosa parla questo documento?')", container=True)
             with gr.Row():
+                submit_btn = gr.Button("Invia messaggio")
+                clear_btn = gr.ClearButton([msg, chatbot], value="Pulisci conversazione")
+            with gr.Row():
+                language_selector = gr.Radio(choices=["italiano", "inglese"], value="italiano", label="Lingua")
 
         # Preprocessing events
         db_btn.click(initialize_database, \
             inputs=[document, slider_chunk_size, slider_chunk_overlap], \
             outputs=[vector_db, collection_name, db_progress])
         qachain_btn.click(initialize_LLM, \
+            inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db, language], \
             outputs=[qa_chain, llm_progress]).then(lambda:[None,"",0,"",0,"",0], \
             inputs=None, \
             outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
 
         # Chatbot events
         msg.submit(conversation, \
+            inputs=[qa_chain, msg, chatbot, language], \
             outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
             queue=False)
         submit_btn.click(conversation, \
+            inputs=[qa_chain, msg, chatbot, language], \
             outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
             queue=False)
         clear_btn.click(lambda:[None,"",0,"",0,"",0], \
             inputs=None, \
             outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
             queue=False)
     demo.queue().launch(debug=True)
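
The call to load_qa_with_sources_from_chain_type above does not correspond to a documented LangChain constructor, and list_llm is no longer defined in this file, so that step cannot run as committed. For comparison, here is a minimal sketch of how the chain could be assembled with the APIs the previous version of app.py relied on (HuggingFaceEndpoint, ConversationBufferMemory, ConversationalRetrievalChain); the import paths and the Gemma repo id are assumptions and may need adjusting to the installed LangChain version.

# Sketch only, not part of this commit. Assumes the free HF Inference API is reachable,
# a HUGGINGFACEHUB_API_TOKEN is available in the environment, and these import paths
# match the installed LangChain version.
from langchain.llms import HuggingFaceEndpoint
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory

def build_qa_chain(vector_db, repo_id="google/gemma-7b-it", temperature=0.7, max_tokens=1024, top_k=3):
    # Remote LLM served by the Hugging Face inference endpoint, as in the previous version of this file
    llm = HuggingFaceEndpoint(
        repo_id=repo_id,
        temperature=temperature,
        max_new_tokens=max_tokens,
        top_k=top_k,
    )
    # Conversation memory; output_key="answer" matches what conversation() reads from the response
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        output_key="answer",
        return_messages=True,
    )
    # Retrieval-augmented chain over the Chroma store; source documents are returned
    # so the UI can fill the "Riferimento" textboxes and page numbers
    return ConversationalRetrievalChain.from_llm(
        llm,
        retriever=vector_db.as_retriever(),
        chain_type="stuff",
        memory=memory,
        return_source_documents=True,
        verbose=False,
    )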
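
The language radio button is created but never synchronised with the language gr.State, and its values ("italiano"/"inglese") differ from the "italian" string that initialize_LLM and conversation compare against. One possible wiring, placed inside demo() next to the other event handlers; the mapping and the "english" value are assumptions, not part of this commit.

# Sketch only: map the radio label to the value the handlers compare against,
# and store it in the language State whenever the selection changes.
def set_language(choice):
    return "italian" if choice == "italiano" else "english"

language_selector.change(set_language, inputs=[language_selector], outputs=[language])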
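
For a quick check of the translate_text helper, googletrans' Translator.translate returns an object whose .text attribute holds the translation; network access is required, and the example sentence below is just the placeholder string from the UI.

# Sketch only: exercise the googletrans-based helper defined above.
from googletrans import Translator

result = Translator().translate("What is this document about?", src="en", dest="it")
print(result.text)  # expected to be close to "Di cosa parla questo documento?"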