Create app.py
app.py
ADDED
from huggingface_hub import InferenceClient
import gradio as gr
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import HuggingFaceEmbeddings

# CSS for styling
css = '''
.gradio-container{max-width: 1000px !important}
h1{text-align:center}
footer {visibility: hidden}
'''

# Initialize the inference client
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")

# Build the retriever from the uploaded documents
def initialize_retriever(file_objs):
    """Load PDF documents and build a retriever."""
    if not file_objs:
        return None, "No documents loaded."

    # Load and split the documents
    documents = []
    for file_obj in file_objs:
        loader = PyPDFLoader(file_obj.name)
        documents.extend(loader.load())

    # Split into smaller chunks
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=2048, chunk_overlap=128)
    splits = text_splitter.split_documents(documents)

    # Create embeddings and the vector store
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    vectorstore = Chroma.from_documents(documents=splits, embedding=embeddings)
    retriever = vectorstore.as_retriever(search_kwargs={"k": 2})  # return the 2 most relevant chunks

    return retriever, "Documents processed successfully!"

# Format the prompt for RAG
def format_prompt(message, history, retriever=None, system_prompt=None):
    prompt = "<s>"

    # Add the conversation history
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "

    # Add the system instruction, if provided
    if system_prompt:
        prompt += f"[SYS] {system_prompt} [/SYS]"

    # Add retrieved context, if a retriever is available
    if retriever:
        # Fetch the most relevant documents
        docs = retriever.get_relevant_documents(message)
        context = "\n".join([doc.page_content for doc in docs])
        prompt += f"[CONTEXT] {context} [/CONTEXT]"

    # Add the user's message
    prompt += f"[INST] {message} [/INST]"
    return prompt

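# For reference, with one prior exchange, a system prompt, and a retriever,
# format_prompt yields a string shaped roughly like:
#   <s>[INST] hi [/INST] hello</s> [SYS] ... [/SYS][CONTEXT] ... [/CONTEXT][INST] question [/INST]
# Note that [SYS] and [CONTEXT] are ad-hoc markers used by this app, not
# Mistral control tokens; only [INST]...[/INST] and </s> belong to the
# model's chat template.
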
# Generation function with RAG
def generate(
    prompt, history, retriever=None, system_prompt=None, temperature=0.2, max_new_tokens=1024, top_p=0.95, repetition_penalty=1.0
):
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    # Format the prompt with RAG context, if available
    formatted_prompt = format_prompt(prompt, history, retriever, system_prompt)

    # Stream the generated response
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""

    for response in stream:
        output += response.token.text
        yield output

# Gradio interface with RAG
def create_demo():
    with gr.Blocks(css=css) as demo:
        retriever_state = gr.State(value=None)

        # Title
        gr.Markdown("<h1>RAG Chatbot</h1>")

        # Document upload section
        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### Load Documents")
                file_input = gr.Files(label="Upload PDFs", file_types=["pdf"], file_count="multiple")
                process_btn = gr.Button("Process Documents")
                status_output = gr.Textbox(label="Status", value="No documents loaded")

        # Chat interface
        chat_interface = gr.ChatInterface(
            fn=generate,
            additional_inputs=[
                retriever_state,  # pass the State component itself, not a new gr.State wrapping it
                gr.Textbox(label="System Prompt", placeholder="Enter a system prompt (optional)", value=None)
            ],
            title="",
        )

        # Event to process the documents
        process_btn.click(
            fn=initialize_retriever,
            inputs=[file_input],
            outputs=[retriever_state, status_output]
        )

    return demo

# Launch the application
demo = create_demo()
demo.queue().launch(share=False)
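For a quick sanity check of the retrieval path without launching the UI, something like the snippet below can be run in a session where the functions above are defined. This is a minimal sketch: sample.pdf is a hypothetical local file, and SimpleNamespace stands in for the file objects that Gradio's gr.Files hands to callbacks.

from types import SimpleNamespace

# Hypothetical upload; any local PDF path works here.
fake_upload = SimpleNamespace(name="sample.pdf")

retriever, status = initialize_retriever([fake_upload])
print(status)

if retriever is not None:
    # Inspect what would be injected into the [CONTEXT] block of the prompt.
    for doc in retriever.get_relevant_documents("What is this document about?"):
        print(doc.page_content[:200])

Deployed as a Space, the app would also need a requirements.txt covering the imports above; a plausible (unpinned) list is gradio, huggingface_hub, langchain, langchain-community, chromadb, pypdf, and sentence-transformers. The exact set is an assumption about the deployment, not something this commit specifies.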