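"""Streamlit RAG assistant for audit Q&A.

Uploads audit PDFs, splits them into chunks, embeds the chunks into a
Chroma vector store (all-MiniLM-L6-v2 embeddings), and answers questions
with a LaMini-T5 RetrievalQA chain.
"""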
import os
import logging
import math
import streamlit as st
import torch
import fitz  # PyMuPDF
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
from langchain_community.document_loaders import PDFMinerLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import SentenceTransformerEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_community.llms import HuggingFacePipeline
from langchain.chains import RetrievalQA
from langchain.schema import Document

# Set up logging
logging.basicConfig(level=logging.INFO)

# Define global variables
device = 0 if torch.cuda.is_available() else -1  # GPU index for transformers.pipeline, -1 = CPU
persist_directory = "db"
uploaded_files_dir = "uploaded_files"

# Streamlit app configuration
st.set_page_config(page_title="Audit Assistant", layout="wide")
st.title("Audit Assistant")

# Load the model
checkpoint = "MBZUAI/LaMini-T5-738M"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
base_model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
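
# Note: Streamlit re-executes this script on every interaction, so the two
# from_pretrained() calls above run on each rerun (served from the local HF
# cache after the first download). A minimal sketch of a cached loader,
# using Streamlit's st.cache_resource:
#
#     @st.cache_resource
#     def load_model(name: str = checkpoint):
#         return AutoTokenizer.from_pretrained(name), AutoModelForSeq2SeqLM.from_pretrained(name)
#
#     tokenizer, base_model = load_model()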

# Helper Functions

def extract_text_from_pdf(file_path):
    """Extract text from a PDF using PyMuPDF (fitz)."""
    try:
        doc = fitz.open(file_path)
        text = ""
        for page_num in range(doc.page_count):
            page = doc.load_page(page_num)
            text += page.get_text("text")
        return text
    except Exception as e:
        logging.error(f"Error reading PDF {file_path}: {e}")
        return None
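
# Hypothetical swap-in for PDFMinerLoader inside data_ingestion(), if the
# PyMuPDF extractor above is preferred:
#
#     text = extract_text_from_pdf(file_path)
#     if text:
#         documents.append(Document(page_content=text, metadata={"source": file_path}))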

def data_ingestion():
    """Function to load PDFs and create embeddings with improved error handling and efficiency."""
    try:
        logging.info("Starting data ingestion")

        if not os.path.exists(uploaded_files_dir):
            os.makedirs(uploaded_files_dir)

        documents = []  
        for filename in os.listdir(uploaded_files_dir):
            if filename.endswith(".pdf"):
                file_path = os.path.join(uploaded_files_dir, filename)
                logging.info(f"Processing file: {file_path}")
                
                loader = PDFMinerLoader(file_path)
                
                loaded_docs = loader.load()
                
                # Normalise loader output: keep only entries that carry
                # non-empty text, and coerce any plain dicts into Document
                # objects so the splitter downstream sees a uniform type.
                for doc in loaded_docs:
                    if isinstance(doc, dict):
                        if 'content' in doc:
                            doc = Document(page_content=doc['content'])
                        else:
                            logging.warning(f"Skipping invalid document structure in {file_path}")
                            continue
                    elif not hasattr(doc, 'page_content'):
                        logging.warning(f"Skipping invalid document structure in {file_path}")
                        continue

                    if doc.page_content and doc.page_content.strip():
                        documents.append(doc)
                    else:
                        logging.warning(f"Skipping empty document: {file_path}")

        if not documents:
            logging.error("No valid documents found to process.")
            return

        logging.info(f"Total valid documents: {len(documents)}")
        
        # Split documents into ~500-character chunks with 100 characters of
        # overlap so context is preserved across chunk boundaries.
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
        texts = text_splitter.split_documents(documents)

        logging.info(f"Total text chunks created: {len(texts)}")
        
        if not texts:
            logging.error("No valid text chunks to create embeddings.")
            return

        embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
        
        # Chroma caps the size of a single insert (SQLite-backed builds
        # commonly reject batches above 5461 records), so embed in batches.
        MAX_BATCH_SIZE = 5461
        total_batches = math.ceil(len(texts) / MAX_BATCH_SIZE)
        
        logging.info(f"Processing {len(texts)} text chunks in {total_batches} batches...")

        db = None
        for i in range(total_batches):
            batch_start = i * MAX_BATCH_SIZE
            batch_end = min((i + 1) * MAX_BATCH_SIZE, len(texts))
            text_batch = texts[batch_start:batch_end]
            
            logging.info(f"Processing batch {i + 1}/{total_batches}, size: {len(text_batch)}")

            if db is None:
                db = Chroma.from_documents(text_batch, embeddings, persist_directory=persist_directory)
            else:
                db.add_documents(text_batch)

        db.persist()
        logging.info("Data ingestion completed successfully")
        
    except Exception as e:
        logging.error(f"Error during data ingestion: {str(e)}")
        raise

def llm_pipeline():
    """Set up the language model pipeline."""
    logging.info("Setting up LLM pipeline")
    pipe = pipeline(
        'text2text-generation',
        model=base_model,
        tokenizer=tokenizer,
        max_length=256,    # cap on the generated sequence length
        do_sample=True,
        temperature=0.3,   # low temperature keeps answers close to the retrieved text
        top_p=0.95,
        device=device
    )
    local_llm = HuggingFacePipeline(pipeline=pipe)
    logging.info("LLM pipeline setup complete")
    return local_llm

def qa_llm():
    """Set up the question-answering chain."""
    logging.info("Setting up QA model")
    llm = llm_pipeline()
    embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
    db = Chroma(persist_directory=persist_directory, embedding_function=embeddings)
    retriever = db.as_retriever()  # default similarity search (k=4 in current LangChain versions)
    qa = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=retriever,
        return_source_documents=True
    )
    logging.info("QA model setup complete")
    return qa
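
# Hypothetical direct use of the chain outside the Streamlit UI (the dict
# keys come from return_source_documents=True above):
#
#     qa = qa_llm()
#     out = qa({"query": "Which standard governs audit sampling?"})
#     print(out["result"])
#     print([d.metadata for d in out["source_documents"]])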

def process_answer(user_question):
    """Generate an answer to the user’s question."""
    try:
        logging.info("Processing user question")
        qa = qa_llm() 

        tailored_prompt = f"""
        You are an expert chatbot designed to assist Chartered Accountants (CAs) in the field of audits. 
        Your goal is to provide accurate and comprehensive answers to any questions related to audit policies, procedures, 
        and accounting standards based on the provided PDF documents. 
        Please respond effectively and refer to the relevant standards and policies whenever applicable.

        User question: {user_question}
        """

        response = qa({"query": tailored_prompt})
        answer = response['result']

        # Crude guardrail: if the model reports that the context lacks the
        # answer, return a canned fallback instead of the raw generation.
        if "not provide" in answer or "no information" in answer:
            return "The document does not provide sufficient information to answer your question."

        logging.info("Answer generated successfully")
        return answer

    except Exception as e:
        logging.error(f"Error during answer generation: {str(e)}")
        return "Error processing the question."

# Streamlit UI Setup
st.sidebar.header("File Upload")
uploaded_files = st.sidebar.file_uploader("Upload your PDF files", type=["pdf"], accept_multiple_files=True)

if uploaded_files:
    # Save uploaded files
    if not os.path.exists(uploaded_files_dir):
        os.makedirs(uploaded_files_dir)

    for uploaded_file in uploaded_files:
        file_path = os.path.join(uploaded_files_dir, uploaded_file.name)
        with open(file_path, "wb") as f:
            f.write(uploaded_file.getbuffer())

    st.sidebar.success(f"Uploaded {len(uploaded_files)} file(s) successfully!")

    # Streamlit reruns this script on every interaction, so guard the
    # expensive ingestion step: re-embed only when the set of uploaded
    # file names changes, not on every keystroke in the question box.
    uploaded_names = sorted(f.name for f in uploaded_files)
    if st.session_state.get("ingested_files") != uploaded_names:
        data_ingestion()
        st.session_state["ingested_files"] = uploaded_names

    # Display UI for Q&A
    st.header("Ask a Question")
    user_question = st.text_input("Enter your question here:")

    if user_question:
        answer = process_answer(user_question)
        st.write(answer)

else:
    st.sidebar.info("Upload PDF files to get started!")
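
# To run locally (assuming this file is saved as app.py; one possible
# environment, versions not pinned):
#
#     pip install streamlit langchain langchain-community chromadb \
#         sentence-transformers transformers torch pymupdf pdfminer.six
#     streamlit run app.py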