File size: 3,075 Bytes
c20f7c1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
import os
import io
from pdfminer.high_level import extract_text_to_fp
from pdfminer.layout import LAParams
import re
from tqdm import tqdm
import time
from nylon import ChatDatabase, get_keywords

def extract_text_from_pdf(pdf_path):
    """Return the full text content of the PDF at *pdf_path* as one string."""
    text_buffer = io.StringIO()
    with open(pdf_path, 'rb') as pdf_file:
        # NOTE(review): codec='utf-8' together with a StringIO sink depends on
        # the installed pdfminer version's handling — confirm against the
        # pinned pdfminer.six release.
        extract_text_to_fp(
            pdf_file,
            text_buffer,
            laparams=LAParams(),
            output_type='text',
            codec='utf-8',
        )
    return text_buffer.getvalue()

def process_text_into_paragraphs(text):
    """Split raw extracted text into a list of cleaned paragraphs.

    Paragraph boundaries are runs of blank lines. Within each paragraph,
    all whitespace runs (including newlines) are collapsed to single
    spaces, and words hyphenated across a line break ("exam- ple") are
    rejoined ("example").

    Args:
        text: Raw text, e.g. the output of extract_text_from_pdf().

    Returns:
        List of non-empty cleaned paragraph strings, in original order.
    """
    paragraphs = re.split(r'\n\s*\n', text)

    cleaned_paragraphs = []
    for para in paragraphs:
        # Collapse every whitespace run to a single space.
        cleaned_para = re.sub(r'\s+', ' ', para).strip()
        # Rejoin words broken by end-of-line hyphenation. Whitespace after
        # the hyphen is required (\s+, not \s*) so legitimate compounds
        # such as "well-known" keep their hyphen.
        cleaned_para = re.sub(r'(\w+)-\s+(\w+)', r'\1\2', cleaned_para)
        if cleaned_para:  # Only add non-empty paragraphs
            cleaned_paragraphs.append(cleaned_para)

    return cleaned_paragraphs

def process_pdfs(directory, db):
    """Ingest every PDF in *directory* into the database.

    Each PDF is split into cleaned paragraphs; every paragraph is stored
    as one message with a fixed sender and timestamp, tagged with the
    PDF's base filename (without extension).

    Args:
        directory: Folder to scan for PDFs (non-recursive).
        db: Database instance exposing add_message(sender, timestamp,
            text, tag) — presumably a ChatDatabase; verify against caller.
    """
    fixed_timestamp = "2024-10-22 12:00:00"
    sender = "Arcana"  # Set sender to "Arcana" for all messages

    # Case-insensitive extension match so files like "Book.PDF" are not
    # silently skipped.
    pdf_files = [f for f in os.listdir(directory)
                 if f.lower().endswith('.pdf')]
    total_files = len(pdf_files)

    with tqdm(total=total_files, desc="Processing PDFs", unit="file") as pbar:
        for filename in pdf_files:
            pdf_path = os.path.join(directory, filename)
            tag = os.path.splitext(filename)[0]  # Use filename without extension as tag

            text = extract_text_from_pdf(pdf_path)
            paragraphs = process_text_into_paragraphs(text)

            for paragraph in paragraphs:
                db.add_message(sender, fixed_timestamp, paragraph, tag)

            pbar.update(1)
            pbar.set_postfix({"Current File": filename})

def main():
    """Build (or load) the textbook database, then run a demo query."""
    db_filename = 'textbooks.txt'

    already_exists = os.path.exists(db_filename)
    if already_exists:
        print(f"Database file '{db_filename}' already exists. Loading existing database...")
    else:
        print(f"Creating new database '{db_filename}'...")
    db = ChatDatabase(db_filename)

    # Only ingest PDFs when the database is freshly created.
    if not already_exists:
        pdf_directory = 'pdfdemos'
        start_time = time.time()
        process_pdfs(pdf_directory, db)
        total_time = time.time() - start_time
        print(f"\nDatabase creation complete. Total time: {total_time:.2f} seconds")

    # Example query
    query = "NaCl"
    sender = "Arcana"  # Now all senders are "Arcana"
    N = 5
    cache = {}
    query_tag = "Chemistry2e-WEB"  # Use the PDF name as the tag for querying

    relevant_messages = db.get_relevant_messages(sender, query, N, cache, query_tag)

    print(f"\nTop {N} relevant paragraphs for query '{query}' with tag '{query_tag}':")
    for message in relevant_messages:
        msg_sender, msg_time, msg_text, msg_tag = message[0], message[1], message[2], message[3]
        print(f"From {msg_sender} at {msg_time}:")
        print(f"Tag: {msg_tag}")
        print(msg_text[:200] + "...\n")

# Run the ingestion/query demo only when executed as a script,
# not when imported as a module.
if __name__ == "__main__":
    main()