# NOTE(review): the three lines that were here ("Spaces:" / "Sleeping" /
# "Sleeping") were not Python — they look like residue from whatever tool
# exported this file (which also appended " | |" to every line). Converted
# to a comment so the module parses.
import io
import os
import re
import time

from pdfminer.high_level import extract_text_to_fp
from pdfminer.layout import LAParams
from tqdm import tqdm

from nylon import ChatDatabase, get_keywords
def extract_text_from_pdf(pdf_path):
    """Return the full text content of the PDF at *pdf_path* as one string."""
    buffer = io.StringIO()
    # pdfminer streams decoded text straight into the in-memory buffer.
    with open(pdf_path, 'rb') as pdf_file:
        extract_text_to_fp(pdf_file, buffer, laparams=LAParams(),
                           output_type='text', codec='utf-8')
    return buffer.getvalue()
def process_text_into_paragraphs(text):
    """Split raw extracted text into a list of cleaned paragraph strings.

    Paragraphs are delimited by blank lines. Within each paragraph,
    whitespace runs (including newlines) are collapsed to single spaces
    and words broken by end-of-line hyphenation ("exam- ple") are
    rejoined. Empty paragraphs are dropped.

    Args:
        text: Raw text, e.g. the output of extract_text_from_pdf().

    Returns:
        list[str]: Non-empty cleaned paragraphs, in document order.
    """
    cleaned_paragraphs = []
    for para in re.split(r'\n\s*\n', text):
        # Collapse all whitespace to single spaces first, so a
        # line-break hyphenation always looks like "word- next".
        cleaned = re.sub(r'\s+', ' ', para).strip()
        # Rejoin hyphen-broken words. Requiring at least one space
        # after the hyphen (\s+ rather than \s*) preserves genuine
        # compounds such as "well-known", which the previous pattern
        # incorrectly collapsed to "wellknown".
        cleaned = re.sub(r'(\w+)-\s+(\w+)', r'\1\2', cleaned)
        if cleaned:  # skip paragraphs that were pure whitespace
            cleaned_paragraphs.append(cleaned)
    return cleaned_paragraphs
def process_pdfs(directory, db):
    """Ingest every ``.pdf`` in *directory* into *db* as tagged paragraphs.

    Each paragraph is stored via ``db.add_message`` from the fixed sender
    "Arcana" with a constant timestamp; the PDF's base filename (without
    extension) becomes the message tag. Progress is shown with tqdm.
    """
    fixed_timestamp = "2024-10-22 12:00:00"
    sender = "Arcana"  # Set sender to "Arcana" for all messages
    pdf_files = [name for name in os.listdir(directory) if name.endswith('.pdf')]
    with tqdm(total=len(pdf_files), desc="Processing PDFs", unit="file") as progress:
        for filename in pdf_files:
            # The tag is the filename with its ".pdf" extension removed.
            tag = os.path.splitext(filename)[0]
            raw_text = extract_text_from_pdf(os.path.join(directory, filename))
            for paragraph in process_text_into_paragraphs(raw_text):
                db.add_message(sender, fixed_timestamp, paragraph, tag)
            progress.update(1)
            progress.set_postfix({"Current File": filename})
def main():
    """Build (or load) the paragraph database from PDFs, then run a demo query."""
    db_filename = 'textbooks.txt'
    # ChatDatabase is constructed the same way whether or not the file
    # exists (it loads an existing file itself); only the message differs,
    # so the previously duplicated construction is collapsed to one call.
    if os.path.exists(db_filename):
        print(f"Database file '{db_filename}' already exists. Loading existing database...")
    else:
        print(f"Creating new database '{db_filename}'...")
    db = ChatDatabase(db_filename)

    pdf_directory = 'pdfdemos'
    start_time = time.time()
    process_pdfs(pdf_directory, db)
    total_time = time.time() - start_time
    print(f"\nDatabase creation complete. Total time: {total_time:.2f} seconds")

    # Example query against one of the ingested textbooks.
    query = "NaCl"
    sender = "Arcana"  # Now all senders are "Arcana"
    N = 5
    cache = {}
    query_tag = "Chemistry2e-WEB"  # Use the PDF name as the tag for querying
    relevant_messages = db.get_relevant_messages(sender, query, N, cache, query_tag)
    print(f"\nTop {N} relevant paragraphs for query '{query}' with tag '{query_tag}':")
    for message in relevant_messages:
        # Message layout appears to be (sender, timestamp, text, tag),
        # inferred from the indexing below — TODO confirm against nylon.
        print(f"From {message[0]} at {message[1]}:")
        print(f"Tag: {message[3]}")
        print(message[2][:200] + "...\n")  # truncate long paragraphs for display
# Script entry point: run the ingest/query pipeline only when executed directly.
if __name__ == "__main__":
    main()