Yoxas committed · verified
Commit 1ffeef0 · 1 Parent(s): f8af002

Update app.py

Files changed (1):
  1. app.py (+42 -86)
app.py CHANGED
@@ -1,129 +1,85 @@
- import os
  import re
  import pandas as pd
  from PyPDF2 import PdfReader
- from transformers import pipeline, AutoTokenizer
- from gradio import Interface, File
  import gradio as gr
- import spaces

- # Initialize a list to store the data
- data = []
-
- # Load the LED tokenizer and model
  led_tokenizer = AutoTokenizer.from_pretrained("allenai/led-base-16384-multi_lexsum-source-long")
- classifier = pipeline("text-classification", model="allenai/led-base-16384-multi_lexsum-source-long", tokenizer=led_tokenizer, framework="pt")
-
- # Load the summarization model and tokenizer
  summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6", tokenizer="sshleifer/distilbart-cnn-12-6", framework="pt")

  # Function to clean text by keeping only alphanumeric characters and spaces
  def clean_text(text):
      return re.sub(r'[^a-zA-Z0-9\s]', '', text)
-
  # Function to extract text from PDF files
  def extract_text(pdf_file):
      try:
-         pdf_reader = PdfReader(pdf_file)
-         if pdf_reader.is_encrypted:
-             print(f"Skipping encrypted file: {pdf_file}")
-             return None
-         text = ''
-         for page in pdf_reader.pages:
-             text += page.extract_text() or ''
-         return text
      except Exception as e:
          print(f"Error extracting text from {pdf_file}: {e}")
          return None

- # Function to split text into chunks of a specified size
- def split_text(text, chunk_size=1024):
-     words = text.split()
-     for i in range(0, len(words), chunk_size):
-         yield ' '.join(words[i:i + chunk_size])

- # Function to classify text using LED model
- @spaces.GPU(duration=120)
- def classify_text(text):
-     try:
-         return classifier(text)[0]['label']
-     except IndexError:
-         return "Unable to classify"
-
- # Function to summarize text using the summarizer model
- @spaces.GPU(duration=120)
- def summarize_text(text, max_length=100, min_length=30):
-     try:
-         return summarizer(text, max_length=max_length, min_length=min_length, do_sample=False)[0]['summary_text']
-     except IndexError:
-         return "Unable to summarize"

  # Function to extract a title-like summary from the beginning of the text
- @spaces.GPU(duration=120)
- def extract_title(text, max_length=20):
-     try:
-         return summarizer(text, max_length=max_length, min_length=5, do_sample=False)[0]['summary_text']
-     except IndexError:
-         return "Unable to extract title"

- # Define the folder path and CSV file path
- # output_folder_path = '/content/drive/My Drive/path_to_output' # Adjust this to your actual path
-
- # Define the Gradio interface for file upload and download
- @spaces.GPU(duration=120)
  def process_files(pdf_files):
      for pdf_file in pdf_files:
          text = extract_text(pdf_file)
-
-         # Skip encrypted files
          if text is None:
              continue

-         # Extract a title from the beginning of the text
-         title_text = ' '.join(text.split()[:512]) # Take the first 512 tokens for title extraction
          title = extract_title(title_text)

-         # Initialize placeholders for combined results
-         combined_abstract = []
-         combined_cleaned_text = []
-
-         # Split text into chunks and process each chunk
-         for chunk in split_text(text, chunk_size=512):
-             # Summarize the text chunk
-             abstract = summarize_text(chunk)
-             combined_abstract.append(abstract)
-
-             # Clean the text chunk
-             cleaned_text = clean_text(chunk)
-             combined_cleaned_text.append(cleaned_text)

-         # Combine results from all chunks
-         final_abstract = ' '.join(combined_abstract)
-         final_cleaned_text = ' '.join(combined_cleaned_text)

-         # Append the data to the list
-         data.append([title, final_abstract, final_cleaned_text])
-
-     # Create a DataFrame from the data list
      df = pd.DataFrame(data, columns=['Title', 'Abstract', 'Content'])
-
-     # Save the DataFrame to a CSV file
      output_file_path = 'processed_pdfs.csv'
      df.to_csv(output_file_path, index=False)
-
      return output_file_path
-
  # Gradio interface
- pdf_input = gr.File(label="Upload PDF Files", file_types=[".pdf"], file_count="multiple")
- csv_output = gr.File(label="Download CSV")

  gr.Interface(
-     fn=process_files,
-     inputs=pdf_input,
      outputs=csv_output,
      title="Dataset creation",
      description="Upload PDF files and get a summarized CSV file.",
-     article="""<p>This is an experimental app that allows you to create a dataset from research papers.</p>
-     <p>This app uses the allenai/led-base-16384-multi_lexsum-source-long and sshleifer/distilbart-cnn-12-6 AI models.</p>
-     <p>The output file is a CSV with 3 columns: title, abstract, and content.</p>"""
  ).launch(share=True)
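If per-chunk summarization is still needed for long papers, the removed split_text generator composes naturally with the batch-style summarize_texts introduced below. A minimal sketch, assuming both functions are in scope and text holds an extracted document:

# Hypothetical bridge between the old chunking helper and the new batch API:
# split the document into 512-word chunks, summarize each, and join the parts.
chunks = list(split_text(text, chunk_size=512))
abstract = ' '.join(summarize_texts(chunks))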
+ import torch
  import re
  import pandas as pd
  from PyPDF2 import PdfReader
+ from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM
  import gradio as gr
+ import spaces
+ # Load the tokenizer and model
  led_tokenizer = AutoTokenizer.from_pretrained("allenai/led-base-16384-multi_lexsum-source-long")
  summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6", tokenizer="sshleifer/distilbart-cnn-12-6", framework="pt")

+ # Load the model separately
+ model = AutoModelForSeq2SeqLM.from_pretrained("allenai/led-base-16384-multi_lexsum-source-long")
+
+ # Move the model to CUDA if available
+ if torch.cuda.is_available():
+     model = model.to("cuda")
+
+ # Text-classification pipeline required by classify_texts below
+ classifier = pipeline("text-classification", model="allenai/led-base-16384-multi_lexsum-source-long", tokenizer=led_tokenizer, framework="pt")
  # Function to clean text by keeping only alphanumeric characters and spaces
  def clean_text(text):
      return re.sub(r'[^a-zA-Z0-9\s]', '', text)
+
  # Function to extract text from PDF files
  def extract_text(pdf_file):
      try:
+         with open(pdf_file, 'rb') as file:
+             pdf_reader = PdfReader(file)
+             if pdf_reader.is_encrypted:
+                 print(f"Skipping encrypted file: {pdf_file}")
+                 return None
+             return ' '.join(page.extract_text() or '' for page in pdf_reader.pages)
      except Exception as e:
          print(f"Error extracting text from {pdf_file}: {e}")
          return None
+ # Function to classify text using LED model in batches (not called in this revision)
+ def classify_texts(texts):
+     return [classifier(text)[0]["label"] for text in texts]

+ # Function to summarize text using the summarizer model in batches
+ @spaces.GPU
+ def summarize_texts(texts):
+     return [summarizer(text, max_length=100, min_length=30, do_sample=False)[0]['summary_text'] for text in texts]
  # Function to extract a title-like summary from the beginning of the text
+ @spaces.GPU
+ def extract_title(text):
+     return summarizer(text, max_length=20, min_length=5, do_sample=False)[0]['summary_text']
+ # Function to process PDF files
+ @spaces.GPU
  def process_files(pdf_files):
+     data = []
      for pdf_file in pdf_files:
          text = extract_text(pdf_file)
          if text is None:
              continue

+         title_text = ' '.join(text.split()[:512])  # first 512 words for title extraction
          title = extract_title(title_text)

+         # Clean the entire text at once
+         cleaned_text = clean_text(text)

+         # NOTE: very long documents can exceed the summarizer's 1024-token input limit
+         data.append([title, summarize_texts([cleaned_text])[0], cleaned_text])

      df = pd.DataFrame(data, columns=['Title', 'Abstract', 'Content'])
      output_file_path = 'processed_pdfs.csv'
      df.to_csv(output_file_path, index=False)
      return output_file_path
+
  # Gradio interface
+ pdf_input = gr.File(label="Upload PDF Files", file_types=[".pdf"], file_count="multiple")
+ csv_output = gr.File(label="Download CSV")

  gr.Interface(
+     fn=process_files,
+     inputs=pdf_input,
      outputs=csv_output,
      title="Dataset creation",
      description="Upload PDF files and get a summarized CSV file.",
+     article="""<p>This app creates a dataset from research papers using AI models.</p>
+     <p>It uses models for classification and summarization to extract titles, abstracts, and content from PDFs.</p>"""
  ).launch(share=True)
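For a quick sanity check outside the Gradio UI, process_files can be called directly with a list of PDF paths; it returns the path of the generated CSV. A minimal sketch, assuming the dependencies above are installed; the file names are hypothetical placeholders:

# Hypothetical local smoke test for the updated pipeline
# (assumes paper1.pdf and paper2.pdf exist in the working directory).
if __name__ == "__main__":
    csv_path = process_files(["paper1.pdf", "paper2.pdf"])
    print(f"Saved dataset to {csv_path}")
    print(pd.read_csv(csv_path).head())  # Title / Abstract / Content columns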