awacke1 committed · Commit 815cce0 · verified · 1 Parent(s): e7d0fac

Update app.py

Files changed (1): app.py (+129 -77)
app.py CHANGED
@@ -10,13 +10,45 @@ import io
import asyncio
import aiohttp
import aiofiles
- from concurrent.futures import ThreadPoolExecutor
import re
from datetime import datetime
- import zipfile
import base64

def get_rank_papers(url, progress=gr.Progress(track_tqdm=True)):
    base_url = "https://paperswithcode.com"
    session = requests.Session()
    headers = {
@@ -36,6 +68,7 @@ def get_rank_papers(url, progress=gr.Progress(track_tqdm=True)):
        soup = BeautifulSoup(response.text, 'html.parser')
        paper_info = soup.find_all('div', class_='row infinite-item item paper-card')
        if not paper_info:
            break
        for ppr in paper_info:
            title = ppr.find('h1').text.strip()
@@ -44,15 +77,20 @@ def get_rank_papers(url, progress=gr.Progress(track_tqdm=True)):
                link = base_url + ppr.find('a')['href']
            else:
                link = ppr.find('a')['href']
-             Github_Star = ppr.find('span', class_='badge badge-secondary').text.strip().replace(',', '')
            pdf_link = ''
            try:
                response_link = session.get(link, headers=headers)
                soup_link = BeautifulSoup(response_link.text, 'html.parser')
                paper_info_link = soup_link.find_all('div', class_='paper-abstract')
                pdf_link = paper_info_link[0].find('div', class_='col-md-12').find('a')['href']
-             except:
-                 pass
            if title not in data_list:
                data_list[title] = {'link': link, 'Github Star': int(Github_Star), 'pdf_link': pdf_link.strip()}
            else:
@@ -65,24 +103,19 @@ def get_rank_papers(url, progress=gr.Progress(track_tqdm=True)):
    return data_list

def load_cached_data(cache_file):
    if os.path.exists(cache_file):
        with open(cache_file, 'r') as f:
            return json.load(f)
    return None

def save_cached_data(data, cache_file):
    with open(cache_file, 'w') as f:
        json.dump(data, f)

- def format_dataframe(data):
-     df = pd.DataFrame(data).T
-     df['title'] = df.index
-     df = df[['title', 'Github Star', 'link', 'pdf_link']]
-     df['link'] = df['link'].apply(lambda x: f'<a href="{x}" target="_blank">Link</a>')
-     df['pdf_link'] = df['pdf_link'].apply(lambda x: f'<a href="{x}" target="_blank">{x}</a>')
-     return df
-
def load_and_cache_data(url, cache_file):
    cached_data = load_cached_data(cache_file)

    if cached_data:
@@ -94,7 +127,31 @@ def load_and_cache_data(url, cache_file):
    save_cached_data(new_data, cache_file)
    return new_data

def update_display(category):
    cache_file = f"{category}_papers_cache.json"
    url = f"https://paperswithcode.com/{category}" if category != "top" else "https://paperswithcode.com/"

@@ -104,94 +161,85 @@
    return len(df), df.to_html(escape=False, index=False)

def load_all_data():
    top_count, top_html = update_display("top")
    new_count, new_html = update_display("latest")
    greatest_count, greatest_html = update_display("greatest")
    return top_count, top_html, new_count, new_html, greatest_count, greatest_html

- def safe_filename(title):
-     """Convert a string to a safe filename."""
-     return re.sub(r'[^\w\-_\. ]', '_', title)
-
- def create_date_directory():
-     """Create a directory named with the current date."""
-     date_str = datetime.now().strftime("%Y-%m-%d")
-     os.makedirs(date_str, exist_ok=True)
-     return date_str

- async def download_and_save_pdf(session, title, paper_info, directory):
    pdf_url = paper_info['pdf_link']
    if not pdf_url:
-         return f"No PDF link available for: {title}", None

    try:
        timeout = aiohttp.ClientTimeout(total=60) # 60 seconds timeout
        async with session.get(pdf_url, timeout=timeout) as response:
            if response.status != 200:
-                 return f"Failed to download PDF for {title}: HTTP {response.status}", None
            pdf_content = await response.read()

-         if len(pdf_content) < 2048: # Check if the PDF is less than 2KB
-             return f"Downloaded PDF for {title} is too small (less than 2KB). Skipping.", None

        safe_title = safe_filename(title)
-         filename = f"{safe_title}.pdf"
-         filepath = os.path.join(directory, filename)

-         async with aiofiles.open(filepath, 'wb') as f:
-             await f.write(pdf_content)

-         return f"Successfully saved: {filename}", filepath
    except asyncio.TimeoutError:
-         return f"Timeout while downloading PDF for {title}", None
    except Exception as e:
-         return f"Error saving PDF for {title}: {str(e)}", None

async def process_papers(data, directory, progress=gr.Progress()):
    async with aiohttp.ClientSession() as session:
        tasks = []
        for title, paper_info in data.items():
-             task = asyncio.ensure_future(download_and_save_pdf(session, title, paper_info, directory))
            tasks.append(task)

        results = []
        successful_downloads = []
        errors = []
        for i, task in enumerate(asyncio.as_completed(tasks), start=1):
-             result, filepath = await task
            results.append(result)
-             if filepath:
-                 successful_downloads.append(filepath)
            else:
                errors.append(result)
-             progress(i / len(tasks), f"Processed {i}/{len(tasks)} papers")

    return results, successful_downloads, errors

- def zip_directory(files_to_zip, directory):
-     """Zip the specified files."""
-     zip_filename = f"{directory}.zip"
-     with zipfile.ZipFile(zip_filename, 'w', zipfile.ZIP_DEFLATED) as zipf:
-         for file in files_to_zip:
-             zipf.write(file, os.path.relpath(file, os.path.join(directory, '..')))
-     return zip_filename
-
- def get_base64_download_link(file_path):
-     """Create a base64 download link for a file."""
-     with open(file_path, "rb") as file:
-         content = file.read()
-     b64 = base64.b64encode(content).decode()
-     return f'<a href="data:application/zip;base64,{b64}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}</a>'
-
- def get_existing_zip_links():
-     """Get download links for existing zip files."""
-     links = []
-     for file in os.listdir('.'):
-         if file.endswith('.zip') and os.path.isfile(file):
-             links.append(get_base64_download_link(file))
-     return "<br>".join(links)
-
def download_all_papers(progress=gr.Progress()):
    all_data = {}
    for category in ["top", "latest", "greatest"]:
        cache_file = f"{category}_papers_cache.json"
@@ -202,20 +250,21 @@ def download_all_papers(progress=gr.Progress()):
    date_directory = create_date_directory()
    results, successful_downloads, errors = asyncio.run(process_papers(all_data, date_directory, progress))

-     if successful_downloads:
-         zip_file = zip_directory(successful_downloads, date_directory)
-         download_link = get_base64_download_link(zip_file)
-     else:
-         download_link = "No papers were successfully downloaded."

-     existing_links = get_existing_zip_links()

-     summary = f"Papers processed: {len(all_data)}\n"
-     summary += f"Successfully downloaded: {len(successful_downloads)}\n"
-     summary += f"Errors: {len(errors)}\n\n"
-     summary += "Error List:\n" + "\n".join(errors) if errors else "No errors occurred."
-
-     return summary, f"{download_link}<br><br>Previous downloads:<br>{existing_links}"

with gr.Blocks() as demo:
    gr.Markdown("<h1><center>Papers Leaderboard</center></h1>")
@@ -241,10 +290,13 @@ with gr.Blocks() as demo:
    download_button = gr.Button("📚 Download All Papers", variant="primary")
    download_output = gr.Textbox(label="Download Status")
    download_links = gr.HTML(label="Download Links")
-     download_button.click(fn=download_all_papers, inputs=None, outputs=[download_output, download_links])

    # Load initial data for all tabs
    demo.load(fn=load_all_data, outputs=[top_count, top_html, new_count, new_html, greatest_count, greatest_html])

- # Launch the Gradio interface with a public link
- demo.launch(share=True)

import asyncio
import aiohttp
import aiofiles
import re
from datetime import datetime
import base64

+ # 🧙‍♂️ Magical Utility Functions 🧙‍♂️
+
+ def safe_filename(title):
+     """Convert a string to a safe filename. No more 'file not found' nightmares! 🙅‍♂️📁"""
+     return re.sub(r'[^\w\-_\. ]', '_', title)
+
+ def create_date_directory():
+     """Create a directory named with the current date. It's like a time capsule for your downloads! 🗓️📦"""
+     date_str = datetime.now().strftime("%Y-%m-%d")
+     os.makedirs(date_str, exist_ok=True)
+     return date_str
+
+ def get_base64_download_link(content, filename):
+     """Create a base64 download link for text content. It's like teleportation for your files! 🌟📲"""
+     b64 = base64.b64encode(content.encode()).decode()
+     return f'<a href="data:text/plain;base64,{b64}" download="{filename}">Download {filename}</a>'
+
+ # 🎬 Animated Banner Messages 🎬
+ def animated_banner(message, emoji):
+     """Create an animated banner message. It's like a tiny parade for your console! 🎉🚩"""
+     frames = [
+         f"╔════ {emoji} ════╗\n║ {message:^16} ║\n╚═════════════╝",
+         f"╔════ {emoji} ════╗\n║ {message:^16} ║\n╚═════════════╝",
+         f"╔════{emoji}════╗\n║ {message:^14} ║\n╚══════════╝",
+         f"╔═══{emoji}═══╗\n║ {message:^12} ║\n╚════════╝",
+         f"╔══{emoji}══╗\n║ {message:^10} ║\n╚══════╝",
+         f"╔═{emoji}═╗\n║ {message:^8} ║\n╚════╝",
+         f"╔{emoji}╗\n║ {message:^6} ║\n╚══╝",
+     ]
+     return frames
+
+ # 🕵️‍♂️ Data Fetching and Caching Shenanigans 🕵️‍♂️
+
def get_rank_papers(url, progress=gr.Progress(track_tqdm=True)):
+     """Fetch papers from the interwebs. It's like fishing, but for knowledge! 🎣📚"""
    base_url = "https://paperswithcode.com"
    session = requests.Session()
    headers = {
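
A minimal standalone sketch of the data-URI pattern that the new get_base64_download_link helper relies on (the sample text and filename below are invented for illustration):

import base64

def get_base64_download_link(content, filename):
    # Same idea as the helper added above: embed the text in a data: URI anchor.
    b64 = base64.b64encode(content.encode()).decode()
    return f'<a href="data:text/plain;base64,{b64}" download="{filename}">Download {filename}</a>'

print(get_base64_download_link("hello papers", "sample_paper.txt"))
# <a href="data:text/plain;base64,aGVsbG8gcGFwZXJz" download="sample_paper.txt">Download sample_paper.txt</a>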
 
        soup = BeautifulSoup(response.text, 'html.parser')
        paper_info = soup.find_all('div', class_='row infinite-item item paper-card')
        if not paper_info:
+             print("No paper information found.")
            break
        for ppr in paper_info:
            title = ppr.find('h1').text.strip()
 
                link = base_url + ppr.find('a')['href']
            else:
                link = ppr.find('a')['href']
+
+             Github_Star = ppr.find('span', class_='badge badge-secondary').text.strip().replace(',', '') if ppr.find('span', class_='badge badge-secondary') else "0"
+
            pdf_link = ''
            try:
                response_link = session.get(link, headers=headers)
                soup_link = BeautifulSoup(response_link.text, 'html.parser')
                paper_info_link = soup_link.find_all('div', class_='paper-abstract')
                pdf_link = paper_info_link[0].find('div', class_='col-md-12').find('a')['href']
+             except Exception as e:
+                 print(f"Failed to retrieve PDF link for {title}: {e}")
+
+             print(f"Title: {title}, Link: {link}, Github Star: {Github_Star}, PDF Link: {pdf_link}")
+
            if title not in data_list:
                data_list[title] = {'link': link, 'Github Star': int(Github_Star), 'pdf_link': pdf_link.strip()}
            else:
 
    return data_list

def load_cached_data(cache_file):
+     """Load cached data. It's like finding money in your old jeans! 💰🧵"""
    if os.path.exists(cache_file):
        with open(cache_file, 'r') as f:
            return json.load(f)
    return None

def save_cached_data(data, cache_file):
+     """Save data to cache. Future you will thank present you! 🦸‍♂️🕰️"""
    with open(cache_file, 'w') as f:
        json.dump(data, f)

def load_and_cache_data(url, cache_file):
+     """Load data from cache or fetch new data. It's like a time machine for your data! ⏰🔄"""
    cached_data = load_cached_data(cache_file)

    if cached_data:
 
    save_cached_data(new_data, cache_file)
    return new_data

+ # 📊 Data Processing and Display Magic 📊
+
+ def format_dataframe(data):
+     """Format data into a pretty DataFrame. It's like giving your data a makeover! 💅📈"""
+     if not data:
+         print("No data found to format.")
+         return pd.DataFrame()
+
+     df = pd.DataFrame(data).T
+     df['title'] = df.index
+
+     # Check if required columns are present
+     if 'Github Star' in df.columns and 'link' in df.columns and 'pdf_link' in df.columns:
+         df = df[['title', 'Github Star', 'link', 'pdf_link']]
+         df = df.sort_values(by='Github Star', ascending=False)
+         df['link'] = df['link'].apply(lambda x: f'<a href="{x}" target="_blank">Link</a>')
+         df['pdf_link'] = df['pdf_link'].apply(lambda x: f'<a href="{x}" target="_blank">{x}</a>')
+     else:
+         print("Required columns are missing in the dataframe.")
+         print(f"Columns available: {df.columns}")
+
+     return df
+
def update_display(category):
+     """Update the display for a category. Freshen up your data like it's spring cleaning! 🧹🌸"""
    cache_file = f"{category}_papers_cache.json"
    url = f"https://paperswithcode.com/{category}" if category != "top" else "https://paperswithcode.com/"
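
Illustrative only: the dict shape that format_dataframe expects, mirroring how data_list is built in get_rank_papers above (the paper title and values here are made up):

import pandas as pd

data = {
    "Example Paper": {
        "link": "https://paperswithcode.com/paper/example",
        "Github Star": 123,
        "pdf_link": "https://example.org/example.pdf",
    }
}

df = pd.DataFrame(data).T          # one row per paper, keyed by title
df["title"] = df.index
df = df[["title", "Github Star", "link", "pdf_link"]]
print(df.to_html(escape=False, index=False))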
 
 
    return len(df), df.to_html(escape=False, index=False)

def load_all_data():
+     """Load data for all categories. It's like a buffet for your brain! 🧠🍽️"""
    top_count, top_html = update_display("top")
    new_count, new_html = update_display("latest")
    greatest_count, greatest_html = update_display("greatest")
    return top_count, top_html, new_count, new_html, greatest_count, greatest_html

+ # 🚀 Asynchronous Paper Processing Wizardry 🚀

+ async def download_and_process_pdf(session, title, paper_info, directory):
+     """Download and process a PDF. It's like turning lead into gold, but with papers! 📜✨"""
    pdf_url = paper_info['pdf_link']
    if not pdf_url:
+         return f"🚫 No PDF link for: {title}. It's playing hide and seek! 🙈", None, None

    try:
        timeout = aiohttp.ClientTimeout(total=60) # 60 seconds timeout
        async with session.get(pdf_url, timeout=timeout) as response:
            if response.status != 200:
+                 return f"🚨 Failed to grab PDF for {title}: HTTP {response.status}. The internet gremlins strike again! 👹", None, None
            pdf_content = await response.read()

+         file_length = len(pdf_content)
+         if file_length < 5000: # Check if the PDF is less than 5KB
+             return f"🐜 PDF for {title} is tiny ({file_length} bytes). It's like a paper for ants! 🐜📄", None, None
+
+         # Convert PDF to text
+         pdf_file = io.BytesIO(pdf_content)
+         pdf_reader = PyPDF2.PdfReader(pdf_file)
+         text = ""
+         for page in pdf_reader.pages:
+             text += page.extract_text()
+
+         if len(text) < 5000: # Check if the extracted text is less than 5KB
+             return f"📉 Extracted text for {title} is too small ({len(text)} characters). It's not you, it's the PDF! 💔", None, None

        safe_title = safe_filename(title)
+         txt_filename = f"{safe_title}.txt"
+         txt_filepath = os.path.join(directory, txt_filename)

+         async with aiofiles.open(txt_filepath, 'w', encoding='utf-8') as f:
+             await f.write(text)

+         return f"🎉 Successfully processed: {txt_filename} (File length: {file_length} bytes). It's alive! 🧬", txt_filepath, text
    except asyncio.TimeoutError:
+         return f"⏳ Timeout for {title}. The PDF is playing hard to get! 💃", None, None
    except Exception as e:
+         return f"💥 Oops! Error processing {title}: {str(e)}. Gremlins in the system! 🛠️", None, None

async def process_papers(data, directory, progress=gr.Progress()):
+     """Process multiple papers asynchronously. It's like juggling papers, but faster! 🤹‍♂️📚"""
    async with aiohttp.ClientSession() as session:
        tasks = []
        for title, paper_info in data.items():
+             task = asyncio.ensure_future(download_and_process_pdf(session, title, paper_info, directory))
            tasks.append(task)

        results = []
        successful_downloads = []
        errors = []
        for i, task in enumerate(asyncio.as_completed(tasks), start=1):
+             result, filepath, text = await task
            results.append(result)
+             if filepath and text:
+                 successful_downloads.append((filepath, text))
            else:
                errors.append(result)
+             progress(i / len(tasks), f"🚀 Processed {i}/{len(tasks)} papers. Science waits for no one!")
+
+         # Display animated banner
+         banner_frames = animated_banner("Processing", "📄")
+         for frame in banner_frames:
+             print(frame, end='\r')
+             await asyncio.sleep(0.1)
+         print(" " * len(banner_frames[-1]), end='\r') # Clear the last frame

    return results, successful_downloads, errors
  return results, successful_downloads, errors
240
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 

def download_all_papers(progress=gr.Progress()):
+     """Download and process all papers. It's like hosting a paper party, and everyone's invited! 🎉📚"""
    all_data = {}
    for category in ["top", "latest", "greatest"]:
        cache_file = f"{category}_papers_cache.json"
 
    date_directory = create_date_directory()
    results, successful_downloads, errors = asyncio.run(process_papers(all_data, date_directory, progress))

+     summary = f"📊 Papers processed: {len(all_data)} (We're basically librarians now!)\n"
+     summary += f"✅ Successfully downloaded and converted: {len(successful_downloads)} (Take that, PDF gremlins!)\n"
+     summary += f"❌ Errors: {len(errors)} (Even superheroes have off days)\n\n"
+     summary += "🚨 Error List (AKA 'The Wall of Shame'):\n" + "\n".join(errors) if errors else "No errors occurred. It's a miracle! 🙌"

+     download_links = []
+     text_contents = []
+     for filepath, text in successful_downloads:
+         filename = os.path.basename(filepath)
+         download_links.append(get_base64_download_link(text, filename))
+         text_contents.append(f"--- {filename} ---\n\n{text[:1000]}... (There's more, but we don't want to spoil the ending! 📚🔮)\n\n")

+     return summary, "<br>".join(download_links), "\n".join(text_contents)
+
+ # 🎭 Gradio Interface: Where the Magic Happens 🎭

with gr.Blocks() as demo:
    gr.Markdown("<h1><center>Papers Leaderboard</center></h1>")
 
    download_button = gr.Button("📚 Download All Papers", variant="primary")
    download_output = gr.Textbox(label="Download Status")
    download_links = gr.HTML(label="Download Links")
+     # Updated this to gr.Code with Python as language for displaying paper contents
+     text_output = gr.Code(label="Paper Contents", language="python")
+     download_button.click(fn=download_all_papers, inputs=None, outputs=[download_output, download_links, text_output])

    # Load initial data for all tabs
    demo.load(fn=load_all_data, outputs=[top_count, top_html, new_count, new_html, greatest_count, greatest_html])

+ # 🚀 Launch the Gradio interface with a public link
+ print("🎭 Launching the Papers Leaderboard! Get ready for a wild ride through the land of academia! 🎢📚")
+ demo.launch(share=True)
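
A minimal sketch (not the app itself) of the three-output wiring this commit moves to, where one click handler feeds a Textbox, an HTML pane, and a gr.Code pane:

import gradio as gr

def fake_download():
    # Stand-in for download_all_papers: returns (status text, links HTML, paper contents).
    return "Papers processed: 0", "<b>no links yet</b>", "# extracted paper text would appear here"

with gr.Blocks() as sketch:
    btn = gr.Button("Download")
    status = gr.Textbox(label="Download Status")
    links = gr.HTML(label="Download Links")
    contents = gr.Code(label="Paper Contents", language="python")
    btn.click(fn=fake_download, inputs=None, outputs=[status, links, contents])

if __name__ == "__main__":
    sketch.launch()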