Update app.py

app.py CHANGED
@@ -31,20 +31,6 @@ def get_base64_download_link(content, filename):
     b64 = base64.b64encode(content.encode()).decode()
     return f'<a href="data:text/plain;base64,{b64}" download="{filename}">Download {filename}</a>'
 
-# 🎬 Animated Banner Messages 🎬
-def animated_banner(message, emoji):
-    """Create an animated banner message. It's like a tiny parade for your console! 🎉🎩"""
-    frames = [
-        f"╔════ {emoji} ════╗\n║ {message:^16} ║\n╚════════════╝",
-        f"╔════ {emoji} ════╗\n║ {message:^16} ║\n╚════════════╝",
-        f"╔════{emoji}════╗\n║ {message:^14} ║\n╚══════════╝",
-        f"╔═══{emoji}═══╗\n║ {message:^12} ║\n╚════════╝",
-        f"╔══{emoji}══╗\n║ {message:^10} ║\n╚══════╝",
-        f"╔═{emoji}═╗\n║ {message:^8} ║\n╚════╝",
-        f"╔{emoji}╗\n║ {message:^6} ║\n╚══╝",
-    ]
-    return frames
-
 # 🕵️‍♂️ Data Fetching and Caching Shenanigans 🕵️‍♂️
 
 def get_rank_papers(url, progress=gr.Progress(track_tqdm=True)):
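Note: the context lines at the top of this hunk are the tail of `get_base64_download_link`, which embeds the file content directly in a `data:` URI so the browser can save it without another server round trip. A self-contained sketch of that same pattern:

```python
import base64

def get_base64_download_link(content, filename):
    # Base64-encode the text and inline it in an <a download> tag as a data: URI.
    b64 = base64.b64encode(content.encode()).decode()
    return f'<a href="data:text/plain;base64,{b64}" download="{filename}">Download {filename}</a>'

print(get_base64_download_link("hello world", "demo.txt"))
```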
@@ -68,7 +54,6 @@ def get_rank_papers(url, progress=gr.Progress(track_tqdm=True)):
         soup = BeautifulSoup(response.text, 'html.parser')
         paper_info = soup.find_all('div', class_='row infinite-item item paper-card')
         if not paper_info:
-            print("No paper information found.")
             break
         for ppr in paper_info:
             title = ppr.find('h1').text.strip()
@@ -77,20 +62,15 @@ def get_rank_papers(url, progress=gr.Progress(track_tqdm=True)):
                 link = base_url + ppr.find('a')['href']
             else:
                 link = ppr.find('a')['href']
-
-            Github_Star = ppr.find('span', class_='badge badge-secondary').text.strip().replace(',', '') if ppr.find('span', class_='badge badge-secondary') else "0"
-
+            Github_Star = ppr.find('span', class_='badge badge-secondary').text.strip().replace(',', '')
             pdf_link = ''
             try:
                 response_link = session.get(link, headers=headers)
                 soup_link = BeautifulSoup(response_link.text, 'html.parser')
                 paper_info_link = soup_link.find_all('div', class_='paper-abstract')
                 pdf_link = paper_info_link[0].find('div', class_='col-md-12').find('a')['href']
-            except
-
-
-            print(f"Title: {title}, Link: {link}, Github Star: {Github_Star}, PDF Link: {pdf_link}")
-
+            except:
+                pass
             if title not in data_list:
                 data_list[title] = {'link': link, 'Github Star': int(Github_Star), 'pdf_link': pdf_link.strip()}
             else:
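One behavior change worth flagging in this hunk: the new `Github_Star` line drops the old `if ... else "0"` fallback, so a paper card without the badge span would now raise `AttributeError` on `.text`. A minimal guarded sketch (hypothetical helper, same BeautifulSoup selectors as the app):

```python
def parse_github_star(ppr):
    # Fall back to "0" when the badge <span> is missing,
    # instead of raising AttributeError on .text.
    badge = ppr.find('span', class_='badge badge-secondary')
    return badge.text.strip().replace(',', '') if badge else "0"
```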
@@ -131,23 +111,12 @@ def load_and_cache_data(url, cache_file):
 
 def format_dataframe(data):
     """Format data into a pretty DataFrame. It's like giving your data a makeover! 💅🎀"""
-    if not data:
-        print("No data found to format.")
-        return pd.DataFrame()
-
     df = pd.DataFrame(data).T
     df['title'] = df.index
-
-
-
-
-        df = df.sort_values(by='Github Star', ascending=False)
-        df['link'] = df['link'].apply(lambda x: f'<a href="{x}" target="_blank">Link</a>')
-        df['pdf_link'] = df['pdf_link'].apply(lambda x: f'<a href="{x}" target="_blank">{x}</a>')
-    else:
-        print("Required columns are missing in the dataframe.")
-        print(f"Columns available: {df.columns}")
-
+    df = df[['title', 'Github Star', 'link', 'pdf_link']]
+    df = df.sort_values(by='Github Star', ascending=False)
+    df['link'] = df['link'].apply(lambda x: f'<a href="{x}" target="_blank">Link</a>')
+    df['pdf_link'] = df['pdf_link'].apply(lambda x: f'<a href="{x}" target="_blank">{x}</a>')
     return df
 
 def update_display(category):
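To see what the rewritten `format_dataframe` now does end to end, here is a quick sanity check with made-up sample data shaped like the app's `data_list` (titles as keys; not real papers):

```python
import pandas as pd

sample = {
    'Paper A': {'link': 'https://example.com/a', 'Github Star': 120, 'pdf_link': 'https://example.com/a.pdf'},
    'Paper B': {'link': 'https://example.com/b', 'Github Star': 450, 'pdf_link': 'https://example.com/b.pdf'},
}

df = pd.DataFrame(sample).T                             # titles become the index
df['title'] = df.index
df = df[['title', 'Github Star', 'link', 'pdf_link']]   # fix the column order first
df = df.sort_values(by='Github Star', ascending=False)  # Paper B sorts above Paper A
print(df[['title', 'Github Star']])
```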
@@ -173,18 +142,18 @@ async def download_and_process_pdf(session, title, paper_info, directory):
     """Download and process a PDF. It's like turning lead into gold, but with papers! 📄✨"""
     pdf_url = paper_info['pdf_link']
     if not pdf_url:
-        return f"
+        return f"No PDF link available for: {title}", None, None
 
     try:
         timeout = aiohttp.ClientTimeout(total=60)  # 60 seconds timeout
         async with session.get(pdf_url, timeout=timeout) as response:
             if response.status != 200:
-                return f"
+                return f"Failed to download PDF for {title}: HTTP {response.status}", None, None
             pdf_content = await response.read()
 
             file_length = len(pdf_content)
             if file_length < 5000:  # Check if the PDF is less than 5KB
-                return f"
+                return f"Downloaded PDF for {title} is too small ({file_length} bytes). Skipping.", None, None
 
             # Convert PDF to text
             pdf_file = io.BytesIO(pdf_content)
@@ -194,7 +163,7 @@ async def download_and_process_pdf(session, title, paper_info, directory):
                 text += page.extract_text()
 
             if len(text) < 5000:  # Check if the extracted text is less than 5KB
-                return f"
+                return f"Extracted text for {title} is too small ({len(text)} characters). Skipping.", None, None
 
             safe_title = safe_filename(title)
             txt_filename = f"{safe_title}.txt"
@@ -203,11 +172,11 @@ async def download_and_process_pdf(session, title, paper_info, directory):
             async with aiofiles.open(txt_filepath, 'w', encoding='utf-8') as f:
                 await f.write(text)
 
-            return f"
+            return f"Successfully processed: {txt_filename} (File length: {file_length} bytes)", txt_filepath, text
     except asyncio.TimeoutError:
-        return f"
+        return f"Timeout while downloading PDF for {title}", None, None
     except Exception as e:
-        return f"
+        return f"Error processing PDF for {title}: {str(e)}", None, None
 
 async def process_papers(data, directory, progress=gr.Progress()):
     """Process multiple papers asynchronously. It's like juggling papers, but faster! 🤹‍♂️📚"""
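After this hunk, every exit path of `download_and_process_pdf` returns a uniform `(message, filepath, text)` triple, with `filepath` set to `None` on failure. That lets the caller split successes from errors without parsing message strings; a sketch of the consuming pattern (hypothetical helper name):

```python
def split_results(results):
    # Each result is (message, filepath, text); filepath is None on failure.
    successes = [(fp, txt) for _, fp, txt in results if fp is not None]
    errors = [msg for msg, fp, _ in results if fp is None]
    return successes, errors
```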
@@ -227,14 +196,7 @@ async def process_papers(data, directory, progress=gr.Progress()):
             successful_downloads.append((filepath, text))
         else:
             errors.append(result)
-        progress(i / len(tasks), f"
-
-    # Display animated banner
-    banner_frames = animated_banner("Processing", "🚀")
-    for frame in banner_frames:
-        print(frame, end='\r')
-        await asyncio.sleep(0.1)
-    print(" " * len(banner_frames[-1]), end='\r')  # Clear the last frame
+        progress(i / len(tasks), f"Processed {i}/{len(tasks)} papers")
 
     return results, successful_downloads, errors
 
@@ -250,17 +212,17 @@ def download_all_papers(progress=gr.Progress()):
     date_directory = create_date_directory()
     results, successful_downloads, errors = asyncio.run(process_papers(all_data, date_directory, progress))
 
-    summary = f"
-    summary += f"
-    summary += f"
-    summary += "
+    summary = f"Papers processed: {len(all_data)}\n"
+    summary += f"Successfully downloaded and converted: {len(successful_downloads)}\n"
+    summary += f"Errors: {len(errors)}\n\n"
+    summary += "Error List:\n" + "\n".join(errors) if errors else "No errors occurred."
 
     download_links = []
     text_contents = []
    for filepath, text in successful_downloads:
         filename = os.path.basename(filepath)
         download_links.append(get_base64_download_link(text, filename))
-        text_contents.append(f"--- {filename} ---\n\n{text[:1000]}
+        text_contents.append(f"--- {filename} ---\n\n{text[:1000]}...\n\n")  # Show first 1000 characters
 
     return summary, "<br>".join(download_links), "\n".join(text_contents)
 
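A subtlety in the new summary code: Python's conditional expression binds looser than `+`, so the final `summary +=` line appends either the entire error block or just the fallback string. Written with explicit parentheses, it is equivalent to:

```python
summary += ("Error List:\n" + "\n".join(errors)) if errors else "No errors occurred."
```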
@@ -290,13 +252,11 @@ with gr.Blocks() as demo:
         download_button = gr.Button("📚 Download All Papers", variant="primary")
         download_output = gr.Textbox(label="Download Status")
         download_links = gr.HTML(label="Download Links")
-
-        text_output = gr.Code(label="Paper Contents", language="python")
+        text_output = gr.Code(label="Paper Contents", language="text")
         download_button.click(fn=download_all_papers, inputs=None, outputs=[download_output, download_links, text_output])
 
     # Load initial data for all tabs
     demo.load(fn=load_all_data, outputs=[top_count, top_html, new_count, new_html, greatest_count, greatest_html])
 
     # 🚀 Launch the Gradio interface with a public link
-
-demo.launch(share=True)
+demo.launch(share=True)
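Finally, on `demo.launch(share=True)`: the flag asks Gradio to tunnel the local server to a temporary public URL. On Hugging Face Spaces the app is already served publicly, so the flag is effectively redundant there. A minimal standalone sketch:

```python
import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("Hello!")

# Outside Spaces, share=True prints a temporary public URL for the local server.
demo.launch(share=True)
```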