Create app.py
app.py
ADDED
@@ -0,0 +1,137 @@
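"""Papers Leaderboard: a Gradio app that scrapes paperswithcode.com
leaderboards, caches each category to a local JSON file, and renders
the results as HTML tables in three tabs."""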
import requests
from bs4 import BeautifulSoup
import pandas as pd
import gradio as gr
import time
import os
import json
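# --- Scraper ------------------------------------------------------------------
# Walks the paginated leaderboard, pulls title/link/GitHub stars from each
# paper card, and follows each paper page to find its PDF link. Stops when a
# page returns no cards, a request fails, or ten duplicate titles have been
# seen.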
def get_rank_papers(url, progress=gr.Progress(track_tqdm=True)):
    base_url = "https://paperswithcode.com"

    session = requests.Session()
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
        'Cache-Control': 'no-cache'
    }
    print("Run started at:", time.ctime())
    offset = 0
    data_list = {}
    break_duplicate = 10

    while True:
        response = session.get(url, headers=headers, params={'page': offset})
        if response.status_code != 200:
            print('Failed to retrieve data')
            break
        soup = BeautifulSoup(response.text, 'html.parser')
        paper_info = soup.find_all('div', class_='row infinite-item item paper-card')
        if not paper_info:
            break
        for ppr in paper_info:
            title = ppr.find('h1').text.strip()

            # Relative paper links need the site prefix; external links are kept as-is.
            if "paper" in ppr.find('a')['href']:
                link = base_url + ppr.find('a')['href']
            else:
                link = ppr.find('a')['href']
            github_star = ppr.find('span', class_='badge badge-secondary').text.strip().replace(',', '')
            pdf_link = ''
            try:
                # Follow the paper page to pick up the PDF link from the abstract block.
                response_link = session.get(link, headers=headers)
                soup_link = BeautifulSoup(response_link.text, 'html.parser')
                paper_info_link = soup_link.find_all('div', class_='paper-abstract')
                pdf_link = paper_info_link[0].find('div', class_='col-md-12').find('a')['href']
            except Exception:
                pass
            if title not in data_list:
                data_list[title] = {'link': link, 'Github Star': int(github_star), 'pdf_link': pdf_link.strip()}
            else:
                break_duplicate -= 1
                if break_duplicate == 0:
                    return data_list
        offset += 1
        progress.update(offset)
    print('Data retrieval complete')
    return data_list
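# --- Caching ------------------------------------------------------------------
# Scraping every page is slow, so each category's results are persisted to a
# JSON file and reused on later requests.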
def load_cached_data(cache_file):
    if os.path.exists(cache_file):
        with open(cache_file, 'r') as f:
            return json.load(f)
    return None

def save_cached_data(data, cache_file):
    with open(cache_file, 'w') as f:
        json.dump(data, f)

def format_dataframe(data):
    # One row per paper; links become HTML anchors so they are clickable
    # in the rendered table.
    df = pd.DataFrame(data).T
    df['title'] = df.index
    df = df[['title', 'Github Star', 'link', 'pdf_link']]
    df['link'] = df['link'].apply(lambda x: f'<a href="{x}" target="_blank">Link</a>')
    df['pdf_link'] = df['pdf_link'].apply(lambda x: f'<a href="{x}" target="_blank">{x}</a>')
    return df

def load_and_cache_data(url, cache_file):
    cached_data = load_cached_data(cache_file)

    if cached_data:
        print(f"Loading cached data from {cache_file}")
        return cached_data

    print(f"Fetching new data from {url}")
    new_data = get_rank_papers(url)
    save_cached_data(new_data, cache_file)
    return new_data

def update_display(category):
    cache_file = f"{category}_papers_cache.json"
    url = f"https://paperswithcode.com/{category}" if category != "top" else "https://paperswithcode.com/"

    data = load_and_cache_data(url, cache_file)
    df = format_dataframe(data)

    return len(df), df.to_html(escape=False, index=False)
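# --- Gradio UI ------------------------------------------------------------------
# One tab per leaderboard category; each tab pairs a paper count with an
# HTML table of links.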
with gr.Blocks() as demo:
    gr.Markdown("<h1><center>Papers Leaderboard</center></h1>")

    with gr.Tab("Top Trending Papers"):
        top_output = [gr.Textbox(label="Number of Papers Fetched"),
                      gr.HTML()]
        top_button = gr.Button("Refresh Leaderboard")
        top_button.click(fn=lambda: update_display("top"), inputs=None, outputs=top_output)

    with gr.Tab("New Papers"):
        new_output = [gr.Textbox(label="Number of Papers Fetched"),
                      gr.HTML()]
        new_button = gr.Button("Refresh Leaderboard")
        new_button.click(fn=lambda: update_display("latest"), inputs=None, outputs=new_output)

    with gr.Tab("Greatest Papers"):
        greatest_output = [gr.Textbox(label="Number of Papers Fetched"),
                           gr.HTML()]
        greatest_button = gr.Button("Refresh Leaderboard")
        greatest_button.click(fn=lambda: update_display("greatest"), inputs=None, outputs=greatest_output)

    # Load initial data for all tabs. Gradio expects a flat list of output
    # components and one return value per component, so the three
    # (count, table) pairs are flattened.
    def load_all_tabs():
        results = []
        for category in ("top", "latest", "greatest"):
            results.extend(update_display(category))
        return results

    demo.load(fn=load_all_tabs,
              inputs=None,
              outputs=top_output + new_output + greatest_output)

# Launch the Gradio interface
demo.launch()