awacke1 committed
Commit d233a2c · verified · 1 Parent(s): f2c0706

Update app.py

Files changed (1)
  1. app.py +79 -7
app.py CHANGED
@@ -10,25 +10,97 @@ import io
 import markdown
 
 def get_rank_papers(url, progress=gr.Progress(track_tqdm=True)):
-    # ... (existing code remains the same)
+    base_url = "https://paperswithcode.com"
+    session = requests.Session()
+    headers = {
+        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
+        'Cache-Control': 'no-cache'
+    }
+    print("Time run at : ", time.ctime())
+    offset = 0
+    data_list = {}
+    break_duplicate = 10
+
+    while True:
+        response = session.get(url, headers=headers, params={'page': offset})
+        if response.status_code != 200:
+            print('Failed to retrieve data')
+            break
+        soup = BeautifulSoup(response.text, 'html.parser')
+        paper_info = soup.find_all('div', class_='row infinite-item item paper-card')
+        if not paper_info:
+            break
+        for ppr in paper_info:
+            title = ppr.find('h1').text.strip()
+
+            if "paper" in ppr.find('a')['href']:
+                link = base_url + ppr.find('a')['href']
+            else:
+                link = ppr.find('a')['href']
+            Github_Star = ppr.find('span', class_='badge badge-secondary').text.strip().replace(',', '')
+            pdf_link = ''
+            try:
+                response_link = session.get(link, headers=headers)
+                soup_link = BeautifulSoup(response_link.text, 'html.parser')
+                paper_info_link = soup_link.find_all('div', class_='paper-abstract')
+                pdf_link = paper_info_link[0].find('div', class_='col-md-12').find('a')['href']
+            except:
+                pass
+            if title not in data_list:
+                data_list[title] = {'link': link, 'Github Star': int(Github_Star), 'pdf_link': pdf_link.strip()}
+            else:
+                break_duplicate -= 1
+                if break_duplicate == 0:
+                    return data_list
+        offset += 1
+        progress.update(offset)
+    print('Data retrieval complete')
+    return data_list
 
 def load_cached_data(cache_file):
-    # ... (existing code remains the same)
+    if os.path.exists(cache_file):
+        with open(cache_file, 'r') as f:
+            return json.load(f)
+    return None
 
 def save_cached_data(data, cache_file):
-    # ... (existing code remains the same)
+    with open(cache_file, 'w') as f:
+        json.dump(data, f)
 
 def format_dataframe(data):
-    # ... (existing code remains the same)
+    df = pd.DataFrame(data).T
+    df['title'] = df.index
+    df = df[['title', 'Github Star', 'link', 'pdf_link']]
+    df['link'] = df['link'].apply(lambda x: f'<a href="{x}" target="_blank">Link</a>')
+    df['pdf_link'] = df['pdf_link'].apply(lambda x: f'<a href="{x}" target="_blank">{x}</a>')
+    return df
 
 def load_and_cache_data(url, cache_file):
-    # ... (existing code remains the same)
+    cached_data = load_cached_data(cache_file)
+
+    if cached_data:
+        print(f"Loading cached data from {cache_file}")
+        return cached_data
+
+    print(f"Fetching new data from {url}")
+    new_data = get_rank_papers(url)
+    save_cached_data(new_data, cache_file)
+    return new_data
 
 def update_display(category):
-    # ... (existing code remains the same)
+    cache_file = f"{category}_papers_cache.json"
+    url = f"https://paperswithcode.com/{category}" if category != "top" else "https://paperswithcode.com/"
+
+    data = load_and_cache_data(url, cache_file)
+    df = format_dataframe(data)
+
+    return len(df), df.to_html(escape=False, index=False)
 
 def load_all_data():
-    # ... (existing code remains the same)
+    top_count, top_html = update_display("top")
+    new_count, new_html = update_display("latest")
+    greatest_count, greatest_html = update_display("greatest")
+    return top_count, top_html, new_count, new_html, greatest_count, greatest_html
 
 def download_and_convert_pdfs(data):
     consolidated_text = ""
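
A quick way to sanity-check the helpers this commit adds, without launching the Gradio app, is a cache round trip. The sketch below is hypothetical and not part of the commit; it assumes app.py's existing imports (os, json, pandas as pd) and the functions defined in the diff above, and demo_cache.json is an illustrative filename.

# Hypothetical smoke test for the new cache helpers (not part of this commit).
sample = {
    "Example Paper": {
        "link": "https://paperswithcode.com/paper/example",  # assumed entry shape, per get_rank_papers
        "Github Star": 123,
        "pdf_link": "",
    }
}
save_cached_data(sample, "demo_cache.json")            # writes the dict to disk as JSON
assert load_cached_data("demo_cache.json") == sample   # reads it back unchanged
# Render the same HTML table that update_display would return for this data.
print(format_dataframe(sample).to_html(escape=False, index=False))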