import requests
from bs4 import BeautifulSoup
import pandas as pd
import gradio as gr
import time
import os
import json
import PyPDF2
import io
import asyncio
import aiohttp
import aiofiles

def get_rank_papers(url, progress=gr.Progress(track_tqdm=True)):
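    """Scrape paginated paper cards from paperswithcode.com into a dict keyed by title.

    Stops when a page returns no cards, or once ten duplicate titles have been
    seen (the `break_duplicate` counter), which signals the feed has looped.
    """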
    base_url = "https://paperswithcode.com"
    session = requests.Session()
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
        'Cache-Control': 'no-cache'
    }
    print("Run started at:", time.ctime())
    offset = 0
    data_list = {}
    break_duplicate = 10
    
    while True:
        response = session.get(url, headers=headers, params={'page': offset})
        if response.status_code != 200:
            print('Failed to retrieve data')
            break
        soup = BeautifulSoup(response.text, 'html.parser')
        paper_info = soup.find_all('div', class_='row infinite-item item paper-card')
        if not paper_info:
            break
        for ppr in paper_info:
            title_tag = ppr.find('h1')
            if title_tag is None:
                continue
            title = title_tag.text.strip()

            href = ppr.find('a')['href']
            # Relative hrefs need the site prefix; absolute URLs are kept as-is.
            link = base_url + href if href.startswith('/') else href

            star_badge = ppr.find('span', class_='badge badge-secondary')
            github_stars = int(star_badge.text.strip().replace(',', '') or 0) if star_badge else 0

            # Visit the paper page to pick up the PDF link from the abstract block.
            pdf_link = ''
            try:
                response_link = session.get(link, headers=headers)
                soup_link = BeautifulSoup(response_link.text, 'html.parser')
                paper_info_link = soup_link.find_all('div', class_='paper-abstract')
                pdf_link = paper_info_link[0].find('div', class_='col-md-12').find('a')['href']
            except Exception:
                pass

            if title not in data_list:
                data_list[title] = {'link': link, 'Github Star': github_stars, 'pdf_link': pdf_link.strip()}
            else:
                break_duplicate -= 1
                if break_duplicate == 0:
                    return data_list
        offset += 1
        # gr.Progress accepts a (count, total) tuple; the total is unknown while paging.
        progress((offset, None), desc=f"Fetched {offset} pages")
    print('Data retrieval complete')
    return data_list

def load_cached_data(cache_file):
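    """Return the parsed JSON cache if it exists, otherwise None."""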
    if os.path.exists(cache_file):
        with open(cache_file, 'r') as f:
            return json.load(f)
    return None

def save_cached_data(data, cache_file):
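    """Write the scraped paper data to a JSON cache file."""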
    with open(cache_file, 'w') as f:
        json.dump(data, f)

def format_dataframe(data):
    """Convert the scraped dict into an HTML-ready DataFrame with clickable links."""
    if not data:
        # Guard: an empty dict would make the column selection below raise a KeyError.
        return pd.DataFrame(columns=['title', 'Github Star', 'link', 'pdf_link'])
    df = pd.DataFrame(data).T
    df['title'] = df.index
    df = df[['title', 'Github Star', 'link', 'pdf_link']]
    df['link'] = df['link'].apply(lambda x: f'<a href="{x}" target="_blank">Link</a>')
    df['pdf_link'] = df['pdf_link'].apply(lambda x: f'<a href="{x}" target="_blank">{x}</a>')
    return df

def load_and_cache_data(url, cache_file):
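    """Serve papers from the JSON cache when present; otherwise scrape and cache them."""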
    cached_data = load_cached_data(cache_file)
    
    if cached_data:
        print(f"Loading cached data from {cache_file}")
        return cached_data
    
    print(f"Fetching new data from {url}")
    new_data = get_rank_papers(url)
    save_cached_data(new_data, cache_file)
    return new_data

def update_display(category):
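    """Fetch (or load cached) papers for a category and return (count, HTML table)."""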
    cache_file = f"{category}_papers_cache.json"
    url = f"https://paperswithcode.com/{category}" if category != "top" else "https://paperswithcode.com/"
    
    data = load_and_cache_data(url, cache_file)
    df = format_dataframe(data)
    
    return len(df), df.to_html(escape=False, index=False)

def load_all_data():
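    """Populate all three leaderboard tabs in one call, used on initial page load."""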
    top_count, top_html = update_display("top")
    new_count, new_html = update_display("latest")
    greatest_count, greatest_html = update_display("greatest")
    return top_count, top_html, new_count, new_html, greatest_count, greatest_html

async def download_and_convert_pdf(session, title, paper_info):
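    """Download one paper's PDF, extract its text with PyPDF2, and cache it as Markdown.

    Returns the cached Markdown if it already exists; on failure the error is
    embedded in the returned Markdown instead of being raised.
    """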
    pdf_url = paper_info['pdf_link']
    # Sanitize the title so path separators cannot break the cache path.
    safe_title = "".join(c if c.isalnum() or c in " _-" else "_" for c in title)
    cache_file = f"cache/{safe_title.replace(' ', '_')}.md"
    
    if os.path.exists(cache_file):
        async with aiofiles.open(cache_file, 'r') as f:
            return await f.read()
    
    if not pdf_url:
        return f"# {title}\n\nNo PDF link available.\n\n---\n\n"
    
    try:
        async with session.get(pdf_url) as response:
            response.raise_for_status()
            pdf_content = await response.read()
        
        pdf_file = io.BytesIO(pdf_content)
        pdf_reader = PyPDF2.PdfReader(pdf_file)
        text = ""
        for page in pdf_reader.pages:
            text += page.extract_text()
        
        markdown_text = f"# {title}\n\n{text}\n\n---\n\n"
        
        os.makedirs('cache', exist_ok=True)
        async with aiofiles.open(cache_file, 'w') as f:
            await f.write(markdown_text)
        
        return markdown_text
    except Exception as e:
        return f"# {title}\n\nError processing PDF: {str(e)}\n\n---\n\n"

async def process_papers(data, progress=gr.Progress()):
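    """Concurrently convert every paper's PDF to Markdown and concatenate the results.

    `asyncio.as_completed` yields results in completion order, so the
    consolidated document is not ordered by input.
    """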
    async with aiohttp.ClientSession() as session:
        tasks = []
        for title, paper_info in data.items():
            task = asyncio.ensure_future(download_and_convert_pdf(session, title, paper_info))
            tasks.append(task)
        
        consolidated_text = ""
        for i, task in enumerate(asyncio.as_completed(tasks), start=1):
            markdown_text = await task
            consolidated_text += markdown_text
            progress(i / len(tasks), f"Processed {i}/{len(tasks)} papers")
    
    return consolidated_text

def download_all_papers(progress=gr.Progress()):
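    """Merge all cached categories, convert their PDFs, and write consolidated_papers.md."""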
    all_data = {}
    for category in ["top", "latest", "greatest"]:
        cache_file = f"{category}_papers_cache.json"
        data = load_cached_data(cache_file)
        if data:
            all_data.update(data)
    
    consolidated_text = asyncio.run(process_papers(all_data, progress))
    
    with open("consolidated_papers.md", "w", encoding="utf-8") as f:
        f.write(consolidated_text)
    
    return "All papers have been downloaded and consolidated into 'consolidated_papers.md'", consolidated_text

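# Gradio UI: three leaderboard tabs plus a bulk PDF download action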
with gr.Blocks() as demo:
    gr.Markdown("<h1><center>Papers Leaderboard</center></h1>")
    
    with gr.Tab("Top Trending Papers"):
        top_count = gr.Textbox(label="Number of Papers Fetched")
        top_html = gr.HTML()
        top_button = gr.Button("Refresh Leaderboard")
        top_button.click(fn=lambda: update_display("top"), inputs=None, outputs=[top_count, top_html])
    
    with gr.Tab("New Papers"):
        new_count = gr.Textbox(label="Number of Papers Fetched")
        new_html = gr.HTML()
        new_button = gr.Button("Refresh Leaderboard")
        new_button.click(fn=lambda: update_display("latest"), inputs=None, outputs=[new_count, new_html])
    
    with gr.Tab("Greatest Papers"):
        greatest_count = gr.Textbox(label="Number of Papers Fetched")
        greatest_html = gr.HTML()
        greatest_button = gr.Button("Refresh Leaderboard")
        greatest_button.click(fn=lambda: update_display("greatest"), inputs=None, outputs=[greatest_count, greatest_html])

    download_button = gr.Button("📚 Download All Papers", variant="primary")
    download_output = gr.Textbox(label="Download Status")
    markdown_output = gr.Markdown(label="Paper Content")
    download_button.click(fn=download_all_papers, inputs=None, outputs=[download_output, markdown_output])

    # Load initial data for all tabs
    demo.load(fn=load_all_data, outputs=[top_count, top_html, new_count, new_html, greatest_count, greatest_html])

# Launch the Gradio interface with a public link
demo.launch(share=True)