import requests
from bs4 import BeautifulSoup
import pandas as pd
import gradio as gr
import time
import os
import json
import PyPDF2
import io
import asyncio
import aiohttp
import aiofiles
from concurrent.futures import ThreadPoolExecutor
import re
from datetime import datetime
import base64

# ... (keep all the previous functions up to create_date_directory)
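
# --- Hedged reconstructions (assumptions) of the elided helpers used below ---
# The original definitions are omitted above; these minimal sketches are
# inferred from the call sites in this file so the snippet can run standalone.
# update_display() and load_all_data() are also among the elided functions
# and are not reconstructed here.

def safe_filename(title):
    """Assumed helper: make a paper title safe to use as a filename."""
    return re.sub(r'[^\w\-. ]', '_', title).strip()[:100]

def create_date_directory():
    """Assumed helper: create and return a directory named after today's date."""
    directory = datetime.now().strftime("%Y-%m-%d")
    os.makedirs(directory, exist_ok=True)
    return directory

def load_cached_data(cache_file):
    """Assumed helper: load a JSON cache file if it exists, else return None."""
    if os.path.exists(cache_file):
        with open(cache_file, 'r', encoding='utf-8') as f:
            return json.load(f)
    return None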

async def download_and_process_pdf(session, title, paper_info, directory):
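    """Download one PDF, sanity-check its size, extract its text with PyPDF2,
    and write the text to a .txt file in `directory`.

    Returns a (status_message, txt_filepath, text) tuple; the last two are
    None on any failure so the caller can separate successes from errors."""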
    pdf_url = paper_info['pdf_link']
    if not pdf_url:
        return f"No PDF link available for: {title}", None, None
    
    try:
        timeout = aiohttp.ClientTimeout(total=60)  # 60-second timeout per request
        async with session.get(pdf_url, timeout=timeout) as response:
            if response.status != 200:
                return f"Failed to download PDF for {title}: HTTP {response.status}", None, None
            pdf_content = await response.read()
        
        file_length = len(pdf_content)
        if file_length < 5000:  # Reject PDFs under 5,000 bytes; likely a failed or placeholder download
            return f"Downloaded PDF for {title} is too small ({file_length} bytes). Skipping.", None, None
        
        # Convert PDF to text
        pdf_file = io.BytesIO(pdf_content)
        pdf_reader = PyPDF2.PdfReader(pdf_file)
        text = ""
        for page in pdf_reader.pages:
            text += page.extract_text()
        
        if len(text) < 5000:  # Check if the extracted text is under 5,000 characters
            return f"Extracted text for {title} is too small ({len(text)} characters). Skipping.", None, None
        
        safe_title = safe_filename(title)
        txt_filename = f"{safe_title}.txt"
        txt_filepath = os.path.join(directory, txt_filename)
        
        async with aiofiles.open(txt_filepath, 'w', encoding='utf-8') as f:
            await f.write(text)
        
        return f"Successfully processed: {txt_filename} (File length: {file_length} bytes)", txt_filepath, text
    except asyncio.TimeoutError:
        return f"Timeout while downloading PDF for {title}", None, None
    except Exception as e:
        return f"Error processing PDF for {title}: {str(e)}", None, None

async def process_papers(data, directory, progress=gr.Progress()):
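    """Fan out download_and_process_pdf() over all papers on one shared
    aiohttp session, updating the Gradio progress bar as tasks complete.

    Returns (all status messages, successful (filepath, text) pairs, errors)."""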
    async with aiohttp.ClientSession() as session:
        tasks = []
        for title, paper_info in data.items():
            task = asyncio.ensure_future(download_and_process_pdf(session, title, paper_info, directory))
            tasks.append(task)
        
        results = []
        successful_downloads = []
        errors = []
        for i, task in enumerate(asyncio.as_completed(tasks), start=1):
            result, filepath, text = await task
            results.append(result)
            if filepath and text:
                successful_downloads.append((filepath, text))
            else:
                errors.append(result)
            progress(i / len(tasks), f"Processed {i}/{len(tasks)} papers")
    
    return results, successful_downloads, errors

def get_base64_download_link(content, filename):
    """Create a base64 download link for text content."""
    b64 = base64.b64encode(content.encode()).decode()
    return f'<a href="data:text/plain;base64,{b64}" download="{filename}">Download {filename}</a>'
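
# Design note: embedding each file as a base64 data URI keeps the download
# link self-contained (no file-serving endpoint needed), at the cost of
# inflating the HTML by roughly a third per file, so it suits modest text sizes.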

def download_all_papers(progress=gr.Progress()):
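    """Merge the cached paper metadata from all three leaderboard categories,
    download and convert every paper, and build the summary, download links,
    and text previews shown in the UI."""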
    all_data = {}
    for category in ["top", "latest", "greatest"]:
        cache_file = f"{category}_papers_cache.json"
        data = load_cached_data(cache_file)
        if data:
            all_data.update(data)
    
    date_directory = create_date_directory()
    results, successful_downloads, errors = asyncio.run(process_papers(all_data, date_directory, progress))
    
    summary = f"Papers processed: {len(all_data)}\n"
    summary += f"Successfully downloaded and converted: {len(successful_downloads)}\n"
    summary += f"Errors: {len(errors)}\n\n"
    summary += "Error List:\n" + "\n".join(errors) if errors else "No errors occurred."
    
    download_links = []
    text_contents = []
    for filepath, text in successful_downloads:
        filename = os.path.basename(filepath)
        download_links.append(get_base64_download_link(text, filename))
        text_contents.append(f"--- {filename} ---\n\n{text[:1000]}...\n\n")  # Show first 1000 characters
    
    return summary, "<br>".join(download_links), "\n".join(text_contents)

with gr.Blocks() as demo:
    gr.Markdown("<h1><center>Papers Leaderboard</center></h1>")
    
    with gr.Tab("Top Trending Papers"):
        top_count = gr.Textbox(label="Number of Papers Fetched")
        top_html = gr.HTML()
        top_button = gr.Button("Refresh Leaderboard")
        top_button.click(fn=lambda: update_display("top"), inputs=None, outputs=[top_count, top_html])
    
    with gr.Tab("New Papers"):
        new_count = gr.Textbox(label="Number of Papers Fetched")
        new_html = gr.HTML()
        new_button = gr.Button("Refresh Leaderboard")
        new_button.click(fn=lambda: update_display("latest"), inputs=None, outputs=[new_count, new_html])
    
    with gr.Tab("Greatest Papers"):
        greatest_count = gr.Textbox(label="Number of Papers Fetched")
        greatest_html = gr.HTML()
        greatest_button = gr.Button("Refresh Leaderboard")
        greatest_button.click(fn=lambda: update_display("greatest"), inputs=None, outputs=[greatest_count, greatest_html])

    download_button = gr.Button("📚 Download All Papers", variant="primary")
    download_output = gr.Textbox(label="Download Status")
    download_links = gr.HTML(label="Download Links")
    text_output = gr.Code(label="Paper Contents", language=None)  # None renders plain text; "text" is not a valid gr.Code language
    download_button.click(fn=download_all_papers, inputs=None, outputs=[download_output, download_links, text_output])

    # Load initial data for all tabs
    demo.load(fn=load_all_data, outputs=[top_count, top_html, new_count, new_html, greatest_count, greatest_html])

# Launch the Gradio interface with a public link
demo.launch(share=True)