import requests
from bs4 import BeautifulSoup
import pandas as pd
import gradio as gr
import time
import os
import json
import re
from datetime import datetime
import torch
from transformers import AutoTokenizer, AutoModel
import networkx as nx
from pyvis.network import Network
import matplotlib.pyplot as plt

# 🧙‍♂️ Magical Utility Functions 🧙‍♂️

def safe_filename(title):
    """Convert a string to a safe filename. No more 'file not found' nightmares! πŸ™…β€β™‚οΈπŸ“"""
    return re.sub(r'[^\w\-_\. ]', '_', title)
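
# Example: safe_filename("GPT-4: What's Next?") -> "GPT-4_ What_s Next_"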

# 🕵️‍♂️ Data Fetching and Caching Shenanigans 🕵️‍♂️

def get_rank_papers(url, progress=gr.Progress(track_tqdm=True)):
    """Fetch papers from the interwebs. It's like fishing, but for knowledge! πŸŽ£πŸ“š"""
    base_url = "https://paperswithcode.com"
    session = requests.Session()
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
        'Cache-Control': 'no-cache'
    }
    print("Time run at : ", time.ctime())
    offset = 0
    data_list = {}
    break_duplicate = 10
    
    while True:
        response = session.get(url, headers=headers, params={'page': offset})
        if response.status_code != 200:
            print('Failed to retrieve data')
            break
        soup = BeautifulSoup(response.text, 'html.parser')
        paper_info = soup.find_all('div', class_='row infinite-item item paper-card')
        if not paper_info:
            print("No paper information found.")
            break
        for ppr in paper_info:
            title_tag = ppr.find('h1')
            link_tag = ppr.find('a')
            if title_tag is None or link_tag is None:
                continue  # skip malformed cards instead of crashing on a missing tag
            title = title_tag.text.strip()

            if "paper" in link_tag['href']:
                link = base_url + link_tag['href']
            else:
                link = link_tag['href']
            
            star_badge = ppr.find('span', class_='badge badge-secondary')
            github_star = int(star_badge.text.strip().replace(',', '')) if star_badge else 0

            if title not in data_list:
                data_list[title] = {'link': link, 'Github Star': github_star, 'title': title}
            else:
                break_duplicate -= 1
                if break_duplicate == 0:
                    return data_list
        offset += 1
        # gr.Progress is callable but has no .update() method; report pages
        # as an open-ended (count, total=None) tuple
        progress((offset, None))
    print('Data retrieval complete')
    return data_list

def load_cached_data(cache_file):
    """Load cached data. It's like finding money in your old jeans! πŸ’°πŸ§΅"""
    if os.path.exists(cache_file):
        with open(cache_file, 'r') as f:
            return json.load(f)
    return None

def save_cached_data(data, cache_file):
    """Save data to cache. Future you will thank present you! πŸ¦Έβ€β™‚οΈπŸ•°οΈ"""
    with open(cache_file, 'w') as f:
        json.dump(data, f)

def load_and_cache_data(url, cache_file):
    """Load data from cache or fetch new data. It's like a time machine for your data! β°πŸ”„"""
    cached_data = load_cached_data(cache_file)
    
    if cached_data:
        print(f"Loading cached data from {cache_file}")
        return cached_data
    
    print(f"Fetching new data from {url}")
    new_data = get_rank_papers(url)
    save_cached_data(new_data, cache_file)
    return new_data
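
# 📊 Leaderboard Display Helpers 📊

# NOTE: update_display() is referenced by the Gradio UI below but was missing
# from this file. This is a minimal sketch: the per-category paperswithcode
# URLs and the HTML table rendering are assumptions, not the original code.
def update_display(category):
    """Fetch (or load cached) papers for one category and render an HTML table."""
    urls = {
        "top": "https://paperswithcode.com/",
        "latest": "https://paperswithcode.com/latest",
        "greatest": "https://paperswithcode.com/greatest",
    }
    cache_file = f"{category}_papers_cache.json"
    data = load_and_cache_data(urls[category], cache_file)
    if not data:
        return "0 papers fetched", "<p>No papers found.</p>"

    # Sort by stars and link each title to its paper page
    df = pd.DataFrame(data.values()).sort_values('Github Star', ascending=False)
    df['title'] = df.apply(lambda r: f"<a href='{r['link']}' target='_blank'>{r['title']}</a>", axis=1)
    html = df[['title', 'Github Star']].to_html(escape=False, index=False)
    return f"{len(data)} papers fetched", html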

# 🚀 Transformer-based Word and Context Analysis 🚀

def generate_embeddings(titles):
    """Generate word embeddings using a transformer model."""
    model_name = "sentence-transformers/all-MiniLM-L6-v2"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModel.from_pretrained(model_name)
    model.eval()  # inference only; disables dropout

    embeddings = []
    with torch.no_grad():
        for title in titles:
            tokens = tokenizer(title, return_tensors="pt", padding=True, truncation=True)
            output = model(**tokens)
            # Mean-pool the token embeddings into one vector per title
            embeddings.append(output.last_hidden_state.mean(dim=1).squeeze())
    
    return embeddings

def build_graph(titles, embeddings, threshold=0.7):
    """Build a graph of words based on similarity between titles."""
    G = nx.Graph()

    for i, title in enumerate(titles):
        G.add_node(i, label=title)

    for i in range(len(embeddings)):
        for j in range(i+1, len(embeddings)):
            sim = torch.cosine_similarity(embeddings[i], embeddings[j], dim=0).item()
            if sim > threshold:
                G.add_edge(i, j, weight=sim)

    return G
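
# Quick sanity check (hypothetical titles; exact similarity scores will vary):
#   titles = ["Attention Is All You Need", "BERT: Pre-training of Deep Bidirectional Transformers"]
#   G = build_graph(titles, generate_embeddings(titles))
#   print(G.number_of_nodes(), G.number_of_edges())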

def visualize_graph(G, titles):
    """Visualize the graph using pyvis and save it as an interactive mind map."""
    net = Network(height="750px", width="100%")

    for node_id, _ in G.nodes(data=True):
        net.add_node(node_id, label=titles[node_id])

    for u, v, attrs in G.edges(data=True):
        net.add_edge(u, v, value=attrs['weight'])

    # save_graph() writes the HTML file directly; show() with notebook=True
    # tries to use a notebook renderer and fails outside Jupyter
    net.save_graph("paper_network.html")
    return "paper_network.html"

def analyze_and_generate_graph(progress=gr.Progress()):
    """Analyze papers, generate embeddings, and visualize the relationship graph."""
    all_data = {}
    for category in ["top", "latest", "greatest"]:
        cache_file = f"{category}_papers_cache.json"
        data = load_cached_data(cache_file)
        if data:
            all_data.update(data)
    
    titles = [paper['title'] for paper in all_data.values()]
    if not titles:
        return "⚠️ No cached papers found. Refresh a leaderboard tab first.", ""

    # Generate embeddings
    embeddings = generate_embeddings(titles)

    # Build a similarity graph based on the embeddings
    G = build_graph(titles, embeddings)

    # Visualize the graph as a mind map
    graph_file = visualize_graph(G, titles)

    # gr.HTML expects markup, not a filename, so return the file's contents
    with open(graph_file, 'r', encoding='utf-8') as f:
        graph_html = f.read()

    summary = f"📊 Papers analyzed: {len(titles)}\n"
    summary += "✅ Graph generated and visualized.\n"

    return summary, graph_html

# 🚀 Load Data for All Tabs 🚀

def load_all_data():
    """Load data for all categories and prepare for display."""
    top_count, top_html = update_display("top")
    new_count, new_html = update_display("latest")
    greatest_count, greatest_html = update_display("greatest")
    return top_count, top_html, new_count, new_html, greatest_count, greatest_html

# 🎭 Gradio Interface: Where the Magic Happens 🎭

with gr.Blocks() as demo:
    gr.Markdown("<h1><center>Papers Leaderboard with Context Analysis</center></h1>")
    
    with gr.Tab("Top Trending Papers"):
        top_count = gr.Textbox(label="Number of Papers Fetched")
        top_html = gr.HTML()
        top_button = gr.Button("Refresh Leaderboard")
        top_button.click(fn=lambda: update_display("top"), inputs=None, outputs=[top_count, top_html])
    
    with gr.Tab("New Papers"):
        new_count = gr.Textbox(label="Number of Papers Fetched")
        new_html = gr.HTML()
        new_button = gr.Button("Refresh Leaderboard")
        new_button.click(fn=lambda: update_display("latest"), inputs=None, outputs=[new_count, new_html])
    
    with gr.Tab("Greatest Papers"):
        greatest_count = gr.Textbox(label="Number of Papers Fetched")
        greatest_html = gr.HTML()
        greatest_button = gr.Button("Refresh Leaderboard")
        greatest_button.click(fn=lambda: update_display("greatest"), inputs=None, outputs=[greatest_count, greatest_html])

    analyze_button = gr.Button("🔍 Analyze and Generate Graph", variant="primary")
    analyze_output = gr.Textbox(label="Analysis Status")
    graph_output = gr.HTML(label="Graph Visualization")
    
    analyze_button.click(fn=analyze_and_generate_graph, inputs=None, outputs=[analyze_output, graph_output])

    # Load initial data for all tabs
    demo.load(fn=load_all_data, outputs=[top_count, top_html, new_count, new_html, greatest_count, greatest_html])

# 🚀 Launch the Gradio interface with a public link
print("🎭 Launching the Papers Leaderboard with Context Analysis! Get ready to explore the relationships between papers! 🎒📚")
demo.launch(share=True)