import requests
from bs4 import BeautifulSoup
import pandas as pd
import gradio as gr
import time
import os
import json
import re
import torch
from transformers import AutoTokenizer, AutoModel
import networkx as nx
from pyvis.network import Network
# 🧙‍♂️ Magical Utility Functions 🧙‍♂️
def safe_filename(title):
    """Convert a string to a safe filename. No more 'file not found' nightmares! 🙅‍♂️📁"""
    return re.sub(r'[^\w\-_\. ]', '_', title)
# 🕵️‍♂️ Data Fetching and Caching Shenanigans 🕵️‍♂️
def get_rank_papers(url, progress=gr.Progress(track_tqdm=True)):
    """Fetch papers from the interwebs. It's like fishing, but for knowledge! 🎣🐟"""
    base_url = "https://paperswithcode.com"
    session = requests.Session()
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
        'Cache-Control': 'no-cache'
    }
    print("Time run at : ", time.ctime())
    offset = 0
    data_list = {}
    break_duplicate = 10

    while True:
        response = session.get(url, headers=headers, params={'page': offset})
        if response.status_code != 200:
            print('Failed to retrieve data')
            break
        soup = BeautifulSoup(response.text, 'html.parser')
        paper_info = soup.find_all('div', class_='row infinite-item item paper-card')
        if not paper_info:
            print("No paper information found.")
            break
        for ppr in paper_info:
            title = ppr.find('h1').text.strip()
            # Paper links are relative; GitHub links are already absolute.
            if "paper" in ppr.find('a')['href']:
                link = base_url + ppr.find('a')['href']
            else:
                link = ppr.find('a')['href']
            star_badge = ppr.find('span', class_='badge badge-secondary')
            github_star = star_badge.text.strip().replace(',', '') if star_badge else "0"
            if title not in data_list:
                data_list[title] = {'link': link, 'Github Star': int(github_star), 'title': title}
            else:
                # Stop once we've seen enough duplicates — we've paged past the new results.
                break_duplicate -= 1
                if break_duplicate == 0:
                    return data_list
        offset += 1
        # gr.Progress objects are callable; report (current_page, unknown_total).
        progress((offset, None), desc="Fetching pages")
    print('Data retrieval complete')
    return data_list
def load_cached_data(cache_file):
    """Load cached data. It's like finding money in your old jeans! 💰🧵"""
    if os.path.exists(cache_file):
        with open(cache_file, 'r') as f:
            return json.load(f)
    return None

def save_cached_data(data, cache_file):
    """Save data to cache. Future you will thank present you! 🦸‍♂️🕰️"""
    with open(cache_file, 'w') as f:
        json.dump(data, f)

def load_and_cache_data(url, cache_file):
    """Load data from cache or fetch new data. It's like a time machine for your data! ⏰🚀"""
    cached_data = load_cached_data(cache_file)
    if cached_data:
        print(f"Loading cached data from {cache_file}")
        return cached_data
    print(f"Fetching new data from {url}")
    new_data = get_rank_papers(url)
    save_cached_data(new_data, cache_file)
    return new_data
# 📚 Transformer-based Word and Context Analysis 📚
def generate_embeddings(titles):
    """Generate title embeddings using a transformer model."""
    model_name = "sentence-transformers/all-MiniLM-L6-v2"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModel.from_pretrained(model_name)
    model.eval()
    embeddings = []
    with torch.no_grad():
        for title in titles:
            tokens = tokenizer(title, return_tensors="pt", padding=True, truncation=True)
            output = model(**tokens)
            # Mean-pool the last hidden state into a single vector per title.
            embeddings.append(output.last_hidden_state.mean(dim=1).squeeze())
    return embeddings
def build_graph(titles, embeddings, threshold=0.7):
    """Build a graph of papers, connecting titles whose embeddings are similar."""
    G = nx.Graph()
    for i, title in enumerate(titles):
        G.add_node(i, label=title)
    for i in range(len(embeddings)):
        for j in range(i + 1, len(embeddings)):
            sim = torch.cosine_similarity(embeddings[i], embeddings[j], dim=0).item()
            if sim > threshold:
                G.add_edge(i, j, weight=sim)
    return G
def visualize_graph(G, titles):
    """Visualize the graph with pyvis and save it as an interactive mind map."""
    net = Network(height="750px", width="100%")
    for node_id, _ in G.nodes(data=True):
        net.add_node(node_id, label=titles[node_id])
    for src, dst, attrs in G.edges(data=True):
        net.add_edge(src, dst, value=attrs['weight'])
    # save_graph writes the HTML without trying to open it in a notebook.
    net.save_graph("paper_network.html")
    return "paper_network.html"
def analyze_and_generate_graph(progress=gr.Progress()):
    """Analyze papers, generate embeddings, and visualize the relationship graph."""
    all_data = {}
    for category in ["top", "latest", "greatest"]:
        cache_file = f"{category}_papers_cache.json"
        data = load_cached_data(cache_file)
        if data:
            all_data.update(data)
    titles = [paper['title'] for paper in all_data.values()]
    if not titles:
        return "No cached papers found. Refresh the leaderboards first.", ""
    # Generate embeddings
    embeddings = generate_embeddings(titles)
    # Build a similarity graph based on the embeddings
    G = build_graph(titles, embeddings)
    # Visualize the graph as a mind map
    graph_file = visualize_graph(G, titles)
    summary = f"📊 Papers analyzed: {len(titles)}\n"
    summary += "✅ Graph generated and visualized.\n"
    return summary, embed_html_file(graph_file)
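
# ⚠️ `update_display` is called below but was missing from the original script.
# This is a minimal sketch of what it plausibly did: fetch (or load cached) papers
# for a category and render them as an HTML table. The category URLs are
# assumptions based on paperswithcode.com's public listing pages.
CATEGORY_URLS = {
    "top": "https://paperswithcode.com/",
    "latest": "https://paperswithcode.com/latest",
    "greatest": "https://paperswithcode.com/greatest",
}

def update_display(category):
    """Load papers for a category and return (count, html_table)."""
    cache_file = f"{category}_papers_cache.json"
    data = load_and_cache_data(CATEGORY_URLS[category], cache_file)
    df = pd.DataFrame(list(data.values()))
    if not df.empty:
        # Link each title to its paper page and sort by GitHub stars.
        df['title'] = df.apply(
            lambda r: f'<a href="{r["link"]}" target="_blank">{r["title"]}</a>', axis=1)
        df = df.sort_values('Github Star', ascending=False)[['title', 'Github Star']]
    return str(len(data)), df.to_html(escape=False, index=False)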
# 📊 Define load_all_data Properly 📊
def load_all_data():
    """Load data for all categories and prepare for display."""
    top_count, top_html = update_display("top")
    new_count, new_html = update_display("latest")
    greatest_count, greatest_html = update_display("greatest")
    return top_count, top_html, new_count, new_html, greatest_count, greatest_html
# 🚀 Gradio Interface: Where the Magic Happens 🚀
with gr.Blocks() as demo:
    gr.Markdown("<h1><center>Papers Leaderboard with Context Analysis</center></h1>")

    with gr.Tab("Top Trending Papers"):
        top_count = gr.Textbox(label="Number of Papers Fetched")
        top_html = gr.HTML()
        top_button = gr.Button("Refresh Leaderboard")
        top_button.click(fn=lambda: update_display("top"), inputs=None, outputs=[top_count, top_html])

    with gr.Tab("New Papers"):
        new_count = gr.Textbox(label="Number of Papers Fetched")
        new_html = gr.HTML()
        new_button = gr.Button("Refresh Leaderboard")
        new_button.click(fn=lambda: update_display("latest"), inputs=None, outputs=[new_count, new_html])

    with gr.Tab("Greatest Papers"):
        greatest_count = gr.Textbox(label="Number of Papers Fetched")
        greatest_html = gr.HTML()
        greatest_button = gr.Button("Refresh Leaderboard")
        greatest_button.click(fn=lambda: update_display("greatest"), inputs=None, outputs=[greatest_count, greatest_html])

    analyze_button = gr.Button("🔍 Analyze and Generate Graph", variant="primary")
    analyze_output = gr.Textbox(label="Analysis Status")
    graph_output = gr.HTML(label="Graph Visualization")
    analyze_button.click(fn=analyze_and_generate_graph, inputs=None, outputs=[analyze_output, graph_output])

    # Load initial data for all tabs
    demo.load(fn=load_all_data, outputs=[top_count, top_html, new_count, new_html, greatest_count, greatest_html])
# 🚀 Launch the Gradio interface with a public link
print("🚀 Launching the Papers Leaderboard with Context Analysis! Get ready to explore the relationships between papers! 🎢📈")
demo.launch(share=True)