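"""Gradio Space that helps authors claim their papers on Hugging Face Papers.

Given an author name and the ArXiv ID of one paper they wrote, the app looks up
the author's publications via the Semantic Scholar API, finds the matching
author, checks which of their ArXiv papers are indexed on hf.co/papers, and
lists the indexed papers the logged-in Hugging Face user has not yet claimed,
plus the papers that are not indexed yet.
"""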
import httpx
import gradio as gr
from cytoolz import groupby
from functools import lru_cache, partial
from typing import Optional


def query_author(author_name: str):
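    """Search Semantic Scholar for authors matching ``author_name`` and return the candidate records with their papers."""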
    url = f"https://api.semanticscholar.org/graph/v1/author/search?query={author_name}&fields=name,url,externalIds,papers.externalIds,papers.title,papers.year"
    resp = httpx.get(url)
    resp.raise_for_status()
    return resp.json()["data"]


def get_arxiv_paper(papers):
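    """Keep only the papers that have an ArXiv ID among their external IDs."""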
    papers_with_externalIds = [paper for paper in papers if paper.get("externalIds")]
    return [
        paper for paper in papers_with_externalIds if paper["externalIds"].get("ArXiv")
    ]


def check_arxiv_in_papers(arxiv_ids, papers):
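    """Return True if any of the given ArXiv IDs appears among the papers' external IDs."""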
    papers_with_externalIds = [paper for paper in papers if paper.get("externalIds")]
    papers_with_arxiv_ids = [
        paper for paper in papers_with_externalIds if paper["externalIds"].get("ArXiv")
    ]
    return any(
        paper
        for paper in papers_with_arxiv_ids
        if paper["externalIds"].get("ArXiv") in arxiv_ids
    )


def get_author_from_options(potential_authors, positive_arxiv_ids):
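    """Pick the candidate author whose papers include one of the user-supplied ArXiv IDs, or None if there is no match."""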
    return next(
        (
            author
            for author in potential_authors
            if check_arxiv_in_papers(set(positive_arxiv_ids), author["papers"])
        ),
        None,
    )


def sort_by_date(papers):
    # Some Semantic Scholar records have no year, so fall back to 0 when sorting
    return sorted(papers, key=lambda paper: paper["year"] or 0, reverse=True)


@lru_cache()
def lookup_hf_paper(arxiv_id):
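    """Fetch a paper record from the Hugging Face papers API (responses are cached per ArXiv ID)."""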
    url = f"https://huggingface.co/api/papers/{arxiv_id}"
    resp = httpx.get(url)
    return resp.json()


def check_if_index_hf_paper(paper):
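    """Return True if the paper's ArXiv ID is indexed on hf.co/papers (the API returns an error payload otherwise)."""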
    arxiv_id = paper["externalIds"]["ArXiv"]
    data = lookup_hf_paper(arxiv_id)
    return not data.get("error")


def groupby_indexed_by_hf_papers(papers):
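    """Group papers by whether they are indexed on Hugging Face (key True) or not (key False)."""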
    return groupby(check_if_index_hf_paper, papers)


def check_hf_user_in_authors(paper, hf_user_name):
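    """Return True if the given Hugging Face username is linked to one of the paper's authors."""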
    authors = paper["authors"]
    authors = [author for author in authors if author.get("user")]
    return any(author["user"]["user"] == hf_user_name for author in authors)


def groupby_hf_user_papers(papers, hf_user_name):
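    """Group papers by whether they are already claimed by ``hf_user_name`` (key True) or not (key False)."""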
    check_hf_user_in_authors_partial = partial(
        check_hf_user_in_authors, hf_user_name=hf_user_name
    )
    return groupby(check_hf_user_in_authors_partial, papers)


def get_papers(
    author_name, positive_arxiv_ids, hf_user_name: Optional[gr.OAuthProfile]
):
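    """Build a Markdown report of the author's ArXiv papers, split by Hugging Face indexing and claim status."""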
    # Gradio only injects an OAuthProfile when the user is logged in
    if hf_user_name is None:
        raise gr.Error("Please log in with your Hugging Face account first")
    hf_user_name = hf_user_name.preferred_username
    positive_arxiv_ids = [
        arxiv_id.strip() for arxiv_id in positive_arxiv_ids.split(",")
    ]
    potential_authors = query_author(author_name)
    author = get_author_from_options(potential_authors, positive_arxiv_ids)
    if author is None:
        raise gr.Error(
            "Could not find a Semantic Scholar author matching that name and ArXiv ID"
        )
    papers = get_arxiv_paper(author["papers"])
    papers = sort_by_date(papers)
    papers_indexed_by_hf = groupby_indexed_by_hf_papers(papers)

    indexed_papers = [
        lookup_hf_paper(paper["externalIds"]["ArXiv"])
        for paper in papers_indexed_by_hf.get(True, [])
    ]

    already_claimed = groupby_hf_user_papers(indexed_papers, hf_user_name)
    results = (
        "# Papers already indexed by Hugging Face which you haven't claimed\n"
        "These papers are already indexed by Hugging Face, but you haven't claimed them yet. "
        "You can claim them by clicking on the link and then clicking on the 'Claim' button on the Hugging Face papers page.\n"
    )
    for paper in already_claimed.get(False, []):
        url = f"https://huggingface.co/papers/{paper['id']}"
        results += f"- [{paper['title']}]({url})\n"
    if papers_indexed_by_hf.get(False):
        results += "# Papers not yet indexed by Hugging Face\n"
        for paper in papers_indexed_by_hf[False]:
            paper_title = paper["title"]
            arxiv_id = paper["externalIds"]["ArXiv"]
            url = f"https://huggingface.co/papers/{arxiv_id}"
            results += f"- [{paper_title}]({url})\n"
    return results


with gr.Blocks() as demo:
    gr.HTML("<h1 style='text-align:center;'>Hugging Face Paper Claimer</h1>")
    gr.Markdown("This Space helps you claim your papers on Hugging Face papers")
    with gr.Row():
        gr.LoginButton(size="sm")
        gr.LogoutButton(size="sm")
    author_name = gr.Textbox(
        placeholder="daniel van strien", label="Your name", interactive=True
    )
    positive_arxiv_ids = gr.Textbox(
        placeholder="1910.01108",
        label="ArXiv ID for a paper for which you are an author",
        interactive=True,
    )
    btn = gr.Button("Get papers")
    output = gr.Markdown()
    btn.click(get_papers, [author_name, positive_arxiv_ids], output)

demo.launch(debug=True)