id (string, 2–115 chars) | private (bool) | tags (sequence) | description (string, 0–5.93k chars, nullable) | downloads (int64, 0–1.14M) | likes (int64, 0–1.79k) |
---|---|---|---|---|---|
wisesight1000 | false | [
"task_categories:token-classification",
"annotations_creators:expert-generated",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:n<1K",
"source_datasets:extended|wisesight_sentiment",
"language:th",
"license:cc0-1.0",
"word-tokenization"
] | `wisesight1000` contains Thai social media texts randomly drawn from the full `wisesight-sentiment` corpus, tokenized by human annotators.
There are 250 samples from each of the labels `neg` (negative), `neu` (neutral), `pos` (positive), and `q` (question). Some texts were removed because
they looked like spam. Because these samples are representative of real-world content, we believe having these annotated samples will allow
the community to robustly evaluate tokenization algorithms. | 269 | 0 |
wisesight_sentiment | false | [
"task_categories:text-classification",
"task_ids:sentiment-classification",
"annotations_creators:expert-generated",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:10K<n<100K",
"source_datasets:original",
"language:th",
"license:cc0-1.0"
] | Wisesight Sentiment Corpus: social media messages in the Thai language, each with a sentiment category (positive, neutral, negative, question)
* Released to public domain under Creative Commons Zero v1.0 Universal license.
* Category (Labels): {"pos": 0, "neu": 1, "neg": 2, "q": 3}
* Size: 26,737 messages
* Language: Central Thai
* Style: Informal and conversational, with some news headlines and advertisements.
* Time period: Around 2016 to early 2019, with a small amount from other periods.
* Domains: Mixed. The majority are consumer products and services (restaurants, cosmetics, drinks, cars, hotels), with some current affairs.
* Privacy:
  * Only messages made available to the public on the internet (websites, blogs, social network sites) are included.
  * For Facebook, this means public comments (visible to everyone) made on a public page.
  * Private/protected messages and messages in groups, chats, and inboxes are not included.
* Alterations and modifications:
  * Keep in mind that this corpus does not statistically represent anything in the language register.
  * A large number of messages are not in their original form; personal data have been removed or masked.
  * Duplicated, leading, and trailing whitespace is removed. Other punctuation, symbols, and emojis are kept intact. (Mis)spellings are kept intact.
  * Messages longer than 2,000 characters are removed.
  * Long non-Thai messages are removed. Duplicated messages (exact matches) are removed.
* More characteristics of the data can be explored at https://github.com/PyThaiNLP/wisesight-sentiment/blob/master/exploration.ipynb | 589 | 4 |
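A minimal loading sketch for this dataset, assuming the Hugging Face `datasets` library; the column names used below are assumptions to verify against the dataset card:

```python
from datasets import load_dataset

LABELS = {"pos": 0, "neu": 1, "neg": 2, "q": 3}  # label mapping from the card above
ID2LABEL = {v: k for k, v in LABELS.items()}

ds = load_dataset("wisesight_sentiment", split="train")
example = ds[0]
# The column names ("texts", "category") are assumptions; check ds.column_names.
print(example["texts"], ID2LABEL[example["category"]])
```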
wmt14 | false | [
"task_categories:translation",
"annotations_creators:no-annotation",
"language_creators:found",
"multilinguality:translation",
"size_categories:10M<n<100M",
"source_datasets:extended|europarl_bilingual",
"source_datasets:extended|giga_fren",
"source_datasets:extended|news_commentary",
"source_datasets:extended|un_multi",
"source_datasets:extended|hind_encorp",
"language:cs",
"language:de",
"language:en",
"language:fr",
"language:hi",
"language:ru",
"license:unknown"
] | null | 4,869 | 2 |
wmt15 | false | [
"task_categories:translation",
"annotations_creators:no-annotation",
"language_creators:found",
"multilinguality:translation",
"size_categories:10M<n<100M",
"source_datasets:extended|europarl_bilingual",
"source_datasets:extended|giga_fren",
"source_datasets:extended|news_commentary",
"source_datasets:extended|un_multi",
"language:cs",
"language:de",
"language:en",
"language:fi",
"language:fr",
"language:ru",
"license:unknown"
] | null | 851 | 1 |
wmt16 | false | [
"task_categories:translation",
"annotations_creators:no-annotation",
"language_creators:found",
"multilinguality:translation",
"size_categories:10M<n<100M",
"source_datasets:extended|europarl_bilingual",
"source_datasets:extended|news_commentary",
"source_datasets:extended|setimes",
"source_datasets:extended|un_multi",
"language:cs",
"language:de",
"language:en",
"language:fi",
"language:ro",
"language:ru",
"language:tr",
"license:unknown"
] | null | 30,391 | 9 |
wmt17 | false | [
"task_categories:translation",
"annotations_creators:no-annotation",
"language_creators:found",
"multilinguality:translation",
"size_categories:10M<n<100M",
"source_datasets:extended|europarl_bilingual",
"source_datasets:extended|news_commentary",
"source_datasets:extended|setimes",
"source_datasets:extended|un_multi",
"language:cs",
"language:de",
"language:en",
"language:fi",
"language:lv",
"language:ru",
"language:tr",
"language:zh",
"license:unknown"
] | null | 1,316 | 1 |
wmt18 | false | [
"task_categories:translation",
"annotations_creators:no-annotation",
"language_creators:found",
"multilinguality:translation",
"size_categories:10M<n<100M",
"source_datasets:extended|europarl_bilingual",
"source_datasets:extended|news_commentary",
"source_datasets:extended|opus_paracrawl",
"source_datasets:extended|setimes",
"source_datasets:extended|un_multi",
"language:cs",
"language:de",
"language:en",
"language:et",
"language:fi",
"language:kk",
"language:ru",
"language:tr",
"language:zh",
"license:unknown"
] | null | 1,373 | 3 |
wmt19 | false | [
"task_categories:translation",
"annotations_creators:no-annotation",
"language_creators:found",
"multilinguality:translation",
"size_categories:10M<n<100M",
"source_datasets:extended|europarl_bilingual",
"source_datasets:extended|news_commentary",
"source_datasets:extended|opus_paracrawl",
"source_datasets:extended|un_multi",
"language:cs",
"language:de",
"language:en",
"language:fi",
"language:fr",
"language:gu",
"language:kk",
"language:lt",
"language:ru",
"language:zh",
"license:unknown"
] | null | 2,775 | 9 |
wmt20_mlqe_task1 | false | [
"task_categories:translation",
"annotations_creators:expert-generated",
"annotations_creators:machine-generated",
"language_creators:found",
"multilinguality:translation",
"size_categories:1K<n<10K",
"source_datasets:extended|reddit",
"source_datasets:extended|wikipedia",
"language:de",
"language:en",
"language:et",
"language:ne",
"language:ro",
"language:ru",
"language:si",
"language:zh",
"license:unknown"
] | This shared task (part of WMT20) will build on its previous editions
to further examine automatic methods for estimating the quality
of neural machine translation output at run-time, without relying
on reference translations. As in previous years, we cover estimation
at various levels. Important elements introduced this year include: a new
task where sentences are annotated with Direct Assessment (DA)
scores instead of labels based on post-editing; a new multilingual
sentence-level dataset mainly from Wikipedia articles, where the
source articles can be retrieved for document-wide context; the
availability of NMT models to explore system-internal information for the task.
Task 1 uses Wikipedia data for six language pairs, including the high-resource
English--German (En-De) and English--Chinese (En-Zh), the medium-resource
Romanian--English (Ro-En) and Estonian--English (Et-En), and the low-resource
Sinhalese--English (Si-En) and Nepalese--English (Ne-En) pairs, as well as a
dataset combining Wikipedia articles and Reddit articles
for Russian--English (Ru-En). The datasets were collected by translating
sentences sampled from source language articles using state-of-the-art NMT
models built using the fairseq toolkit and annotated with Direct Assessment (DA)
scores by professional translators. Each sentence was annotated following the
FLORES setup, which presents a form of DA, where at least three professional
translators rate each sentence from 0-100 according to the perceived translation
quality. DA scores are standardised using the z-score by rater. Participating systems
are required to score sentences according to z-standardised DA scores. | 1,190 | 1 |
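The per-rater z-standardisation described above can be sketched in a few lines of plain Python; the `scores_by_rater` input layout is hypothetical and this is not the official task code:

```python
# Standardise each rater's 0-100 DA scores to zero mean and unit variance.
from statistics import mean, stdev

def z_standardise(scores_by_rater):
    z_scores = {}
    for rater, scores in scores_by_rater.items():
        mu, sigma = mean(scores), stdev(scores)  # per-rater statistics
        z_scores[rater] = [(s - mu) / sigma for s in scores]
    return z_scores

print(z_standardise({"rater_1": [70, 85, 90], "rater_2": [40, 55, 60]}))
```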
wmt20_mlqe_task2 | false | [
"task_categories:translation",
"task_categories:text-classification",
"annotations_creators:expert-generated",
"annotations_creators:machine-generated",
"language_creators:found",
"multilinguality:translation",
"size_categories:1K<n<10K",
"source_datasets:extended|wikipedia",
"language:de",
"language:en",
"language:zh",
"license:unknown",
"translation-quality-estimation",
"arxiv:1902.08646"
] | This shared task (part of WMT20) will build on its previous editions
to further examine automatic methods for estimating the quality
of neural machine translation output at run-time, without relying
on reference translations. As in previous years, we cover estimation
at various levels. Important elements introduced this year include: a new
task where sentences are annotated with Direct Assessment (DA)
scores instead of labels based on post-editing; a new multilingual
sentence-level dataset mainly from Wikipedia articles, where the
source articles can be retrieved for document-wide context; the
availability of NMT models to explore system-internal information for the task.
Task 2 evaluates the application of QE for post-editing purposes. It consists of predicting:
- A/ Word-level tags. This is done both on the source side (to detect which words caused errors)
and the target side (to detect mistranslated or missing words).
- A1/ Each token is tagged as either `OK` or `BAD`. Additionally,
each gap between two words is tagged as `BAD` if one or more
missing words should have been there, and `OK` otherwise. Note
that the number of tags for each target sentence is 2*N+1, where
N is the number of tokens in the sentence.
- A2/ Tokens are tagged as `OK` if they were correctly
translated, and `BAD` otherwise. Gaps are not tagged.
- B/ Sentence-level HTER scores. HTER (Human Translation Error Rate)
is the ratio between the number of edits (insertions/deletions/replacements)
needed and the reference translation length. | 481 | 2 |
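Both quantities described above are simple to sketch; the helper functions below are hypothetical illustrations, not the official scoring code:

```python
def hter(num_edits: int, reference_length: int) -> float:
    """Sentence-level HTER: number of edits divided by reference length."""
    return num_edits / reference_length

def tag_slots(target_tokens):
    """Variant A1 layout: a gap slot before, between, and after the N tokens,
    giving 2*N + 1 taggable positions in total."""
    slots = ["<gap>"]
    for token in target_tokens:
        slots += [token, "<gap>"]
    return slots

print(hter(3, 12))                            # 0.25
print(len(tag_slots(["the", "cat", "sat"])))  # 7 == 2*3 + 1
```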
wmt20_mlqe_task3 | false | [
"task_categories:translation",
"annotations_creators:expert-generated",
"annotations_creators:machine-generated",
"language_creators:found",
"multilinguality:translation",
"size_categories:1K<n<10K",
"source_datasets:extended|amazon_us_reviews",
"language:en",
"language:fr",
"license:unknown"
] | This shared task (part of WMT20) will build on its previous editions
to further examine automatic methods for estimating the quality
of neural machine translation output at run-time, without relying
on reference translations. As in previous years, we cover estimation
at various levels. Important elements introduced this year include: a new
task where sentences are annotated with Direct Assessment (DA)
scores instead of labels based on post-editing; a new multilingual
sentence-level dataset mainly from Wikipedia articles, where the
source articles can be retrieved for document-wide context; the
availability of NMT models to explore system-internal information for the task.
The goal of Task 3 is to predict document-level quality scores as well as fine-grained annotations. | 263 | 0 |
wmt_t2t | false | [
"task_categories:translation",
"annotations_creators:no-annotation",
"language_creators:found",
"multilinguality:translation",
"size_categories:10M<n<100M",
"source_datasets:extended|europarl_bilingual",
"source_datasets:extended|news_commentary",
"source_datasets:extended|opus_paracrawl",
"source_datasets:extended|un_multi",
"language:de",
"language:en",
"license:unknown"
] | null | 267 | 0 |
wnut_17 | false | [
"task_categories:token-classification",
"task_ids:named-entity-recognition",
"annotations_creators:crowdsourced",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:1K<n<10K",
"source_datasets:original",
"language:en",
"license:cc-by-4.0"
] | WNUT 17: Emerging and Rare entity recognition
This shared task focuses on identifying unusual, previously-unseen entities in the context of emerging discussions.
Named entities form the basis of many modern approaches to other tasks (like event clustering and summarisation),
but recall on them is a real problem in noisy text - even among annotators. This drop tends to be due to novel entities and surface forms.
Take for example the tweet “so.. kktny in 30 mins?” - even human experts find the entity kktny hard to detect and resolve.
This task will evaluate the ability to detect and classify novel, emerging, singleton named entities in noisy text.
The goal of this task is to provide a definition of emerging and of rare entities, and based on that, also datasets for detecting these entities. | 3,531 | 6 |
wongnai_reviews | false | [
"task_categories:text-classification",
"task_ids:sentiment-classification",
"annotations_creators:found",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:10K<n<100K",
"source_datasets:original",
"language:th",
"license:lgpl-3.0"
] | Wongnai's review dataset contains restaurant reviews and ratings, mainly in the Thai language.
The reviews fall into 5 classes, ranging from 1 to 5 stars. | 473 | 1 |
woz_dialogue | false | [
"task_categories:text-generation",
"task_categories:fill-mask",
"task_categories:token-classification",
"task_categories:text-classification",
"task_ids:dialogue-modeling",
"task_ids:multi-class-classification",
"task_ids:parsing",
"annotations_creators:crowdsourced",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:1K<n<10K",
"source_datasets:original",
"language:de",
"language:en",
"language:it",
"license:unknown",
"arxiv:1604.04562"
] | Wizard-of-Oz (WOZ) is a dataset for training task-oriented dialogue systems. The dataset is designed around the task of finding a restaurant in the Cambridge, UK area. There are three informable slots (food, pricerange, area) that users can use to constrain the search and six requestable slots (address, phone, postcode, plus the three informable slots) that the user can request a value for once a restaurant has been offered. | 805 | 2 |
wrbsc | false | [
"task_categories:text-classification",
"task_ids:semantic-similarity-classification",
"annotations_creators:expert-generated",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:1K<n<10K",
"source_datasets:original",
"language:pl",
"license:cc-by-sa-3.0"
] | WUT Relations Between Sentences Corpus contains 2827 pairs of related sentences.
Relationships are derived from Cross-document Structure Theory (CST), which enables multi-document summarization through identification of cross-document rhetorical relationships within a cluster of related documents.
Every relation was marked by at least 3 annotators. | 263 | 0 |
x_stance | false | [
"task_categories:text-classification",
"annotations_creators:machine-generated",
"language_creators:found",
"multilinguality:multilingual",
"size_categories:10K<n<100K",
"source_datasets:original",
"language:de",
"language:en",
"language:fr",
"language:it",
"license:cc-by-nc-4.0",
"stance-detection",
"arxiv:2003.08385"
] | The x-stance dataset contains more than 150 political questions and 67k comments written by candidates on those questions.
It can be used to train and evaluate stance detection systems. | 273 | 3 |
xcopa | false | [
"task_categories:question-answering",
"task_ids:multiple-choice-qa",
"annotations_creators:expert-generated",
"language_creators:expert-generated",
"multilinguality:multilingual",
"size_categories:unknown",
"source_datasets:extended|copa",
"language:et",
"language:ht",
"language:id",
"language:it",
"language:qu",
"language:sw",
"language:ta",
"language:th",
"language:tr",
"language:vi",
"language:zh",
"license:cc-by-4.0"
] | XCOPA: A Multilingual Dataset for Causal Commonsense Reasoning
The Cross-lingual Choice of Plausible Alternatives dataset is a benchmark to evaluate the ability of machine learning models to transfer commonsense reasoning across
languages. The dataset is the translation and reannotation of the English COPA (Roemmele et al. 2011) and covers 11 languages from 11 families and several areas around
the globe. The dataset is challenging as it requires both the command of world knowledge and the ability to generalise to new languages. All the details about the
creation of XCOPA and the implementation of the baselines are available in the paper. | 9,049 | 2 |
xcsr | false | [
"task_categories:question-answering",
"task_ids:multiple-choice-qa",
"annotations_creators:crowdsourced",
"language_creators:crowdsourced",
"language_creators:machine-generated",
"multilinguality:multilingual",
"size_categories:1K<n<10K",
"source_datasets:extended|codah",
"source_datasets:extended|commonsense_qa",
"language:ar",
"language:de",
"language:en",
"language:es",
"language:fr",
"language:hi",
"language:it",
"language:ja",
"language:nl",
"language:pl",
"language:pt",
"language:ru",
"language:sw",
"language:ur",
"language:vi",
"language:zh",
"license:mit",
"arxiv:2106.06937"
] | To evaluate multilingual language models (ML-LMs) for commonsense reasoning in a cross-lingual zero-shot transfer setting (X-CSR), i.e., training in English and testing in other languages, we create two benchmark datasets, namely X-CSQA and X-CODAH. Specifically, we automatically translate the original CSQA and CODAH datasets, which only have English versions, into 15 other languages, forming development and test sets for studying X-CSR. As our goal is to evaluate different ML-LMs in a unified evaluation protocol for X-CSR, we argue that such translated examples, although they might contain noise, can serve as a starting benchmark for obtaining meaningful analysis before more human-translated datasets become available in the future. | 4,337 | 2 |
xed_en_fi | false | [
"task_categories:text-classification",
"task_ids:intent-classification",
"task_ids:multi-class-classification",
"task_ids:multi-label-classification",
"task_ids:sentiment-classification",
"annotations_creators:expert-generated",
"language_creators:found",
"multilinguality:multilingual",
"size_categories:10K<n<100K",
"size_categories:1K<n<10K",
"source_datasets:extended|other-OpenSubtitles2016",
"language:en",
"language:fi",
"license:cc-by-4.0",
"arxiv:2011.01612"
] | A multilingual fine-grained emotion dataset. The dataset consists of human-annotated Finnish (25k) and English (30k) sentences. Plutchik’s
core emotions, with the addition of neutral, are used to annotate the dataset, creating a multilabel, multiclass
dataset. The dataset is carefully evaluated using language-specific BERT models and SVMs to
show that XED performs on par with other similar datasets and is therefore a useful tool for
sentiment analysis and emotion detection. | 653 | 4 |
xglue | false | [
"task_categories:question-answering",
"task_categories:summarization",
"task_categories:text-classification",
"task_categories:text2text-generation",
"task_categories:token-classification",
"task_ids:acceptability-classification",
"task_ids:extractive-qa",
"task_ids:named-entity-recognition",
"task_ids:natural-language-inference",
"task_ids:news-articles-headline-generation",
"task_ids:open-domain-qa",
"task_ids:parsing",
"task_ids:topic-classification",
"annotations_creators:crowdsourced",
"annotations_creators:expert-generated",
"annotations_creators:found",
"annotations_creators:machine-generated",
"language_creators:crowdsourced",
"language_creators:expert-generated",
"language_creators:found",
"language_creators:machine-generated",
"multilinguality:multilingual",
"multilinguality:translation",
"size_categories:100K<n<1M",
"size_categories:10K<n<100K",
"source_datasets:extended|conll2003",
"source_datasets:extended|squad",
"source_datasets:extended|xnli",
"source_datasets:original",
"language:ar",
"language:bg",
"language:de",
"language:el",
"language:en",
"language:es",
"language:fr",
"language:hi",
"language:it",
"language:nl",
"language:pl",
"language:pt",
"language:ru",
"language:sw",
"language:th",
"language:tr",
"language:ur",
"language:vi",
"language:zh",
"license:cc-by-nc-4.0",
"license:cc-by-sa-4.0",
"license:other",
"paraphrase-identification",
"question-answering",
"arxiv:2004.01401"
] | XGLUE is a new benchmark dataset to evaluate the performance of cross-lingual pre-trained
models with respect to cross-lingual natural language understanding and generation.
The benchmark is composed of the following 11 tasks:
- NER
- POS Tagging (POS)
- News Classification (NC)
- MLQA
- XNLI
- PAWS-X
- Query-Ad Matching (QADSM)
- Web Page Ranking (WPR)
- QA Matching (QAM)
- Question Generation (QG)
- News Title Generation (NTG)
For more information, please take a look at https://microsoft.github.io/XGLUE/. | 4,102 | 13 |
xnli | false | [
"language:ar",
"language:bg",
"language:de",
"language:el",
"language:en",
"language:es",
"language:fr",
"language:hi",
"language:ru",
"language:sw",
"language:th",
"language:tr",
"language:ur",
"language:vi",
"language:zh"
] | XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some of them low-resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply, contradict, or stand in
neither relation to sentence B); it is a classification task (given two sentences,
predict one of three labels). | 43,897 | 16 |
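A minimal loading sketch, assuming the Hugging Face `datasets` library; the per-language config name and the label order below follow the standard XNLI conventions:

```python
from datasets import load_dataset

xnli = load_dataset("xnli", "en", split="validation")
labels = ["entailment", "neutral", "contradiction"]  # assumed standard XNLI order
ex = xnli[0]
print(ex["premise"])
print(ex["hypothesis"])
print(labels[ex["label"]])
```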
xor_tydi_qa | false | [
"task_categories:question-answering",
"task_ids:open-domain-qa",
"annotations_creators:crowdsourced",
"language_creators:expert-generated",
"language_creators:found",
"multilinguality:multilingual",
"size_categories:10K<n<100K",
"source_datasets:original",
"source_datasets:extended|tydiqa",
"language:ar",
"language:bn",
"language:fi",
"language:ja",
"language:ko",
"language:ru",
"language:te",
"license:mit",
"arxiv:2010.11856"
] | XOR-TyDi QA brings together for the first time information-seeking questions,
open-retrieval QA, and multilingual QA to create a multilingual open-retrieval
QA dataset that enables cross-lingual answer retrieval. It consists of questions
written by information-seeking native speakers in 7 typologically diverse languages
and answer annotations that are retrieved from multilingual document collections.
There are three sub-tasks: XOR-Retrieve, XOR-EnglishSpan, and XOR-Full. | 394 | 0 |
xquad | false | [
"task_categories:question-answering",
"task_ids:extractive-qa",
"annotations_creators:expert-generated",
"language_creators:expert-generated",
"multilinguality:multilingual",
"size_categories:unknown",
"source_datasets:extended|squad",
"language:ar",
"language:de",
"language:el",
"language:en",
"language:es",
"language:hi",
"language:ro",
"language:ru",
"language:th",
"language:tr",
"language:vi",
"language:zh",
"license:cc-by-sa-4.0",
"arxiv:1910.11856"
] | XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question answering
performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from the development set
of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into ten languages: Spanish, German,
Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, Hindi and Romanian. Consequently, the dataset is entirely parallel
across 12 languages. | 6,695 | 5 |
xquad_r | false | [
"task_categories:question-answering",
"task_ids:extractive-qa",
"annotations_creators:expert-generated",
"language_creators:found",
"multilinguality:multilingual",
"size_categories:1K<n<10K",
"source_datasets:extended|squad",
"source_datasets:extended|xquad",
"language:ar",
"language:de",
"language:el",
"language:en",
"language:es",
"language:hi",
"language:ru",
"language:th",
"language:tr",
"language:vi",
"language:zh",
"license:cc-by-sa-4.0",
"arxiv:2004.05484"
] | XQuAD-R is a retrieval version of the XQuAD dataset (a cross-lingual extractive QA dataset). Like XQuAD, XQUAD-R is an 11-way parallel dataset, where each question appears in 11 different languages and has 11 parallel correct answers across the languages. | 3,593 | 2 |
xsum | false | [
"task_categories:summarization",
"task_ids:news-articles-summarization",
"annotations_creators:found",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:100K<n<1M",
"source_datasets:original",
"language:en",
"license:unknown",
"arxiv:1808.08745"
] | Extreme Summarization (XSum) Dataset.
There are three features:
- document: Input news article.
- summary: One sentence summary of the article.
- id: BBC ID of the article. | 37,094 | 23 |
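A minimal sketch of accessing the three features, assuming the Hugging Face `datasets` library and the Hub id `xsum` listed above:

```python
from datasets import load_dataset

xsum = load_dataset("xsum", split="train")
ex = xsum[0]
print(ex["id"])              # BBC ID of the article
print(ex["summary"])         # one-sentence summary of the article
print(ex["document"][:200])  # input news article, truncated for display
```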
xsum_factuality | false | [
"task_categories:summarization",
"annotations_creators:crowdsourced",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:1K<n<10K",
"source_datasets:extended|other-xsum",
"language:en",
"license:cc-by-4.0",
"hallucinations"
] | Neural abstractive summarization models are highly prone to hallucinate content that is unfaithful to the input
document. Popular metrics such as ROUGE fail to show the severity of the problem. The dataset consists of
faithfulness and factuality annotations of abstractive summaries for the XSum dataset. We have crowdsourced 3 judgements
for each of 500 x 5 document-system pairs. This will be a valuable resource to the abstractive summarization community. | 408 | 3 |
xtreme | false | [
"task_categories:multiple-choice",
"task_categories:question-answering",
"task_categories:token-classification",
"task_categories:text-classification",
"task_categories:text-retrieval",
"task_ids:multiple-choice-qa",
"task_ids:extractive-qa",
"task_ids:open-domain-qa",
"task_ids:natural-language-inference",
"task_ids:named-entity-recognition",
"task_ids:part-of-speech",
"annotations_creators:found",
"language_creators:found",
"multilinguality:multilingual",
"multilinguality:translation",
"size_categories:n<1K",
"size_categories:1K<n<10K",
"size_categories:10K<n<100K",
"size_categories:100K<n<1M",
"source_datasets:extended|xnli",
"source_datasets:extended|paws-x",
"source_datasets:extended|wikiann",
"source_datasets:extended|xquad",
"source_datasets:extended|mlqa",
"source_datasets:extended|tydiqa",
"source_datasets:extended|tatoeba",
"source_datasets:extended|squad",
"language:af",
"language:ar",
"language:bg",
"language:bn",
"language:de",
"language:el",
"language:en",
"language:es",
"language:et",
"language:eu",
"language:fa",
"language:fi",
"language:fr",
"language:he",
"language:hi",
"language:hu",
"language:id",
"language:it",
"language:ja",
"language:jv",
"language:ka",
"language:kk",
"language:ko",
"language:ml",
"language:mr",
"language:ms",
"language:my",
"language:nl",
"language:pt",
"language:ru",
"language:sw",
"language:ta",
"language:te",
"language:th",
"language:tl",
"language:tr",
"language:ur",
"language:vi",
"language:yo",
"language:zh",
"license:apache-2.0",
"license:cc-by-4.0",
"license:cc-by-2.0",
"license:cc-by-sa-4.0",
"license:other",
"license:cc-by-nc-4.0",
"parallel-sentence-retrieval",
"paraphrase-identification",
"arxiv:2003.11080"
] | The Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of
the cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages
(spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of
syntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,
and availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil
(spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the
Niger-Congo languages Swahili and Yoruba, spoken in Africa. | 42,291 | 28 |
yahoo_answers_qa | false | [
"task_categories:question-answering",
"task_ids:open-domain-qa",
"annotations_creators:found",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:10K<n<100K",
"source_datasets:extended|other-yahoo-webscope-l6",
"language:en",
"license:unknown"
] | Yahoo Non-Factoid Question Dataset is derived from Yahoo's Webscope L6 collection using machine learning techniques such that the questions contain non-factoid answers. The dataset contains 87,361 questions and their corresponding answers. Each question contains its best answer along with additional other answers submitted by users. Only the best answer was reviewed in determining the quality of the question-answer pair. | 885 | 8 |
yahoo_answers_topics | false | [
"task_categories:text-classification",
"task_ids:topic-classification",
"annotations_creators:found",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:1M<n<10M",
"source_datasets:extended|other-yahoo-answers-corpus",
"language:en",
"license:unknown"
] | Yahoo! Answers Topic Classification is a text classification dataset. The dataset is the Yahoo! Answers corpus as of 10/25/2007. The Yahoo! Answers topic classification dataset is constructed using the 10 largest main categories. From all the answers and other meta-information, this dataset uses only the best answer content and the main category information. | 2,492 | 15 |
yelp_polarity | false | [
"language:en",
"arxiv:1509.01626"
] | Large Yelp Review Dataset.
This is a dataset for binary sentiment classification. We provide a set of 560,000 highly polar yelp reviews for training, and 38,000 for testing.
ORIGIN
The Yelp reviews dataset consists of reviews from Yelp. It is extracted
from the Yelp Dataset Challenge 2015 data. For more information, please
refer to http://www.yelp.com/dataset_challenge
The Yelp reviews polarity dataset is constructed by
Xiang Zhang ([email protected]) from the above dataset.
It is first used as a text classification benchmark in the following paper:
Xiang Zhang, Junbo Zhao, Yann LeCun. Character-level Convolutional Networks
for Text Classification. Advances in Neural Information Processing Systems 28
(NIPS 2015).
DESCRIPTION
The Yelp reviews polarity dataset is constructed by considering stars 1 and 2
negative, and 3 and 4 positive. For each polarity, 280,000 training samples and
19,000 testing samples are taken randomly. In total there are 560,000 training
samples and 38,000 testing samples. Negative polarity is class 1,
and positive polarity is class 2.
The files train.csv and test.csv contain all the training samples as
comma-separated values. There are 2 columns in them, corresponding to class
index (1 and 2) and review text. The review texts are escaped using double
quotes ("), and any internal double quote is escaped by 2 double quotes ("").
Newlines are escaped by a backslash followed by an "n" character,
that is "\n". | 4,529 | 4 |
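Given the escaping rules above, a hedged reading sketch in plain Python (the file path is an assumption; Python's csv module already handles the doubled-double-quote escaping, so only the backslash-n sequences need unescaping):

```python
import csv

with open("train.csv", newline="", encoding="utf-8") as f:
    for class_index, text in csv.reader(f):
        label = "negative" if class_index == "1" else "positive"
        review = text.replace("\\n", "\n")  # undo the literal backslash-n escaping
        print(label, review[:80])
        break  # show only the first row
```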
yelp_review_full | false | [
"task_categories:text-classification",
"task_ids:sentiment-classification",
"annotations_creators:crowdsourced",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:100K<n<1M",
"source_datasets:original",
"language:en",
"license:other",
"arxiv:1509.01626"
] | The Yelp reviews dataset consists of reviews from Yelp. It is extracted from the Yelp Dataset Challenge 2015 data.
The Yelp reviews full star dataset is constructed by Xiang Zhang ([email protected]) from the above dataset.
It is first used as a text classification benchmark in the following paper: Xiang Zhang, Junbo Zhao, Yann LeCun.
Character-level Convolutional Networks for Text Classification. Advances in Neural Information Processing Systems 28 (NIPS 2015). | 18,154 | 16 |
yoruba_bbc_topics | false | [
"task_categories:text-classification",
"task_ids:topic-classification",
"annotations_creators:expert-generated",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:1K<n<10K",
"source_datasets:original",
"language:yo",
"license:unknown"
] | A collection of news article headlines in Yoruba from BBC Yoruba.
Each headline is labeled with one of the following classes: africa,
entertainment, health, nigeria, politics, sport or world.
The dataset was presented in the paper:
Hedderich, Adelani, Zhu, Alabi, Markus, Klakow: Transfer Learning and
Distant Supervision for Multilingual Transformer Models: A Study on
African Languages (EMNLP 2020). | 265 | 0 |
yoruba_gv_ner | false | [
"task_categories:token-classification",
"task_ids:named-entity-recognition",
"annotations_creators:expert-generated",
"language_creators:expert-generated",
"multilinguality:monolingual",
"size_categories:1K<n<10K",
"source_datasets:original",
"language:yo",
"license:cc-by-3.0"
] | The Yoruba GV NER dataset is a labeled dataset for named entity recognition in Yoruba. The texts were obtained from
Yoruba Global Voices News articles https://yo.globalvoices.org/ . We concentrate on
four types of named entities: persons [PER], locations [LOC], organizations [ORG], and dates & time [DATE].
The Yoruba GV NER data files contain 2 columns separated by a tab ('\t'). Each word has been put on a separate line and
there is an empty line after each sentence, i.e. the CoNLL format. The first item on each line is a word, the second
is the named entity tag. The named entity tags have the format I-TYPE, which means that the word is inside a phrase
of type TYPE. For every multi-word expression like 'New York', the first word gets the tag B-TYPE and the subsequent words
have tags I-TYPE; a word with tag O is not part of a phrase. The dataset is in the BIO tagging scheme.
For more details, see https://www.aclweb.org/anthology/2020.lrec-1.335/ | 263 | 0 |
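A hedged parsing sketch for the two-column format described above (the file path and helper name are illustrative):

```python
def read_conll(path):
    """Parse 'word<TAB>tag' lines into sentences; a blank line ends a sentence."""
    sentences, current = [], []
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.rstrip("\n")
            if not line:
                if current:
                    sentences.append(current)
                    current = []
            else:
                word, tag = line.split("\t")
                current.append((word, tag))
    if current:  # flush a trailing sentence without a final blank line
        sentences.append(current)
    return sentences
```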
yoruba_text_c3 | false | [
"task_categories:text-generation",
"task_categories:fill-mask",
"task_ids:language-modeling",
"task_ids:masked-language-modeling",
"annotations_creators:expert-generated",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:100K<n<1M",
"source_datasets:original",
"language:yo",
"license:cc-by-nc-4.0"
] | Yoruba Text C3 is the largest collection of Yoruba texts, gathered and used to train FastText embeddings in the
YorubaTwi Embedding paper: https://www.aclweb.org/anthology/2020.lrec-1.335/ | 263 | 1 |
yoruba_wordsim353 | false | [
"task_categories:text-classification",
"task_ids:text-scoring",
"task_ids:semantic-similarity-scoring",
"annotations_creators:crowdsourced",
"language_creators:expert-generated",
"multilinguality:multilingual",
"size_categories:n<1K",
"source_datasets:original",
"language:en",
"language:yo",
"license:unknown"
] | A translation of the word pair similarity dataset wordsim-353 to Yorùbá.
The dataset was presented in the paper
Alabi et al.: Massive vs. Curated Embeddings for Low-Resourced
Languages: the Case of Yorùbá and Twi (LREC 2020). | 265 | 0 |
youtube_caption_corrections | false | [
"task_categories:other",
"task_categories:text-generation",
"task_categories:fill-mask",
"task_ids:slot-filling",
"annotations_creators:expert-generated",
"annotations_creators:machine-generated",
"language_creators:machine-generated",
"multilinguality:monolingual",
"size_categories:10K<n<100K",
"source_datasets:original",
"language:en",
"license:mit",
"token-classification-of-text-errors"
] | Dataset built from pairs of YouTube captions where both 'auto-generated' and
'manually-corrected' captions are available for a single specified language.
This dataset labels two-way (i.e. ignoring single-sided insertions) same-length
token differences in the `diff_type` column. The `default_seq` is composed of
tokens from the 'auto-generated' captions. When a difference occurs between
the 'auto-generated' vs 'manually-corrected' captions types, the `correction_seq`
contains tokens from the 'manually-corrected' captions. | 269 | 1 |
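A hedged sketch of how the columns described above could be combined to recover corrected tokens; the exact value semantics of `diff_type` (0 taken to mean "no difference") are an assumption to check against the dataset card:

```python
def corrected_tokens(example):
    """Take the corrected token where a difference is marked, else the default."""
    out = []
    for i, diff in enumerate(example["diff_type"]):
        if diff == 0:
            out.append(example["default_seq"][i])   # assumed: 0 means no difference
        else:
            out.append(example["correction_seq"][i])
    return out
```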
zest | false | [
"task_categories:question-answering",
"task_categories:token-classification",
"task_ids:closed-domain-qa",
"task_ids:extractive-qa",
"annotations_creators:crowdsourced",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:10K<n<100K",
"source_datasets:original",
"language:en",
"license:cc-by-4.0",
"output-structure",
"yes-no-qa",
"arxiv:2011.08115"
] | ZEST tests whether NLP systems can perform unseen tasks in a zero-shot way, given a natural language description of
the task. It is an instantiation of our proposed framework "learning from task descriptions". The tasks include
classification, typed entity extraction and relationship extraction, and each task is paired with 20 different
annotated (input, output) examples. ZEST's structure allows us to systematically test whether models can generalize
in five different ways. | 857 | 1 |
0n1xus/codexglue | false | [] | CodeXGLUE is a benchmark dataset to foster machine learning research for program understanding and generation.
CodeXGLUE includes a collection of 10 tasks across 14 datasets and a platform for model evaluation and comparison. | 467 | 2 |
0n1xus/pytorrent-standalone | false | [] | pytorrent-standalone is a subset of the PyTorrent dataset, where only functions that do not depend on external libraries
are kept. | 299 | 0 |
AConsApart/anime_subtitles_DialoGPT | false | [] | null | 152 | 0 |
AHussain0418/day2_data | false | [] | null | 301 | 0 |
AHussain0418/day4data | false | [] | null | 299 | 0 |
AHussain0418/demo_data | false | [] | null | 300 | 0 |
AI-Sweden/SuperLim | false | [
"task_categories:question-answering",
"task_categories:text-classification",
"task_categories:other",
"multilinguality:monolingual",
"language:sv"
] | \ | 1,934 | 2 |
ARKseal/YFCC14M_subset_webdataset | false | [] | null | 300 | 0 |
ARTeLab/fanpage | false | [
"task_categories:summarization",
"multilinguality:monolingual",
"size_categories:10K<n<100k",
"source_datasets:original",
"language:it",
"license:unknown"
] | null | 329 | 2 |
ARTeLab/ilpost | false | [
"task_categories:summarization",
"multilinguality:monolingual",
"size_categories:10K<n<100k",
"language:it"
] | null | 307 | 0 |
ARTeLab/mlsum-it | false | [
"task_categories:summarization",
"multilinguality:monolingual",
"size_categories:10K<n<100k",
"language:it"
] | null | 307 | 0 |
ASCCCCCCCC/amazon_zh | false | [
"license:apache-2.0"
] | null | 300 | 1 |
ASCCCCCCCC/amazon_zh_simple | false | [
"license:apache-2.0"
] | null | 302 | 1 |
Abdo1Kamr/Arabic_Hadith | false | [] | null | 153 | 0 |
Abirate/code_net_dataset | false | [] | null | 300 | 1 |
Abirate/code_net_dev_dataset | false | [] | null | 301 | 0 |
Abirate/code_net_test_final_dataset | false | [] | null | 300 | 0 |
Abirate/english_quotes | false | [
"task_categories:text-classification",
"task_ids:multi-label-classification",
"annotations_creators:expert-generated",
"language_creators:expert-generated",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"source_datasets:original",
"language:en"
] | null | 2,854 | 0 |
Abirate/french_book_reviews | false | [
"task_categories:text-classification",
"task_ids:multi-label-classification",
"annotations_creators:expert-generated",
"language_creators:expert-generated",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"source_datasets:original",
"language:fr"
] | null | 354 | 2 |
AdWeeb/DravidianMT | false | [] | null | 153 | 0 |
Adnan/Urdu_News_Headlines | false | [] | null | 153 | 0 |
AhmadSawal/qa | false | [] | null | 153 | 0 |
AhmedSSoliman/CoNaLa | false | [] | null | 307 | 0 |
Aisha/BAAD16 | false | [
"task_categories:text-classification",
"task_ids:multi-class-classification",
"annotations_creators:found",
"annotations_creators:crowdsourced",
"annotations_creators:expert-generated",
"language_creators:found",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"source_datasets:original",
"language:bn",
"license:cc-by-4.0",
"arxiv:2001.05316"
] | null | 299 | 0 |
Aisha/BAAD6 | false | [
"task_categories:text-classification",
"task_ids:multi-class-classification",
"annotations_creators:found",
"annotations_creators:crowdsourced",
"annotations_creators:expert-generated",
"language_creators:found",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:unknown",
"source_datasets:original",
"language:bn",
"license:cc-by-4.0"
] | null | 299 | 0 |
Akila/ForgottenRealmsWikiDataset | false | [] | null | 302 | 2 |
Akshith/aa | false | [] | null | 153 | 0 |
Akshith/g_rock | false | [] | null | 153 | 0 |
Akshith/test | false | [] | null | 301 | 0 |
adorkin/extended_tweet_emojis | false | [
"task_categories:text-classification",
"size_categories:10K<n<100K",
"language:en"
] | null | 267 | 1 |
AlekseyKorshuk/comedy-scripts | false | [] | This dataset is designed to generate lyrics with HuggingArtists. | 300 | 1 |
AlekseyKorshuk/horror-scripts | false | [] | This dataset is designed to generate lyrics with HuggingArtists. | 299 | 1 |
AlexMaclean/all-deletion-compressions | false | [] | null | 300 | 1 |
AlexMaclean/wikipedia-deletion-compressions | false | [] | null | 300 | 1 |
AlexZapolskii/zapolskii-amazon | false | [] | null | 299 | 0 |
AlgoveraAI/CryptoPunks | false | [] | CryptoPunks is a non-fungible token (NFT) collection on the Ethereum blockchain. The dataset contains 10,000 CryptoPunk images, most of humans but also of three special types: Zombie (88), Ape (24) and Alien (9). They are provided with both clear backgrounds and teal backgrounds. | 152 | 4 |
Aliseyfi/event_token_type | false | [] | null | 149 | 0 |
Alvenir/nst-da-16khz | false | [] | null | 297 | 1 |
AndrewMcDowell/de_corpora_parliament_processed | false | [] | null | 297 | 0 |
Annabelleabbott/real-fake-news-workshop | false | [] | null | 299 | 0 |
Annielytics/DoctorsNotes | false | [] | null | 150 | 0 |
Anurag-Singh-creator/task | false | [] | null | 297 | 0 |
Anurag-Singh-creator/tasks | false | [] | null | 151 | 0 |
ApiInferenceTest/asr_dummy | false | [] | Self-supervised learning (SSL) has proven vital for advancing research in
natural language processing (NLP) and computer vision (CV). The paradigm
pretrains a shared model on large volumes of unlabeled data and achieves
state-of-the-art (SOTA) for various tasks with minimal adaptation. However, the
speech processing community lacks a similar setup to systematically explore the
paradigm. To bridge this gap, we introduce Speech processing Universal
PERformance Benchmark (SUPERB). SUPERB is a leaderboard to benchmark the
performance of a shared model across a wide range of speech processing tasks
with minimal architecture changes and labeled data. Among multiple usages of the
shared model, we especially focus on extracting the representation learned from
SSL due to its preferable re-usability. We present a simple framework to solve
SUPERB tasks by learning task-specialized lightweight prediction heads on top of
the frozen shared model. Our results demonstrate that the framework is promising
as SSL representations show competitive generalizability and accessibility
across SUPERB tasks. We release SUPERB as a challenge with a leaderboard and a
benchmark toolkit to fuel the research in representation learning and general
speech processing.
Note that in order to limit the required storage for preparing this dataset, the
audio is stored in the .flac format and is not converted to a float32 array. To
convert, the audio file to a float32 array, please make use of the `.map()`
function as follows:
```python
import soundfile as sf

def map_to_array(batch):
    # Decode the .flac file referenced by batch["file"] into a float array.
    speech_array, _ = sf.read(batch["file"])
    batch["speech"] = speech_array
    return batch

dataset = dataset.map(map_to_array, remove_columns=["file"])
``` | 304 | 0 |
Arnold/hausa_common_voice | false | [] | null | 299 | 0 |
AryanLala/autonlp-data-Scientific_Title_Generator | false | [] | null | 301 | 1 |
Atsushi/fungi_diagnostic_chars_comparison_japanese | false | [
"task_categories:text-classification",
"task_ids:multi-class-classification",
"annotations_creators:other",
"multilinguality:monolingual",
"size_categories:100K<n<1M",
"source_datasets:original",
"language:ja",
"license:cc-by-4.0"
] | null | 297 | 0 |
Atsushi/fungi_indexed_mycological_papers_japanese | false | [
"annotations_creators:other",
"multilinguality:monolingual",
"size_categories:1K<n<10K",
"source_datasets:original",
"language:ja",
"license:cc-by-4.0"
] | null | 297 | 0 |
Atsushi/fungi_trait_circus_database | false | [
"annotations_creators:other",
"multilinguality:multilingual",
"size_categories:100K<n<1M",
"source_datasets:original",
"language:en",
"language:ja",
"license:cc-by-4.0"
] | null | 300 | 0 |
Avishekavi/Avi | false | [] | null | 151 | 0 |
BSC-LT/SQAC | false | [
"task_categories:question-answering",
"task_ids:extractive-qa",
"annotations_creators:expert-generated",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:unknown",
"source_datasets:original",
"language:es",
"license:cc-by-sa-4.0",
"arxiv:2107.07253",
"arxiv:1606.05250"
] | This dataset contains 6,247 contexts and 18,817 questions with their answers (1 to 5 questions per fragment).
The sources of the contexts are:
* Encyclopedic articles from [Wikipedia in Spanish](https://es.wikipedia.org/), used under [CC-by-sa licence](https://creativecommons.org/licenses/by-sa/3.0/legalcode).
* News from [Wikinews in Spanish](https://es.wikinews.org/), used under [CC-by licence](https://creativecommons.org/licenses/by/2.5/).
* Text from the Spanish corpus [AnCora](http://clic.ub.edu/corpus/en), which is a mix of different newswire and literature sources, used under a [CC-by licence](https://creativecommons.org/licenses/by/4.0/legalcode).
This dataset can be used to build extractive-QA systems. | 297 | 4 |
BSC-LT/ancora-ca-ner | false | [
"language:ca"
] | AnCora Catalan NER.
This is a dataset for Named Entity Recognition (NER) from the AnCora corpus, adapted for
machine learning and language model evaluation purposes.
Since multiwords (including named entities) in the original AnCora corpus are aggregated as
a single lexical item using underscores (e.g. "Ajuntament_de_Barcelona"),
we split them to align with the word-per-line format, and added conventional Begin-Inside-Outside (IOB)
tags to mark and classify named entities.
We did not filter out the different categories of NEs from AnCora (weak and strong).
We made 6 minor edits by hand.
The AnCora corpus is used under a [CC-by](https://creativecommons.org/licenses/by/4.0/) licence.
This dataset was developed by BSC TeMU as part of the AINA project, and to enrich the Catalan Language Understanding Benchmark (CLUB). | 297 | 1 |
BSC-LT/sts-ca | false | [
"language:ca"
] | Semantic Textual Similarity in Catalan.
STS corpus is a benchmark for evaluating Semantic Text Similarity in Catalan.
It consists of more than 3000 sentence pairs, annotated with the semantic similarity between them,
using a scale from 0 (no similarity at all) to 5 (semantic equivalence).
Annotation was done manually by 4 different annotators following our guidelines, based on previous work from the SemEval challenges (https://www.aclweb.org/anthology/S13-1004.pdf).
The source data are scraped sentences from the Catalan Textual Corpus (https://doi.org/10.5281/zenodo.4519349), used under CC-by-SA-4.0 licence (https://creativecommons.org/licenses/by-sa/4.0/). The dataset is released under the same licence.
This dataset was developed by BSC TeMU as part of the AINA project, and to enrich the Catalan Language Understanding Benchmark (CLUB).
This is the version 1.0.2 of the dataset with the complete human and automatic annotations and the analysis scripts. It also has a more accurate license.
This dataset can be used to build and score semantic similarity models. | 297 | 0 |
BSC-LT/tecla | false | [
"language:ca"
] | TeCla: Text Classification Catalan dataset
Catalan News corpus for Text classification, crawled from ACN (Catalan News Agency) site: www.acn.cat
A corpus of news articles in Catalan for text classification, extracted from the website of the Catalan News Agency (www.acn.cat). | 297 | 0 |
BSC-LT/viquiquad | false | [
"language:ca",
"arxiv:1606.05250"
] | ViquiQuAD: an extractive QA dataset from Catalan Wikipedia.
This dataset contains 3,111 contexts extracted from a set of 597 high-quality, original (not translated)
articles in the Catalan Wikipedia "Viquipèdia" (ca.wikipedia.org), and 1 to 5 questions with their
answer for each fragment. Viquipèdia articles are used under a CC-by-sa licence.
This dataset can be used to build extractive-QA and Language Models.
Funded by the Generalitat de Catalunya, Departament de Polítiques Digitals i Administració Pública (AINA),
MT4ALL and Plan de Impulso de las Tecnologías del Lenguaje (Plan TL). | 297 | 0 |
BSC-LT/xquad-ca | false | [
"language:ca",
"arxiv:1910.11856"
] | Professional translation into Catalan of XQuAD dataset (https://github.com/deepmind/xquad).
XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating
cross-lingual question answering performance.
The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from
the development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with
their professional translations into ten languages:
Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi.
Romanian was added later.
We added a 13th language, Catalan, to the corpus, also using professional native translators.
XQuAD and XQuAD-Ca datasets are released under CC-by-sa licence. | 297 | 0 |
Babelscape/rebel-dataset | false | [
"task_categories:text-retrieval",
"task_categories:text-generation",
"annotations_creators:machine-generated",
"language_creators:machine-generated",
"multilinguality:monolingual",
"size_categories:unknown",
"source_datasets:original",
"language:en",
"license:cc-by-nc-sa-4.0",
"relation-extraction",
"conditional-text-generation",
"arxiv:2005.00614"
] | REBEL is a silver dataset created for the paper REBEL: Relation Extraction By End-to-end Language generation | 317 | 10 |
Babelscape/wikineural | false | [
"task_categories:token-classification",
"task_ids:named-entity-recognition",
"annotations_creators:machine-generated",
"language_creators:machine-generated",
"multilinguality:multilingual",
"source_datasets:original",
"language:de",
"language:en",
"language:es",
"language:fr",
"language:it",
"language:nl",
"language:pl",
"language:pt",
"language:ru",
"license:cc-by-nc-sa-4.0",
"structure-prediction",
"arxiv:1810.04805"
] | null | 597 | 8 |
BatuhanYilmaz/github-issues | false | [] | null | 151 | 0 |
Baybars/parla_text_corpus | false | [
"task_ids:language-modeling",
"annotations_creators:no-annotation",
"language_creators:various",
"multilinguality:monolingual",
"size_categories:100k<n<1M",
"source_datasets:found",
"language:ca",
"license:cc-by-4.0",
"robust-speech-event"
] | null | 297 | 0 |
BeIR/beir-corpus | false | [
"task_categories:text-retrieval",
"task_ids:entity-linking-retrieval",
"task_ids:fact-checking-retrieval",
"multilinguality:monolingual",
"language:en",
"license:cc-by-sa-4.0"
] | null | 445 | 1 |
BeIR/beir | false | [
"task_categories:text-retrieval",
"task_ids:entity-linking-retrieval",
"task_ids:fact-checking-retrieval",
"multilinguality:monolingual",
"language:en",
"license:cc-by-sa-4.0"
] | null | 603 | 3 |