id | private | tags | description | downloads | likes |
---|---|---|---|---|---|
SetFit/go_emotions | false | [] | null | 295 | 1 |
SetFit/hate_speech18 | false | [] | null | 268 | 0 |
SetFit/hate_speech_offensive | false | [] | null | 273 | 0 |
SetFit/imdb | false | [] | null | 1,671 | 1 |
SetFit/insincere-questions | false | [] | null | 283 | 1 |
SetFit/mnli | false | [] | null | 695 | 0 |
SetFit/mnli_mm | false | [] | null | 269 | 0 |
SetFit/mrpc | false | [] | null | 462 | 1 |
SetFit/qnli | false | [] | null | 465 | 0 |
SetFit/qqp | false | [] | null | 705 | 3 |
SetFit/rte | false | [] | null | 564 | 0 |
SetFit/sst2 | false | [] | null | 1,593 | 1 |
SetFit/sst5 | false | [] | null | 6,969 | 3 |
SetFit/stsb | false | [] | null | 513 | 0 |
SetFit/student-question-categories | false | [] | null | 271 | 0 |
SetFit/subj | false | [] | null | 891 | 3 |
SetFit/toxic_conversations | false | [] | null | 312 | 3 |
SetFit/tweet_eval_stance | false | [] | null | 799 | 0 |
SetFit/tweet_sentiment_extraction | false | [] | null | 279 | 0 |
SetFit/wnli | false | [] | null | 285 | 0 |
SetFit/yelp_review_full | false | [] | null | 291 | 0 |
Shanna/Jamaica | false | [] | null | 135 | 0 |
ShinyQ/PPKM_Pemerintah | false | [] | null | 135 | 0 |
ShreyaR/DepressionDetection | false | [] | null | 304 | 0 |
Shushant/ContaminationQA | false | [] | null | 267 | 0 |
Shushant/NepaliSentiment | false | [] | null | 267 | 0 |
Shushant/nepali | false | [] | null | 267 | 0 |
SkelterLabsInc/JaQuAD | false | [
"task_categories:question-answering",
"task_ids:extractive-qa",
"annotations_creators:crowdsourced",
"language_creators:crowdsourced",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:10K<n<100K",
"source_datasets:original",
"language:ja",
"license:cc-by-sa-3.0",
"arxiv:2202.01764"
] | null | 372 | 4 |
Smiling/webnovels-en | false | [] | null | 265 | 0 |
SoLID/shellcode_i_a32 | false | [
"task_categories:text-generation",
"task_ids:language-modeling",
"annotations_creators:expert-generated",
"language_creators:expert-generated",
"language_creators:found",
"multilinguality:translation",
"size_categories:unknown",
"source_datasets:original",
"language:code",
"language:en",
"license:gpl-3.0",
"arxiv:2104.13100"
] | Shellcode_IA32 is a dataset for shellcode generation from English intents. The shellcodes are compilable on Intel Architecture 32-bits. | 263 | 3 |
SocialGrep/one-million-reddit-confessions | false | [
"annotations_creators:lexyr",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:1M<n<10M",
"source_datasets:original",
"language:en",
"license:cc-by-4.0"
] | null | 265 | 1 |
SocialGrep/one-million-reddit-jokes | false | [
"annotations_creators:lexyr",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:1M<n<10M",
"source_datasets:original",
"language:en",
"license:cc-by-4.0"
] | null | 316 | 3 |
SocialGrep/one-million-reddit-questions | false | [
"annotations_creators:lexyr",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:1M<n<10M",
"source_datasets:original",
"language:en",
"license:cc-by-4.0"
] | null | 745 | 2 |
SocialGrep/one-year-of-r-india | false | [
"annotations_creators:lexyr",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:1M<n<10M",
"source_datasets:original",
"language:en",
"license:cc-by-4.0"
] | This corpus contains the complete data for the activity of the subreddit /r/India from Sep 30, 2020 to Sep 30, 2021. | 397 | 1 |
SocialGrep/reddit-crypto-aug-2021 | false | [
"annotations_creators:lexyr",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:1M<n<10M",
"source_datasets:original",
"language:en",
"license:cc-by-4.0"
] | This corpus contains the complete data for the activity on seven major cryptocurrency subreddits for the entire month of August 2021. | 407 | 2 |
SocialGrep/reddit-nonewnormal-complete | false | [
"annotations_creators:lexyr",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:1M<n<10M",
"source_datasets:original",
"language:en",
"license:cc-by-4.0"
] | This corpus contains the complete data for the activity on subreddit /r/NoNewNormal for the entire duration of its existence. | 397 | 1 |
SocialGrep/reddit-wallstreetbets-aug-2021 | false | [
"annotations_creators:lexyr",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:1M<n<10M",
"source_datasets:original",
"language:en",
"license:cc-by-4.0"
] | This corpus contains the complete data for the activity on /r/WallStreetBets for the entire month of August 2021. | 397 | 1 |
SocialGrep/ten-million-reddit-answers | false | [
"annotations_creators:lexyr",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:10M<n<100M",
"source_datasets:original",
"language:en",
"license:cc-by-4.0"
] | A spiritual successor to our One Million Questions, this NLP dataset contains an outstanding ten million of /r/AskReddit answers, going back from the end of November of 2020. | 415 | 4 |
SocialGrep/the-2022-trucker-strike-on-reddit | false | [
"annotations_creators:lexyr",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:1M<n<10M",
"source_datasets:original",
"language:en",
"license:cc-by-4.0"
] | null | 267 | 1 |
SocialGrep/the-reddit-covid-dataset | false | [
"annotations_creators:lexyr",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:1M<n<10M",
"source_datasets:original",
"language:en",
"license:cc-by-4.0"
] | This dataset attempts to capture the full extent of COVID-19 discussion across the entire site of Reddit. All posts and comments found to mention the term 'COVID' as of 2021-10-25 have been gathered from the site. | 423 | 1 |
SocialGrep/top-american-universities-on-reddit | false | [
"annotations_creators:lexyr",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:100K<n<1M",
"source_datasets:original",
"language:en",
"license:cc-by-4.0"
] | This NLP dataset contains all the posts and comments in the subreddits of top 10 universities in the United States, chosen according to the 2019 Forbes ranking. | 400 | 1 |
SophieTr/reddit_clean | false | [] | null | 300 | 2 |
Sunbird/salt-dataset | false | [] | null | 436 | 2 |
SuperAI2-Machima/ThaiQA_LST20 | false | [
"language:thai",
"language:th",
"license:mit",
"question-generation dataset",
"qa dataset"
] | null | 266 | 0 |
SuperAI2-Machima/Yord_ThaiQA_LST20 | false | [] | null | 267 | 0 |
TRoboto/masc | false | [] | null | 135 | 0 |
TRoboto/names | false | [
"license:cc-by-4.0"
] | List of Arabic first names with meaning and origin of most names | 270 | 1 |
TaahaKazi/FCE | false | [] | null | 135 | 0 |
Taekyoon/test_none_state | false | [] | null | 267 | 0 |
Tahsin-Mayeesha/Bengali-SQuAD | false | [
"task_categories:question-answering",
"multilinguality:monolingual",
"language:bn"
] | null | 265 | 0 |
Tatyana/ru_sentiment_dataset | false | [
"language:ru",
"sentiment",
"text-classification"
] | null | 273 | 2 |
Terry0107/RiSAWOZ | false | [] | null | 135 | 0 |
TestCher/Testi | false | [] | null | 135 | 0 |
Tevatron/msmarco-passage-corpus | false | [] | null | 785 | 0 |
Tevatron/msmarco-passage | false | [] | null | 862 | 1 |
Tevatron/scifact-corpus | false | [] | null | 271 | 0 |
Tevatron/scifact | false | [] | null | 301 | 0 |
Tevatron/wikipedia-curated-corpus | false | [] | null | 267 | 0 |
Tevatron/wikipedia-curated | false | [] | null | 267 | 0 |
Tevatron/wikipedia-nq-corpus | false | [] | null | 713 | 0 |
Tevatron/wikipedia-nq | false | [] | null | 606 | 1 |
Tevatron/wikipedia-squad-corpus | false | [] | null | 267 | 0 |
Tevatron/wikipedia-squad | false | [] | null | 270 | 0 |
Tevatron/wikipedia-trivia-corpus | false | [] | null | 519 | 0 |
Tevatron/wikipedia-trivia | false | [] | null | 481 | 0 |
Tevatron/wikipedia-wq-corpus | false | [] | null | 270 | 0 |
Tevatron/wikipedia-wq | false | [] | null | 274 | 0 |
TheBlindBandit/SpongeNot | false | [] | null | 135 | 0 |
TimTreasure4/Test | false | [] | null | 135 | 0 |
Trainmaster9977/957 | false | [] | null | 135 | 0 |
Trainmaster9977/zbakuman | false | [] | null | 135 | 0 |
TristanBehrens/js-fakes-4bars | false | [] | null | 279 | 9 |
TurkuNLP/register_mc4 | false | [] | null | 267 | 0 |
TurkuNLP/register_oscar | false | [] | null | 276 | 4 |
TurkuNLP/turku_hockey_data2text | false | [] | The Turku Hockey Data2Text corpus was developed as a benchmark for evaluating template-free, machine learning methods on Finnish news generation in the area of ice hockey reporting. This dataset is a collection of 3,454 ice hockey games, each including game statistics and a news article describing the game. Each game includes manual alignment of events (such as goals or penalties) and sentences describing the specific event in natural language extracted from the news article. The corpus includes 12,827 annotated events. The natural language passages are manually curated not to include any information not derivable from the input data or world knowledge. | 267 | 0 |
TurkuNLP/turku_paraphrase_corpus | false | [
"task_categories:text-classification",
"task_categories:sentence-similarity",
"task_categories:text2text-generation",
"task_categories:other",
"task_ids:semantic-similarity-classification",
"annotations_creators:expert-generated",
"multilinguality:monolingual",
"size_categories:100K<n<1M",
"source_datasets:original",
"language:fi",
"license:cc-by-sa-4.0"
] | Turku Paraphrase Corpus is a dataset of 104,645 manually annotated Finnish paraphrases. The vast majority of the data is classified as a paraphrase either in the given context, or universally. | 814 | 2 |
Tyler/wikimatrix_collapsed | false | [] | null | 135 | 0 |
Usin2705/test | false | [] | null | 135 | 0 |
VJGamer/test | false | [] | null | 267 | 0 |
VadorMazer/skyrimdialogstest | false | [] | null | 267 | 0 |
Valahaar/wsdmt | false | [] | null | 271 | 0 |
Vishnu393831/VICTORY_dataset | false | [
"license:afl-3.0"
] | null | 133 | 0 |
Vishva/UniFAQ_DataSET | false | [] | null | 135 | 0 |
Wiedy/be | false | [] | null | 135 | 0 |
Wiedy/wav2vec2-large-xls-r-300m-tr-colab | false | [] | null | 135 | 0 |
Wikidepia/IndoParaCrawl | false | [] | null | 267 | 2 |
Wikidepia/IndoSQuAD | false | [] | null | 135 | 0 |
Wikidepia/mc4-filter | false | [] | null | 135 | 0 |
WillFerreiraSantos/halos | false | [] | null | 267 | 0 |
leey4n/KR3 | false | [
"task_categories:text-classification",
"task_ids:sentiment-classification",
"multilinguality:monolingual",
"size_categories:100K<n<1m",
"language:ko",
"license:cc-by-nc-sa-4.0"
] | null | 296 | 2 |
Wuhu0/output | false | [] | null | 135 | 0 |
WyrdCurt/AO4W | false | [] | null | 135 | 0 |
Xenova/sponsorblock-768 | false | [] | null | 267 | 1 |
Xenova/sponsorblock | false | [] | null | 267 | 2 |
XiangPan/iflytek | false | [] | null | 135 | 0 |
XiangPan/snli_break | false | [] | The SNLI corpus (version 1.0) is a collection of 570k human-written English
sentence pairs manually labeled for balanced classification with the labels
entailment, contradiction, and neutral, supporting the task of natural language
inference (NLI), also known as recognizing textual entailment (RTE). | 267 | 0 |
XiangXiang/clt | false | [] | null | 135 | 0 |
Xinghua/test | false | [] | null | 267 | 0 |
Yatoro/github-issues | false | [] | null | 267 | 0 |
Yatoro/github_issues | false | [] | null | 135 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.