id
stringlengths 2
115
| private
bool 1
class | tags
sequence | description
stringlengths 0
5.93k
⌀ | downloads
int64 0
1.14M
| likes
int64 0
1.79k
|
---|---|---|---|---|---|
masakhane/masakhanews | false | [
"task_categories:text-classification",
"task_ids:topic-classification",
"annotations_creators:expert-generated",
"language_creators:expert-generated",
"multilinguality:multilingual",
"size_categories:1K<n<10K",
"source_datasets:original",
"language:am",
"language:en",
"language:fr",
"language:ha",
"language:ig",
"language:ln",
"language:lg",
"language:om",
"language:pcm",
"language:rn",
"language:sn",
"language:so",
"language:sw",
"language:ti",
"language:xh",
"language:yo",
"license:afl-3.0",
"news-topic",
"masakhanews",
"masakhane"
] | MasakhaNEWS is the largest publicly available dataset for news topic classification in 16 languages widely spoken in Africa.
The languages are:
- Amharic (amh)
- English (eng)
- French (fra)
- Hausa (hau)
- Igbo (ibo)
- Lingala (lin)
- Luganda (lug)
- Oromo (orm)
- Nigerian Pidgin (pcm)
- Rundi (run)
- chiShona (sna)
- Somali (som)
- Kiswahili (swa)
- Tigrinya (tir)
- isiXhosa (xho)
- Yorùbá (yor)
The train/validation/test sets are available for all the 16 languages.
For more details see the arXiv paper. | 0 | 0 |
chronbmm/sanskrit-sandhi-split-sighum | false | [] | null | 0 | 0 |
chronbmm/sanskrit-sandhi-split-hackathon | false | [] | null | 0 | 0 |
ZeroUnbound/kodayuapril | false | [] | null | 0 | 0 |
jasperan/redbull-analytics-hol | false | [
"license:gpl-3.0"
] | null | 0 | 0 |
distilled-one-sec-cv12/chunk_190 | false | [] | null | 0 | 0 |
vincha77/generadai-sample-tutorial | false | [] | null | 0 | 0 |
distilled-one-sec-cv12/chunk_189 | false | [] | null | 0 | 0 |
distilled-one-sec-cv12/chunk_191 | false | [] | null | 0 | 0 |
mprzymus/osm_tiles_small | false | [] | null | 0 | 0 |
TeraSpace/glados_ru_fred_tune | false | [
"task_categories:text-generation",
"language:ru",
"license:mit"
] | null | 0 | 0 |
Kanike54/cagliostro-colab-ui | false | [] | null | 0 | 0 |
SHS/cancer_test_data2 | false | [] | null | 0 | 0 |
thewall/Aptani2Param | false | [
"license:openrail"
] | null | 0 | 0 |
casey-martin/oa_cpp_annotate_gen | false | [
"task_categories:question-answering",
"task_categories:text-classification",
"language:en"
] | null | 0 | 0 |
Ayane22/asami | false | [] | null | 0 | 0 |
hieunguyen1053/vsec | false | [] | null | 0 | 0 |
ACCA225/111 | false | [] | null | 0 | 0 |
Grop/1 | false | [] | null | 0 | 0 |
doushabao4766/weibo_ner_knowledge_V3 | false | [] | null | 0 | 0 |
EdwardLin2023/MELD-Audio | false | [
"license:cc-by-4.0"
] | Multimodal EmotionLines Dataset (MELD) has been created by enhancing and extending EmotionLines dataset.
MELD contains the same dialogue instances available in EmotionLines, but it also encompasses audio and
visual modality along with text. MELD has more than 1400 dialogues and 13000 utterances from Friends TV series.
Multiple speakers participated in the dialogues. Each utterance in a dialogue has been labeled by any of these
seven emotions -- Anger, Disgust, Sadness, Joy, Neutral, Surprise and Fear. MELD also has sentiment (positive,
negative and neutral) annotation for each utterance.
This dataset is slightly modified, so that it concentrates on Emotion recognition in audio input only. | 0 | 0 |
Vincent-luo/hagrid50k | false | [] | null | 0 | 0 |
chronbmm/vedic-dependency-parsing | false | [] | null | 0 | 0 |
miausdioa/mijulkaromana | false | [
"license:other"
] | null | 0 | 0 |
b-mc2/sql-create-context | false | [
"task_categories:text-generation",
"task_categories:question-answering",
"task_categories:table-question-answering",
"size_categories:10K<n<100K",
"language:en",
"license:cc-by-4.0",
"SQL",
"code",
"NLP",
"text-to-sql",
"context-sql",
"spider",
"wikisql",
"sqlglot"
] | null | 0 | 0 |
chetahy0711/CS6301_sampledata | false | [] | null | 0 | 0 |
Spico/ChCatExt | false | [
"language:zh",
"license:apache-2.0",
"finance"
] | null | 0 | 0 |
Zerenidel/kodayuapril | false | [] | null | 0 | 0 |
chunpingvi/audio_1 | false | [] | null | 0 | 0 |
nRuaif/Vietnamese_x_Alpaca | false | [
"license:mit"
] | null | 0 | 2 |
markkerzner/cool_new_dataset | false | [] | null | 0 | 0 |
ethers/cat-waifu-datasets | false | [] | null | 0 | 0 |
JerryMo/db-simpsons-dataset | false | [] | null | 0 | 0 |
strictiondbloodsugar/StrictionDBloodSugarReviews | false | [] | null | 0 | 0 |
quocanh34/youtube_dataset_wer_10 | false | [] | null | 0 | 0 |
hypeFit/HypeFit | false | [] | null | 0 | 0 |
nuveiskintagremoval/SharkTankSkinTagRemover | false | [] | null | 0 | 0 |
minxdragon/ddpm-butterflies-128 | false | [
"license:apache-2.0"
] | null | 0 | 0 |
quocanh34/youtube_dataset_wer_0 | false | [] | null | 0 | 0 |
bigcode/commits-8192 | false | [] | null | 0 | 0 |
korean-corpus/namu_wiki | false | [] | null | 0 | 0 |
thewall/Simulation | false | [
"license:openrail"
] | null | 0 | 0 |
ParisNeo/GPT4All-Community-Discussions | false | [
"task_categories:conversational",
"language:en",
"language:fr",
"language:de",
"language:ar",
"language:it",
"language:es",
"license:apache-2.0"
] | null | 0 | 0 |
lponsard/admCom | false | [] | null | 0 | 0 |
Phonecharger/news-programmatic-labeling | false | [] | null | 0 | 0 |
rika22/pretrained | false | [] | null | 0 | 0 |
Circularmachines/batch_indexing_machine_720f_768px | false | [] | null | 0 | 0 |
distilled-one-sec-cv12/chunk_197 | false | [] | null | 0 | 0 |
distilled-one-sec-cv12/chunk_200 | false | [] | null | 0 | 0 |
Maadhu/allenai | false | [] | null | 0 | 0 |
carnival13/hpqa-fid-input | false | [] | null | 0 | 0 |
distilled-one-sec-cv12/chunk_194 | false | [] | null | 0 | 0 |
sdasdadas/food | false | [] | null | 0 | 0 |
distilled-one-sec-cv12/chunk_195 | false | [] | null | 0 | 0 |
ruanchaves/assin2_por_Latn_to_eng_Latn | false | [] | null | 0 | 0 |
ruanchaves/rerelem_por_Latn_to_eng_Latn | false | [] | null | 0 | 0 |
ruanchaves/porsimplessent_por_Latn_to_eng_Latn | false | [] | null | 0 | 0 |
distilled-one-sec-cv12/chunk_198 | false | [] | null | 0 | 0 |
13GP/training | false | [
"license:mit"
] | null | 0 | 0 |
distilled-one-sec-cv12/chunk_201 | false | [] | null | 0 | 0 |
FASOXO/Kaggle-and-colab-NoveAI-Stabled-diffusion-ipynb | false | [
"license:openrail"
] | null | 0 | 1 |
distilled-one-sec-cv12/chunk_199 | false | [] | null | 0 | 0 |
distilled-one-sec-cv12/chunk_192 | false | [] | null | 0 | 0 |
distilled-one-sec-cv12/chunk_196 | false | [] | null | 0 | 0 |
Laskari-Naveen/donut_800 | false | [] | null | 0 | 0 |
aravind-selvam/chart_processed_512 | false | [
"license:mit"
] | null | 0 | 0 |
marasama/nva-yorktown | false | [] | null | 0 | 0 |
distilled-one-sec-cv12/chunk_202 | false | [] | null | 0 | 0 |
Haribert/your-dataset-name | false | [] | null | 0 | 0 |
jkot/parliament_hearings_processed | false | [] | null | 0 | 0 |
prajwalsahu5/smiles40m | false | [] | null | 0 | 0 |
BrunoHays/ESLO | false | [
"task_categories:automatic-speech-recognition",
"language:fr",
"license:cc-by-nc-4.0"
] | ESLO dataset, each utterance are taken out individually | 0 | 0 |
saivn98/Sd-webui | false | [] | null | 0 | 0 |
prajwalsahu5/smiles4m | false | [] | null | 0 | 0 |
reginaboateng/pubmed_long_tokenised | false | [] | null | 0 | 0 |
distilled-one-sec-cv12/chunk_204 | false | [] | null | 0 | 0 |
distilled-one-sec-cv12/chunk_205 | false | [] | null | 0 | 0 |
marasama/nva-kuroneko | false | [] | null | 0 | 0 |
distilled-one-sec-cv12/chunk_206 | false | [] | null | 0 | 0 |
fuwlstudioab/test | false | [
"license:cc-by-nc-sa-2.0"
] | null | 0 | 0 |
distilled-one-sec-cv12/chunk_208 | false | [] | null | 0 | 0 |
distilled-one-sec-cv12/chunk_203 | false | [] | null | 0 | 0 |
distilled-one-sec-cv12/chunk_210 | false | [] | null | 0 | 0 |
distilled-one-sec-cv12/chunk_207 | false | [] | null | 0 | 0 |
distilled-one-sec-cv12/chunk_209 | false | [] | null | 0 | 0 |
tejasbale02/friends-s03 | false | [] | null | 0 | 0 |
LevMuchnik/SupremeCourtOfIsrael | false | [
"size_categories:100K<n<1M",
"language:he",
"license:openrail",
"legal, verdicts, metadata"
] | null | 0 | 0 |
chilge/vits | false | [] | null | 0 | 0 |
distilled-one-sec-cv12/chunk_211 | false | [] | null | 0 | 0 |
distilled-one-sec-cv12/chunk_212 | false | [] | null | 0 | 0 |
carnival13/hpqa-fid-input-wo-sp | false | [] | null | 0 | 0 |
factored/fr_crawler2 | false | [] | null | 0 | 0 |
sander-wood/wikimusictext | false | [
"task_categories:text-classification",
"task_categories:text2text-generation",
"size_categories:1K<n<10K",
"language:en",
"license:mit",
"music",
"arxiv:2304.11029"
] | null | 0 | 0 |
nouman-10/americas_nlp_quy_everything | false | [] | null | 0 | 0 |
lighteval/mutual | false | [] | null | 0 | 0 |
lighteval/mutual_plus | false | [] | null | 0 | 0 |
lighteval/ThePileEval | false | [] | null | 0 | 0 |
slvnwhrl/tenkgnad-clustering-s2s | false | [
"size_categories:10K<n<100K",
"language:de",
"license:cc-by-nc-sa-4.0",
"embeddings",
"clustering",
"benchmark",
"arxiv:2210.07316"
] | null | 0 | 0 |
slvnwhrl/tenkgnad-clustering-p2p | false | [
"size_categories:10K<n<100K",
"language:de",
"license:cc-by-nc-sa-4.0",
"embeddings",
"clustering",
"benchmark",
"arxiv:2210.07316"
] | null | 0 | 0 |
renumics/speech_commands_enriched | false | [
"license:apache-2.0"
] | This is a set of one-second .wav audio files, each containing a single spoken
English word or background noise. These words are from a small set of commands, and are spoken by a
variety of different speakers. This data set is designed to help train simple
machine learning models. This dataset is covered in more detail at
[https://arxiv.org/abs/1804.03209](https://arxiv.org/abs/1804.03209).
Version 0.01 of the data set (configuration `"v0.01"`) was released on August 3rd 2017 and contains
64,727 audio files.
In version 0.01 thirty different words were recorded: "Yes", "No", "Up", "Down", "Left",
"Right", "On", "Off", "Stop", "Go", "Zero", "One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine",
"Bed", "Bird", "Cat", "Dog", "Happy", "House", "Marvin", "Sheila", "Tree", "Wow".
In version 0.02 more words were added: "Backward", "Forward", "Follow", "Learn", "Visual".
In both versions, ten of them are used as commands by convention: "Yes", "No", "Up", "Down", "Left",
"Right", "On", "Off", "Stop", "Go". Other words are considered to be auxiliary (in current implementation
it is marked by `True` value of `"is_unknown"` feature). Their function is to teach a model to distinguish core words
from unrecognized ones.
The `_silence_` class contains a set of longer audio clips that are either recordings or
a mathematical simulation of noise. | 0 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.