| Column | Type | Length (min) | Length (max) |
|---|---|---|---|
| sha | string | 40 | 40 |
| text | string | 1 | 13.4M |
| id | string | 2 | 117 |
| tags | sequence | 1 | 7.91k |
| created_at | string | 25 | 25 |
| metadata | string | 2 | 875k |
| last_modified | string | 25 | 25 |
| arxiv | sequence | 0 | 25 |
| languages | sequence | 0 | 7.91k |
| tags_str | string | 17 | 159k |
| text_str | string | 1 | 447k |
| text_lists | sequence | 0 | 352 |
| processed_texts | sequence | 1 | 353 |
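The table above lists the columns of this dump. As a rough sketch of how one might load and inspect them with the `datasets` library — the repository id/path is a placeholder, since the section does not name where this dump is stored:

```python
# Minimal sketch: load the dump and inspect the columns listed above.
# "path/to/this-dump" is a placeholder -- the section does not name the
# repository or file this schema was extracted from.
from datasets import load_dataset

ds = load_dataset("path/to/this-dump", split="train")  # hypothetical id/path
print(ds.features)                # column names and types (string vs. sequence)

row = ds[0]
print(row["id"], row["sha"], row["created_at"])
print(row["text"][:200])          # the raw dataset-card markdown
print(row["tags"])                # e.g. ["region:us"]
```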
22f325a1eb48c29c63398fd3a98c433082a983fc
# Dataset Card for "english_librispeech_asr_processed" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
jeapaul/english_librispeech_asr_processed
[ "region:us" ]
2022-11-24T08:39:32+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 281376389, "num_examples": 1892723}], "download_size": 156070397, "dataset_size": 281376389}}
2022-11-24T09:22:35+00:00
[]
[]
TAGS #region-us
# Dataset Card for "english_librispeech_asr_processed" More Information needed
[ "# Dataset Card for \"english_librispeech_asr_processed\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"english_librispeech_asr_processed\"\n\nMore Information needed" ]
0742eee00b8a75cdf712b4c27b7a85687a70c63e
# Dataset Card for "dalio-book-handwritten-io-sorted-v2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
AlekseyKorshuk/dalio-book-handwritten-io-sorted-v2
[ "region:us" ]
2022-11-24T09:10:55+00:00
{"dataset_info": {"features": [{"name": "input_text", "dtype": "string"}, {"name": "output_text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 804881.0, "num_examples": 599}, {"name": "validation", "num_bytes": 540344, "num_examples": 364}, {"name": "test", "num_bytes": 14786, "num_examples": 10}], "download_size": 721129, "dataset_size": 1360011.0}}
2022-11-24T09:16:29+00:00
[]
[]
TAGS #region-us
# Dataset Card for "dalio-book-handwritten-io-sorted-v2" More Information needed
[ "# Dataset Card for \"dalio-book-handwritten-io-sorted-v2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"dalio-book-handwritten-io-sorted-v2\"\n\nMore Information needed" ]
d6d5a2eb7dce6816b9c3d92270ec699443ecba8b
# Dataset Card for "sentences_truth" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
shreyasharma/sentences_truth
[ "region:us" ]
2022-11-24T11:10:53+00:00
{"dataset_info": {"features": [{"name": "sentences", "dtype": "string"}, {"name": "labels", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 445901, "num_examples": 7000}, {"name": "test", "num_bytes": 134824, "num_examples": 2000}, {"name": "val", "num_bytes": 151364, "num_examples": 2528}], "download_size": 0, "dataset_size": 732089}}
2022-11-24T11:15:28+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sentences_truth" More Information needed
[ "# Dataset Card for \"sentences_truth\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sentences_truth\"\n\nMore Information needed" ]
7613bc08f71af972a0f67ca52d40f89d72d977ee
# Dataset Card for "sentences_truthv2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
shreyasharma/sentences_truthv2
[ "region:us" ]
2022-11-24T11:16:02+00:00
{"dataset_info": {"features": [{"name": "sentences", "dtype": "string"}, {"name": "labels", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 445901, "num_examples": 7000}, {"name": "test", "num_bytes": 134824, "num_examples": 2000}, {"name": "val", "num_bytes": 151364, "num_examples": 2528}], "download_size": 254696, "dataset_size": 732089}}
2022-11-24T11:16:15+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sentences_truthv2" More Information needed
[ "# Dataset Card for \"sentences_truthv2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sentences_truthv2\"\n\nMore Information needed" ]
91501130537415b2597ca686efb7f75d56a273f9
# Dataset Card for "small-the_pile" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ola13/small-the_pile
[ "region:us" ]
2022-11-24T11:40:27+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "meta", "struct": [{"name": "perplexity_score", "dtype": "float64"}, {"name": "pile_set_name", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 606056668, "num_examples": 100000}], "download_size": 328667964, "dataset_size": 606056668}}
2022-11-24T11:40:52+00:00
[]
[]
TAGS #region-us
# Dataset Card for "small-the_pile" More Information needed
[ "# Dataset Card for \"small-the_pile\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"small-the_pile\"\n\nMore Information needed" ]
f975fa88ccfea268f412be33ed62cd3644d9d140
# Dataset Card for "small-c4" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
datablations/c4-filter-small
[ "region:us" ]
2022-11-24T11:43:28+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "timestamp", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "meta", "struct": [{"name": "perplexity_score", "dtype": "float64"}]}, {"name": "text_length", "dtype": "int64"}, {"name": "domain", "dtype": "null"}, {"name": "perplexity", "dtype": "float64"}, {"name": "dup_ratio", "dtype": "float64"}, {"name": "pairs", "sequence": {"sequence": "int64"}}, {"name": "repetitions", "sequence": "binary"}, {"name": "cluster", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 236459743, "num_examples": 100000}], "download_size": 140935431, "dataset_size": 236459743}}
2023-01-17T18:52:58+00:00
[]
[]
TAGS #region-us
# Dataset Card for "small-c4" More Information needed
[ "# Dataset Card for \"small-c4\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"small-c4\"\n\nMore Information needed" ]
f4d35f7523c2156660fa2a06b0a1b35cb4b9308e
# Dataset Card for "small-oscar" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
datablations/oscar-filter-small
[ "region:us" ]
2022-11-24T11:44:37+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}, {"name": "meta", "struct": [{"name": "annotations", "sequence": "string"}, {"name": "identification", "struct": [{"name": "label", "dtype": "string"}, {"name": "prob", "dtype": "float64"}]}, {"name": "line_identifications", "list": [{"name": "label", "dtype": "string"}, {"name": "prob", "dtype": "float64"}]}, {"name": "perplexity_score", "dtype": "float64"}, {"name": "warc_headers", "struct": [{"name": "content-length", "dtype": "int64"}, {"name": "content-type", "dtype": "string"}, {"name": "warc-block-digest", "dtype": "string"}, {"name": "warc-date", "dtype": "string"}, {"name": "warc-identified-content-language", "dtype": "string"}, {"name": "warc-record-id", "dtype": "string"}, {"name": "warc-refers-to", "dtype": "string"}, {"name": "warc-target-uri", "dtype": "string"}, {"name": "warc-type", "dtype": "string"}]}]}], "splits": [{"name": "train", "num_bytes": 658480427, "num_examples": 100000}], "download_size": 347756473, "dataset_size": 658480427}}
2022-11-24T11:45:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for "small-oscar" More Information needed
[ "# Dataset Card for \"small-oscar\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"small-oscar\"\n\nMore Information needed" ]
f40d8c4463aed3f9eb67c4b7ff4baaab1aa18e15
## Dataset Description - **Homepage:** [CUB 200 2011](http://www.vision.caltech.edu/datasets/cub_200_2011/) - **Repository:** [Caltech Vision Lab](http://www.vision.caltech.edu/datasets/cub_200_2011/) - **Paper:** [The Caltech-UCSD Birds-200-2011 Dataset](https://authors.library.caltech.edu/27452/1/CUB_200_2011.pdf) - **Leaderboard:** [Paperswithcode](https://paperswithcode.com/dataset/cub-200-2011) - **Point of Contact:** [Catherine Wah](https://scholar.google.com/citations?user=rCDdLUsAAAAJ&hl=en) # CC6204: Hackaton Deep Learning 2022 **Nota:** esta fue un actividad del curso CC6204: Deep Learning, Universidad de Chile, año 2022. Dictado por el profesor Iván Sipiran, material del curso [aquí](https://github.com/ivansipiran/CC6204-Deep-Learning). En esta actividad intentaremos resolver un problema de clasificación multimodal. En un problema de clasificación multimodal, cada pieza de información viene en diferentes representaciones (imágenes, texto, audios, etc) y la idea es determinar cómo usar esos datos para un problema de clasificación. En este caso trabajaremos con un dataset que contiene datos sobre especies de pájaros. ## Dataset ### Data Instances Una muestra del _dataset_ se encuentra a continuación: ``` {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=334x500 at 0x7F59DE348AF0>, 'description': 'this bird has a short orange bill, white breast and body and white eyes.\na medium sized bird with a orange bill and a black crown and white eyes\nthis white-breasted bird has a short, squat, orange bill, a black head and wings, and small white eyes above a white stripe.\nthis bird has a white breast, a black head, a short red beak, and webbed feet.\nthis bird is white with black on its neck and has a long, pointy beak.\nthis bird has wings that are black and has a white belly\nthis bird has wings that are black and has a long bill\nthis is a medium sized bird, with a white belly, and a grey head and wings, with a short yellow bill.\nthis bird is white and gray in color, and has a bright orange beak.\nthis bird has a blunt orange beak with mostly black above the neck, the belly is solid white.\n', 'label': 6, 'file_name': 'Parakeet_Auklet_0048_795980.jpg'} ``` ### Data Fields Cada instancia de datos tiene los siguientes campos: - `image`: imagen RGB de un pájaro - `description`: texto con 10 descripciones del pájaro en la foto, cada descripción esta separado por un salto de linea (i.e. 
`\n`) - `label`: un número entero que representa el id de la especie a la que pertenece el pájaro <details> <summary>Id2String</summary> ```bash 1 001.Black_footed_Albatross 2 002.Laysan_Albatross 3 003.Sooty_Albatross 4 004.Groove_billed_Ani 5 005.Crested_Auklet 6 006.Least_Auklet 7 007.Parakeet_Auklet 8 008.Rhinoceros_Auklet 9 009.Brewer_Blackbird 10 010.Red_winged_Blackbird 11 011.Rusty_Blackbird 12 012.Yellow_headed_Blackbird 13 013.Bobolink 14 014.Indigo_Bunting 15 015.Lazuli_Bunting 16 016.Painted_Bunting 17 017.Cardinal 18 018.Spotted_Catbird 19 019.Gray_Catbird 20 020.Yellow_breasted_Chat 21 021.Eastern_Towhee 22 022.Chuck_will_Widow 23 023.Brandt_Cormorant 24 024.Red_faced_Cormorant 25 025.Pelagic_Cormorant 26 026.Bronzed_Cowbird 27 027.Shiny_Cowbird 28 028.Brown_Creeper 29 029.American_Crow 30 030.Fish_Crow 31 031.Black_billed_Cuckoo 32 032.Mangrove_Cuckoo 33 033.Yellow_billed_Cuckoo 34 034.Gray_crowned_Rosy_Finch 35 035.Purple_Finch 36 036.Northern_Flicker 37 037.Acadian_Flycatcher 38 038.Great_Crested_Flycatcher 39 039.Least_Flycatcher 40 040.Olive_sided_Flycatcher 41 041.Scissor_tailed_Flycatcher 42 042.Vermilion_Flycatcher 43 043.Yellow_bellied_Flycatcher 44 044.Frigatebird 45 045.Northern_Fulmar 46 046.Gadwall 47 047.American_Goldfinch 48 048.European_Goldfinch 49 049.Boat_tailed_Grackle 50 050.Eared_Grebe 51 051.Horned_Grebe 52 052.Pied_billed_Grebe 53 053.Western_Grebe 54 054.Blue_Grosbeak 55 055.Evening_Grosbeak 56 056.Pine_Grosbeak 57 057.Rose_breasted_Grosbeak 58 058.Pigeon_Guillemot 59 059.California_Gull 60 060.Glaucous_winged_Gull 61 061.Heermann_Gull 62 062.Herring_Gull 63 063.Ivory_Gull 64 064.Ring_billed_Gull 65 065.Slaty_backed_Gull 66 066.Western_Gull 67 067.Anna_Hummingbird 68 068.Ruby_throated_Hummingbird 69 069.Rufous_Hummingbird 70 070.Green_Violetear 71 071.Long_tailed_Jaeger 72 072.Pomarine_Jaeger 73 073.Blue_Jay 74 074.Florida_Jay 75 075.Green_Jay 76 076.Dark_eyed_Junco 77 077.Tropical_Kingbird 78 078.Gray_Kingbird 79 079.Belted_Kingfisher 80 080.Green_Kingfisher 81 081.Pied_Kingfisher 82 082.Ringed_Kingfisher 83 083.White_breasted_Kingfisher 84 084.Red_legged_Kittiwake 85 085.Horned_Lark 86 086.Pacific_Loon 87 087.Mallard 88 088.Western_Meadowlark 89 089.Hooded_Merganser 90 090.Red_breasted_Merganser 91 091.Mockingbird 92 092.Nighthawk 93 093.Clark_Nutcracker 94 094.White_breasted_Nuthatch 95 095.Baltimore_Oriole 96 096.Hooded_Oriole 97 097.Orchard_Oriole 98 098.Scott_Oriole 99 099.Ovenbird 100 100.Brown_Pelican 101 101.White_Pelican 102 102.Western_Wood_Pewee 103 103.Sayornis 104 104.American_Pipit 105 105.Whip_poor_Will 106 106.Horned_Puffin 107 107.Common_Raven 108 108.White_necked_Raven 109 109.American_Redstart 110 110.Geococcyx 111 111.Loggerhead_Shrike 112 112.Great_Grey_Shrike 113 113.Baird_Sparrow 114 114.Black_throated_Sparrow 115 115.Brewer_Sparrow 116 116.Chipping_Sparrow 117 117.Clay_colored_Sparrow 118 118.House_Sparrow 119 119.Field_Sparrow 120 120.Fox_Sparrow 121 121.Grasshopper_Sparrow 122 122.Harris_Sparrow 123 123.Henslow_Sparrow 124 124.Le_Conte_Sparrow 125 125.Lincoln_Sparrow 126 126.Nelson_Sharp_tailed_Sparrow 127 127.Savannah_Sparrow 128 128.Seaside_Sparrow 129 129.Song_Sparrow 130 130.Tree_Sparrow 131 131.Vesper_Sparrow 132 132.White_crowned_Sparrow 133 133.White_throated_Sparrow 134 134.Cape_Glossy_Starling 135 135.Bank_Swallow 136 136.Barn_Swallow 137 137.Cliff_Swallow 138 138.Tree_Swallow 139 139.Scarlet_Tanager 140 140.Summer_Tanager 141 141.Artic_Tern 142 142.Black_Tern 143 143.Caspian_Tern 144 144.Common_Tern 145 
145.Elegant_Tern 146 146.Forsters_Tern 147 147.Least_Tern 148 148.Green_tailed_Towhee 149 149.Brown_Thrasher 150 150.Sage_Thrasher 151 151.Black_capped_Vireo 152 152.Blue_headed_Vireo 153 153.Philadelphia_Vireo 154 154.Red_eyed_Vireo 155 155.Warbling_Vireo 156 156.White_eyed_Vireo 157 157.Yellow_throated_Vireo 158 158.Bay_breasted_Warbler 159 159.Black_and_white_Warbler 160 160.Black_throated_Blue_Warbler 161 161.Blue_winged_Warbler 162 162.Canada_Warbler 163 163.Cape_May_Warbler 164 164.Cerulean_Warbler 165 165.Chestnut_sided_Warbler 166 166.Golden_winged_Warbler 167 167.Hooded_Warbler 168 168.Kentucky_Warbler 169 169.Magnolia_Warbler 170 170.Mourning_Warbler 171 171.Myrtle_Warbler 172 172.Nashville_Warbler 173 173.Orange_crowned_Warbler 174 174.Palm_Warbler 175 175.Pine_Warbler 176 176.Prairie_Warbler 177 177.Prothonotary_Warbler 178 178.Swainson_Warbler 179 179.Tennessee_Warbler 180 180.Wilson_Warbler 181 181.Worm_eating_Warbler 182 182.Yellow_Warbler 183 183.Northern_Waterthrush 184 184.Louisiana_Waterthrush 185 185.Bohemian_Waxwing 186 186.Cedar_Waxwing 187 187.American_Three_toed_Woodpecker 188 188.Pileated_Woodpecker 189 189.Red_bellied_Woodpecker 190 190.Red_cockaded_Woodpecker 191 191.Red_headed_Woodpecker 192 192.Downy_Woodpecker 193 193.Bewick_Wren 194 194.Cactus_Wren 195 195.Carolina_Wren 196 196.House_Wren 197 197.Marsh_Wren 198 198.Rock_Wren 199 199.Winter_Wren 200 200.Common_Yellowthroat ``` </details> - `file_name`: nombre del archivo que tiene la imagen ### Data Splits | |train| test| |------------------|----:|----:| |# de observaciones|5994 |5794 | ## Problema El problema consiste en entrenar un modelo que clasifique instancias del dataset CUB de la mejor manera posible. Algunas preguntas que podrían guiar nuestro desarrollo son: * Se podrá obtener un buen _performance_ de clasificación solo usando las imágenes del dataset? Este tipo de problema sería el clásico problema de clasificar imágenes. * Se podrá obtener un buen _performance_ de clasificación solo usando los textos del dataset? Este tipo de problema sería el clásico problema de clasificar texto. * Se podrá obtener un mejor _performance_ si combino la información en un modelo multimodal? Cómo construyo un modelo multimodal que reciba una imagen y un texto y clasifique la instancia con su respectiva especie? Hint: piense en cómo una red neuronal (la que sea) es simplemente una función que recibe un dato y genera una representación de alto nivel (vector característico) de ese dato. Una red CNN podría hacerse cargo de calcular la representación de una imagen y una red RNN podría hacerse cargo de calcular la representación del texto. Finalmente concateno ambas representaciones y entreno un MLP final que hace la clasificación. ## Experimentación Como el dataset es grande y los recursos de computación son muy limitados, una estrategia para hacer los experimentos es tomar una muestra más pequeña de datos para ir probando las ideas. Para esta estrategia, éstas son dos ideas válidas: * Tomar menos instancias por cada clase para el desarrollo y solo dejar el dataset final para hacer el entrenamiento final y la evaluación final con testing. * Tomar menos clases para el desarrollo inicial y solo dejar el dataset final para hacer el entrenamiento final y la evaluación final con testing. Ambas estrategias nos permiten lidiar con los recursos limitados que tenemos, pero cuáles son sus ventajas o desventajas? Si usas alguna de estas estrategias, puedes comentar este punto en tu desarrollo final. 
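The hint in the *Problema* section above (a CNN for the image, an RNN for the text, concatenation of both representations, then an MLP classifier) can be sketched roughly as follows. This is a minimal illustration only, assuming PyTorch and already-tokenized descriptions; the layer sizes, vocabulary size, and input resolution are placeholders, not part of the course activity.

```python
# Minimal sketch of the hinted architecture (CNN for the image, RNN for the
# text, concatenation, then an MLP classifier). Assumes PyTorch; all layer
# sizes, the vocabulary size, and the number of classes (200) are illustrative.
import torch
import torch.nn as nn

class MultimodalBirdClassifier(nn.Module):
    def __init__(self, vocab_size=10000, num_classes=200):
        super().__init__()
        # Image branch: a small CNN that maps an RGB image to a feature vector.
        self.cnn = nn.Sequential(
            nn.Conv2d(3, 32, 3, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(32, 64, 3, stride=2, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1), nn.Flatten(),        # -> (batch, 64)
        )
        # Text branch: embedding + GRU over the token ids of the description.
        self.embed = nn.Embedding(vocab_size, 128)
        self.rnn = nn.GRU(128, 128, batch_first=True)
        # Fusion: concatenate both representations and classify with an MLP.
        self.mlp = nn.Sequential(
            nn.Linear(64 + 128, 256), nn.ReLU(),
            nn.Linear(256, num_classes),
        )

    def forward(self, image, token_ids):
        img_feat = self.cnn(image)                         # (batch, 64)
        _, h = self.rnn(self.embed(token_ids))             # h: (1, batch, 128)
        txt_feat = h.squeeze(0)                            # (batch, 128)
        return self.mlp(torch.cat([img_feat, txt_feat], dim=1))

# Example forward pass with dummy data.
model = MultimodalBirdClassifier()
logits = model(torch.randn(4, 3, 224, 224), torch.randint(0, 10000, (4, 50)))
print(logits.shape)  # torch.Size([4, 200])
```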
## Métrica de Evaluación La métrica que se debe reportar es el accuracy en conjunto de test. ## Citation Information Sitio web del [_dataset_ CUB200](http://www.vision.caltech.edu/datasets/cub_200_2011/), y reporte técnico [aquí](https://authors.library.caltech.edu/27452/1/CUB_200_2011.pdf). ``` @techreport{WahCUB_200_2011, Title = {The Caltech-UCSD Birds-200-2011 Dataset}, Author = {Wah, C. and Branson, S. and Welinder, P. and Perona, P. and Belongie, S.}, Year = {2011}, Institution = {California Institute of Technology}, Number = {CNS-TR-2011-001} } ``` ## Contributions Creación y adaptación del material de la actividad en un Hugging Face dataset por Cristóbal Alcázar.
alkzar90/CC6204-Hackaton-Cub-Dataset
[ "task_categories:image-classification", "task_categories:text-classification", "task_ids:multi-class-image-classification", "size_categories:10K<n<15K", "source_datasets:extended|other", "language:en", "license:apache-2.0", "region:us" ]
2022-11-24T13:29:55+00:00
{"language": ["en"], "license": ["apache-2.0"], "size_categories": ["10K<n<15K"], "source_datasets": ["extended|other"], "task_categories": ["image-classification", "text-classification"], "task_ids": ["multi-class-image-classification"], "paperswithcode_id": "cub-200-2011", "pretty_name": "CC6204-Hackaton-CUB200"}
2023-01-12T12:14:32+00:00
[]
[ "en" ]
TAGS #task_categories-image-classification #task_categories-text-classification #task_ids-multi-class-image-classification #size_categories-10K<n<15K #source_datasets-extended|other #language-English #license-apache-2.0 #region-us
Dataset Description ------------------- * Homepage: CUB 200 2011 * Repository: Caltech Vision Lab * Paper: The Caltech-UCSD Birds-200-2011 Dataset * Leaderboard: Paperswithcode * Point of Contact: Catherine Wah CC6204: Hackaton Deep Learning 2022 =================================== Nota: esta fue un actividad del curso CC6204: Deep Learning, Universidad de Chile, año 2022. Dictado por el profesor Iván Sipiran, material del curso aquí. En esta actividad intentaremos resolver un problema de clasificación multimodal. En un problema de clasificación multimodal, cada pieza de información viene en diferentes representaciones (imágenes, texto, audios, etc) y la idea es determinar cómo usar esos datos para un problema de clasificación. En este caso trabajaremos con un dataset que contiene datos sobre especies de pájaros. Dataset ------- ### Data Instances Una muestra del *dataset* se encuentra a continuación: ### Data Fields Cada instancia de datos tiene los siguientes campos: * 'image': imagen RGB de un pájaro * 'description': texto con 10 descripciones del pájaro en la foto, cada descripción esta separado por un salto de linea (i.e. '\n') * 'label': un número entero que representa el id de la especie a la que pertenece el pájaro Id2String - 'file\_name': nombre del archivo que tiene la imagen ### Data Splits Problema -------- El problema consiste en entrenar un modelo que clasifique instancias del dataset CUB de la mejor manera posible. Algunas preguntas que podrían guiar nuestro desarrollo son: * Se podrá obtener un buen *performance* de clasificación solo usando las imágenes del dataset? Este tipo de problema sería el clásico problema de clasificar imágenes. * Se podrá obtener un buen *performance* de clasificación solo usando los textos del dataset? Este tipo de problema sería el clásico problema de clasificar texto. * Se podrá obtener un mejor *performance* si combino la información en un modelo multimodal? Cómo construyo un modelo multimodal que reciba una imagen y un texto y clasifique la instancia con su respectiva especie? Hint: piense en cómo una red neuronal (la que sea) es simplemente una función que recibe un dato y genera una representación de alto nivel (vector característico) de ese dato. Una red CNN podría hacerse cargo de calcular la representación de una imagen y una red RNN podría hacerse cargo de calcular la representación del texto. Finalmente concateno ambas representaciones y entreno un MLP final que hace la clasificación. Experimentación --------------- Como el dataset es grande y los recursos de computación son muy limitados, una estrategia para hacer los experimentos es tomar una muestra más pequeña de datos para ir probando las ideas. Para esta estrategia, éstas son dos ideas válidas: * Tomar menos instancias por cada clase para el desarrollo y solo dejar el dataset final para hacer el entrenamiento final y la evaluación final con testing. * Tomar menos clases para el desarrollo inicial y solo dejar el dataset final para hacer el entrenamiento final y la evaluación final con testing. Ambas estrategias nos permiten lidiar con los recursos limitados que tenemos, pero cuáles son sus ventajas o desventajas? Si usas alguna de estas estrategias, puedes comentar este punto en tu desarrollo final. Métrica de Evaluación --------------------- La métrica que se debe reportar es el accuracy en conjunto de test. Sitio web del *dataset* CUB200, y reporte técnico aquí. 
Contributions ------------- Creación y adaptación del material de la actividad en un Hugging Face dataset por Cristóbal Alcázar.
[ "### Data Instances\n\n\nUna muestra del *dataset* se encuentra a continuación:", "### Data Fields\n\n\nCada instancia de datos tiene los siguientes campos:\n\n\n* 'image': imagen RGB de un pájaro\n* 'description': texto con 10 descripciones del pájaro en la foto, cada descripción esta separado por un salto de linea (i.e. '\\n')\n* 'label': un número entero que representa el id de la especie a la que pertenece el pájaro\n\n\n\nId2String\n\n- 'file\\_name': nombre del archivo que tiene la imagen", "### Data Splits\n\n\n\nProblema\n--------\n\n\nEl problema consiste en entrenar un modelo que clasifique instancias del dataset CUB de la mejor manera posible. Algunas preguntas que podrían guiar nuestro desarrollo son:\n\n\n* Se podrá obtener un buen *performance* de clasificación solo usando las imágenes del dataset? Este tipo de problema sería el clásico problema de clasificar imágenes.\n* Se podrá obtener un buen *performance* de clasificación solo usando los textos del dataset? Este tipo de problema sería el clásico problema de clasificar texto.\n* Se podrá obtener un mejor *performance* si combino la información en un modelo multimodal? Cómo construyo un modelo multimodal que reciba una imagen y un texto y clasifique la instancia con su respectiva especie? Hint: piense en cómo una red neuronal (la que sea) es simplemente una función que recibe un dato y genera una representación de alto nivel (vector característico) de ese dato. Una red CNN podría hacerse cargo de calcular la representación de una imagen y una red RNN podría hacerse cargo de calcular la representación del texto. Finalmente concateno ambas representaciones y entreno un MLP final que hace la clasificación.\n\n\nExperimentación\n---------------\n\n\nComo el dataset es grande y los recursos de computación son muy limitados, una estrategia para hacer los experimentos es tomar una muestra más pequeña de datos para ir probando las ideas. Para esta estrategia, éstas son dos ideas válidas:\n\n\n* Tomar menos instancias por cada clase para el desarrollo y solo dejar el dataset final para hacer el entrenamiento final y la evaluación final con testing.\n* Tomar menos clases para el desarrollo inicial y solo dejar el dataset final para hacer el entrenamiento final y la evaluación final con testing.\n\n\nAmbas estrategias nos permiten lidiar con los recursos limitados que tenemos, pero cuáles son sus ventajas o desventajas? Si usas alguna de estas estrategias, puedes comentar este punto en tu desarrollo final.\n\n\nMétrica de Evaluación\n---------------------\n\n\nLa métrica que se debe reportar es el accuracy en conjunto de test.\n\n\nSitio web del *dataset* CUB200, y reporte técnico aquí.\n\n\nContributions\n-------------\n\n\nCreación y adaptación del material de la actividad en un Hugging Face dataset por Cristóbal Alcázar." ]
[ "TAGS\n#task_categories-image-classification #task_categories-text-classification #task_ids-multi-class-image-classification #size_categories-10K<n<15K #source_datasets-extended|other #language-English #license-apache-2.0 #region-us \n", "### Data Instances\n\n\nUna muestra del *dataset* se encuentra a continuación:", "### Data Fields\n\n\nCada instancia de datos tiene los siguientes campos:\n\n\n* 'image': imagen RGB de un pájaro\n* 'description': texto con 10 descripciones del pájaro en la foto, cada descripción esta separado por un salto de linea (i.e. '\\n')\n* 'label': un número entero que representa el id de la especie a la que pertenece el pájaro\n\n\n\nId2String\n\n- 'file\\_name': nombre del archivo que tiene la imagen", "### Data Splits\n\n\n\nProblema\n--------\n\n\nEl problema consiste en entrenar un modelo que clasifique instancias del dataset CUB de la mejor manera posible. Algunas preguntas que podrían guiar nuestro desarrollo son:\n\n\n* Se podrá obtener un buen *performance* de clasificación solo usando las imágenes del dataset? Este tipo de problema sería el clásico problema de clasificar imágenes.\n* Se podrá obtener un buen *performance* de clasificación solo usando los textos del dataset? Este tipo de problema sería el clásico problema de clasificar texto.\n* Se podrá obtener un mejor *performance* si combino la información en un modelo multimodal? Cómo construyo un modelo multimodal que reciba una imagen y un texto y clasifique la instancia con su respectiva especie? Hint: piense en cómo una red neuronal (la que sea) es simplemente una función que recibe un dato y genera una representación de alto nivel (vector característico) de ese dato. Una red CNN podría hacerse cargo de calcular la representación de una imagen y una red RNN podría hacerse cargo de calcular la representación del texto. Finalmente concateno ambas representaciones y entreno un MLP final que hace la clasificación.\n\n\nExperimentación\n---------------\n\n\nComo el dataset es grande y los recursos de computación son muy limitados, una estrategia para hacer los experimentos es tomar una muestra más pequeña de datos para ir probando las ideas. Para esta estrategia, éstas son dos ideas válidas:\n\n\n* Tomar menos instancias por cada clase para el desarrollo y solo dejar el dataset final para hacer el entrenamiento final y la evaluación final con testing.\n* Tomar menos clases para el desarrollo inicial y solo dejar el dataset final para hacer el entrenamiento final y la evaluación final con testing.\n\n\nAmbas estrategias nos permiten lidiar con los recursos limitados que tenemos, pero cuáles son sus ventajas o desventajas? Si usas alguna de estas estrategias, puedes comentar este punto en tu desarrollo final.\n\n\nMétrica de Evaluación\n---------------------\n\n\nLa métrica que se debe reportar es el accuracy en conjunto de test.\n\n\nSitio web del *dataset* CUB200, y reporte técnico aquí.\n\n\nContributions\n-------------\n\n\nCreación y adaptación del material de la actividad en un Hugging Face dataset por Cristóbal Alcázar." ]
5b5a2cacbf80b444dc055eb9b47f63ebac4b1a4c
# AutoTrain Dataset for project: shapes ## Dataset Description This dataset has been automatically processed by AutoTrain for project shapes. ### Languages The BCP-47 code for the dataset's language is unk. ## Dataset Structure ### Data Instances A sample from this dataset looks as follows: ```json [ { "image": "<28x28 RGB PIL image>", "target": 0 }, { "image": "<28x28 RGB PIL image>", "target": 2 } ] ``` ### Dataset Fields The dataset has the following fields (also called "features"): ```json { "image": "Image(decode=True, id=None)", "target": "ClassLabel(num_classes=3, names=['circles', 'squares', 'triangles'], id=None)" } ``` ### Dataset Splits This dataset is split into train and validation splits. The split sizes are as follows: | Split name | Num samples | | ------------ | ------------------- | | train | 240 | | valid | 60 |
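A minimal loading sketch for this dataset, assuming the repository id listed below (`Ana85baul/autotrain-data-shapes`) is publicly loadable with the `datasets` library:

```python
# Minimal sketch: load the AutoTrain "shapes" dataset described above and map
# the integer targets back to class names. Assumes the repository id shown
# below (Ana85baul/autotrain-data-shapes) is accessible.
from datasets import load_dataset

ds = load_dataset("Ana85baul/autotrain-data-shapes")
print(ds)                                      # splits: train (240), valid (60)
names = ds["train"].features["target"].names   # ['circles', 'squares', 'triangles']
sample = ds["train"][0]
print(sample["image"].size, names[sample["target"]])
```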
Ana85baul/autotrain-data-shapes
[ "task_categories:image-classification", "region:us" ]
2022-11-24T14:19:50+00:00
{"task_categories": ["image-classification"]}
2022-11-24T14:21:22+00:00
[]
[]
TAGS #task_categories-image-classification #region-us
AutoTrain Dataset for project: shapes ===================================== Dataset Description ------------------- This dataset has been automatically processed by AutoTrain for project shapes. ### Languages The BCP-47 code for the dataset's language is unk. Dataset Structure ----------------- ### Data Instances A sample from this dataset looks as follows: ### Dataset Fields The dataset has the following fields (also called "features"): ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follow:
[ "### Languages\n\n\nThe BCP-47 code for the dataset's language is unk.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
[ "TAGS\n#task_categories-image-classification #region-us \n", "### Languages\n\n\nThe BCP-47 code for the dataset's language is unk.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
d6b2746a90edfa25bc6c0aca778dc52bcd950e2a
# Dataset Card for "europarl_bilingual_processed" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
jeapaul/europarl_bilingual_processed
[ "region:us" ]
2022-11-24T18:30:02+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 281100121, "num_examples": 1892723}], "download_size": 155904108, "dataset_size": 281100121}}
2022-11-24T18:30:24+00:00
[]
[]
TAGS #region-us
# Dataset Card for "europarl_bilingual_processed" More Information needed
[ "# Dataset Card for \"europarl_bilingual_processed\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"europarl_bilingual_processed\"\n\nMore Information needed" ]
01db0fcdbbf75ea4ed4f454fa7f1c3b6ebf88d68
# Dataset Card for "english_europarl_bilingual_processed" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
jeapaul/english_europarl_bilingual_processed
[ "region:us" ]
2022-11-24T18:33:55+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 281100121, "num_examples": 1892723}], "download_size": 155904108, "dataset_size": 281100121}}
2022-11-24T18:34:17+00:00
[]
[]
TAGS #region-us
# Dataset Card for "english_europarl_bilingual_processed" More Information needed
[ "# Dataset Card for \"english_europarl_bilingual_processed\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"english_europarl_bilingual_processed\"\n\nMore Information needed" ]
f35cee89382278b8d63d6d6f9c27cfeda3deb3f5
# Dataset Card for VIMA-Data ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Dataset Structure](#dataset-structure) - [Dataset Creation](#dataset-creation) - [Additional Information](#additional-information) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://vimalabs.github.io/ - **Repository:** https://github.com/vimalabs/VimaBench - **Paper:** https://arxiv.org/abs/2210.03094 ### Dataset Summary This is the official dataset used to train general robot manipulation agents with multimodal prompts, as presented in the [paper](https://arxiv.org/abs/2210.03094). It contains 650K trajectories for 13 tasks in [VIMA-Bench](https://github.com/vimalabs/VimaBench). All demonstrations are generated by oracles. ## Dataset Structure Data are grouped into different tasks. Within each trajectory's folder, there are two folders `rgb_front` and `rgb_top`, and three files `obs.pkl`, `action.pkl`, and `trajectory.pkl`. RGB frames from each perspective are stored separately in the corresponding folder. `obs.pkl` includes segmentation and the state of the end effector. `action.pkl` contains oracle actions. `trajectory.pkl` contains meta information such as elapsed steps, task information, and object information. Users can build their custom data pipeline starting from here. More details and examples can be found [here](https://github.com/vimalabs/VimaBench#training-data). ## Dataset Creation All demonstrations are generated by scripted oracles. ## Additional Information ### Licensing Information This dataset is released under the [Creative Commons Attribution 4.0 International (CC BY 4.0)](https://creativecommons.org/licenses/by/4.0/legalcode) license. ### Citation Information If you find our work useful, please consider citing us! ```bibtex @inproceedings{jiang2023vima, title = {VIMA: General Robot Manipulation with Multimodal Prompts}, author = {Yunfan Jiang and Agrim Gupta and Zichen Zhang and Guanzhi Wang and Yongqiang Dou and Yanjun Chen and Li Fei-Fei and Anima Anandkumar and Yuke Zhu and Linxi Fan}, booktitle = {Fortieth International Conference on Machine Learning}, year = {2023} } ```
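Based on the folder layout described in the card, a first custom loading step for a single trajectory might look like the sketch below. The task name, trajectory index, and frame-file naming are assumptions; only the folder and file names (`rgb_front`, `rgb_top`, `obs.pkl`, `action.pkl`, `trajectory.pkl`) come from the card.

```python
# Minimal sketch of reading one trajectory, following the layout described
# above (rgb_front/, rgb_top/, obs.pkl, action.pkl, trajectory.pkl).
# The task name and trajectory index in the path are hypothetical.
import pickle
from pathlib import Path

traj_dir = Path("vima_data/rearrange/000000")  # hypothetical path

with open(traj_dir / "obs.pkl", "rb") as f:
    obs = pickle.load(f)          # segmentation + end-effector state
with open(traj_dir / "action.pkl", "rb") as f:
    actions = pickle.load(f)      # oracle actions
with open(traj_dir / "trajectory.pkl", "rb") as f:
    meta = pickle.load(f)         # elapsed steps, task info, object info

# RGB frames for one perspective; the frame file format is not specified
# in the card, so we just list whatever is in the folder.
front_frames = sorted((traj_dir / "rgb_front").iterdir())
print(type(obs), type(actions), type(meta), len(front_frames))
```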
VIMA/VIMA-Data
[ "license:cc-by-4.0", "arxiv:2210.03094", "region:us" ]
2022-11-24T19:59:13+00:00
{"license": "cc-by-4.0"}
2023-06-17T03:52:09+00:00
[ "2210.03094" ]
[]
TAGS #license-cc-by-4.0 #arxiv-2210.03094 #region-us
# Dataset Card for VIMA-Data ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Dataset Structure - Dataset Creation - Additional Information - Licensing Information - Citation Information ## Dataset Description - Homepage: URL - Repository: URL - Paper: URL ### Dataset Summary This is the official dataset used to train general robot manipulation agents with multimodal prompts, as presented in paper. It contains 650K trajectories for 13 tasks in VIMA-Bench. All demonstrations are generated by oracles. ## Dataset Structure Data are grouped into different tasks. Within each trajectory's folder, there are two folders 'rgb_front' and 'rgb_top', and three files 'URL', 'URL', and 'URL'. RGB frames from a certain perspective are separately stored in corresponding folder. 'URL' includes segmentation and state of end effector. 'URL' contains oracle actions. 'URL' contains meta information such as elapsed steps, task information, and object information. Users can build their custom data piepline starting from here. More details and examples can be found here. ## Dataset Creation All demonstrations are generated by scripted oracles. ## Additional Information ### Licensing Information This dataset is released under the Creative Commons Attribution 4.0 International (CC BY 4.0) license. If you find our work useful, please consider citing us!
[ "# Dataset Card for VIMA-Data", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n- Dataset Structure\n- Dataset Creation\n- Additional Information\n - Licensing Information\n - Citation Information", "## Dataset Description\n\n- Homepage: URL\n- Repository: URL\n- Paper: URL", "### Dataset Summary\n\nThis is the official dataset used to train general robot manipulation agents with multimodal prompts, as presented in paper. It contains 650K trajectories for 13 tasks in VIMA-Bench. All demonstrations are generated by oracles.", "## Dataset Structure\n\nData are grouped into different tasks. Within each trajectory's folder, there are two folders 'rgb_front' and 'rgb_top', and three files 'URL', 'URL', and 'URL'. RGB frames from a certain perspective are separately stored in corresponding folder. 'URL' includes segmentation and state of end effector. 'URL' contains oracle actions. 'URL' contains meta information such as elapsed steps, task information, and object information. Users can build their custom data piepline starting from here. More details and examples can be found here.", "## Dataset Creation\n\nAll demonstrations are generated by scripted oracles.", "## Additional Information", "### Licensing Information\n\nThis dataset is released under the Creative Commons Attribution 4.0 International (CC BY 4.0) license.\n\n\n\nIf you find our work useful, please consider citing us!" ]
[ "TAGS\n#license-cc-by-4.0 #arxiv-2210.03094 #region-us \n", "# Dataset Card for VIMA-Data", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n- Dataset Structure\n- Dataset Creation\n- Additional Information\n - Licensing Information\n - Citation Information", "## Dataset Description\n\n- Homepage: URL\n- Repository: URL\n- Paper: URL", "### Dataset Summary\n\nThis is the official dataset used to train general robot manipulation agents with multimodal prompts, as presented in paper. It contains 650K trajectories for 13 tasks in VIMA-Bench. All demonstrations are generated by oracles.", "## Dataset Structure\n\nData are grouped into different tasks. Within each trajectory's folder, there are two folders 'rgb_front' and 'rgb_top', and three files 'URL', 'URL', and 'URL'. RGB frames from a certain perspective are separately stored in corresponding folder. 'URL' includes segmentation and state of end effector. 'URL' contains oracle actions. 'URL' contains meta information such as elapsed steps, task information, and object information. Users can build their custom data piepline starting from here. More details and examples can be found here.", "## Dataset Creation\n\nAll demonstrations are generated by scripted oracles.", "## Additional Information", "### Licensing Information\n\nThis dataset is released under the Creative Commons Attribution 4.0 International (CC BY 4.0) license.\n\n\n\nIf you find our work useful, please consider citing us!" ]
5a5d94ce00f90000c2d88904f5cd62657089a142
# Dataset Card for VASR - [Dataset Description](#dataset-description) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [How to Submit Predictions?](#how-to-submit-predictions?) - [Colab notebook code for VASR evaluation with ViT](#colab-notebook-code-for-vasr-evaluation-with-clip) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description VASR is a challenging dataset for evaluating computer vision commonsense reasoning abilities. Given a triplet of images, the task is to select an image candidate B' that completes the analogy (A to A' is like B to what?). Unlike previous work on visual analogy that focused on simple image transformations, we tackle complex analogies requiring understanding of scenes. Our experiments demonstrate that state-of-the-art models struggle with carefully chosen distractors (±53%, compared to 90% human accuracy). - **Homepage:** https://vasr-dataset.github.io/ - **Colab** https://colab.research.google.com/drive/1HUg0aHonFDK3hVFrIRYdSEfpUJeY-4dI - **Repository:** https://github.com/vasr-dataset/vasr/tree/main/experiments - **Paper:** https://arxiv.org/abs/2212.04542 - **Leaderboard:** https://vasr-dataset.github.io/ - **Point of Contact:** [email protected] ## Supported Tasks and Leaderboards https://vasr.github.io/leaderboard. https://paperswithcode.com/dataset/vasr. ## How to Submit Predictions? To submit predictions, please send a prediction CSV file to [email protected] / [email protected]. The prediction file should include a "B'" column with the predicted candidate name that best solves the analogy, and an index from 1 to 4 indicating the location of the predicted candidate in the given candidate list. An example prediction file is available [HERE](https://drive.google.com/file/d/1NvBNdvlWmEOYjIVi2xdmQ_tUm-TXo42u/view?usp=share_link). A submission is allowed once a week, and you will receive a response within a week. ## Colab notebook code for VASR evaluation with ViT https://colab.research.google.com/drive/1HUg0aHonFDK3hVFrIRYdSEfpUJeY-4dI ### Languages English. ## Dataset Structure ### Data Fields A: datasets.Image() - the first input image, **A**:A'. A': datasets.Image() - the second input image, different from A in a single key, A:**A'**. B: datasets.Image() - the third input image, has the same different item as A, **B**:B'. B': datasets.Image() - the forth image, which is the analogy solution. Different from B in a single key (the same different one as in A:A'), B:**B'**. Hidden in the test set. candidates_images: [datasets.Image()] - a list of candidate images solutions to the analogy. label: datasets.Value("int64") - the index of the ground-truth solution. Hidden in the test set. candidates: [datasets.Value("string")] - a list of candidate string solutions to the analogy. ### Data Splits There are three splits, TRAIN, VALIDATION, and TEST. Since there are four candidates and one solution, random chance is 25%. ## Dataset Creation We leverage situation recognition annotations and the CLIP model to generate a large set of 500k candidate analogies. There are two types of labels: - Silver labels, obtained from the automatic generation. - Gold labels, obtained from human annotations over the silver annotations. 
In the huggingface version we provide only the gold labeled dataset. Please refer to the project website download page if you want to download the silver labels version. ### Annotations #### Annotation process We paid Amazon Mechanical Turk Workers to solve analogies, five annotators for each analogy. Workers were asked to select the image that best solves the analogy. The resulting dataset is composed of the 3,820 instances agreed upon with a majority vote of at least 3 annotators, which was obtained in 93% of the cases. ## Considerations for Using the Data All associations were obtained with human annotators. All used images are from the imSitu dataset (http://imsitu.org/) Using this data is allowed for academic research alone. ### Licensing Information CC-By 4.0 ### Citation Information NA
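As a rough illustration of the submission format described above, the sketch below writes a prediction CSV using a random-choice baseline in place of a real model. The "B'" column follows the card; the name of the index column is an assumption (the card only says an index from 1 to 4 must be included), and loading the gated dataset may require accepting its terms on the Hub first.

```python
# Minimal sketch of building a prediction file in the format described above:
# one row per test instance, a "B'" column with the chosen candidate name and
# a 1-4 index giving its position in the candidate list. Random choice stands
# in for a real model; the "index" column name is an assumption.
import csv
import random

from datasets import load_dataset

test = load_dataset("nlphuji/vasr", split="test")  # gated; may need Hub access

with open("vasr_predictions.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["B'", "index"])
    for example in test:
        candidates = example["candidates"]            # four candidate names
        position = random.randrange(len(candidates))  # 0-based choice
        writer.writerow([candidates[position], position + 1])  # 1-based index
```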
nlphuji/vasr
[ "annotations_creators:crowdsourced", "language_creators:found", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:en", "license:cc-by-4.0", "commonsense-reasoning", "visual-reasoning", "arxiv:2212.04542", "region:us" ]
2022-11-24T21:05:27+00:00
{"annotations_creators": ["crowdsourced"], "language_creators": ["found"], "language": ["en"], "license": ["cc-by-4.0"], "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "source_datasets": ["original"], "task_ids": [], "paperswithcode_id": "vasr", "pretty_name": "VASR", "tags": ["commonsense-reasoning", "visual-reasoning"], "extra_gated_prompt": "By clicking on \u201cAccess repository\u201d below, you also agree that you are using it solely for research purposes. The full license agreement is available in the dataset files."}
2022-12-30T19:39:46+00:00
[ "2212.04542" ]
[ "en" ]
TAGS #annotations_creators-crowdsourced #language_creators-found #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-English #license-cc-by-4.0 #commonsense-reasoning #visual-reasoning #arxiv-2212.04542 #region-us
# Dataset Card for VASR - Dataset Description - Supported Tasks and Leaderboards - How to Submit Predictions? - Colab notebook code for VASR evaluation with ViT - Languages - Dataset Structure - Data Fields - Data Splits - Dataset Creation - Considerations for Using the Data - Licensing Information - Citation Information ## Dataset Description VASR is a challenging dataset for evaluating computer vision commonsense reasoning abilities. Given a triplet of images, the task is to select an image candidate B' that completes the analogy (A to A' is like B to what?). Unlike previous work on visual analogy that focused on simple image transformations, we tackle complex analogies requiring understanding of scenes. Our experiments demonstrate that state-of-the-art models struggle with carefully chosen distractors (±53%, compared to 90% human accuracy). - Homepage: URL - Colab URL - Repository: URL - Paper: URL - Leaderboard: URL - Point of Contact: URL@URL ## Supported Tasks and Leaderboards URL URL ## How to Submit Predictions? To submit predictions, please send a prediction CSV file to vasr.benchmark@URL / URL@URL. The prediction file should include a "B'" column with the predicted candidate name that best solves the analogy, and an index from 1 to 4 indicating the location of the predicted candidate in the given candidate list. An example prediction file is available HERE. A submission is allowed once a week, and you will receive a response within a week. ## Colab notebook code for VASR evaluation with ViT URL ### Languages English. ## Dataset Structure ### Data Fields A: datasets.Image() - the first input image, A:A'. A': datasets.Image() - the second input image, different from A in a single key, A:A'. B: datasets.Image() - the third input image, has the same different item as A, B:B'. B': datasets.Image() - the forth image, which is the analogy solution. Different from B in a single key (the same different one as in A:A'), B:B'. Hidden in the test set. candidates_images: [datasets.Image()] - a list of candidate images solutions to the analogy. label: datasets.Value("int64") - the index of the ground-truth solution. Hidden in the test set. candidates: [datasets.Value("string")] - a list of candidate string solutions to the analogy. ### Data Splits There are three splits, TRAIN, VALIDATION, and TEST. Since there are four candidates and one solution, random chance is 25%. ## Dataset Creation We leverage situation recognition annotations and the CLIP model to generate a large set of 500k candidate analogies. There are two types of labels: - Silver labels, obtained from the automatic generation. - Gold labels, obtained from human annotations over the silver annotations. In the huggingface version we provide only the gold labeled dataset. Please refer to the project website download page if you want to download the silver labels version. ### Annotations #### Annotation process We paid Amazon Mechanical Turk Workers to solve analogies, five annotators for each analogy. Workers were asked to select the image that best solves the analogy. The resulting dataset is composed of the 3,820 instances agreed upon with a majority vote of at least 3 annotators, which was obtained in 93% of the cases. ## Considerations for Using the Data All associations were obtained with human annotators. All used images are from the imSitu dataset (URL Using this data is allowed for academic research alone. ### Licensing Information CC-By 4.0 NA
[ "# Dataset Card for VASR\n- Dataset Description\n - Supported Tasks and Leaderboards\n - How to Submit Predictions?\n - Colab notebook code for VASR evaluation with ViT\n - Languages\n- Dataset Structure\n - Data Fields\n - Data Splits\n- Dataset Creation\n- Considerations for Using the Data\n - Licensing Information\n - Citation Information", "## Dataset Description\nVASR is a challenging dataset for evaluating computer vision commonsense reasoning abilities. Given a triplet of images, the task is to select an image candidate B' that completes the analogy (A to A' is like B to what?). Unlike previous work on visual analogy that focused on simple image transformations, we tackle complex analogies requiring understanding of scenes. Our experiments demonstrate that state-of-the-art models struggle with carefully chosen distractors (±53%, compared to 90% human accuracy).\n- Homepage: \nURL\n- Colab\nURL\n- Repository:\nURL\n- Paper: URL \n- Leaderboard:\nURL\n- Point of Contact:\nURL@URL", "## Supported Tasks and Leaderboards\nURL \nURL", "## How to Submit Predictions?\nTo submit predictions, please send a prediction CSV file to vasr.benchmark@URL / URL@URL. \nThe prediction file should include a \"B'\" column with the predicted candidate name that best solves the analogy, and an index from 1 to 4 indicating the location of the predicted candidate in the given candidate list. \nAn example prediction file is available HERE. \nA submission is allowed once a week, and you will receive a response within a week.", "## Colab notebook code for VASR evaluation with ViT\nURL", "### Languages\nEnglish.", "## Dataset Structure", "### Data Fields\nA: datasets.Image() - the first input image, A:A'. \nA': datasets.Image() - the second input image, different from A in a single key, A:A'. \nB: datasets.Image() - the third input image, has the same different item as A, B:B'. \nB': datasets.Image() - the forth image, which is the analogy solution. Different from B in a single key (the same different one as in A:A'), B:B'. Hidden in the test set. \ncandidates_images: [datasets.Image()] - a list of candidate images solutions to the analogy. \nlabel: datasets.Value(\"int64\") - the index of the ground-truth solution. Hidden in the test set. \ncandidates: [datasets.Value(\"string\")] - a list of candidate string solutions to the analogy.", "### Data Splits\nThere are three splits, TRAIN, VALIDATION, and TEST. \nSince there are four candidates and one solution, random chance is 25%.", "## Dataset Creation\n\nWe leverage situation recognition annotations and the CLIP model to generate a large set of 500k candidate analogies.\nThere are two types of labels: \n- Silver labels, obtained from the automatic generation.\n- Gold labels, obtained from human annotations over the silver annotations.\n\nIn the huggingface version we provide only the gold labeled dataset. Please refer to the project website download page if you want to download the silver labels version.", "### Annotations", "#### Annotation process\n\nWe paid Amazon Mechanical Turk Workers to solve analogies, five annotators for each analogy.\nWorkers were asked to select the image that best solves the analogy. \nThe resulting dataset is composed of the 3,820 instances agreed upon with a majority vote of at least 3 annotators, which was obtained in 93% of the cases.", "## Considerations for Using the Data\n\nAll associations were obtained with human annotators. 
\nAll used images are from the imSitu dataset (URL \nUsing this data is allowed for academic research alone.", "### Licensing Information\n\nCC-By 4.0 \n\n\n\nNA" ]
[ "TAGS\n#annotations_creators-crowdsourced #language_creators-found #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-English #license-cc-by-4.0 #commonsense-reasoning #visual-reasoning #arxiv-2212.04542 #region-us \n", "# Dataset Card for VASR\n- Dataset Description\n - Supported Tasks and Leaderboards\n - How to Submit Predictions?\n - Colab notebook code for VASR evaluation with ViT\n - Languages\n- Dataset Structure\n - Data Fields\n - Data Splits\n- Dataset Creation\n- Considerations for Using the Data\n - Licensing Information\n - Citation Information", "## Dataset Description\nVASR is a challenging dataset for evaluating computer vision commonsense reasoning abilities. Given a triplet of images, the task is to select an image candidate B' that completes the analogy (A to A' is like B to what?). Unlike previous work on visual analogy that focused on simple image transformations, we tackle complex analogies requiring understanding of scenes. Our experiments demonstrate that state-of-the-art models struggle with carefully chosen distractors (±53%, compared to 90% human accuracy).\n- Homepage: \nURL\n- Colab\nURL\n- Repository:\nURL\n- Paper: URL \n- Leaderboard:\nURL\n- Point of Contact:\nURL@URL", "## Supported Tasks and Leaderboards\nURL \nURL", "## How to Submit Predictions?\nTo submit predictions, please send a prediction CSV file to vasr.benchmark@URL / URL@URL. \nThe prediction file should include a \"B'\" column with the predicted candidate name that best solves the analogy, and an index from 1 to 4 indicating the location of the predicted candidate in the given candidate list. \nAn example prediction file is available HERE. \nA submission is allowed once a week, and you will receive a response within a week.", "## Colab notebook code for VASR evaluation with ViT\nURL", "### Languages\nEnglish.", "## Dataset Structure", "### Data Fields\nA: datasets.Image() - the first input image, A:A'. \nA': datasets.Image() - the second input image, different from A in a single key, A:A'. \nB: datasets.Image() - the third input image, has the same different item as A, B:B'. \nB': datasets.Image() - the forth image, which is the analogy solution. Different from B in a single key (the same different one as in A:A'), B:B'. Hidden in the test set. \ncandidates_images: [datasets.Image()] - a list of candidate images solutions to the analogy. \nlabel: datasets.Value(\"int64\") - the index of the ground-truth solution. Hidden in the test set. \ncandidates: [datasets.Value(\"string\")] - a list of candidate string solutions to the analogy.", "### Data Splits\nThere are three splits, TRAIN, VALIDATION, and TEST. \nSince there are four candidates and one solution, random chance is 25%.", "## Dataset Creation\n\nWe leverage situation recognition annotations and the CLIP model to generate a large set of 500k candidate analogies.\nThere are two types of labels: \n- Silver labels, obtained from the automatic generation.\n- Gold labels, obtained from human annotations over the silver annotations.\n\nIn the huggingface version we provide only the gold labeled dataset. Please refer to the project website download page if you want to download the silver labels version.", "### Annotations", "#### Annotation process\n\nWe paid Amazon Mechanical Turk Workers to solve analogies, five annotators for each analogy.\nWorkers were asked to select the image that best solves the analogy. 
\nThe resulting dataset is composed of the 3,820 instances agreed upon with a majority vote of at least 3 annotators, which was obtained in 93% of the cases.", "## Considerations for Using the Data\n\nAll associations were obtained with human annotators. \nAll used images are from the imSitu dataset (URL \nUsing this data is allowed for academic research alone.", "### Licensing Information\n\nCC-By 4.0 \n\n\n\nNA" ]
6fef69a5a3af8cbbd2e679a1f9b33602aef98ece
# Dataset Card for "proofs" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
shreyasharma/proofs
[ "region:us" ]
2022-11-24T21:45:22+00:00
{"dataset_info": {"features": [{"name": "intermediate_conclusions", "struct": [{"name": "int1", "dtype": "string"}, {"name": "int10", "dtype": "string"}, {"name": "int11", "dtype": "string"}, {"name": "int12", "dtype": "string"}, {"name": "int13", "dtype": "string"}, {"name": "int14", "dtype": "string"}, {"name": "int15", "dtype": "string"}, {"name": "int16", "dtype": "string"}, {"name": "int17", "dtype": "string"}, {"name": "int2", "dtype": "string"}, {"name": "int3", "dtype": "string"}, {"name": "int4", "dtype": "string"}, {"name": "int5", "dtype": "string"}, {"name": "int6", "dtype": "string"}, {"name": "int7", "dtype": "string"}, {"name": "int8", "dtype": "string"}, {"name": "int9", "dtype": "string"}]}, {"name": "step_proof", "dtype": "string"}, {"name": "triples", "struct": [{"name": "sent1", "dtype": "string"}, {"name": "sent10", "dtype": "string"}, {"name": "sent11", "dtype": "string"}, {"name": "sent12", "dtype": "string"}, {"name": "sent13", "dtype": "string"}, {"name": "sent14", "dtype": "string"}, {"name": "sent15", "dtype": "string"}, {"name": "sent16", "dtype": "string"}, {"name": "sent17", "dtype": "string"}, {"name": "sent2", "dtype": "string"}, {"name": "sent3", "dtype": "string"}, {"name": "sent4", "dtype": "string"}, {"name": "sent5", "dtype": "string"}, {"name": "sent6", "dtype": "string"}, {"name": "sent7", "dtype": "string"}, {"name": "sent8", "dtype": "string"}, {"name": "sent9", "dtype": "string"}]}, {"name": "hypothesis", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1296774, "num_examples": 1313}], "download_size": 609276, "dataset_size": 1296774}}
2022-11-24T21:45:28+00:00
[]
[]
TAGS #region-us
# Dataset Card for "proofs" More Information needed
[ "# Dataset Card for \"proofs\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"proofs\"\n\nMore Information needed" ]
42ca06c2a5fd2d693fd71f18e483dcccdf0269b8
# Dataset Card for "proofs2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
shreyasharma/proofs2
[ "region:us" ]
2022-11-24T21:54:07+00:00
{"dataset_info": {"features": [{"name": "intermediate_conclusions", "struct": [{"name": "int1", "dtype": "string"}, {"name": "int10", "dtype": "string"}, {"name": "int11", "dtype": "string"}, {"name": "int12", "dtype": "string"}, {"name": "int13", "dtype": "string"}, {"name": "int14", "dtype": "string"}, {"name": "int15", "dtype": "string"}, {"name": "int16", "dtype": "string"}, {"name": "int17", "dtype": "string"}, {"name": "int2", "dtype": "string"}, {"name": "int3", "dtype": "string"}, {"name": "int4", "dtype": "string"}, {"name": "int5", "dtype": "string"}, {"name": "int6", "dtype": "string"}, {"name": "int7", "dtype": "string"}, {"name": "int8", "dtype": "string"}, {"name": "int9", "dtype": "string"}]}, {"name": "step_proof", "dtype": "string"}, {"name": "triples", "struct": [{"name": "sent1", "dtype": "string"}, {"name": "sent10", "dtype": "string"}, {"name": "sent11", "dtype": "string"}, {"name": "sent12", "dtype": "string"}, {"name": "sent13", "dtype": "string"}, {"name": "sent14", "dtype": "string"}, {"name": "sent15", "dtype": "string"}, {"name": "sent16", "dtype": "string"}, {"name": "sent17", "dtype": "string"}, {"name": "sent2", "dtype": "string"}, {"name": "sent3", "dtype": "string"}, {"name": "sent4", "dtype": "string"}, {"name": "sent5", "dtype": "string"}, {"name": "sent6", "dtype": "string"}, {"name": "sent7", "dtype": "string"}, {"name": "sent8", "dtype": "string"}, {"name": "sent9", "dtype": "string"}]}, {"name": "hypothesis", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1307278, "num_examples": 1313}], "download_size": 609969, "dataset_size": 1307278}}
2022-11-24T21:54:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "proofs2" More Information needed
[ "# Dataset Card for \"proofs2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"proofs2\"\n\nMore Information needed" ]
7e4fe122079c375fa30cc43155be872520033cde
--- annotations_creators: - found language: - en language_creators: - found license: - mit multilinguality: - monolingual pretty_name: covid-qa-tts size_categories: - 1K<n<10K source_datasets: - extended|covid_qa_deepset tags: [] task_categories: - question-answering task_ids: - closed-domain-qa --- # Dataset Card for covid-qa-tts ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary [More Information Needed] ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@github-username](https://github.com/<github-username>) for adding this dataset.
dattatreya303/covid-qa-tts
[ "license:mit", "region:us" ]
2022-11-24T22:16:37+00:00
{"license": "mit"}
2022-11-29T19:54:58+00:00
[]
[]
TAGS #license-mit #region-us
--- annotations_creators: - found language: - en language_creators: - found license: - mit multilinguality: - monolingual pretty_name: covid-qa-tts size_categories: - 1K<n<10K source_datasets: - extended|covid_qa_deepset tags: [] task_categories: - question-answering task_ids: - closed-domain-qa --- # Dataset Card for covid-qa-tts ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: - Repository: - Paper: - Leaderboard: - Point of Contact: ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions Thanks to @github-username for adding this dataset.
[ "# Dataset Card for covid-qa-tts", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @github-username for adding this dataset." ]
[ "TAGS\n#license-mit #region-us \n", "# Dataset Card for covid-qa-tts", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @github-username for adding this dataset." ]
5e5709454c13b79e0138e3842b0508fed4af80e1
# Dataset Card for "rock-paper-scissors" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Javtor/rock-paper-scissors
[ "region:us" ]
2022-11-25T01:45:06+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "paper", "1": "rock", "2": "scissors"}}}}], "splits": [{"name": "test", "num_bytes": 29457688.0, "num_examples": 372}, {"name": "train", "num_bytes": 196585089.6, "num_examples": 2520}], "download_size": 229783612, "dataset_size": 226042777.6}}
2022-11-25T01:55:34+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rock-paper-scissors" More Information needed
[ "# Dataset Card for \"rock-paper-scissors\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rock-paper-scissors\"\n\nMore Information needed" ]
e0ab3489edfe72c968261bffed5243b6fefddd22
# Re-DocRED Dataset This repository contains the dataset of our EMNLP 2022 research paper [Revisiting DocRED – Addressing the False Negative Problem in Relation Extraction](https://arxiv.org/pdf/2205.12696.pdf). DocRED is a widely used benchmark for document-level relation extraction. However, the DocRED dataset contains a significant percentage of false negative examples (incomplete annotation). We revised 4,053 documents in the DocRED dataset and resolved its problems. We release the revised dataset as the Re-DocRED dataset. The Re-DocRED dataset resolved the following problems of DocRED: 1. Resolved the incompleteness problem by supplementing large amounts of relation triples. 2. Addressed the logical inconsistencies in DocRED. 3. Corrected the coreferential errors within DocRED. # Statistics of Re-DocRED The Re-DocRED dataset is located in the ./data directory; the statistics of the dataset are shown below: | | Train | Dev | Test | | :---: | :-: | :-: |:-: | | # Documents | 3,053 | 500 | 500 | | Avg. # Triples | 28.1 | 34.6 | 34.9 | | Avg. # Entities | 19.4 | 19.4 | 19.6 | | Avg. # Sents | 7.9 | 8.2 | 7.9 | # Citation If you find our work useful, please cite our work as: ```bibtex @inproceedings{tan2022revisiting, title={Revisiting DocRED – Addressing the False Negative Problem in Relation Extraction}, author={Tan, Qingyu and Xu, Lu and Bing, Lidong and Ng, Hwee Tou and Aljunied, Sharifah Mahani}, booktitle={Proceedings of EMNLP}, url={https://arxiv.org/abs/2205.12696}, year={2022} } ```
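As a hedged illustration of working with the splits described above: Re-DocRED documents follow the DocRED JSON format (tokenised `sents`, entity clusters in `vertexSet`, relation triples in `labels`), so the per-split averages in the table can be recomputed in a few lines. The file name under `./data` is an assumption; adjust it to the actual layout of the repository.

```python
import json

# Assumed file name; replace with the actual split file under ./data.
with open("data/train_revised.json", encoding="utf-8") as f:
    docs = json.load(f)

n = len(docs)
avg_triples = sum(len(d["labels"]) for d in docs) / n
avg_entities = sum(len(d["vertexSet"]) for d in docs) / n
avg_sents = sum(len(d["sents"]) for d in docs) / n
print(f"{n} docs | {avg_triples:.1f} triples | {avg_entities:.1f} entities | {avg_sents:.1f} sents on average")
```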
tonytan48/Re-DocRED
[ "license:mit", "arxiv:2205.12696", "region:us" ]
2022-11-25T02:42:48+00:00
{"license": "mit"}
2022-11-25T02:48:32+00:00
[ "2205.12696" ]
[]
TAGS #license-mit #arxiv-2205.12696 #region-us
Re-DocRED Dataset ================= This repository contains the dataset of our EMNLP 2022 research paper Revisiting DocRED – Addressing the False Negative Problem in Relation Extraction. DocRED is a widely used benchmark for document-level relation extraction. However, the DocRED dataset contains a significant percentage of false negative examples (incomplete annotation). We revised 4,053 documents in the DocRED dataset and resolved its problems. We release the revised dataset as the Re-DocRED dataset. The Re-DocRED dataset resolved the following problems of DocRED: 1. Resolved the incompleteness problem by supplementing large amounts of relation triples. 2. Addressed the logical inconsistencies in DocRED. 3. Corrected the coreferential errors within DocRED. Statistics of Re-DocRED ======================= The Re-DocRED dataset is located in the ./data directory; the statistics of the dataset are shown below: If you find our work useful, please cite our work as:
[]
[ "TAGS\n#license-mit #arxiv-2205.12696 #region-us \n" ]
d8d082ac1da6eebcad741481fb49a0da4d241fc4
# Dataset Card for "proofs3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
shreyasharma/proofs3
[ "region:us" ]
2022-11-25T09:55:04+00:00
{"dataset_info": {"features": [{"name": "intermediate_conclusions", "struct": [{"name": "int1", "dtype": "string"}, {"name": "int10", "dtype": "string"}, {"name": "int11", "dtype": "string"}, {"name": "int12", "dtype": "string"}, {"name": "int13", "dtype": "string"}, {"name": "int14", "dtype": "string"}, {"name": "int15", "dtype": "string"}, {"name": "int16", "dtype": "string"}, {"name": "int17", "dtype": "string"}, {"name": "int2", "dtype": "string"}, {"name": "int3", "dtype": "string"}, {"name": "int4", "dtype": "string"}, {"name": "int5", "dtype": "string"}, {"name": "int6", "dtype": "string"}, {"name": "int7", "dtype": "string"}, {"name": "int8", "dtype": "string"}, {"name": "int9", "dtype": "string"}]}, {"name": "step_proof", "dtype": "string"}, {"name": "triples", "struct": [{"name": "sent1", "dtype": "string"}, {"name": "sent10", "dtype": "string"}, {"name": "sent11", "dtype": "string"}, {"name": "sent12", "dtype": "string"}, {"name": "sent13", "dtype": "string"}, {"name": "sent14", "dtype": "string"}, {"name": "sent15", "dtype": "string"}, {"name": "sent16", "dtype": "string"}, {"name": "sent17", "dtype": "string"}, {"name": "sent2", "dtype": "string"}, {"name": "sent3", "dtype": "string"}, {"name": "sent4", "dtype": "string"}, {"name": "sent5", "dtype": "string"}, {"name": "sent6", "dtype": "string"}, {"name": "sent7", "dtype": "string"}, {"name": "sent8", "dtype": "string"}, {"name": "sent9", "dtype": "string"}]}, {"name": "hypothesis", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 2614556, "num_examples": 2626}], "download_size": 1188057, "dataset_size": 2614556}}
2022-11-25T09:55:08+00:00
[]
[]
TAGS #region-us
# Dataset Card for "proofs3" More Information needed
[ "# Dataset Card for \"proofs3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"proofs3\"\n\nMore Information needed" ]
b9ffbbee5b42308c2114d9c359c14e2ceaaf6623
# Dataset Card for [Dataset Name] ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary [More Information Needed] ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@alamhanz](https://github.com/alamhanz) and [@andreaschandra](https://github.com/andreaschandra) for adding this dataset.
jakartaresearch/causalqa
[ "task_categories:question-answering", "task_ids:extractive-qa", "annotations_creators:found", "language_creators:found", "multilinguality:monolingual", "size_categories:1M<n<10M", "source_datasets:original", "language:en", "question-answering", "english", "causal", "region:us" ]
2022-11-25T10:23:48+00:00
{"annotations_creators": ["found"], "language_creators": ["found"], "language": ["en"], "license": [], "multilinguality": ["monolingual"], "size_categories": ["1M<n<10M"], "source_datasets": ["original"], "task_categories": ["question-answering"], "task_ids": ["extractive-qa"], "pretty_name": "CausalQA", "tags": ["question-answering", "english", "causal"]}
2022-11-25T12:26:42+00:00
[]
[ "en" ]
TAGS #task_categories-question-answering #task_ids-extractive-qa #annotations_creators-found #language_creators-found #multilinguality-monolingual #size_categories-1M<n<10M #source_datasets-original #language-English #question-answering #english #causal #region-us
# Dataset Card for [Dataset Name] ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: - Repository: - Paper: - Leaderboard: - Point of Contact: ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions Thanks to @alamhanz and @andreaschandra for adding this dataset.
[ "# Dataset Card for [Dataset Name]", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @alamhanz and @andreaschandra for adding this dataset." ]
[ "TAGS\n#task_categories-question-answering #task_ids-extractive-qa #annotations_creators-found #language_creators-found #multilinguality-monolingual #size_categories-1M<n<10M #source_datasets-original #language-English #question-answering #english #causal #region-us \n", "# Dataset Card for [Dataset Name]", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @alamhanz and @andreaschandra for adding this dataset." ]
1aa2e853bebc95cd8d46b317bcfb6fcea265ceb0
# Dataset Card for germanDPR-beir ## Dataset Summary This dataset can be used for [BEIR](https://arxiv.org/abs/2104.08663) evaluation based on [deepset/germanDPR](https://huggingface.co/datasets/deepset/germandpr). It already has been used to evaluate a newly trained [bi-encoder model](https://huggingface.co/PM-AI/bi-encoder_msmarco_bert-base_german). The benchmark framework requires a particular dataset structure by default which has been created locally and uploaded here. Acknowledgement: The dataset was initially created as "[germanDPR](https://www.deepset.ai/germanquad)" by Timo Möller, Julian Risch, Malte Pietsch, Julian Gutsch, Tom Hersperger, Luise Köhler, Iuliia Mozhina, and Justus Peter, during work done at deepset.ai. ## Dataset Creation First, the original dataset [deepset/germanDPR](https://huggingface.co/datasets/deepset/germandpr) was converted into three files for BEIR compatibility: - The first file is `queries.jsonl` and contains an ID and a question in each line. - The second file, `corpus.jsonl`, contains in each line an ID, a title, a text and some metadata. - In the `qrel` folder is the third file. It connects every question from `queries.json` (via `q_id`) with a relevant text/answer from `corpus.jsonl` (via `c_id`) This process has been done for `train` and `test` split separately based on the original germanDPR dataset. Approaching the dataset creation like that is necessary because queries AND corpus both differ in deepset's germanDPR dataset and it might be confusion changing this specific split. In conclusion, queries and corpus differ between train and test split and not only qrels data! Note: If you want one big corpus use `datasets.concatenate_datasets()`. In the original dataset, there is one passage containing the answer and three "wrong" passages for each question. During the creation of this customized dataset, all four passages are added, but only if they are not already present (... meaning they have been deduplicated). It should be noted, that BEIR is combining `title` + `text` in `corpus.jsonl` to a new string which may produce odd results: The original germanDPR dataset does not always contain "classical" titles (i.e. short), but sometimes consists of whole sentences, which are also present in the "text" field. This results in very long passages as well as duplications. In addition, both title and text contain specially formatted content. For example, the words used in titles are often connected with underscores: > `Apple_Magic_Mouse` And texts begin with special characters to distinguish headings and subheadings: > `Wirtschaft_der_Vereinigten_Staaten\n\n== Verschuldung ==\nEin durchschnittlicher Haushalt (...)` Line breaks are also frequently found, as you can see. Of course, it depends on the application whether these things become a problem or not. However, it was decided to release two variants of the original dataset: - The `original` variant leaves the titles and texts as they are. There are no modifications. - The `processed` variant removes the title completely and simplifies the texts by removing the special formatting. The creation of both variants can be viewed in [create_dataset.py](https://huggingface.co/datasets/PM-AI/germandpr-beir/resolve/main/create_dataset.py). 
In particular, the following parameters were used: - `original`: `SPLIT=test/train, TEXT_PREPROCESSING=False, KEEP_TITLE=True` - `processed`: `SPLIT=test/Train, TEXT_PREPROCESSING=True, KEEP_TITLE=False` One final thing to mention: The IDs for queries and the corpus should not match!!! During the evaluation using BEIR, it was found that if these IDs match, the result for that entry is completely removed. This means some of the results are missing. A correct calculation of the overall result is no longer possible. Have a look into [BEIR's evaluation.py](https://github.com/beir-cellar/beir/blob/c3334fd5b336dba03c5e3e605a82fcfb1bdf667d/beir/retrieval/evaluation.py#L49) for further understanding. ## Dataset Usage As earlier mentioned, this dataset is intended to be used with the BEIR benchmark framework. The file and data structure required for BEIR can only be used to a limited extent with Huggingface Datasets or it is necessary to define multiple dataset repositories at once. To make it easier, the [dl_dataset.py](https://huggingface.co/datasets/PM-AI/germandpr-beir/tree/main/dl_dataset.py) script is provided to download the dataset and to ensure the correct file and folder structure. ```python # dl_dataset.py import json import os import datasets from beir.datasets.data_loader import GenericDataLoader # ---------------------------------------- # This scripts downloads the BEIR compatible deepsetDPR dataset from "Huggingface Datasets" to your local machine. # Please see dataset's description/readme to learn more about how the dataset was created. # If you want to use deepset/germandpr without any changes, use TYPE "original" # If you want to reproduce PM-AI/bi-encoder_msmarco_bert-base_german, use TYPE "processed" # ---------------------------------------- TYPE = "processed" # or "original" SPLIT = "train" # or "train" DOWNLOAD_DIR = "germandpr-beir-dataset" DOWNLOAD_DIR = os.path.join(DOWNLOAD_DIR, f'{TYPE}/{SPLIT}') DOWNLOAD_QREL_DIR = os.path.join(DOWNLOAD_DIR, f'qrels/') os.makedirs(DOWNLOAD_QREL_DIR, exist_ok=True) # for BEIR compatibility we need queries, corpus and qrels all together # ensure to always load these three based on the same type (all "processed" or all "original") for subset_name in ["queries", "corpus", "qrels"]: subset = datasets.load_dataset("PM-AI/germandpr-beir", f'{TYPE}-{subset_name}', split=SPLIT) if subset_name == "qrels": out_path = os.path.join(DOWNLOAD_QREL_DIR, f'{SPLIT}.tsv') subset.to_csv(out_path, sep="\t", index=False) else: if subset_name == "queries": _row_to_json = lambda row: json.dumps({"_id": row["_id"], "text": row["text"]}, ensure_ascii=False) else: _row_to_json = lambda row: json.dumps({"_id": row["_id"], "title": row["title"], "text": row["text"]}, ensure_ascii=False) with open(os.path.join(DOWNLOAD_DIR, f'{subset_name}.jsonl'), "w", encoding="utf-8") as out_file: for row in subset: out_file.write(_row_to_json(row) + "\n") # GenericDataLoader is part of BEIR. 
If everything is working correctly we can now load the dataset corpus, queries, qrels = GenericDataLoader(data_folder=DOWNLOAD_DIR).load(SPLIT) print(f'{SPLIT} corpus size: {len(corpus)}\n' f'{SPLIT} queries size: {len(queries)}\n' f'{SPLIT} qrels: {len(qrels)}\n') print("--------------------------------------------------------------------------------------------------------------\n" "Now you can use the downloaded files in BEIR framework\n" "Example: https://github.com/beir-cellar/beir/blob/v1.0.1/examples/retrieval/evaluation/dense/evaluate_sbert.py\n" "--------------------------------------------------------------------------------------------------------------") ``` Alternatively, the data sets can be downloaded directly: - https://huggingface.co/datasets/PM-AI/germandpr-beir/resolve/main/data/original.tar.gz - https://huggingface.co/datasets/PM-AI/germandpr-beir/resolve/main/data/processed.tar.gz Now you can use the downloaded files in BEIR framework: - For Example: [evaluate_sbert.py](https://github.com/beir-cellar/beir/blob/v1.0.1/examples/retrieval/evaluation/dense/evaluate_sbert.py) - Just set variable `"dataset"` to `"germandpr-beir-dataset/processed/test"` or `"germandpr-beir-dataset/original/test"`. - Same goes for `"train"`. ## Dataset Sizes - Original **train** `corpus` size, `queries` size and `qrels` size: `24009`, `9275` and `9275` - Original **test** `corpus` size, `queries` size and `qrels` size: `2876`, `1025` and `1025` - Processed **train** `corpus` size, `queries` size and `qrels` size: `23993`, `9275` and `9275` - Processed **test** `corpus` size, `queries` size and `qrels` size: `2875` and `1025` and `1025` ## Languages This dataset only supports german (aka. de, DE). ## Acknowledgment The dataset was initially created as "[deepset/germanDPR](https://www.deepset.ai/germanquad)" by Timo Möller, Julian Risch, Malte Pietsch, Julian Gutsch, Tom Hersperger, Luise Köhler, Iuliia Mozhina, and Justus Peter, during work done at [deepset.ai](https://www.deepset.ai/). This work is a collaboration between [Technical University of Applied Sciences Wildau (TH Wildau)](https://en.th-wildau.de/) and [sense.ai.tion GmbH](https://senseaition.com/). You can contact us via: * [Philipp Müller (M.Eng.)](https://www.linkedin.com/in/herrphilipps); Author * [Prof. Dr. Janett Mohnke](mailto:[email protected]); TH Wildau * [Dr. Matthias Boldt, Jörg Oehmichen](mailto:[email protected]); sense.AI.tion GmbH This work was funded by the European Regional Development Fund (EFRE) and the State of Brandenburg. Project/Vorhaben: "ProFIT: Natürlichsprachliche Dialogassistenten in der Pflege". <div style="display:flex"> <div style="padding-left:20px;"> <a href="https://efre.brandenburg.de/efre/de/"><img src="https://huggingface.co/datasets/PM-AI/germandpr-beir/resolve/main/res/EFRE-Logo_rechts_oweb_en_rgb.jpeg" alt="Logo of European Regional Development Fund (EFRE)" width="200"/></a> </div> <div style="padding-left:20px;"> <a href="https://www.senseaition.com"><img src="https://senseaition.com/wp-content/uploads/thegem-logos/logo_c847aaa8f42141c4055d4a8665eb208d_3x.png" alt="Logo of senseaition GmbH" width="200"/></a> </div> <div style="padding-left:20px;"> <a href="https://www.th-wildau.de"><img src="https://upload.wikimedia.org/wikipedia/commons/thumb/f/f6/TH_Wildau_Logo.png/640px-TH_Wildau_Logo.png" alt="Logo of TH Wildau" width="180"/></a> </div> </div>
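For completeness, a condensed sketch of the evaluation step that `evaluate_sbert.py` performs once the files produced by `dl_dataset.py` are on disk. The bi-encoder checkpoint is the one mentioned in the card; the batch size and the processed/test path are illustrative choices.

```python
from beir.datasets.data_loader import GenericDataLoader
from beir.retrieval import models
from beir.retrieval.evaluation import EvaluateRetrieval
from beir.retrieval.search.dense import DenseRetrievalExactSearch as DRES

data_path = "germandpr-beir-dataset/processed/test"  # created by dl_dataset.py
corpus, queries, qrels = GenericDataLoader(data_folder=data_path).load(split="test")

# Any SentenceTransformers checkpoint works here; this is the bi-encoder from the card.
model = DRES(models.SentenceBERT("PM-AI/bi-encoder_msmarco_bert-base_german"), batch_size=32)
retriever = EvaluateRetrieval(model, score_function="cos_sim")

results = retriever.retrieve(corpus, queries)
ndcg, _map, recall, precision = retriever.evaluate(qrels, results, retriever.k_values)
print(ndcg, recall)
```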
PM-AI/germandpr-beir
[ "task_categories:sentence-similarity", "task_categories:feature-extraction", "task_categories:text-retrieval", "task_categories:question-answering", "task_categories:other", "task_ids:document-retrieval", "task_ids:open-domain-qa", "task_ids:closed-domain-qa", "multilinguality:monolingual", "size_categories:10K<n<100K", "language:de", "information retrieval", "ir", "documents retrieval", "passage retrieval", "beir", "benchmark", "qrel", "sts", "semantic search", "arxiv:2104.08663", "region:us" ]
2022-11-25T12:28:49+00:00
{"annotations_creators": [], "language_creators": [], "language": ["de"], "license": [], "multilinguality": ["monolingual"], "size_categories": ["10K<n<100K"], "source_datasets": [], "task_categories": ["sentence-similarity", "feature-extraction", "text-retrieval", "question-answering", "other"], "task_ids": ["document-retrieval", "open-domain-qa", "closed-domain-qa"], "pretty_name": "germandpr-beir", "tags": ["information retrieval", "ir", "documents retrieval", "passage retrieval", "beir", "benchmark", "qrel", "sts", "semantic search"], "viewer": true}
2022-11-26T13:04:33+00:00
[ "2104.08663" ]
[ "de" ]
TAGS #task_categories-sentence-similarity #task_categories-feature-extraction #task_categories-text-retrieval #task_categories-question-answering #task_categories-other #task_ids-document-retrieval #task_ids-open-domain-qa #task_ids-closed-domain-qa #multilinguality-monolingual #size_categories-10K<n<100K #language-German #information retrieval #ir #documents retrieval #passage retrieval #beir #benchmark #qrel #sts #semantic search #arxiv-2104.08663 #region-us
# Dataset Card for germanDPR-beir ## Dataset Summary This dataset can be used for BEIR evaluation based on deepset/germanDPR. It already has been used to evaluate a newly trained bi-encoder model. The benchmark framework requires a particular dataset structure by default which has been created locally and uploaded here. Acknowledgement: The dataset was initially created as "germanDPR" by Timo Möller, Julian Risch, Malte Pietsch, Julian Gutsch, Tom Hersperger, Luise Köhler, Iuliia Mozhina, and Justus Peter, during work done at URL. ## Dataset Creation First, the original dataset deepset/germanDPR was converted into three files for BEIR compatibility: - The first file is 'URL' and contains an ID and a question in each line. - The second file, 'URL', contains in each line an ID, a title, a text and some metadata. - In the 'qrel' folder is the third file. It connects every question from 'URL' (via 'q_id') with a relevant text/answer from 'URL' (via 'c_id') This process has been done for 'train' and 'test' split separately based on the original germanDPR dataset. Approaching the dataset creation like that is necessary because queries AND corpus both differ in deepset's germanDPR dataset and it might be confusion changing this specific split. In conclusion, queries and corpus differ between train and test split and not only qrels data! Note: If you want one big corpus use 'datasets.concatenate_datasets()'. In the original dataset, there is one passage containing the answer and three "wrong" passages for each question. During the creation of this customized dataset, all four passages are added, but only if they are not already present (... meaning they have been deduplicated). It should be noted, that BEIR is combining 'title' + 'text' in 'URL' to a new string which may produce odd results: The original germanDPR dataset does not always contain "classical" titles (i.e. short), but sometimes consists of whole sentences, which are also present in the "text" field. This results in very long passages as well as duplications. In addition, both title and text contain specially formatted content. For example, the words used in titles are often connected with underscores: > 'Apple_Magic_Mouse' And texts begin with special characters to distinguish headings and subheadings: > 'Wirtschaft_der_Vereinigten_Staaten\n\n== Verschuldung ==\nEin durchschnittlicher Haushalt (...)' Line breaks are also frequently found, as you can see. Of course, it depends on the application whether these things become a problem or not. However, it was decided to release two variants of the original dataset: - The 'original' variant leaves the titles and texts as they are. There are no modifications. - The 'processed' variant removes the title completely and simplifies the texts by removing the special formatting. The creation of both variants can be viewed in create_dataset.py. In particular, the following parameters were used: - 'original': 'SPLIT=test/train, TEXT_PREPROCESSING=False, KEEP_TITLE=True' - 'processed': 'SPLIT=test/Train, TEXT_PREPROCESSING=True, KEEP_TITLE=False' One final thing to mention: The IDs for queries and the corpus should not match!!! During the evaluation using BEIR, it was found that if these IDs match, the result for that entry is completely removed. This means some of the results are missing. A correct calculation of the overall result is no longer possible. Have a look into BEIR's URL for further understanding. 
## Dataset Usage As earlier mentioned, this dataset is intended to be used with the BEIR benchmark framework. The file and data structure required for BEIR can only be used to a limited extent with Huggingface Datasets or it is necessary to define multiple dataset repositories at once. To make it easier, the dl_dataset.py script is provided to download the dataset and to ensure the correct file and folder structure. Alternatively, the data sets can be downloaded directly: - URL - URL Now you can use the downloaded files in BEIR framework: - For Example: evaluate_sbert.py - Just set variable '"dataset"' to '"germandpr-beir-dataset/processed/test"' or '"germandpr-beir-dataset/original/test"'. - Same goes for '"train"'. ## Dataset Sizes - Original train 'corpus' size, 'queries' size and 'qrels' size: '24009', '9275' and '9275' - Original test 'corpus' size, 'queries' size and 'qrels' size: '2876', '1025' and '1025' - Processed train 'corpus' size, 'queries' size and 'qrels' size: '23993', '9275' and '9275' - Processed test 'corpus' size, 'queries' size and 'qrels' size: '2875' and '1025' and '1025' ## Languages This dataset only supports german (aka. de, DE). ## Acknowledgment The dataset was initially created as "deepset/germanDPR" by Timo Möller, Julian Risch, Malte Pietsch, Julian Gutsch, Tom Hersperger, Luise Köhler, Iuliia Mozhina, and Justus Peter, during work done at URL. This work is a collaboration between Technical University of Applied Sciences Wildau (TH Wildau) and URL GmbH. You can contact us via: * Philipp Müller (M.Eng.); Author * Prof. Dr. Janett Mohnke; TH Wildau * Dr. Matthias Boldt, Jörg Oehmichen; URL GmbH This work was funded by the European Regional Development Fund (EFRE) and the State of Brandenburg. Project/Vorhaben: "ProFIT: Natürlichsprachliche Dialogassistenten in der Pflege". <div style="display:flex"> <div style="padding-left:20px;"> <a href="URL src="URL alt="Logo of European Regional Development Fund (EFRE)" width="200"/></a> </div> <div style="padding-left:20px;"> <a href="URL"><img src="URL alt="Logo of senseaition GmbH" width="200"/></a> </div> <div style="padding-left:20px;"> <a href="URL"><img src="URL alt="Logo of TH Wildau" width="180"/></a> </div> </div>
[ "# Dataset Card for germanDPR-beir", "## Dataset Summary\n\nThis dataset can be used for BEIR evaluation based on deepset/germanDPR.\nIt already has been used to evaluate a newly trained bi-encoder model.\nThe benchmark framework requires a particular dataset structure by default which has been created locally and uploaded here.\n\nAcknowledgement: The dataset was initially created as \"germanDPR\" by Timo Möller, Julian Risch, Malte Pietsch, Julian Gutsch, Tom Hersperger, Luise Köhler, Iuliia Mozhina, and Justus Peter, during work done at URL.", "## Dataset Creation\nFirst, the original dataset deepset/germanDPR was converted into three files for BEIR compatibility:\n - The first file is 'URL' and contains an ID and a question in each line.\n - The second file, 'URL', contains in each line an ID, a title, a text and some metadata.\n - In the 'qrel' folder is the third file. It connects every question from 'URL' (via 'q_id') with a relevant text/answer from 'URL' (via 'c_id')\n \nThis process has been done for 'train' and 'test' split separately based on the original germanDPR dataset.\nApproaching the dataset creation like that is necessary because queries AND corpus both differ in deepset's germanDPR dataset\nand it might be confusion changing this specific split.\nIn conclusion, queries and corpus differ between train and test split and not only qrels data!\nNote: If you want one big corpus use 'datasets.concatenate_datasets()'.\n\nIn the original dataset, there is one passage containing the answer and three \"wrong\" passages for each question.\nDuring the creation of this customized dataset, all four passages are added, but only if they are not already present (... meaning they have been deduplicated).\n\nIt should be noted, that BEIR is combining 'title' + 'text' in 'URL' to a new string which may produce odd results:\nThe original germanDPR dataset does not always contain \"classical\" titles (i.e. short), but sometimes consists of whole sentences, which are also present in the \"text\" field.\nThis results in very long passages as well as duplications.\nIn addition, both title and text contain specially formatted content.\nFor example, the words used in titles are often connected with underscores:\n\n> 'Apple_Magic_Mouse'\n\nAnd texts begin with special characters to distinguish headings and subheadings:\n\n> 'Wirtschaft_der_Vereinigten_Staaten\\n\\n== Verschuldung ==\\nEin durchschnittlicher Haushalt (...)'\n\nLine breaks are also frequently found, as you can see.\n\nOf course, it depends on the application whether these things become a problem or not.\nHowever, it was decided to release two variants of the original dataset:\n- The 'original' variant leaves the titles and texts as they are. There are no modifications.\n- The 'processed' variant removes the title completely and simplifies the texts by removing the special formatting.\n\nThe creation of both variants can be viewed in create_dataset.py.\nIn particular, the following parameters were used:\n- 'original': 'SPLIT=test/train, TEXT_PREPROCESSING=False, KEEP_TITLE=True'\n- 'processed': 'SPLIT=test/Train, TEXT_PREPROCESSING=True, KEEP_TITLE=False'\n\nOne final thing to mention: The IDs for queries and the corpus should not match!!!\nDuring the evaluation using BEIR, it was found that if these IDs match, the result for that entry is completely removed. 
\nThis means some of the results are missing.\nA correct calculation of the overall result is no longer possible.\nHave a look into BEIR's URL for further understanding.", "## Dataset Usage\nAs earlier mentioned, this dataset is intended to be used with the BEIR benchmark framework.\nThe file and data structure required for BEIR can only be used to a limited extent with Huggingface Datasets or it is necessary to define multiple dataset repositories at once.\nTo make it easier, the dl_dataset.py script is provided to download the dataset and to ensure the correct file and folder structure.\n\n\n\nAlternatively, the data sets can be downloaded directly:\n- URL\n- URL\n\nNow you can use the downloaded files in BEIR framework:\n- For Example: evaluate_sbert.py\n- Just set variable '\"dataset\"' to '\"germandpr-beir-dataset/processed/test\"' or '\"germandpr-beir-dataset/original/test\"'.\n- Same goes for '\"train\"'.", "## Dataset Sizes\n- Original train 'corpus' size, 'queries' size and 'qrels' size: '24009', '9275' and '9275'\n- Original test 'corpus' size, 'queries' size and 'qrels' size: '2876', '1025' and '1025'\n\n- Processed train 'corpus' size, 'queries' size and 'qrels' size: '23993', '9275' and '9275'\n- Processed test 'corpus' size, 'queries' size and 'qrels' size: '2875' and '1025' and '1025'", "## Languages\n\nThis dataset only supports german (aka. de, DE).", "## Acknowledgment\n\nThe dataset was initially created as \"deepset/germanDPR\" by Timo Möller, Julian Risch, Malte Pietsch, Julian Gutsch, Tom Hersperger, Luise Köhler, Iuliia Mozhina, and Justus Peter, during work done at URL.\n\nThis work is a collaboration between Technical University of Applied Sciences Wildau (TH Wildau) and URL GmbH.\nYou can contact us via:\n* Philipp Müller (M.Eng.); Author\n* Prof. Dr. Janett Mohnke; TH Wildau\n* Dr. Matthias Boldt, Jörg Oehmichen; URL GmbH \n\nThis work was funded by the European Regional Development Fund (EFRE) and the State of Brandenburg. Project/Vorhaben: \"ProFIT: Natürlichsprachliche Dialogassistenten in der Pflege\".\n\n<div style=\"display:flex\">\n <div style=\"padding-left:20px;\">\n <a href=\"URL src=\"URL alt=\"Logo of European Regional Development Fund (EFRE)\" width=\"200\"/></a>\n </div>\n <div style=\"padding-left:20px;\">\n <a href=\"URL\"><img src=\"URL alt=\"Logo of senseaition GmbH\" width=\"200\"/></a>\n </div>\n <div style=\"padding-left:20px;\">\n <a href=\"URL\"><img src=\"URL alt=\"Logo of TH Wildau\" width=\"180\"/></a>\n </div>\n</div>" ]
[ "TAGS\n#task_categories-sentence-similarity #task_categories-feature-extraction #task_categories-text-retrieval #task_categories-question-answering #task_categories-other #task_ids-document-retrieval #task_ids-open-domain-qa #task_ids-closed-domain-qa #multilinguality-monolingual #size_categories-10K<n<100K #language-German #information retrieval #ir #documents retrieval #passage retrieval #beir #benchmark #qrel #sts #semantic search #arxiv-2104.08663 #region-us \n", "# Dataset Card for germanDPR-beir", "## Dataset Summary\n\nThis dataset can be used for BEIR evaluation based on deepset/germanDPR.\nIt already has been used to evaluate a newly trained bi-encoder model.\nThe benchmark framework requires a particular dataset structure by default which has been created locally and uploaded here.\n\nAcknowledgement: The dataset was initially created as \"germanDPR\" by Timo Möller, Julian Risch, Malte Pietsch, Julian Gutsch, Tom Hersperger, Luise Köhler, Iuliia Mozhina, and Justus Peter, during work done at URL.", "## Dataset Creation\nFirst, the original dataset deepset/germanDPR was converted into three files for BEIR compatibility:\n - The first file is 'URL' and contains an ID and a question in each line.\n - The second file, 'URL', contains in each line an ID, a title, a text and some metadata.\n - In the 'qrel' folder is the third file. It connects every question from 'URL' (via 'q_id') with a relevant text/answer from 'URL' (via 'c_id')\n \nThis process has been done for 'train' and 'test' split separately based on the original germanDPR dataset.\nApproaching the dataset creation like that is necessary because queries AND corpus both differ in deepset's germanDPR dataset\nand it might be confusion changing this specific split.\nIn conclusion, queries and corpus differ between train and test split and not only qrels data!\nNote: If you want one big corpus use 'datasets.concatenate_datasets()'.\n\nIn the original dataset, there is one passage containing the answer and three \"wrong\" passages for each question.\nDuring the creation of this customized dataset, all four passages are added, but only if they are not already present (... meaning they have been deduplicated).\n\nIt should be noted, that BEIR is combining 'title' + 'text' in 'URL' to a new string which may produce odd results:\nThe original germanDPR dataset does not always contain \"classical\" titles (i.e. short), but sometimes consists of whole sentences, which are also present in the \"text\" field.\nThis results in very long passages as well as duplications.\nIn addition, both title and text contain specially formatted content.\nFor example, the words used in titles are often connected with underscores:\n\n> 'Apple_Magic_Mouse'\n\nAnd texts begin with special characters to distinguish headings and subheadings:\n\n> 'Wirtschaft_der_Vereinigten_Staaten\\n\\n== Verschuldung ==\\nEin durchschnittlicher Haushalt (...)'\n\nLine breaks are also frequently found, as you can see.\n\nOf course, it depends on the application whether these things become a problem or not.\nHowever, it was decided to release two variants of the original dataset:\n- The 'original' variant leaves the titles and texts as they are. 
There are no modifications.\n- The 'processed' variant removes the title completely and simplifies the texts by removing the special formatting.\n\nThe creation of both variants can be viewed in create_dataset.py.\nIn particular, the following parameters were used:\n- 'original': 'SPLIT=test/train, TEXT_PREPROCESSING=False, KEEP_TITLE=True'\n- 'processed': 'SPLIT=test/Train, TEXT_PREPROCESSING=True, KEEP_TITLE=False'\n\nOne final thing to mention: The IDs for queries and the corpus should not match!!!\nDuring the evaluation using BEIR, it was found that if these IDs match, the result for that entry is completely removed. \nThis means some of the results are missing.\nA correct calculation of the overall result is no longer possible.\nHave a look into BEIR's URL for further understanding.", "## Dataset Usage\nAs earlier mentioned, this dataset is intended to be used with the BEIR benchmark framework.\nThe file and data structure required for BEIR can only be used to a limited extent with Huggingface Datasets or it is necessary to define multiple dataset repositories at once.\nTo make it easier, the dl_dataset.py script is provided to download the dataset and to ensure the correct file and folder structure.\n\n\n\nAlternatively, the data sets can be downloaded directly:\n- URL\n- URL\n\nNow you can use the downloaded files in BEIR framework:\n- For Example: evaluate_sbert.py\n- Just set variable '\"dataset\"' to '\"germandpr-beir-dataset/processed/test\"' or '\"germandpr-beir-dataset/original/test\"'.\n- Same goes for '\"train\"'.", "## Dataset Sizes\n- Original train 'corpus' size, 'queries' size and 'qrels' size: '24009', '9275' and '9275'\n- Original test 'corpus' size, 'queries' size and 'qrels' size: '2876', '1025' and '1025'\n\n- Processed train 'corpus' size, 'queries' size and 'qrels' size: '23993', '9275' and '9275'\n- Processed test 'corpus' size, 'queries' size and 'qrels' size: '2875' and '1025' and '1025'", "## Languages\n\nThis dataset only supports german (aka. de, DE).", "## Acknowledgment\n\nThe dataset was initially created as \"deepset/germanDPR\" by Timo Möller, Julian Risch, Malte Pietsch, Julian Gutsch, Tom Hersperger, Luise Köhler, Iuliia Mozhina, and Justus Peter, during work done at URL.\n\nThis work is a collaboration between Technical University of Applied Sciences Wildau (TH Wildau) and URL GmbH.\nYou can contact us via:\n* Philipp Müller (M.Eng.); Author\n* Prof. Dr. Janett Mohnke; TH Wildau\n* Dr. Matthias Boldt, Jörg Oehmichen; URL GmbH \n\nThis work was funded by the European Regional Development Fund (EFRE) and the State of Brandenburg. Project/Vorhaben: \"ProFIT: Natürlichsprachliche Dialogassistenten in der Pflege\".\n\n<div style=\"display:flex\">\n <div style=\"padding-left:20px;\">\n <a href=\"URL src=\"URL alt=\"Logo of European Regional Development Fund (EFRE)\" width=\"200\"/></a>\n </div>\n <div style=\"padding-left:20px;\">\n <a href=\"URL\"><img src=\"URL alt=\"Logo of senseaition GmbH\" width=\"200\"/></a>\n </div>\n <div style=\"padding-left:20px;\">\n <a href=\"URL\"><img src=\"URL alt=\"Logo of TH Wildau\" width=\"180\"/></a>\n </div>\n</div>" ]
b447aaedeaf311efcc18f84620bbc12d1e7375fb
# Dataset Card for "small-roots_en" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ola13/small-roots_en
[ "region:us" ]
2022-11-25T13:15:00+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "meta", "struct": [{"name": "perplexity_score", "dtype": "float64"}]}], "splits": [{"name": "train", "num_bytes": 1165939191, "num_examples": 100000}], "download_size": 640582687, "dataset_size": 1165939191}}
2022-11-25T13:15:42+00:00
[]
[]
TAGS #region-us
# Dataset Card for "small-roots_en" More Information needed
[ "# Dataset Card for \"small-roots_en\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"small-roots_en\"\n\nMore Information needed" ]
63ab0e116253064bee51edf66a8416b46c155b0e
# Dataset Card for "libri_augmented_test_set" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
DTU54DL/libri_augmented_test_set
[ "region:us" ]
2022-11-25T13:58:21+00:00
{"dataset_info": {"features": [{"name": "file", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "text", "dtype": "string"}, {"name": "speaker_id", "dtype": "int64"}, {"name": "chapter_id", "dtype": "int64"}, {"name": "id", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 623397698.5, "num_examples": 2620}], "download_size": 610524259, "dataset_size": 623397698.5}}
2022-11-25T14:03:18+00:00
[]
[]
TAGS #region-us
# Dataset Card for "libri_augmented_test_set" More Information needed
[ "# Dataset Card for \"libri_augmented_test_set\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"libri_augmented_test_set\"\n\nMore Information needed" ]
ebaa2f77ef427740338df9fc65ef8100252b08f7
# Supreme Module Module embedding ## Samples <img alt="Samples" src="https://huggingface.co/datasets/DJSoft/supreme_module/resolve/main/samples.jpg" style="max-height: 80vh"/> <img alt="Comparsion" src="https://huggingface.co/datasets/DJSoft/supreme_module/resolve/main/steps.png" style="max-height: 80vh"/> ## About Use this Stable Diffusion embedding to achieve the Project Diva Supreme outfit ## Usage To use this embedding you have to download the file and put it into the "\stable-diffusion-webui\embeddings" folder To use it in a prompt add __supreme_module-*__ *Note, that this embedding may require more than usual seed rolling and prompt finetuning* Add **( :1.0)** around it to modify its weight ## Additional info In order to improve some details you can use the following words: **Positive prompt:** blue eyes, white blouse, black thighhighs **Negative prompt:** black blouse ## Included Files - 10000 steps Usage: **supreme_module-10000** - 15000 steps Usage: **supreme_module-15000** ## License This embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies: 1. You can't use the embedding to deliberately produce nor share illegal or harmful outputs or content 2. The authors claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license 3. You may re-distribute the weights and use the embedding commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully) [Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)
DJSoft/supreme_module
[ "license:creativeml-openrail-m", "region:us" ]
2022-11-25T14:19:49+00:00
{"license": "creativeml-openrail-m"}
2022-12-05T20:36:17+00:00
[]
[]
TAGS #license-creativeml-openrail-m #region-us
# Supreme Module Module embedding ## Samples <img alt="Samples" src="URL style="max-height: 80vh"/> <img alt="Comparison" src="URL style="max-height: 80vh"/> ## About Use this Stable Diffusion embedding to achieve the Project Diva Supreme outfit ## Usage To use this embedding you have to download the file and put it into the "\stable-diffusion-webui\embeddings" folder To use it in a prompt add __supreme_module-*__ *Note that this embedding may require more than usual seed rolling and prompt finetuning* Add ( :1.0) around it to modify its weight ## Additional info In order to improve some details you can use the following words: Positive prompt: blue eyes, white blouse, black thighhighs Negative prompt: black blouse ## Included Files - 10000 steps Usage: supreme_module-10000 - 15000 steps Usage: supreme_module-15000 ## License This embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies: 1. You can't use the embedding to deliberately produce nor share illegal or harmful outputs or content 2. The authors claim no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license 3. You may re-distribute the weights and use the embedding commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully) Please read the full license here
[ "# Supreme Module Module embedding", "## Samples\n\n<img alt=\"Samples\" src=\"URL style=\"max-height: 80vh\"/>\n<img alt=\"Comparsion\" src=\"URL style=\"max-height: 80vh\"/>", "## About\n\nUse this Stable Diffusion embedding to achieve the Project Diva Supreme outfit", "## Usage\n\nTo use this embedding you have to download the file and put it into the \"\\stable-diffusion-webui\\embeddings\" folder \nTo use it in a prompt add __supreme_module-*__ \n*Note, that this embedding may require more than usual seed rolling and prompt finetuning*\n\nAdd ( :1.0) around it to modify its weight", "## Additional info\n\nIn order to improve some details you can use the following words: \nPositive prompt: blue eyes, white blouse, black thighhighs \nNegative prompt: black blouse", "## Included Files\n\n- 10000 steps Usage: supreme_module-10000\n- 15000 steps Usage: supreme_module-15000", "## License\n\nThis embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies:\n1. You can't use the embedding to deliberately produce nor share illegal or harmful outputs or content\n2. The authors claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license\n3. You may re-distribute the weights and use the embedding commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully) Please read the full license here" ]
[ "TAGS\n#license-creativeml-openrail-m #region-us \n", "# Supreme Module Module embedding", "## Samples\n\n<img alt=\"Samples\" src=\"URL style=\"max-height: 80vh\"/>\n<img alt=\"Comparsion\" src=\"URL style=\"max-height: 80vh\"/>", "## About\n\nUse this Stable Diffusion embedding to achieve the Project Diva Supreme outfit", "## Usage\n\nTo use this embedding you have to download the file and put it into the \"\\stable-diffusion-webui\\embeddings\" folder \nTo use it in a prompt add __supreme_module-*__ \n*Note, that this embedding may require more than usual seed rolling and prompt finetuning*\n\nAdd ( :1.0) around it to modify its weight", "## Additional info\n\nIn order to improve some details you can use the following words: \nPositive prompt: blue eyes, white blouse, black thighhighs \nNegative prompt: black blouse", "## Included Files\n\n- 10000 steps Usage: supreme_module-10000\n- 15000 steps Usage: supreme_module-15000", "## License\n\nThis embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies:\n1. You can't use the embedding to deliberately produce nor share illegal or harmful outputs or content\n2. The authors claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license\n3. You may re-distribute the weights and use the embedding commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully) Please read the full license here" ]
bd2d0d5363e77dc6e317e7cb0e1290d175d6d16c
Pixiv monthly ranking top 50 for 18-22/10. Image size: 768 x *. Total: 38,532 images, with accompanying txt files.
haor/pixiv_month_top50
[ "license:openrail", "doi:10.57967/hf/0308", "region:us" ]
2022-11-25T14:56:26+00:00
{"license": "openrail"}
2023-08-09T06:55:00+00:00
[]
[]
TAGS #license-openrail #doi-10.57967/hf/0308 #region-us
Pixiv monthly ranking top 50 for 18-22/10. Image size: 768 x *. Total: 38,532 images, with accompanying txt files.
[]
[ "TAGS\n#license-openrail #doi-10.57967/hf/0308 #region-us \n" ]
8ebd8dfa5c54813415db952c7a1b3933d4482d03
# Dataset Card for "biomedical-topic-categorization-cased" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Javtor/biomedical-topic-categorization-cased
[ "region:us" ]
2022-11-25T15:00:09+00:00
{"dataset_info": {"features": [{"name": "Title/Abstract", "dtype": "string"}, {"name": "T001", "dtype": "int64"}, {"name": "T002", "dtype": "int64"}, {"name": "T004", "dtype": "int64"}, {"name": "T005", "dtype": "int64"}, {"name": "T007", "dtype": "int64"}, {"name": "T008", "dtype": "int64"}, {"name": "T010", "dtype": "int64"}, {"name": "T011", "dtype": "int64"}, {"name": "T012", "dtype": "int64"}, {"name": "T013", "dtype": "int64"}, {"name": "T014", "dtype": "int64"}, {"name": "T015", "dtype": "int64"}, {"name": "T016", "dtype": "int64"}, {"name": "T017", "dtype": "int64"}, {"name": "T018", "dtype": "int64"}, {"name": "T019", "dtype": "int64"}, {"name": "T020", "dtype": "int64"}, {"name": "T022", "dtype": "int64"}, {"name": "T023", "dtype": "int64"}, {"name": "T024", "dtype": "int64"}, {"name": "T025", "dtype": "int64"}, {"name": "T026", "dtype": "int64"}, {"name": "T028", "dtype": "int64"}, {"name": "T029", "dtype": "int64"}, {"name": "T030", "dtype": "int64"}, {"name": "T031", "dtype": "int64"}, {"name": "T032", "dtype": "int64"}, {"name": "T033", "dtype": "int64"}, {"name": "T034", "dtype": "int64"}, {"name": "T037", "dtype": "int64"}, {"name": "T038", "dtype": "int64"}, {"name": "T039", "dtype": "int64"}, {"name": "T040", "dtype": "int64"}, {"name": "T041", "dtype": "int64"}, {"name": "T042", "dtype": "int64"}, {"name": "T043", "dtype": "int64"}, {"name": "T044", "dtype": "int64"}, {"name": "T045", "dtype": "int64"}, {"name": "T046", "dtype": "int64"}, {"name": "T047", "dtype": "int64"}, {"name": "T048", "dtype": "int64"}, {"name": "T049", "dtype": "int64"}, {"name": "T050", "dtype": "int64"}, {"name": "T051", "dtype": "int64"}, {"name": "T052", "dtype": "int64"}, {"name": "T053", "dtype": "int64"}, {"name": "T054", "dtype": "int64"}, {"name": "T055", "dtype": "int64"}, {"name": "T056", "dtype": "int64"}, {"name": "T057", "dtype": "int64"}, {"name": "T058", "dtype": "int64"}, {"name": "T059", "dtype": "int64"}, {"name": "T060", "dtype": "int64"}, {"name": "T061", "dtype": "int64"}, {"name": "T062", "dtype": "int64"}, {"name": "T063", "dtype": "int64"}, {"name": "T064", "dtype": "int64"}, {"name": "T065", "dtype": "int64"}, {"name": "T066", "dtype": "int64"}, {"name": "T067", "dtype": "int64"}, {"name": "T068", "dtype": "int64"}, {"name": "T069", "dtype": "int64"}, {"name": "T070", "dtype": "int64"}, {"name": "T071", "dtype": "int64"}, {"name": "T072", "dtype": "int64"}, {"name": "T073", "dtype": "int64"}, {"name": "T074", "dtype": "int64"}, {"name": "T075", "dtype": "int64"}, {"name": "T077", "dtype": "int64"}, {"name": "T078", "dtype": "int64"}, {"name": "T079", "dtype": "int64"}, {"name": "T080", "dtype": "int64"}, {"name": "T081", "dtype": "int64"}, {"name": "T082", "dtype": "int64"}, {"name": "T083", "dtype": "int64"}, {"name": "T085", "dtype": "int64"}, {"name": "T086", "dtype": "int64"}, {"name": "T087", "dtype": "int64"}, {"name": "T089", "dtype": "int64"}, {"name": "T090", "dtype": "int64"}, {"name": "T091", "dtype": "int64"}, {"name": "T092", "dtype": "int64"}, {"name": "T093", "dtype": "int64"}, {"name": "T094", "dtype": "int64"}, {"name": "T095", "dtype": "int64"}, {"name": "T096", "dtype": "int64"}, {"name": "T097", "dtype": "int64"}, {"name": "T098", "dtype": "int64"}, {"name": "T099", "dtype": "int64"}, {"name": "T100", "dtype": "int64"}, {"name": "T101", "dtype": "int64"}, {"name": "T102", "dtype": "int64"}, {"name": "T103", "dtype": "int64"}, {"name": "T104", "dtype": "int64"}, {"name": "T109", "dtype": "int64"}, {"name": "T114", "dtype": "int64"}, {"name": "T116", 
"dtype": "int64"}, {"name": "T120", "dtype": "int64"}, {"name": "T121", "dtype": "int64"}, {"name": "T122", "dtype": "int64"}, {"name": "T123", "dtype": "int64"}, {"name": "T125", "dtype": "int64"}, {"name": "T126", "dtype": "int64"}, {"name": "T127", "dtype": "int64"}, {"name": "T129", "dtype": "int64"}, {"name": "T130", "dtype": "int64"}, {"name": "T131", "dtype": "int64"}, {"name": "T167", "dtype": "int64"}, {"name": "T168", "dtype": "int64"}, {"name": "T169", "dtype": "int64"}, {"name": "T170", "dtype": "int64"}, {"name": "T171", "dtype": "int64"}, {"name": "T184", "dtype": "int64"}, {"name": "T185", "dtype": "int64"}, {"name": "T190", "dtype": "int64"}, {"name": "T191", "dtype": "int64"}, {"name": "T192", "dtype": "int64"}, {"name": "T194", "dtype": "int64"}, {"name": "T195", "dtype": "int64"}, {"name": "T196", "dtype": "int64"}, {"name": "T197", "dtype": "int64"}, {"name": "T200", "dtype": "int64"}, {"name": "T201", "dtype": "int64"}, {"name": "T204", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 4499925927.860925, "num_examples": 2155595}, {"name": "test", "num_bytes": 1499976005.1390755, "num_examples": 718532}], "download_size": 1955085371, "dataset_size": 5999901933.0}}
2022-11-25T16:15:54+00:00
[]
[]
TAGS #region-us
# Dataset Card for "biomedical-topic-categorization-cased" More Information needed
[ "# Dataset Card for \"biomedical-topic-categorization-cased\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"biomedical-topic-categorization-cased\"\n\nMore Information needed" ]
930dc9cc2da0373ffe8ff175db7462273ef3c002
# Dataset Card for "DL_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
happycute123/DL_dataset
[ "region:us" ]
2022-11-25T15:36:52+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 679382539.464, "num_examples": 1057}, {"name": "test", "num_bytes": 167054773.0, "num_examples": 264}], "download_size": 0, "dataset_size": 846437312.464}}
2022-11-26T06:30:11+00:00
[]
[]
TAGS #region-us
# Dataset Card for "DL_dataset" More Information needed
[ "# Dataset Card for \"DL_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"DL_dataset\"\n\nMore Information needed" ]
6e52524f57953e6be7486fc16885934405f3b7a8
## Examples ![1504107204.png](https://s3.amazonaws.com/moonup/production/uploads/1669392177394-6380e35bf496d57325c1c769.png) > a cityscape with a cityscape of buildings
hlcheung92/KimiNoNaWa
[ "task_categories:text-to-image", "annotations_creators:machine-generated", "language_creators:other", "multilinguality:monolingual", "size_categories:n<1K", "source_datasets:huggan/KimiNoNaWa", "language:en", "license:cc-by-nc-sa-4.0", "region:us" ]
2022-11-25T15:54:09+00:00
{"annotations_creators": ["machine-generated"], "language_creators": ["other"], "language": ["en"], "license": "cc-by-nc-sa-4.0", "multilinguality": ["monolingual"], "size_categories": ["n<1K"], "source_datasets": ["huggan/KimiNoNaWa"], "task_categories": ["text-to-image"], "task_ids": [], "pretty_name": "KimiNoNaWa", "tags": []}
2022-11-25T17:03:46+00:00
[]
[ "en" ]
TAGS #task_categories-text-to-image #annotations_creators-machine-generated #language_creators-other #multilinguality-monolingual #size_categories-n<1K #source_datasets-huggan/KimiNoNaWa #language-English #license-cc-by-nc-sa-4.0 #region-us
## Examples !URL > a cityscape with a cityscape of buildings
[ "## Examples\n!URL\n\n> a cityscape with a cityscape of buildings" ]
[ "TAGS\n#task_categories-text-to-image #annotations_creators-machine-generated #language_creators-other #multilinguality-monolingual #size_categories-n<1K #source_datasets-huggan/KimiNoNaWa #language-English #license-cc-by-nc-sa-4.0 #region-us \n", "## Examples\n!URL\n\n> a cityscape with a cityscape of buildings" ]
17218ba0956ce7cd4de69529b3471e9435c453b3
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-1b7 * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-sen_en_-1de085-2240171542
[ "autotrain", "evaluation", "region:us" ]
2022-11-25T15:56:55+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "bigscience/bloom-1b7", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "sen_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-25T16:37:05+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-1b7 * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-1b7\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-1b7\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
5eec09ecccda4d8ade9aa405ab7c8a0d4e62bbc0
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-3b * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-sen_en_-1de085-2240171540
[ "autotrain", "evaluation", "region:us" ]
2022-11-25T15:56:55+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "bigscience/bloom-3b", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "sen_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-25T16:53:24+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-3b * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-3b\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-3b\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
445c53f9be44f44fa35fa9ffb6353df4289ec973
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-7b1 * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-top_en_-e32ef4-2240271546
[ "autotrain", "evaluation", "region:us" ]
2022-11-25T15:56:55+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "bigscience/bloom-7b1", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "top_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-25T19:36:41+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-7b1 * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-7b1\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-7b1\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
c8b84a8cb7878681a8221361f58dddacfa276375
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-1b7 * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-top_en_-e32ef4-2240271547
[ "autotrain", "evaluation", "region:us" ]
2022-11-25T15:56:56+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "bigscience/bloom-1b7", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "top_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-25T16:48:32+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-1b7 * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-1b7\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-1b7\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
f519c13ea74b957221c03570d8287a36caf95f8a
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-1b1 * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-sen_en_-1de085-2240171543
[ "autotrain", "evaluation", "region:us" ]
2022-11-25T15:56:56+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "bigscience/bloom-1b1", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "sen_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-25T16:21:45+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-1b1 * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-1b1\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-1b1\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
968a04a200dcb14d5b0dfef60c1b1592109de17e
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-560m * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-sen_en_-1de085-2240171544
[ "autotrain", "evaluation", "region:us" ]
2022-11-25T15:56:57+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "bigscience/bloom-560m", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "sen_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-25T16:21:24+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-560m * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-560m\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-560m\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
b3578a59b78d34fe283014069f9e4d55009866f6
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-7b1 * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-sen_en_-1de085-2240171541
[ "autotrain", "evaluation", "region:us" ]
2022-11-25T15:56:57+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "bigscience/bloom-7b1", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "sen_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-25T18:39:54+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-7b1 * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-7b1\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-7b1\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
f8621141b338cb721eada849e1fa83c5ad5ca27d
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-3b * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-top_en_-e32ef4-2240271545
[ "autotrain", "evaluation", "region:us" ]
2022-11-25T15:56:57+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "bigscience/bloom-3b", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "top_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-25T17:17:59+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-3b * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-3b\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-3b\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
074ffa6a9daa3c4d382d18a0d87afd174973eefd
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-560m * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-top_en_-e32ef4-2240271549
[ "autotrain", "evaluation", "region:us" ]
2022-11-25T15:56:58+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "bigscience/bloom-560m", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "top_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-25T16:27:14+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-560m * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-560m\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-560m\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
25d608f535cfd9adf48a3e53e58b2ad0e07519e6
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-1b1 * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-top_en_-e32ef4-2240271548
[ "autotrain", "evaluation", "region:us" ]
2022-11-25T15:56:59+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "bigscience/bloom-1b1", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "top_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-25T16:30:32+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-1b1 * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-1b1\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-1b1\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
bfafee884f55b9de89597049b1ce14430e6c5ff5
# Dataset Card for masri_test ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [MASRI Project](https://www.um.edu.mt/projects/masri/) - **Repository:** [MASRI Data Repo](https://github.com/UMSpeech/) - **Point of Contact:** [Carlos Mena](mailto:[email protected]), [Andrea De Marco](mailto:[email protected]), [Claudia Borg](mailto:[email protected]) ### Dataset Summary The MASRI-TEST CORPUS was created out of YouTube videos belonging to the channel of the [University of Malta](www.youtube.com/user/universityofmalta). It has a length of 1 hour and it is gender balanced, as it has the same number of male and female speakers. ### Example Usage The MASRI-TEST contains only the test split: ```python from datasets import load_dataset masri_test = load_dataset("MLRS/masri_test") ``` It is also valid to do: ```python from datasets import load_dataset masri_test = load_dataset("MLRS/masri_test",split="test") ``` ### Supported Tasks automatic-speech-recognition: The dataset can be used to test a model for Automatic Speech Recognition (ASR). The model is presented with an audio file and asked to transcribe the audio file to written text. The most common evaluation metric is the word error rate (WER). ### Languages The language of the corpus is Maltese. ## Dataset Structure ### Data Instances ```python { 'audio_id': 'MSRTS_M_17_TS_00001', 'audio': { 'path': '/home/carlos/.cache/HuggingFace/datasets/downloads/extracted/9158ecbeeb3532038f3fe3d53e0adda1f790c9363a613bac32c454a39d9c682c/test/male/M_17/MSRTS_M_17_TS_00001.flac', 'array': array([ 0.0020752 , 0.00283813, 0.00167847, ..., -0.0010376 , -0.00091553, -0.00100708], dtype=float32), 'sampling_rate': 16000 }, 'speaker_id': 'M_17', 'gender': 'male', 'duration': 5.920000076293945, 'normalized_text': 'ignazio saverio mifsud kien qed jippjana kien qed iħejji tliet volumi tal-biblijoteka maltese' } ``` ### Data Fields * `audio_id` (string) - id of audio segment * `audio` (datasets.Audio) - a dictionary containing the path to the audio, the decoded audio array, and the sampling rate. In non-streaming mode (default), the path points to the locally extracted audio. In streaming mode, the path is the relative path of an audio inside its archive (as files are not downloaded and extracted locally). * `speaker_id` (string) - id of speaker * `gender` (string) - gender of speaker (male or female) * `duration` (float32) - duration of the audio file in seconds. 
* `normalized_text` (string) - normalized audio segment transcription ### Data Splits The corpus counts just with the test split which has a total of 668 speech files from 17 male speakers and 17 female speakers with a total duration of 1 hour. ## Dataset Creation ### Curation Rationale The MASRI-TEST CORPUS (MTSC) has the following characteristics: * The MTSC has an exact duration of 1 hours and 0 minutes. It has 668 audio files. * The MTSC has recordings from 34 different speakers: 17 men and 17 women. * Data in MTSC is classified by speaker. Therefore, all the recordings of each individual speaker are stored in one single directory. * Data is also classified according to the gender (male/female) of the speakers. * Every audio file in the MTSC has a duration between 3 and 10 seconds approximately. * Audio files in the MTSC are distributed in a 16khz@16bit mono format. * Transcriptions in MTSC are in lowercase. No punctuation marks are permitted except for dashes (-) and apostrophes (') due to their importance in Maltese orthography. ### Source Data #### Initial Data Collection and Normalization The MASRI-TEST CORPUS was possible due to a collaboration of two different Universities. The data selection and audio segmentation was performed by the [CIEMPIESS-UNAM Project](http://www.ciempiess.org/) at the [Universidad Nacional Autónoma de México (UNAM)](https://www.unam.mx/) in Mexico City. The audio transcription and corpus edition was performed by the [MASRI Team](https://www.um.edu.mt/projects/masri/) at the [University of Malta](https://www.um.edu.mt/) in the Msida Campus. ### Annotations #### Annotation process Proper nouns and other words pronounced in languages other than Maltese (mainly from English, Italian, French and German) were transcribed in their respective orthographic system. #### Who are the annotators? The audio transcription was performed by expert native speakers at the [University of Malta](https://www.um.edu.mt/) in the Msida Campus. ### Personal and Sensitive Information The dataset could contain names revealing the identity of some speakers; on the other side, the recordings come from a publicly repository (YouTube), so, there is not a real intent of the participants to be anonymized. Anyway, you agree to not attempt to determine the identity of speakers in this dataset. **Notice:** Should you consider that our data contains material that is owned by you and should therefore not be reproduced here?, please: * Clearly identify yourself, with detailed contact data such as an address, telephone number or email address at which you can be contacted. * Clearly identify the copyrighted work claimed to be infringed. * Clearly identify the material that is claimed to be infringing and information reasonably sufficient to allow us to locate the material. * Send the request to [Carlos Mena](mailto:[email protected]) Take down: We will comply to legitimate requests by removing the affected sources from the corpus. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is challenging because it contains spontaneous speech; so, it will be helpful for the ASR community to evaluate their acoustic models in Maltese with it. ### Discussion of Biases The dataset intents to be gender balanced. It is comprised of 17 male speakers and 17 female speakers. ### Other Known Limitations Neither the MASRI Team or the CIEMPIESS-UNAM Project guarantee the accuracy of this corpus, nor its suitability for any specific purpose. 
As a matter of fact, a number of errors, omissions and inconsistencies are expected to be found within the corpus. ### Dataset Curators The audio recordings were collected and segmented by students belonging to the social service program ["Desarrollo de Tecnologías del Habla"](http://profesores.fi-b.unam.mx/carlos_mena/servicio.html), it was curated by Carlos Daniel Hernández Mena and its transcriptions were manually performed by Ayrton-Didier Brincat during 2020. ### Licensing Information [CC-BY-NC-SA-4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/). The copyright remains with the original owners of the video. As the data is taken from YouTube, we invoke the same argument of "fair use" as in the [Voxlingua107](http://bark.phon.ioc.ee/voxlingua107/) dataset, which is: **"While YouTube users own the copyright to their own videos, using the audio in the videos for training speech recognition models has very limited and transformative purpose and qualifies thus as "fair use" of copyrighted materials. YouTube’s terms of service forbid downloading, storing and distribution of videos. However, the aim of this rule is clearly to forbid unfair monetization of the content by third-party sites and applications. Our dataset contains the videos in segmented audio-only form that makes the monetization of the actual distributed content extremely difficult."** ### Citation Information ``` @misc{carlosmenamasritest2020, title={MASRI-TEST CORPUS: Audio and Transcriptions in Maltese extracted from the YouTube channel of the University of Malta.}, author={Hernandez Mena, Carlos Daniel and Brincat, Ayrton-Didier and Gatt, Albert and DeMarco, Andrea and Borg, Claudia and van der Plas, Lonneke and Meza Ruiz, Iván Vladimir}, journal={MASRI Project, Malta}, year={2020}, url={https://huggingface.co/datasets/MLRS/masri_test}, } ``` ### Contributions The authors would like to thank to Alberto Templos Carbajal, Elena Vera and Angélica Gutiérrez for their support to the social service program ["Desarrollo de Tecnologías del Habla"](http://profesores.fi-b.unam.mx/carlos_mena/servicio.html) at the ["Facultad de Ingeniería (FI)"](https://www.ingenieria.unam.mx/) of the [Universidad Nacional Autónoma de México (UNAM)](https://www.unam.mx/). We also thank to the social service students for all the hard work during the audio segmentation.
MLRS/masri_test
[ "task_categories:automatic-speech-recognition", "annotations_creators:expert-generated", "language_creators:other", "multilinguality:monolingual", "size_categories:n<1K", "source_datasets:original", "language:mt", "license:cc-by-nc-sa-4.0", "masri", "maltese", "masri-project", "malta", "test corpus", "region:us" ]
2022-11-25T17:06:57+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["other"], "language": ["mt"], "license": "cc-by-nc-sa-4.0", "multilinguality": ["monolingual"], "size_categories": ["n<1K"], "source_datasets": ["original"], "task_categories": ["automatic-speech-recognition"], "task_ids": [], "pretty_name": "MASRI-TEST CORPUS: Audio and Transcriptions in Maltese extracted from the YouTube channel of the University of Malta.", "tags": ["masri", "maltese", "masri-project", "malta", "test corpus"]}
2023-03-30T10:08:22+00:00
[]
[ "mt" ]
TAGS #task_categories-automatic-speech-recognition #annotations_creators-expert-generated #language_creators-other #multilinguality-monolingual #size_categories-n<1K #source_datasets-original #language-Maltese #license-cc-by-nc-sa-4.0 #masri #maltese #masri-project #malta #test corpus #region-us
# Dataset Card for masri_test ## Table of Contents - Dataset Description - Dataset Summary - Supported Tasks - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: MASRI Project - Repository: MASRI Data Repo - Point of Contact: Carlos Mena, Andrea De Marco, Claudia Borg ### Dataset Summary The MASRI-TEST CORPUS was created out of YouTube videos belonging to the channel of the University of Malta. It has a length of 1 hour and it is gender balanced, as it has the same number of male and female speakers. ### Example Usage The MASRI-TEST contains only the test split: It is also valid to do: ### Supported Tasks automatic-speech-recognition: The dataset can be used to test a model for Automatic Speech Recognition (ASR). The model is presented with an audio file and asked to transcribe the audio file to written text. The most common evaluation metric is the word error rate (WER). ### Languages The language of the corpus is Maltese. ## Dataset Structure ### Data Instances ### Data Fields * 'audio_id' (string) - id of audio segment * 'audio' (datasets.Audio) - a dictionary containing the path to the audio, the decoded audio array, and the sampling rate. In non-streaming mode (default), the path points to the locally extracted audio. In streaming mode, the path is the relative path of an audio inside its archive (as files are not downloaded and extracted locally). * 'speaker_id' (string) - id of speaker * 'gender' (string) - gender of speaker (male or female) * 'duration' (float32) - duration of the audio file in seconds. * 'normalized_text' (string) - normalized audio segment transcription ### Data Splits The corpus counts just with the test split which has a total of 668 speech files from 17 male speakers and 17 female speakers with a total duration of 1 hour. ## Dataset Creation ### Curation Rationale The MASRI-TEST CORPUS (MTSC) has the following characteristics: * The MTSC has an exact duration of 1 hours and 0 minutes. It has 668 audio files. * The MTSC has recordings from 34 different speakers: 17 men and 17 women. * Data in MTSC is classified by speaker. Therefore, all the recordings of each individual speaker are stored in one single directory. * Data is also classified according to the gender (male/female) of the speakers. * Every audio file in the MTSC has a duration between 3 and 10 seconds approximately. * Audio files in the MTSC are distributed in a 16khz@16bit mono format. * Transcriptions in MTSC are in lowercase. No punctuation marks are permitted except for dashes (-) and apostrophes (') due to their importance in Maltese orthography. ### Source Data #### Initial Data Collection and Normalization The MASRI-TEST CORPUS was possible due to a collaboration of two different Universities. The data selection and audio segmentation was performed by the CIEMPIESS-UNAM Project at the Universidad Nacional Autónoma de México (UNAM) in Mexico City. The audio transcription and corpus edition was performed by the MASRI Team at the University of Malta in the Msida Campus. 
### Annotations #### Annotation process Proper nouns and other words pronounced in languages other than Maltese (mainly from English, Italian, French and German) were transcribed in their respective orthographic system. #### Who are the annotators? The audio transcription was performed by expert native speakers at the University of Malta in the Msida Campus. ### Personal and Sensitive Information The dataset could contain names revealing the identity of some speakers; on the other side, the recordings come from a publicly repository (YouTube), so, there is not a real intent of the participants to be anonymized. Anyway, you agree to not attempt to determine the identity of speakers in this dataset. Notice: Should you consider that our data contains material that is owned by you and should therefore not be reproduced here?, please: * Clearly identify yourself, with detailed contact data such as an address, telephone number or email address at which you can be contacted. * Clearly identify the copyrighted work claimed to be infringed. * Clearly identify the material that is claimed to be infringing and information reasonably sufficient to allow us to locate the material. * Send the request to Carlos Mena Take down: We will comply to legitimate requests by removing the affected sources from the corpus. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is challenging because it contains spontaneous speech; so, it will be helpful for the ASR community to evaluate their acoustic models in Maltese with it. ### Discussion of Biases The dataset intents to be gender balanced. It is comprised of 17 male speakers and 17 female speakers. ### Other Known Limitations Neither the MASRI Team or the CIEMPIESS-UNAM Project guarantee the accuracy of this corpus, nor its suitability for any specific purpose. As a matter of fact, a number of errors, omissions and inconsistencies are expected to be found within the corpus. ### Dataset Curators The audio recordings were collected and segmented by students belonging to the social service program "Desarrollo de Tecnologías del Habla", it was curated by Carlos Daniel Hernández Mena and its transcriptions were manually performed by Ayrton-Didier Brincat during 2020. ### Licensing Information CC-BY-NC-SA-4.0. The copyright remains with the original owners of the video. As the data is taken from YouTube, we invoke the same argument of "fair use" as in the Voxlingua107 dataset, which is: "While YouTube users own the copyright to their own videos, using the audio in the videos for training speech recognition models has very limited and transformative purpose and qualifies thus as "fair use" of copyrighted materials. YouTube’s terms of service forbid downloading, storing and distribution of videos. However, the aim of this rule is clearly to forbid unfair monetization of the content by third-party sites and applications. Our dataset contains the videos in segmented audio-only form that makes the monetization of the actual distributed content extremely difficult." ### Contributions The authors would like to thank to Alberto Templos Carbajal, Elena Vera and Angélica Gutiérrez for their support to the social service program "Desarrollo de Tecnologías del Habla" at the "Facultad de Ingeniería (FI)" of the Universidad Nacional Autónoma de México (UNAM). We also thank to the social service students for all the hard work during the audio segmentation.
[ "# Dataset Card for masri_test", "## Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n- Homepage: MASRI Project\n- Repository: MASRI Data Repo\n- Point of Contact: Carlos Mena, Andrea De Marco, Claudia Borg", "### Dataset Summary\nThe MASRI-TEST CORPUS was created out of YouTube videos belonging to the channel of the University of Malta. It has a length of 1 hour and it is gender balanced, as it has the same number of male and female speakers.", "### Example Usage\nThe MASRI-TEST contains only the test split:\n\nIt is also valid to do:", "### Supported Tasks\nautomatic-speech-recognition: The dataset can be used to test a model for Automatic Speech Recognition (ASR). The model is presented with an audio file and asked to transcribe the audio file to written text. The most common evaluation metric is the word error rate (WER).", "### Languages\nThe language of the corpus is Maltese.", "## Dataset Structure", "### Data Instances", "### Data Fields\n* 'audio_id' (string) - id of audio segment\n* 'audio' (datasets.Audio) - a dictionary containing the path to the audio, the decoded audio array, and the sampling rate. In non-streaming mode (default), the path points to the locally extracted audio. In streaming mode, the path is the relative path of an audio inside its archive (as files are not downloaded and extracted locally).\n* 'speaker_id' (string) - id of speaker\n* 'gender' (string) - gender of speaker (male or female)\n* 'duration' (float32) - duration of the audio file in seconds.\n* 'normalized_text' (string) - normalized audio segment transcription", "### Data Splits\nThe corpus counts just with the test split which has a total of 668 speech files from 17 male speakers and 17 female speakers with a total duration of 1 hour.", "## Dataset Creation", "### Curation Rationale\nThe MASRI-TEST CORPUS (MTSC) has the following characteristics:\n* The MTSC has an exact duration of 1 hours and 0 minutes. It has 668 audio files.\n* The MTSC has recordings from 34 different speakers: 17 men and 17 women.\n* Data in MTSC is classified by speaker. Therefore, all the recordings of each individual speaker are stored in one single directory.\n* Data is also classified according to the gender (male/female) of the speakers.\n* Every audio file in the MTSC has a duration between 3 and 10 seconds approximately.\n* Audio files in the MTSC are distributed in a 16khz@16bit mono format.\n* Transcriptions in MTSC are in lowercase. No punctuation marks are permitted except for dashes (-) and apostrophes (') due to their importance in Maltese orthography.", "### Source Data", "#### Initial Data Collection and Normalization\nThe MASRI-TEST CORPUS was possible due to a collaboration of two different Universities. The data selection and audio segmentation was performed by the CIEMPIESS-UNAM Project at the Universidad Nacional Autónoma de México (UNAM) in Mexico City. 
The audio transcription and corpus edition was performed by the MASRI Team at the University of Malta in the Msida Campus.", "### Annotations", "#### Annotation process\nProper nouns and other words pronounced in languages other than Maltese (mainly from English, Italian, French and German) were transcribed in their respective orthographic system.", "#### Who are the annotators?\nThe audio transcription was performed by expert native speakers at the University of Malta in the Msida Campus.", "### Personal and Sensitive Information\nThe dataset could contain names revealing the identity of some speakers; on the other side, the recordings come from a publicly repository (YouTube), so, there is not a real intent of the participants to be anonymized. Anyway, you agree to not attempt to determine the identity of speakers in this dataset.\nNotice: Should you consider that our data contains material that is owned by you and should therefore not be reproduced here?, please:\n* Clearly identify yourself, with detailed contact data such as an address, telephone number or email address at which you can be contacted.\n* Clearly identify the copyrighted work claimed to be infringed.\n* Clearly identify the material that is claimed to be infringing and information reasonably sufficient to allow us to locate the material.\n* Send the request to Carlos Mena\nTake down: We will comply to legitimate requests by removing the affected sources from the corpus.", "## Considerations for Using the Data", "### Social Impact of Dataset\nThis dataset is challenging because it contains spontaneous speech; so, it will be helpful for the ASR community to evaluate their acoustic models in Maltese with it.", "### Discussion of Biases\nThe dataset intents to be gender balanced. It is comprised of 17 male speakers and 17 female speakers.", "### Other Known Limitations\nNeither the MASRI Team or the CIEMPIESS-UNAM Project guarantee the accuracy of this corpus, nor its suitability for any specific purpose. As a matter of fact, a number of errors, omissions and inconsistencies are expected to be found within the corpus.", "### Dataset Curators\nThe audio recordings were collected and segmented by students belonging to the social service program \"Desarrollo de Tecnologías del Habla\", it was curated by Carlos Daniel Hernández Mena and its transcriptions were manually performed by Ayrton-Didier Brincat during 2020.", "### Licensing Information\nCC-BY-NC-SA-4.0. The copyright remains with the original owners of the video.\nAs the data is taken from YouTube, we invoke the same argument of \"fair use\" as in the Voxlingua107 dataset, which is:\n\"While YouTube users own the copyright to their own videos, using the audio in the videos for training speech recognition models has very limited and transformative purpose and qualifies thus as \"fair use\" of copyrighted materials. YouTube’s terms of service forbid downloading, storing and distribution of videos. However, the aim of this rule is clearly to forbid unfair monetization of the content by third-party sites and applications. Our dataset contains the videos in segmented audio-only form that makes the monetization of the actual distributed content extremely difficult.\"", "### Contributions\nThe authors would like to thank to Alberto Templos Carbajal, Elena Vera and Angélica Gutiérrez for their support to the social service program \"Desarrollo de Tecnologías del Habla\" at the \"Facultad de Ingeniería (FI)\" of the Universidad Nacional Autónoma de México (UNAM). 
We also thank to the social service students for all the hard work during the audio segmentation." ]
[ "TAGS\n#task_categories-automatic-speech-recognition #annotations_creators-expert-generated #language_creators-other #multilinguality-monolingual #size_categories-n<1K #source_datasets-original #language-Maltese #license-cc-by-nc-sa-4.0 #masri #maltese #masri-project #malta #test corpus #region-us \n", "# Dataset Card for masri_test", "## Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n- Homepage: MASRI Project\n- Repository: MASRI Data Repo\n- Point of Contact: Carlos Mena, Andrea De Marco, Claudia Borg", "### Dataset Summary\nThe MASRI-TEST CORPUS was created out of YouTube videos belonging to the channel of the University of Malta. It has a length of 1 hour and it is gender balanced, as it has the same number of male and female speakers.", "### Example Usage\nThe MASRI-TEST contains only the test split:\n\nIt is also valid to do:", "### Supported Tasks\nautomatic-speech-recognition: The dataset can be used to test a model for Automatic Speech Recognition (ASR). The model is presented with an audio file and asked to transcribe the audio file to written text. The most common evaluation metric is the word error rate (WER).", "### Languages\nThe language of the corpus is Maltese.", "## Dataset Structure", "### Data Instances", "### Data Fields\n* 'audio_id' (string) - id of audio segment\n* 'audio' (datasets.Audio) - a dictionary containing the path to the audio, the decoded audio array, and the sampling rate. In non-streaming mode (default), the path points to the locally extracted audio. In streaming mode, the path is the relative path of an audio inside its archive (as files are not downloaded and extracted locally).\n* 'speaker_id' (string) - id of speaker\n* 'gender' (string) - gender of speaker (male or female)\n* 'duration' (float32) - duration of the audio file in seconds.\n* 'normalized_text' (string) - normalized audio segment transcription", "### Data Splits\nThe corpus counts just with the test split which has a total of 668 speech files from 17 male speakers and 17 female speakers with a total duration of 1 hour.", "## Dataset Creation", "### Curation Rationale\nThe MASRI-TEST CORPUS (MTSC) has the following characteristics:\n* The MTSC has an exact duration of 1 hours and 0 minutes. It has 668 audio files.\n* The MTSC has recordings from 34 different speakers: 17 men and 17 women.\n* Data in MTSC is classified by speaker. Therefore, all the recordings of each individual speaker are stored in one single directory.\n* Data is also classified according to the gender (male/female) of the speakers.\n* Every audio file in the MTSC has a duration between 3 and 10 seconds approximately.\n* Audio files in the MTSC are distributed in a 16khz@16bit mono format.\n* Transcriptions in MTSC are in lowercase. No punctuation marks are permitted except for dashes (-) and apostrophes (') due to their importance in Maltese orthography.", "### Source Data", "#### Initial Data Collection and Normalization\nThe MASRI-TEST CORPUS was possible due to a collaboration of two different Universities. 
The data selection and audio segmentation was performed by the CIEMPIESS-UNAM Project at the Universidad Nacional Autónoma de México (UNAM) in Mexico City. The audio transcription and corpus edition was performed by the MASRI Team at the University of Malta in the Msida Campus.", "### Annotations", "#### Annotation process\nProper nouns and other words pronounced in languages other than Maltese (mainly from English, Italian, French and German) were transcribed in their respective orthographic system.", "#### Who are the annotators?\nThe audio transcription was performed by expert native speakers at the University of Malta in the Msida Campus.", "### Personal and Sensitive Information\nThe dataset could contain names revealing the identity of some speakers; on the other side, the recordings come from a publicly repository (YouTube), so, there is not a real intent of the participants to be anonymized. Anyway, you agree to not attempt to determine the identity of speakers in this dataset.\nNotice: Should you consider that our data contains material that is owned by you and should therefore not be reproduced here?, please:\n* Clearly identify yourself, with detailed contact data such as an address, telephone number or email address at which you can be contacted.\n* Clearly identify the copyrighted work claimed to be infringed.\n* Clearly identify the material that is claimed to be infringing and information reasonably sufficient to allow us to locate the material.\n* Send the request to Carlos Mena\nTake down: We will comply to legitimate requests by removing the affected sources from the corpus.", "## Considerations for Using the Data", "### Social Impact of Dataset\nThis dataset is challenging because it contains spontaneous speech; so, it will be helpful for the ASR community to evaluate their acoustic models in Maltese with it.", "### Discussion of Biases\nThe dataset intents to be gender balanced. It is comprised of 17 male speakers and 17 female speakers.", "### Other Known Limitations\nNeither the MASRI Team or the CIEMPIESS-UNAM Project guarantee the accuracy of this corpus, nor its suitability for any specific purpose. As a matter of fact, a number of errors, omissions and inconsistencies are expected to be found within the corpus.", "### Dataset Curators\nThe audio recordings were collected and segmented by students belonging to the social service program \"Desarrollo de Tecnologías del Habla\", it was curated by Carlos Daniel Hernández Mena and its transcriptions were manually performed by Ayrton-Didier Brincat during 2020.", "### Licensing Information\nCC-BY-NC-SA-4.0. The copyright remains with the original owners of the video.\nAs the data is taken from YouTube, we invoke the same argument of \"fair use\" as in the Voxlingua107 dataset, which is:\n\"While YouTube users own the copyright to their own videos, using the audio in the videos for training speech recognition models has very limited and transformative purpose and qualifies thus as \"fair use\" of copyrighted materials. YouTube’s terms of service forbid downloading, storing and distribution of videos. However, the aim of this rule is clearly to forbid unfair monetization of the content by third-party sites and applications. 
Our dataset contains the videos in segmented audio-only form that makes the monetization of the actual distributed content extremely difficult.\"", "### Contributions\nThe authors would like to thank to Alberto Templos Carbajal, Elena Vera and Angélica Gutiérrez for their support to the social service program \"Desarrollo de Tecnologías del Habla\" at the \"Facultad de Ingeniería (FI)\" of the Universidad Nacional Autónoma de México (UNAM). We also thank to the social service students for all the hard work during the audio segmentation." ]
6b1f397993cb5550ded42d1c95ed60b6de5af77f
# Dataset Card for masri_dev ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [MASRI Project](https://www.um.edu.mt/projects/masri/) - **Repository:** [MASRI Data Repo](https://github.com/UMSpeech/) - **Point of Contact:** [Carlos Mena](mailto:[email protected]), [Andrea De Marco](mailto:[email protected]), [Claudia Borg](mailto:[email protected]) ### Dataset Summary The MASRI-DEV CORPUS was created out of YouTube videos belonging to the channel of the [University of Malta](www.youtube.com/user/universityofmalta). It has a length of 1 hour and it is gender balanced, as it has the same number of male and female speakers. ### Example Usage The MASRI-DEV contains only the validation split: ```python from datasets import load_dataset masri_dev = load_dataset("MLRS/masri_dev") ``` It is also valid to do: ```python from datasets import load_dataset masri_dev = load_dataset("MLRS/masri_dev",split="validation") ``` ### Supported Tasks automatic-speech-recognition: The dataset can be used as a development/validation corpus when training a model for Automatic Speech Recognition (ASR). The model is presented with an audio file and asked to transcribe the audio file to written text. The most common evaluation metric is the word error rate (WER). ### Languages The language of the corpus is Maltese. ## Dataset Structure ### Data Instances ```python { 'audio_id': 'MSRDV_F_08_DV_00005', 'audio': { 'path': '/home/carlos/.cache/HuggingFace/datasets/downloads/extracted/ebfc6f70292385c6e730ccadc575de507963960c28b4faf2d8bbfcd61589d2cc/dev/female/F_08/MSRDV_F_08_DV_00005.flac', 'array': array([ 0.00140381, 0.00210571, 0.00189209, ..., -0.00210571, -0.00289917, -0.00372314], dtype=float32), 'sampling_rate': 16000 }, 'speaker_id': 'F_08', 'gender': 'female', 'duration': 5.900000095367432, 'normalized_text': 'dawn ma kinux żminijiet sbieħ l-inkwiżizzjoni ddeċidiet li teqred lil kull min' } ``` ### Data Fields * `audio_id` (string) - id of audio segment * `audio` (datasets.Audio) - a dictionary containing the path to the audio, the decoded audio array, and the sampling rate. In non-streaming mode (default), the path points to the locally extracted audio. In streaming mode, the path is the relative path of an audio inside its archive (as files are not downloaded and extracted locally). * `speaker_id` (string) - id of speaker * `gender` (string) - gender of speaker (male or female) * `duration` (float32) - duration of the audio file in seconds. 
* `normalized_text` (string) - normalized audio segment transcription ### Data Splits The corpus contains only the validation split, which has a total of 669 speech files from 9 male speakers and 9 female speakers with a total duration of 1 hour. ## Dataset Creation ### Curation Rationale The MASRI-DEV CORPUS (MDVC) has the following characteristics: * The MDVC has an exact duration of 1 hour and 0 minutes. It has 669 audio files. * The MDVC has recordings from 18 different speakers: 9 men and 9 women. * Data in MDVC is classified by speaker. Therefore, all the recordings of each individual speaker are stored in one single directory. * Data is also classified according to the gender (male/female) of the speakers. * Every audio file in the MDVC has a duration between 3 and 10 seconds approximately. * Audio files in the MDVC are distributed in a 16khz@16bit mono format. * Transcriptions in MDVC are in lowercase. No punctuation marks are permitted except for dashes (-) and apostrophes (') due to their importance in Maltese orthography. ### Source Data #### Initial Data Collection and Normalization The MASRI-DEV CORPUS was possible due to a collaboration between two different universities. The data selection and audio segmentation were performed by the [CIEMPIESS-UNAM Project](http://www.ciempiess.org/) at the [Universidad Nacional Autónoma de México (UNAM)](https://www.unam.mx/) in Mexico City. The audio transcription and corpus edition were performed by the [MASRI Team](https://www.um.edu.mt/projects/masri/) at the [University of Malta](https://www.um.edu.mt/) in the Msida Campus. ### Annotations #### Annotation process Proper nouns and other words pronounced in languages other than Maltese (mainly from English, Italian, French and German) were transcribed in their respective orthographic systems. #### Who are the annotators? The audio transcription was performed by expert native speakers at the [University of Malta](https://www.um.edu.mt/) in the Msida Campus. ### Personal and Sensitive Information The dataset could contain names revealing the identity of some speakers; on the other hand, the recordings come from a public repository (YouTube), so there is no real expectation of anonymity on the part of the participants. Nevertheless, you agree not to attempt to determine the identity of speakers in this dataset. **Notice:** Should you consider that our data contains material that is owned by you and should therefore not be reproduced here, please: * Clearly identify yourself, with detailed contact data such as an address, telephone number or email address at which you can be contacted. * Clearly identify the copyrighted work claimed to be infringed. * Clearly identify the material that is claimed to be infringing and information reasonably sufficient to allow us to locate the material. * Send the request to [Carlos Mena](mailto:[email protected]) Take down: We will comply with legitimate requests by removing the affected sources from the corpus. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is challenging because it contains spontaneous speech, so it will be helpful for the ASR community to validate their acoustic models in Maltese with it. ### Discussion of Biases The dataset is intended to be gender balanced. It comprises 9 male speakers and 9 female speakers. ### Other Known Limitations Neither the MASRI Team nor the CIEMPIESS-UNAM Project guarantees the accuracy of this corpus, nor its suitability for any specific purpose.
As a matter of fact, a number of errors, omissions and inconsistencies are expected to be found within the corpus. ### Dataset Curators The audio recordings were collected and segmented by students belonging to the social service program ["Desarrollo de Tecnologías del Habla"](http://profesores.fi-b.unam.mx/carlos_mena/servicio.html); the corpus was curated by Carlos Daniel Hernández Mena and its transcriptions were manually performed by Ayrton-Didier Brincat during 2020. ### Licensing Information [CC-BY-NC-SA-4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/). The copyright remains with the original owners of the video. As the data is taken from YouTube, we invoke the same argument of "fair use" as in the [Voxlingua107](http://bark.phon.ioc.ee/voxlingua107/) dataset, which is: **"While YouTube users own the copyright to their own videos, using the audio in the videos for training speech recognition models has very limited and transformative purpose and qualifies thus as "fair use" of copyrighted materials. YouTube’s terms of service forbid downloading, storing and distribution of videos. However, the aim of this rule is clearly to forbid unfair monetization of the content by third-party sites and applications. Our dataset contains the videos in segmented audio-only form that makes the monetization of the actual distributed content extremely difficult."** ### Citation Information ``` @misc{carlosmenamasridev2020, title={MASRI-DEV CORPUS: Audio and Transcriptions in Maltese extracted from the YouTube channel of the University of Malta.}, author={Hernandez Mena, Carlos Daniel and Brincat, Ayrton-Didier and Gatt, Albert and DeMarco, Andrea and Borg, Claudia and van der Plas, Lonneke and Meza Ruiz, Iván Vladimir}, journal={MASRI Project, Malta}, year={2020}, url={https://huggingface.co/datasets/MLRS/masri_dev}, } ``` ### Contributions The authors would like to thank Alberto Templos Carbajal, Elena Vera and Angélica Gutiérrez for their support of the social service program ["Desarrollo de Tecnologías del Habla"](http://profesores.fi-b.unam.mx/carlos_mena/servicio.html) at the ["Facultad de Ingeniería (FI)"](https://www.ingenieria.unam.mx/) of the [Universidad Nacional Autónoma de México (UNAM)](https://www.unam.mx/). We also thank the social service students for all the hard work during the audio segmentation.
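Because the curation notes above state that transcriptions are lowercase and allow no punctuation besides dashes and apostrophes, the normalized_text field can be sanity-checked with a few lines of code. The snippet below is only an illustrative sketch (it inspects ASCII punctuation only), not an official validation tool; it uses the MLRS/masri_dev id shown in the usage examples.

```python
import string
from datasets import load_dataset

masri_dev = load_dataset("MLRS/masri_dev", split="validation")

# Punctuation permitted by the corpus conventions: dashes and apostrophes.
forbidden = set(string.punctuation) - {"-", "'"}

def breaks_conventions(example):
    text = example["normalized_text"]
    return any(ch.isupper() for ch in text) or any(ch in forbidden for ch in text)

flagged = masri_dev.filter(breaks_conventions)
print(f"{len(flagged)} of {len(masri_dev)} transcriptions contain uppercase letters or disallowed punctuation")
```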
MLRS/masri_dev
[ "task_categories:automatic-speech-recognition", "annotations_creators:expert-generated", "language_creators:other", "multilinguality:monolingual", "size_categories:n<1K", "source_datasets:original", "language:mt", "license:cc-by-nc-sa-4.0", "masri", "maltese", "masri-project", "malta", "evaluation corpus", "development corpus", "dev", "region:us" ]
2022-11-25T17:13:11+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["other"], "language": ["mt"], "license": "cc-by-nc-sa-4.0", "multilinguality": ["monolingual"], "size_categories": ["n<1K"], "source_datasets": ["original"], "task_categories": ["automatic-speech-recognition"], "task_ids": [], "pretty_name": "MASRI-DEV CORPUS: Audio and Transcriptions in Maltese extracted from the YouTube channel of the University of Malta.", "tags": ["masri", "maltese", "masri-project", "malta", "evaluation corpus", "development corpus", "dev"]}
2023-03-30T10:05:14+00:00
[]
[ "mt" ]
TAGS #task_categories-automatic-speech-recognition #annotations_creators-expert-generated #language_creators-other #multilinguality-monolingual #size_categories-n<1K #source_datasets-original #language-Maltese #license-cc-by-nc-sa-4.0 #masri #maltese #masri-project #malta #evaluation corpus #development corpus #dev #region-us
# Dataset Card for masri_dev ## Table of Contents - Dataset Description - Dataset Summary - Supported Tasks - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: MASRI Project - Repository: MASRI Data Repo - Point of Contact: Carlos Mena, Andrea De Marco, Claudia Borg ### Dataset Summary The MASRI-DEV CORPUS was created out of YouTube videos belonging to the channel of the University of Malta. It has a length of 1 hour and it is gender balanced, as it has the same number of male and female speakers. ### Example Usage The MASRI-DEV contains only the validation split: It is also valid to do: ### Supported Tasks automatic-speech-recognition: The dataset can be used as a development/validation corpus when training a model for Automatic Speech Recognition (ASR). The model is presented with an audio file and asked to transcribe the audio file to written text. The most common evaluation metric is the word error rate (WER). ### Languages The language of the corpus is Maltese. ## Dataset Structure ### Data Instances ### Data Fields * 'audio_id' (string) - id of audio segment * 'audio' (datasets.Audio) - a dictionary containing the path to the audio, the decoded audio array, and the sampling rate. In non-streaming mode (default), the path points to the locally extracted audio. In streaming mode, the path is the relative path of an audio inside its archive (as files are not downloaded and extracted locally). * 'speaker_id' (string) - id of speaker * 'gender' (string) - gender of speaker (male or female) * 'duration' (float32) - duration of the audio file in seconds. * 'normalized_text' (string) - normalized audio segment transcription ### Data Splits The corpus counts just with the validation split which has a total of 669 speech files from 9 male speakers and 9 female speakers with a total duration of 1 hour. ## Dataset Creation ### Curation Rationale The MASRI-DEV CORPUS (MDVC) has the following characteristics: * The MDVC has an exact duration of 1 hours and 0 minutes. It has 669 audio files. * The MDVC has recordings from 18 different speakers: 9 men and 9 women. * Data in MDVC is classified by speaker. Therefore, all the recordings of each individual speaker are stored in one single directory. * Data is also classified according to the gender (male/female) of the speakers. * Every audio file in the MDVC has a duration between 3 and 10 seconds approximately. * Audio files in the MDVC are distributed in a 16khz@16bit mono format. * Transcriptions in MDVC are in lowercase. No punctuation marks are permitted except for dashes (-) and apostrophes (') due to their importance in Maltese orthography. ### Source Data #### Initial Data Collection and Normalization The MASRI-DEV CORPUS was possible due to a collaboration of two different Universities. The data selection and audio segmentation was performed by the CIEMPIESS-UNAM Project at the Universidad Nacional Autónoma de México (UNAM) in Mexico City. The audio transcription and corpus edition was performed by the MASRI Team at the University of Malta in the Msida Campus. 
### Annotations #### Annotation process Proper nouns and other words pronounced in languages other than Maltese (mainly from English, Italian, French and German) were transcribed in their respective orthographic system. #### Who are the annotators? The audio transcription was performed by expert native speakers at the University of Malta in the Msida Campus. ### Personal and Sensitive Information The dataset could contain names revealing the identity of some speakers; on the other side, the recordings come from a publicly repository (YouTube), so, there is not a real intent of the participants to be anonymized. Anyway, you agree to not attempt to determine the identity of speakers in this dataset. Notice: Should you consider that our data contains material that is owned by you and should therefore not be reproduced here?, please: * Clearly identify yourself, with detailed contact data such as an address, telephone number or email address at which you can be contacted. * Clearly identify the copyrighted work claimed to be infringed. * Clearly identify the material that is claimed to be infringing and information reasonably sufficient to allow us to locate the material. * Send the request to Carlos Mena Take down: We will comply to legitimate requests by removing the affected sources from the corpus. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is challenging because it contains spontaneous speech; so, it will be helpful for the ASR community to validate their acoustic models in Maltese with it. ### Discussion of Biases The dataset intents to be gender balanced. It is comprised of 9 male speakers and 9 female speakers. ### Other Known Limitations Neither the MASRI Team or the CIEMPIESS-UNAM Project guarantee the accuracy of this corpus, nor its suitability for any specific purpose. As a matter of fact, a number of errors, omissions and inconsistencies are expected to be found within the corpus. ### Dataset Curators The audio recordings were collected and segmented by students belonging to the social service program "Desarrollo de Tecnologías del Habla", it was curated by Carlos Daniel Hernández Mena and its transcriptions were manually performed by Ayrton-Didier Brincat during 2020. ### Licensing Information CC-BY-NC-SA-4.0. The copyright remains with the original owners of the video. As the data is taken from YouTube, we invoke the same argument of "fair use" as in the Voxlingua107 dataset, which is: "While YouTube users own the copyright to their own videos, using the audio in the videos for training speech recognition models has very limited and transformative purpose and qualifies thus as "fair use" of copyrighted materials. YouTube’s terms of service forbid downloading, storing and distribution of videos. However, the aim of this rule is clearly to forbid unfair monetization of the content by third-party sites and applications. Our dataset contains the videos in segmented audio-only form that makes the monetization of the actual distributed content extremely difficult." ### Contributions The authors would like to thank to Alberto Templos Carbajal, Elena Vera and Angélica Gutiérrez for their support to the social service program "Desarrollo de Tecnologías del Habla" at the "Facultad de Ingeniería (FI)" of the Universidad Nacional Autónoma de México (UNAM). We also thank to the social service students for all the hard work during the audio segmentation.
[ "# Dataset Card for masri_dev", "## Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n- Homepage: MASRI Project\n- Repository: MASRI Data Repo\n- Point of Contact: Carlos Mena, Andrea De Marco, Claudia Borg", "### Dataset Summary\nThe MASRI-DEV CORPUS was created out of YouTube videos belonging to the channel of the University of Malta. It has a length of 1 hour and it is gender balanced, as it has the same number of male and female speakers.", "### Example Usage\nThe MASRI-DEV contains only the validation split:\n\nIt is also valid to do:", "### Supported Tasks\nautomatic-speech-recognition: The dataset can be used as a development/validation corpus when training a model for Automatic Speech Recognition (ASR). The model is presented with an audio file and asked to transcribe the audio file to written text. The most common evaluation metric is the word error rate (WER).", "### Languages\nThe language of the corpus is Maltese.", "## Dataset Structure", "### Data Instances", "### Data Fields\n* 'audio_id' (string) - id of audio segment\n* 'audio' (datasets.Audio) - a dictionary containing the path to the audio, the decoded audio array, and the sampling rate. In non-streaming mode (default), the path points to the locally extracted audio. In streaming mode, the path is the relative path of an audio inside its archive (as files are not downloaded and extracted locally).\n* 'speaker_id' (string) - id of speaker\n* 'gender' (string) - gender of speaker (male or female)\n* 'duration' (float32) - duration of the audio file in seconds.\n* 'normalized_text' (string) - normalized audio segment transcription", "### Data Splits\nThe corpus counts just with the validation split which has a total of 669 speech files from 9 male speakers and 9 female speakers with a total duration of 1 hour.", "## Dataset Creation", "### Curation Rationale\nThe MASRI-DEV CORPUS (MDVC) has the following characteristics:\n* The MDVC has an exact duration of 1 hours and 0 minutes. It has 669 audio files.\n* The MDVC has recordings from 18 different speakers: 9 men and 9 women.\n* Data in MDVC is classified by speaker. Therefore, all the recordings of each individual speaker are stored in one single directory.\n* Data is also classified according to the gender (male/female) of the speakers.\n* Every audio file in the MDVC has a duration between 3 and 10 seconds approximately.\n* Audio files in the MDVC are distributed in a 16khz@16bit mono format.\n* Transcriptions in MDVC are in lowercase. No punctuation marks are permitted except for dashes (-) and apostrophes (') due to their importance in Maltese orthography.", "### Source Data", "#### Initial Data Collection and Normalization\nThe MASRI-DEV CORPUS was possible due to a collaboration of two different Universities. The data selection and audio segmentation was performed by the CIEMPIESS-UNAM Project at the Universidad Nacional Autónoma de México (UNAM) in Mexico City. 
The audio transcription and corpus edition was performed by the MASRI Team at the University of Malta in the Msida Campus.", "### Annotations", "#### Annotation process\nProper nouns and other words pronounced in languages other than Maltese (mainly from English, Italian, French and German) were transcribed in their respective orthographic system.", "#### Who are the annotators?\nThe audio transcription was performed by expert native speakers at the University of Malta in the Msida Campus.", "### Personal and Sensitive Information\nThe dataset could contain names revealing the identity of some speakers; on the other side, the recordings come from a publicly repository (YouTube), so, there is not a real intent of the participants to be anonymized. Anyway, you agree to not attempt to determine the identity of speakers in this dataset.\nNotice: Should you consider that our data contains material that is owned by you and should therefore not be reproduced here?, please:\n* Clearly identify yourself, with detailed contact data such as an address, telephone number or email address at which you can be contacted.\n* Clearly identify the copyrighted work claimed to be infringed.\n* Clearly identify the material that is claimed to be infringing and information reasonably sufficient to allow us to locate the material.\n* Send the request to Carlos Mena\nTake down: We will comply to legitimate requests by removing the affected sources from the corpus.", "## Considerations for Using the Data", "### Social Impact of Dataset\nThis dataset is challenging because it contains spontaneous speech; so, it will be helpful for the ASR community to validate their acoustic models in Maltese with it.", "### Discussion of Biases\nThe dataset intents to be gender balanced. It is comprised of 9 male speakers and 9 female speakers.", "### Other Known Limitations\nNeither the MASRI Team or the CIEMPIESS-UNAM Project guarantee the accuracy of this corpus, nor its suitability for any specific purpose. As a matter of fact, a number of errors, omissions and inconsistencies are expected to be found within the corpus.", "### Dataset Curators\nThe audio recordings were collected and segmented by students belonging to the social service program \"Desarrollo de Tecnologías del Habla\", it was curated by Carlos Daniel Hernández Mena and its transcriptions were manually performed by Ayrton-Didier Brincat during 2020.", "### Licensing Information\nCC-BY-NC-SA-4.0. The copyright remains with the original owners of the video.\nAs the data is taken from YouTube, we invoke the same argument of \"fair use\" as in the Voxlingua107 dataset, which is:\n\"While YouTube users own the copyright to their own videos, using the audio in the videos for training speech recognition models has very limited and transformative purpose and qualifies thus as \"fair use\" of copyrighted materials. YouTube’s terms of service forbid downloading, storing and distribution of videos. However, the aim of this rule is clearly to forbid unfair monetization of the content by third-party sites and applications. Our dataset contains the videos in segmented audio-only form that makes the monetization of the actual distributed content extremely difficult.\"", "### Contributions\nThe authors would like to thank to Alberto Templos Carbajal, Elena Vera and Angélica Gutiérrez for their support to the social service program \"Desarrollo de Tecnologías del Habla\" at the \"Facultad de Ingeniería (FI)\" of the Universidad Nacional Autónoma de México (UNAM). 
We also thank to the social service students for all the hard work during the audio segmentation." ]
[ "TAGS\n#task_categories-automatic-speech-recognition #annotations_creators-expert-generated #language_creators-other #multilinguality-monolingual #size_categories-n<1K #source_datasets-original #language-Maltese #license-cc-by-nc-sa-4.0 #masri #maltese #masri-project #malta #evaluation corpus #development corpus #dev #region-us \n", "# Dataset Card for masri_dev", "## Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n- Homepage: MASRI Project\n- Repository: MASRI Data Repo\n- Point of Contact: Carlos Mena, Andrea De Marco, Claudia Borg", "### Dataset Summary\nThe MASRI-DEV CORPUS was created out of YouTube videos belonging to the channel of the University of Malta. It has a length of 1 hour and it is gender balanced, as it has the same number of male and female speakers.", "### Example Usage\nThe MASRI-DEV contains only the validation split:\n\nIt is also valid to do:", "### Supported Tasks\nautomatic-speech-recognition: The dataset can be used as a development/validation corpus when training a model for Automatic Speech Recognition (ASR). The model is presented with an audio file and asked to transcribe the audio file to written text. The most common evaluation metric is the word error rate (WER).", "### Languages\nThe language of the corpus is Maltese.", "## Dataset Structure", "### Data Instances", "### Data Fields\n* 'audio_id' (string) - id of audio segment\n* 'audio' (datasets.Audio) - a dictionary containing the path to the audio, the decoded audio array, and the sampling rate. In non-streaming mode (default), the path points to the locally extracted audio. In streaming mode, the path is the relative path of an audio inside its archive (as files are not downloaded and extracted locally).\n* 'speaker_id' (string) - id of speaker\n* 'gender' (string) - gender of speaker (male or female)\n* 'duration' (float32) - duration of the audio file in seconds.\n* 'normalized_text' (string) - normalized audio segment transcription", "### Data Splits\nThe corpus counts just with the validation split which has a total of 669 speech files from 9 male speakers and 9 female speakers with a total duration of 1 hour.", "## Dataset Creation", "### Curation Rationale\nThe MASRI-DEV CORPUS (MDVC) has the following characteristics:\n* The MDVC has an exact duration of 1 hours and 0 minutes. It has 669 audio files.\n* The MDVC has recordings from 18 different speakers: 9 men and 9 women.\n* Data in MDVC is classified by speaker. Therefore, all the recordings of each individual speaker are stored in one single directory.\n* Data is also classified according to the gender (male/female) of the speakers.\n* Every audio file in the MDVC has a duration between 3 and 10 seconds approximately.\n* Audio files in the MDVC are distributed in a 16khz@16bit mono format.\n* Transcriptions in MDVC are in lowercase. 
No punctuation marks are permitted except for dashes (-) and apostrophes (') due to their importance in Maltese orthography.", "### Source Data", "#### Initial Data Collection and Normalization\nThe MASRI-DEV CORPUS was possible due to a collaboration of two different Universities. The data selection and audio segmentation was performed by the CIEMPIESS-UNAM Project at the Universidad Nacional Autónoma de México (UNAM) in Mexico City. The audio transcription and corpus edition was performed by the MASRI Team at the University of Malta in the Msida Campus.", "### Annotations", "#### Annotation process\nProper nouns and other words pronounced in languages other than Maltese (mainly from English, Italian, French and German) were transcribed in their respective orthographic system.", "#### Who are the annotators?\nThe audio transcription was performed by expert native speakers at the University of Malta in the Msida Campus.", "### Personal and Sensitive Information\nThe dataset could contain names revealing the identity of some speakers; on the other side, the recordings come from a publicly repository (YouTube), so, there is not a real intent of the participants to be anonymized. Anyway, you agree to not attempt to determine the identity of speakers in this dataset.\nNotice: Should you consider that our data contains material that is owned by you and should therefore not be reproduced here?, please:\n* Clearly identify yourself, with detailed contact data such as an address, telephone number or email address at which you can be contacted.\n* Clearly identify the copyrighted work claimed to be infringed.\n* Clearly identify the material that is claimed to be infringing and information reasonably sufficient to allow us to locate the material.\n* Send the request to Carlos Mena\nTake down: We will comply to legitimate requests by removing the affected sources from the corpus.", "## Considerations for Using the Data", "### Social Impact of Dataset\nThis dataset is challenging because it contains spontaneous speech; so, it will be helpful for the ASR community to validate their acoustic models in Maltese with it.", "### Discussion of Biases\nThe dataset intents to be gender balanced. It is comprised of 9 male speakers and 9 female speakers.", "### Other Known Limitations\nNeither the MASRI Team or the CIEMPIESS-UNAM Project guarantee the accuracy of this corpus, nor its suitability for any specific purpose. As a matter of fact, a number of errors, omissions and inconsistencies are expected to be found within the corpus.", "### Dataset Curators\nThe audio recordings were collected and segmented by students belonging to the social service program \"Desarrollo de Tecnologías del Habla\", it was curated by Carlos Daniel Hernández Mena and its transcriptions were manually performed by Ayrton-Didier Brincat during 2020.", "### Licensing Information\nCC-BY-NC-SA-4.0. The copyright remains with the original owners of the video.\nAs the data is taken from YouTube, we invoke the same argument of \"fair use\" as in the Voxlingua107 dataset, which is:\n\"While YouTube users own the copyright to their own videos, using the audio in the videos for training speech recognition models has very limited and transformative purpose and qualifies thus as \"fair use\" of copyrighted materials. YouTube’s terms of service forbid downloading, storing and distribution of videos. However, the aim of this rule is clearly to forbid unfair monetization of the content by third-party sites and applications. 
Our dataset contains the videos in segmented audio-only form that makes the monetization of the actual distributed content extremely difficult.\"", "### Contributions\nThe authors would like to thank to Alberto Templos Carbajal, Elena Vera and Angélica Gutiérrez for their support to the social service program \"Desarrollo de Tecnologías del Habla\" at the \"Facultad de Ingeniería (FI)\" of the Universidad Nacional Autónoma de México (UNAM). We also thank to the social service students for all the hard work during the audio segmentation." ]
2815c12a8a62f8ff2603b78cc2866d43c09b8a0d
# Dataset Card for "common3k-train-prepared" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
DTU54DL/common3k-train-prepared
[ "region:us" ]
2022-11-25T17:46:21+00:00
{"dataset_info": {"features": [{"name": "input_features", "sequence": {"sequence": "float32"}}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 2881415928, "num_examples": 3000}], "download_size": 493426586, "dataset_size": 2881415928}}
2022-11-25T17:48:16+00:00
[]
[]
TAGS #region-us
# Dataset Card for "common3k-train-prepared" More Information needed
[ "# Dataset Card for \"common3k-train-prepared\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"common3k-train-prepared\"\n\nMore Information needed" ]
ebf57304a26bc95228df1dae14a27c7890312266
# Dataset Card for "stackoverflow-open-status-classification" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
reubenjohn/stackoverflow-unified-text-open-status-classification-sample
[ "region:us" ]
2022-11-25T20:19:43+00:00
{"dataset_info": {"features": [{"name": "PostId", "dtype": "int64"}, {"name": "PostCreationDate", "dtype": "string"}, {"name": "OwnerUserId", "dtype": "int64"}, {"name": "OwnerCreationDate", "dtype": "string"}, {"name": "ReputationAtPostCreation", "dtype": "int64"}, {"name": "OwnerUndeletedAnswerCountAtPostTime", "dtype": "int64"}, {"name": "Title", "dtype": "string"}, {"name": "BodyMarkdown", "dtype": "string"}, {"name": "Tag1", "dtype": "string"}, {"name": "Tag2", "dtype": "string"}, {"name": "Tag3", "dtype": "string"}, {"name": "Tag4", "dtype": "string"}, {"name": "Tag5", "dtype": "string"}, {"name": "PostClosedDate", "dtype": "string"}, {"name": "OpenStatus", "dtype": "string"}, {"name": "unified_texts", "dtype": "string"}, {"name": "OpenStatus_id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 216256197, "num_examples": 112217}, {"name": "valid", "num_bytes": 43398940, "num_examples": 22443}, {"name": "test", "num_bytes": 43398940, "num_examples": 22443}], "download_size": 163036345, "dataset_size": 303054077}}
2022-11-25T20:23:51+00:00
[]
[]
TAGS #region-us
# Dataset Card for "stackoverflow-open-status-classification" More Information needed
[ "# Dataset Card for \"stackoverflow-open-status-classification\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"stackoverflow-open-status-classification\"\n\nMore Information needed" ]
871b10dfe89c60c394d13d3b91b83250b4537aa3
# Dataset Card for "stackoverflow-open-status-classification-albert-tokenized" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
reubenjohn/stackoverflow-open-status-classification-sample-albert-tokenized
[ "region:us" ]
2022-11-25T20:38:03+00:00
{"dataset_info": {"features": [{"name": "OpenStatus_id", "dtype": "int64"}, {"name": "input_ids", "sequence": "int32"}, {"name": "token_type_ids", "sequence": "int8"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "test", "num_bytes": 69393756, "num_examples": 22443}, {"name": "train", "num_bytes": 346974964, "num_examples": 112217}, {"name": "valid", "num_bytes": 69393756, "num_examples": 22443}], "download_size": 55848593, "dataset_size": 485762476}}
2022-11-25T20:38:39+00:00
[]
[]
TAGS #region-us
# Dataset Card for "stackoverflow-open-status-classification-albert-tokenized" More Information needed
[ "# Dataset Card for \"stackoverflow-open-status-classification-albert-tokenized\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"stackoverflow-open-status-classification-albert-tokenized\"\n\nMore Information needed" ]
a1a1c4659cdba31da45e5f084999034784b4b441
# Dataset Card for "wiki_lingua_ar" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
arbml/wiki_lingua_ar
[ "region:us" ]
2022-11-26T01:02:13+00:00
{"dataset_info": {"features": [{"name": "article", "dtype": "string"}, {"name": "summary", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 22744300, "num_examples": 5841}, {"name": "train", "num_bytes": 79113081, "num_examples": 20441}, {"name": "validation", "num_bytes": 11620265, "num_examples": 2919}], "download_size": 55826192, "dataset_size": 113477646}}
2022-11-26T01:02:21+00:00
[]
[]
TAGS #region-us
# Dataset Card for "wiki_lingua_ar" More Information needed
[ "# Dataset Card for \"wiki_lingua_ar\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"wiki_lingua_ar\"\n\nMore Information needed" ]
1afc7e616a0dee0c515193815f0e28abe6b3c3fe
Images
serpion/hector
[ "region:us" ]
2022-11-26T01:39:42+00:00
{}
2022-11-26T01:46:01+00:00
[]
[]
TAGS #region-us
Images
[]
[ "TAGS\n#region-us \n" ]
0177fee7b6e15a7a4794a95c526d5654223dbfc1
# Dataset Card for "Sentence_eval" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
shreyasharma/Sentence_eval
[ "region:us" ]
2022-11-26T02:06:48+00:00
{"dataset_info": {"features": [{"name": "sentences", "dtype": "string"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 730710, "num_examples": 11528}, {"name": "test", "num_bytes": 386248, "num_examples": 6104}, {"name": "val", "num_bytes": 104917, "num_examples": 1632}], "download_size": 485767, "dataset_size": 1221875}}
2022-11-26T02:07:02+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Sentence_eval" More Information needed
[ "# Dataset Card for \"Sentence_eval\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Sentence_eval\"\n\nMore Information needed" ]
a23816d28aa710136cab59f5bca3cde475e5926a
# Dataset Card for "hldatasetv2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
jmi6/hldatasetv2
[ "region:us" ]
2022-11-26T03:02:56+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2206466.0, "num_examples": 56}], "download_size": 0, "dataset_size": 2206466.0}}
2022-11-26T03:24:49+00:00
[]
[]
TAGS #region-us
# Dataset Card for "hldatasetv2" More Information needed
[ "# Dataset Card for \"hldatasetv2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"hldatasetv2\"\n\nMore Information needed" ]
6bb44ee982da842051e42e1bbe8f81c3ffd04daf
# Dataset Card for samromur_children ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [Samrómur Children Icelandic Speech 1.0](https://samromur.is/) - **Repository:** [LDC](https://catalog.ldc.upenn.edu/LDC2022S11) - **Paper:** [Samrómur Children: An Icelandic Speech Corpus](https://aclanthology.org/2022.lrec-1.105.pdf) - **Point of Contact:** [Carlos Mena](mailto:[email protected]), [Jón Guðnason](mailto:[email protected]) ### Dataset Summary The Samrómur Children Corpus consists of audio recordings and metadata files containing prompts read by the participants. It contains more than 137000 validated speech recordings uttered by Icelandic children. The corpus is a result of the crowd-sourcing effort run by the Language and Voice Lab (LVL) at Reykjavik University, in cooperation with Almannarómur, Center for Language Technology. The recording process started in October 2019 and continues to this day (September 2021). ### Example Usage The Samrómur Children Corpus is divided into 3 splits: train, validation and test. To load a specific split, pass its name as a config name: ```python from datasets import load_dataset samromur_children = load_dataset("language-and-voice-lab/samromur_children") ``` To load a specific split (for example, the validation split) do: ```python from datasets import load_dataset samromur_children = load_dataset("language-and-voice-lab/samromur_children",split="validation") ``` ### Supported Tasks automatic-speech-recognition: The dataset can be used to train a model for Automatic Speech Recognition (ASR). The model is presented with an audio file and asked to transcribe the audio file to written text. The most common evaluation metric is the word error rate (WER). ### Languages The audio is in Icelandic. The reading prompts were gathered from a variety of sources, mainly from the [Icelandic Gigaword Corpus](http://clarin.is/en/resources/gigaword). The corpus includes text from novels, news, plays, and from a list of location names in Iceland. The prompts also came from the [Icelandic Web of Science](https://www.visindavefur.is/). ## Dataset Structure ### Data Instances ```python { 'audio_id': '015652-0717240', 'audio': { 'path': '/home/carlos/.cache/HuggingFace/datasets/downloads/extracted/2c6b0d82de2ef0dc0879732f726809cccbe6060664966099f43276e8c94b03f2/test/015652/015652-0717240.flac', 'array': array([ 0. , 0. , 0. , ..., -0.00311279, -0.0007019 , 0.00128174], dtype=float32), 'sampling_rate': 16000 }, 'speaker_id': '015652', 'gender': 'female', 'age': '11', 'duration': 4.179999828338623, 'normalized_text': 'eiginlega var hann hin unga rússneska bylting lifandi komin' } ``` ### Data Fields * `audio_id` (string) - id of audio segment * `audio` (datasets.Audio) - a dictionary containing the path to the audio, the decoded audio array, and the sampling rate. In non-streaming mode (default), the path points to the locally extracted audio. In streaming mode, the path is the relative path of an audio inside its archive (as files are not downloaded and extracted locally). * `speaker_id` (string) - id of speaker * `gender` (string) - gender of speaker (male or female) * `age` (string) - range of age of the speaker: Younger (15-35), Middle-aged (36-60) or Elderly (61+). * `duration` (float32) - duration of the audio file in seconds. * `normalized_text` (string) - normalized audio segment transcription. ### Data Splits The corpus is split into train, dev, and test portions. Lengths of the portions are: train = 127h25m, test = 1h50m, dev = 1h50m. To load a specific portion, please see the section "Example Usage" above. ## Dataset Creation ### Curation Rationale In the field of Automatic Speech Recognition (ASR) it is a known fact that children's speech is particularly hard to recognise due to its high variability, produced by developmental changes in children's anatomy and speech production skills. For this reason, the selection criteria for the train/dev/test portions have to take into account the children's age. Nevertheless, Samrómur Children is an unbalanced corpus in terms of the gender and age of the speakers. This means that the corpus has, for example, a total of 1667 female speakers (73h38m) versus 1412 male speakers (52h26m). These imbalances constrain the types of experiments that can be performed with the corpus. For example, an equal number of female and male speakers across certain age ranges is impossible. So, if one cannot have a perfectly balanced corpus in the training set, at least one can have it in the test portion. The test portion of the Samrómur Children was meticulously selected to cover ages between 6 and 16 years for both female and male speakers. Each of these age ranges, for both genders, has a total duration of 5 minutes. The development portion of the corpus contains only speakers with unknown gender information. Both test and dev sets have a total duration of 1h50m each. In order to perform fairer experiments, speakers in the train and test sets are not shared. Nevertheless, there is one speaker shared between the train and development sets; it can be identified by the speaker ID 010363. However, no audio files are shared between these two sets. ### Source Data #### Initial Data Collection and Normalization The data was collected using the website https://samromur.is, the code of which is available at https://github.com/cadia-lvl/samromur. The age range selected for this corpus is between 4 and 17 years. The original audio was collected at a 44.1 kHz or 48 kHz sampling rate as *.wav files, which were down-sampled to 16 kHz and converted to *.flac. Each recording contains one read sentence from a script. The script contains 85,080 unique sentences and 90,838 unique tokens. There was no identifier other than the session ID, which is used as the speaker ID.
The corpus is distributed with a metadata file with a detailed information on each utterance and speaker. The madata file is encoded as UTF-8 Unicode. The prompts were gathered from a variety of sources, mainly from The Icelandic Gigaword Corpus, which is available at http://clarin.is/en/resources/gigaword. The corpus includes text from novels, news, plays, and from a list of location names in Iceland. The prompts also came from the [Icelandic Web of Science](https://www.visindavefur.is/). ### Annotations #### Annotation process Prompts were pulled from these corpora if they met the criteria of having only letters which are present in the Icelandic alphabet, and if they are listed in the [DIM: Database Icelandic Morphology](https://aclanthology.org/W19-6116.pdf). There are also synthesised prompts consisting of a name followed by a question or a demand, in order to simulate a dialogue with a smart-device. #### Who are the annotators? The audio files content was manually verified against the prompts by one or more listener (summer students mainly). ### Personal and Sensitive Information The dataset consists of people who have donated their voice. You agree to not attempt to determine the identity of speakers in this dataset. ## Considerations for Using the Data ### Social Impact of Dataset This is the first ASR corpus of Icelandic children. ### Discussion of Biases * The utterances were recorded by a smartphone or the web app. * Participants self-reported their age group, gender, and the native language. * Participants are aged between 4 to 17 years. * The corpus contains 137597 utterances from 3175 speakers, totalling 131 hours. * The amount of data due to female speakers is 73h38m, the amount of data due to male speakers is 52h26m and the amount of data due to speakers with an unknown gender information is 05h02m * The number of female speakers is 1667, the number of male speakers is 1412. The number of speakers with an unknown gender information is 96. * The audios due to female speakers are 78993, the audios due to male speakers are 53927 and the audios due to speakers with an unknown gender information are 4677. ### Other Known Limitations "Samrómur Children: Icelandic Speech 21.09" by the Language and Voice Laboratory (LVL) at the Reykjavik University is licensed under a Creative Commons Attribution 4.0 International (CC BY 4.0) License with the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ## Additional Information ### Dataset Curators The corpus is a result of the crowd-sourcing effort run by the Language and Voice Lab (LVL) at the Reykjavik University, in cooperation with Almannarómur, Center for Language Technology. The recording process has started in October 2019 and continues to this day (Spetember 2021). The corpus was curated by Carlos Daniel Hernández Mena in 2021. 
### Licensing Information [CC-BY-4.0](https://creativecommons.org/licenses/by/4.0/) ### Citation Information ``` @misc{menasamromurchildren2021, title={Samrómur Children Icelandic Speech 1.0}, ldc_catalog_no={LDC2022S11}, DOI={https://doi.org/10.35111/frrj-qd60}, author={Hernández Mena, Carlos Daniel and Borsky, Michal and Mollberg, David Erik and Guðmundsson, Smári Freyr and Hedström, Staffan and Pálsson, Ragnar and Jónsson, Ólafur Helgi and Þorsteinsdóttir, Sunneva and Guðmundsdóttir, Jóhanna Vigdís and Magnúsdóttir, Eydís Huld and Þórhallsdóttir, Ragnheiður and Guðnason, Jón}, publisher={Reykjavík University}, journal={Linguistic Data Consortium, Philadelphia}, year={2021}, url={https://catalog.ldc.upenn.edu/LDC2022S11}, } ``` ### Contributions This project was funded by the Language Technology Programme for Icelandic 2019-2023. The programme, which is managed and coordinated by Almannarómur, is funded by the Icelandic Ministry of Education, Science and Culture. The verification for the dataset was funded by the Icelandic Directorate of Labour's Student Summer Job Program in 2020 and 2021. Special thanks to the summer students for all the hard work.
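As a practical complement to the "Example Usage" and "Data Splits" sections above, here is a minimal sketch of loading one portion of the corpus with the `datasets` library. The Hub id `language-and-voice-lab/samromur_children` is taken from this repository; whether the portion name is passed as a split or as a config name may differ from this sketch, so treat it as illustrative rather than canonical.

```python
from datasets import load_dataset

# Stream the test portion (1h50m) so the full 131 hours of audio
# are not downloaded and extracted locally.
samromur_test = load_dataset(
    "language-and-voice-lab/samromur_children",
    split="test",
    streaming=True,
)

# Inspect the first validated recording and its normalized transcription.
first = next(iter(samromur_test))
print(first["audio_id"], first["speaker_id"], first["gender"], first["age"])
print(first["normalized_text"])
print(first["audio"]["sampling_rate"])  # 16000 Hz, per the corpus description
```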
language-and-voice-lab/samromur_children
[ "task_categories:automatic-speech-recognition", "annotations_creators:crowdsourced", "language_creators:crowdsourced", "multilinguality:monolingual", "size_categories:100K<n<1M", "source_datasets:original", "language:is", "license:cc-by-4.0", "samromur", "children's speech", "icelandic: iceland", "icelandic children", "icelandic kids", "kids", "region:us" ]
2022-11-26T03:15:54+00:00
{"annotations_creators": ["crowdsourced"], "language_creators": ["crowdsourced"], "language": ["is"], "license": ["cc-by-4.0"], "multilinguality": ["monolingual"], "size_categories": ["100K<n<1M"], "source_datasets": ["original"], "task_categories": ["automatic-speech-recognition"], "task_ids": [], "pretty_name": "Samr\u00f3mur Children Icelandic Speech 1.0", "tags": ["samromur", "children's speech", "icelandic: iceland", "icelandic children", "icelandic kids", "kids"]}
2023-10-15T15:02:44+00:00
[]
[ "is" ]
TAGS #task_categories-automatic-speech-recognition #annotations_creators-crowdsourced #language_creators-crowdsourced #multilinguality-monolingual #size_categories-100K<n<1M #source_datasets-original #language-Icelandic #license-cc-by-4.0 #samromur #children's speech #icelandic- iceland #icelandic children #icelandic kids #kids #region-us
# Dataset Card for samromur_children ## Table of Contents - Dataset Description - Dataset Summary - Supported Tasks - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: Samrómur Children Icelandic Speech 1.0 - Repository: LDC - Paper: Samrómur Children: An Icelandic Speech Corpus - Point of Contact: Carlos Mena, Jón Guðnason ### Dataset Summary The Samrómur Children Corpus consists of audio recordings and metadata files containing prompts read by the participants. It contains more than 137000 validated speech-recordings uttered by Icelandic children. The corpus is a result of the crowd-sourcing effort run by the Language and Voice Lab (LVL) at the Reykjavik University, in cooperation with Almannarómur, Center for Language Technology. The recording process has started in October 2019 and continues to this day (Spetember 2021). ### Example Usage The Samrómur Children Corpus is divided in 3 splits: train, validation and test. To load a specific split pass its name as a config name: To load an specific split (for example, the validation split) do: ### Supported Tasks automatic-speech-recognition: The dataset can be used to train a model for Automatic Speech Recognition (ASR). The model is presented with an audio file and asked to transcribe the audio file to written text. The most common evaluation metric is the word error rate (WER). ### Languages The audio is in Icelandic. The reading prompts were gathered from a variety of sources, mainly from the Icelandic Gigaword Corpus. The corpus includes text from novels, news, plays, and from a list of location names in Iceland. The prompts also came from the Icelandic Web of Science. ## Dataset Structure ### Data Instances ### Data Fields * 'audio_id' (string) - id of audio segment * 'audio' (datasets.Audio) - a dictionary containing the path to the audio, the decoded audio array, and the sampling rate. In non-streaming mode (default), the path points to the locally extracted audio. In streaming mode, the path is the relative path of an audio inside its archive (as files are not downloaded and extracted locally). * 'speaker_id' (string) - id of speaker * 'gender' (string) - gender of speaker (male or female) * 'age' (string) - range of age of the speaker: Younger (15-35), Middle-aged (36-60) or Elderly (61+). * 'duration' (float32) - duration of the audio file in seconds. * 'normalized_text' (string) - normalized audio segment transcription. ### Data Splits The corpus is split into train, dev, and test portions. Lenghts of every portion are: train = 127h25m, test = 1h50m, dev=1h50m. To load an specific portion please see the above section "Example Usage". ## Dataset Creation ### Curation Rationale In the field of Automatic Speech Recognition (ASR) is a known fact that the children's speech is particularly hard to recognise due to its high variability produced by developmental changes in children's anatomy and speech production skills. For this reason, the criteria of selection for the train/dev/test portions have to take into account the children's age. Nevertheless, the Samrómur Children is an unbalanced corpus in terms of gender and age of the speakers. 
This means that the corpus has, for example, a total of 1667 female speakers (73h38m) versus 1412 of male speakers (52h26m). These unbalances impose conditions in the type of the experiments than can be performed with the corpus. For example, a equal number of female and male speakers through certain ranges of age is impossible. So, if one can't have a perfectly balance corpus in the training set, at least one can have it in the test portion. The test portion of the Samrómur Children was meticulously selected to cover ages between 6 to 16 years in both female and male speakers. Every of these range of age in both genders have a total duration of 5 minutes each. The development portion of the corpus contains only speakers with an unknown gender information. Both test and dev sets have a total duration of 1h50m each. In order to perform fairer experiments, speakers in the train and test sets are not shared. Nevertheless, there is only one speaker shared between the train and development set. It can be identified with the speaker ID=010363. However, no audio files are shared between these two sets. ### Source Data #### Initial Data Collection and Normalization The data was collected using the website URL, code of which is available at URL The age range selected for this corpus is between 4 and 17 years. The original audio was collected at 44.1 kHz or 48 kHz sampling rate as *.wav files, which was down-sampled to 16 kHz and converted to *.flac. Each recording contains one read sentence from a script. The script contains 85.080 unique sentences and 90.838 unique tokens. There was no identifier other than the session ID, which is used as the speaker ID. The corpus is distributed with a metadata file with a detailed information on each utterance and speaker. The madata file is encoded as UTF-8 Unicode. The prompts were gathered from a variety of sources, mainly from The Icelandic Gigaword Corpus, which is available at URL The corpus includes text from novels, news, plays, and from a list of location names in Iceland. The prompts also came from the Icelandic Web of Science. ### Annotations #### Annotation process Prompts were pulled from these corpora if they met the criteria of having only letters which are present in the Icelandic alphabet, and if they are listed in the DIM: Database Icelandic Morphology. There are also synthesised prompts consisting of a name followed by a question or a demand, in order to simulate a dialogue with a smart-device. #### Who are the annotators? The audio files content was manually verified against the prompts by one or more listener (summer students mainly). ### Personal and Sensitive Information The dataset consists of people who have donated their voice. You agree to not attempt to determine the identity of speakers in this dataset. ## Considerations for Using the Data ### Social Impact of Dataset This is the first ASR corpus of Icelandic children. ### Discussion of Biases * The utterances were recorded by a smartphone or the web app. * Participants self-reported their age group, gender, and the native language. * Participants are aged between 4 to 17 years. * The corpus contains 137597 utterances from 3175 speakers, totalling 131 hours. * The amount of data due to female speakers is 73h38m, the amount of data due to male speakers is 52h26m and the amount of data due to speakers with an unknown gender information is 05h02m * The number of female speakers is 1667, the number of male speakers is 1412. 
The number of speakers with an unknown gender information is 96. * The audios due to female speakers are 78993, the audios due to male speakers are 53927 and the audios due to speakers with an unknown gender information are 4677. ### Other Known Limitations "Samrómur Children: Icelandic Speech 21.09" by the Language and Voice Laboratory (LVL) at the Reykjavik University is licensed under a Creative Commons Attribution 4.0 International (CC BY 4.0) License with the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ## Additional Information ### Dataset Curators The corpus is a result of the crowd-sourcing effort run by the Language and Voice Lab (LVL) at the Reykjavik University, in cooperation with Almannarómur, Center for Language Technology. The recording process has started in October 2019 and continues to this day (Spetember 2021). The corpus was curated by Carlos Daniel Hernández Mena in 2021. ### Licensing Information CC-BY-4.0 ### Contributions This project was funded by the Language Technology Programme for Icelandic 2019-2023. The programme, which is managed and coordinated by Almannarómur, is funded by the Icelandic Ministry of Education, Science and Culture. The verification for the dataset was funded by the the Icelandic Directorate of Labour's Student Summer Job Program in 2020 and 2021. Special thanks for the summer students for all the hard work.
[ "# Dataset Card for samromur_children", "## Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n- Homepage: Samrómur Children Icelandic Speech 1.0\n- Repository: LDC\n- Paper: Samrómur Children: An Icelandic Speech Corpus\n- Point of Contact: Carlos Mena, Jón Guðnason", "### Dataset Summary\n\nThe Samrómur Children Corpus consists of audio recordings and metadata files containing prompts read by the participants. It contains more than 137000 validated speech-recordings uttered by Icelandic children.\n\nThe corpus is a result of the crowd-sourcing effort run by the Language and Voice Lab (LVL) at the Reykjavik University, in cooperation with Almannarómur, Center for Language Technology. The recording process has started in October 2019 and continues to this day (Spetember 2021).", "### Example Usage\nThe Samrómur Children Corpus is divided in 3 splits: train, validation and test. To load a specific split pass its name as a config name:\n\nTo load an specific split (for example, the validation split) do:", "### Supported Tasks\nautomatic-speech-recognition: The dataset can be used to train a model for Automatic Speech Recognition (ASR). The model is presented with an audio file and asked to transcribe the audio file to written text. The most common evaluation metric is the word error rate (WER).", "### Languages\nThe audio is in Icelandic.\nThe reading prompts were gathered from a variety of sources, mainly from the Icelandic Gigaword Corpus. The corpus includes text from novels, news, plays, and from a list of location names in Iceland. The prompts also came from the Icelandic Web of Science.", "## Dataset Structure", "### Data Instances", "### Data Fields\n* 'audio_id' (string) - id of audio segment\n* 'audio' (datasets.Audio) - a dictionary containing the path to the audio, the decoded audio array, and the sampling rate. In non-streaming mode (default), the path points to the locally extracted audio. In streaming mode, the path is the relative path of an audio inside its archive (as files are not downloaded and extracted locally).\n* 'speaker_id' (string) - id of speaker\n* 'gender' (string) - gender of speaker (male or female)\n* 'age' (string) - range of age of the speaker: Younger (15-35), Middle-aged (36-60) or Elderly (61+).\n* 'duration' (float32) - duration of the audio file in seconds.\n* 'normalized_text' (string) - normalized audio segment transcription.", "### Data Splits\nThe corpus is split into train, dev, and test portions. Lenghts of every portion are: train = 127h25m, test = 1h50m, dev=1h50m.\n\nTo load an specific portion please see the above section \"Example Usage\".", "## Dataset Creation", "### Curation Rationale\n\nIn the field of Automatic Speech Recognition (ASR) is a known fact that the children's speech is particularly hard to recognise due to its high variability produced by developmental changes in children's anatomy and speech production skills.\n\nFor this reason, the criteria of selection for the train/dev/test portions have to take into account the children's age. 
Nevertheless, the Samrómur Children is an unbalanced corpus in terms of gender and age of the speakers. This means that the corpus has, for example, a total of 1667 female speakers (73h38m) versus 1412 of male speakers (52h26m).\n\nThese unbalances impose conditions in the type of the experiments than can be performed with the corpus. For example, a equal number of female and male speakers through certain ranges of age is impossible. So, if one can't have a perfectly balance corpus in the training set, at least one can have it in the test portion.\n\nThe test portion of the Samrómur Children was meticulously selected to cover ages between 6 to 16 years in both female and male speakers. Every of these range of age in both genders have a total duration of 5 minutes each.\n\nThe development portion of the corpus contains only speakers with an unknown gender information. Both test and dev sets have a total duration of 1h50m each.\n\nIn order to perform fairer experiments, speakers in the train and test sets are not shared. Nevertheless, there is only one speaker shared between the train and development set. It can be identified with the speaker ID=010363. However, no audio files are shared between these two sets.", "### Source Data", "#### Initial Data Collection and Normalization\n\nThe data was collected using the website URL, code of which is available at URL The age range selected for this corpus is between 4 and 17 years.\n\nThe original audio was collected at 44.1 kHz or 48 kHz sampling rate as *.wav files, which was down-sampled to 16 kHz and converted to *.flac. Each recording contains one read sentence from a script. The script contains 85.080 unique sentences and 90.838 unique tokens. \n\nThere was no identifier other than the session ID, which is used as the speaker ID. The corpus is distributed with a metadata file with a detailed information on each utterance and speaker. The madata file is encoded as UTF-8 Unicode.\n\nThe prompts were gathered from a variety of sources, mainly from The Icelandic Gigaword Corpus, which is available at URL The corpus includes text from novels, news, plays, and from a list of location names in Iceland. The prompts also came from the Icelandic Web of Science.", "### Annotations", "#### Annotation process\n\nPrompts were pulled from these corpora if they met the criteria of having only letters which are present in the Icelandic alphabet, and if they are listed in the DIM: Database Icelandic Morphology. \n\nThere are also synthesised prompts consisting of a name followed by a question or a demand, in order to simulate a dialogue with a smart-device.", "#### Who are the annotators?\nThe audio files content was manually verified against the prompts by one or more listener (summer students mainly).", "### Personal and Sensitive Information\nThe dataset consists of people who have donated their voice. You agree to not attempt to determine the identity of speakers in this dataset.", "## Considerations for Using the Data", "### Social Impact of Dataset\nThis is the first ASR corpus of Icelandic children.", "### Discussion of Biases\n\n* The utterances were recorded by a smartphone or the web app. \n\n* Participants self-reported their age group, gender, and the native language.\n\n* Participants are aged between 4 to 17 years. 
\n \n* The corpus contains 137597 utterances from 3175 speakers, totalling 131 hours.\n\n* The amount of data due to female speakers is 73h38m, the amount of data due to male speakers is 52h26m and the amount of data due to speakers with an unknown gender information is 05h02m\n\n* The number of female speakers is 1667, the number of male speakers is 1412. The number of speakers with an unknown gender information is 96.\n\n* The audios due to female speakers are 78993, the audios due to male speakers are 53927 and the audios due to speakers with an unknown gender information are 4677.", "### Other Known Limitations\n\"Samrómur Children: Icelandic Speech 21.09\" by the Language and Voice Laboratory (LVL) at the Reykjavik University is licensed under a Creative Commons Attribution 4.0 International (CC BY 4.0) License with the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.", "## Additional Information", "### Dataset Curators\n\nThe corpus is a result of the crowd-sourcing effort run by the Language and Voice Lab (LVL) at the Reykjavik University, in cooperation with Almannarómur, Center for Language Technology. The recording process has started in October 2019 and continues to this day (Spetember 2021). The corpus was curated by Carlos Daniel Hernández Mena in 2021.", "### Licensing Information\nCC-BY-4.0", "### Contributions\nThis project was funded by the Language Technology Programme for Icelandic 2019-2023. The programme, which is managed and coordinated by Almannarómur, is funded by the Icelandic Ministry of Education, Science and Culture.\n\nThe verification for the dataset was funded by the the Icelandic Directorate of Labour's Student Summer Job Program in 2020 and 2021.\n\nSpecial thanks for the summer students for all the hard work." ]
[ "TAGS\n#task_categories-automatic-speech-recognition #annotations_creators-crowdsourced #language_creators-crowdsourced #multilinguality-monolingual #size_categories-100K<n<1M #source_datasets-original #language-Icelandic #license-cc-by-4.0 #samromur #children's speech #icelandic- iceland #icelandic children #icelandic kids #kids #region-us \n", "# Dataset Card for samromur_children", "## Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n- Homepage: Samrómur Children Icelandic Speech 1.0\n- Repository: LDC\n- Paper: Samrómur Children: An Icelandic Speech Corpus\n- Point of Contact: Carlos Mena, Jón Guðnason", "### Dataset Summary\n\nThe Samrómur Children Corpus consists of audio recordings and metadata files containing prompts read by the participants. It contains more than 137000 validated speech-recordings uttered by Icelandic children.\n\nThe corpus is a result of the crowd-sourcing effort run by the Language and Voice Lab (LVL) at the Reykjavik University, in cooperation with Almannarómur, Center for Language Technology. The recording process has started in October 2019 and continues to this day (Spetember 2021).", "### Example Usage\nThe Samrómur Children Corpus is divided in 3 splits: train, validation and test. To load a specific split pass its name as a config name:\n\nTo load an specific split (for example, the validation split) do:", "### Supported Tasks\nautomatic-speech-recognition: The dataset can be used to train a model for Automatic Speech Recognition (ASR). The model is presented with an audio file and asked to transcribe the audio file to written text. The most common evaluation metric is the word error rate (WER).", "### Languages\nThe audio is in Icelandic.\nThe reading prompts were gathered from a variety of sources, mainly from the Icelandic Gigaword Corpus. The corpus includes text from novels, news, plays, and from a list of location names in Iceland. The prompts also came from the Icelandic Web of Science.", "## Dataset Structure", "### Data Instances", "### Data Fields\n* 'audio_id' (string) - id of audio segment\n* 'audio' (datasets.Audio) - a dictionary containing the path to the audio, the decoded audio array, and the sampling rate. In non-streaming mode (default), the path points to the locally extracted audio. In streaming mode, the path is the relative path of an audio inside its archive (as files are not downloaded and extracted locally).\n* 'speaker_id' (string) - id of speaker\n* 'gender' (string) - gender of speaker (male or female)\n* 'age' (string) - range of age of the speaker: Younger (15-35), Middle-aged (36-60) or Elderly (61+).\n* 'duration' (float32) - duration of the audio file in seconds.\n* 'normalized_text' (string) - normalized audio segment transcription.", "### Data Splits\nThe corpus is split into train, dev, and test portions. 
Lenghts of every portion are: train = 127h25m, test = 1h50m, dev=1h50m.\n\nTo load an specific portion please see the above section \"Example Usage\".", "## Dataset Creation", "### Curation Rationale\n\nIn the field of Automatic Speech Recognition (ASR) is a known fact that the children's speech is particularly hard to recognise due to its high variability produced by developmental changes in children's anatomy and speech production skills.\n\nFor this reason, the criteria of selection for the train/dev/test portions have to take into account the children's age. Nevertheless, the Samrómur Children is an unbalanced corpus in terms of gender and age of the speakers. This means that the corpus has, for example, a total of 1667 female speakers (73h38m) versus 1412 of male speakers (52h26m).\n\nThese unbalances impose conditions in the type of the experiments than can be performed with the corpus. For example, a equal number of female and male speakers through certain ranges of age is impossible. So, if one can't have a perfectly balance corpus in the training set, at least one can have it in the test portion.\n\nThe test portion of the Samrómur Children was meticulously selected to cover ages between 6 to 16 years in both female and male speakers. Every of these range of age in both genders have a total duration of 5 minutes each.\n\nThe development portion of the corpus contains only speakers with an unknown gender information. Both test and dev sets have a total duration of 1h50m each.\n\nIn order to perform fairer experiments, speakers in the train and test sets are not shared. Nevertheless, there is only one speaker shared between the train and development set. It can be identified with the speaker ID=010363. However, no audio files are shared between these two sets.", "### Source Data", "#### Initial Data Collection and Normalization\n\nThe data was collected using the website URL, code of which is available at URL The age range selected for this corpus is between 4 and 17 years.\n\nThe original audio was collected at 44.1 kHz or 48 kHz sampling rate as *.wav files, which was down-sampled to 16 kHz and converted to *.flac. Each recording contains one read sentence from a script. The script contains 85.080 unique sentences and 90.838 unique tokens. \n\nThere was no identifier other than the session ID, which is used as the speaker ID. The corpus is distributed with a metadata file with a detailed information on each utterance and speaker. The madata file is encoded as UTF-8 Unicode.\n\nThe prompts were gathered from a variety of sources, mainly from The Icelandic Gigaword Corpus, which is available at URL The corpus includes text from novels, news, plays, and from a list of location names in Iceland. The prompts also came from the Icelandic Web of Science.", "### Annotations", "#### Annotation process\n\nPrompts were pulled from these corpora if they met the criteria of having only letters which are present in the Icelandic alphabet, and if they are listed in the DIM: Database Icelandic Morphology. \n\nThere are also synthesised prompts consisting of a name followed by a question or a demand, in order to simulate a dialogue with a smart-device.", "#### Who are the annotators?\nThe audio files content was manually verified against the prompts by one or more listener (summer students mainly).", "### Personal and Sensitive Information\nThe dataset consists of people who have donated their voice. 
You agree to not attempt to determine the identity of speakers in this dataset.", "## Considerations for Using the Data", "### Social Impact of Dataset\nThis is the first ASR corpus of Icelandic children.", "### Discussion of Biases\n\n* The utterances were recorded by a smartphone or the web app. \n\n* Participants self-reported their age group, gender, and the native language.\n\n* Participants are aged between 4 to 17 years. \n \n* The corpus contains 137597 utterances from 3175 speakers, totalling 131 hours.\n\n* The amount of data due to female speakers is 73h38m, the amount of data due to male speakers is 52h26m and the amount of data due to speakers with an unknown gender information is 05h02m\n\n* The number of female speakers is 1667, the number of male speakers is 1412. The number of speakers with an unknown gender information is 96.\n\n* The audios due to female speakers are 78993, the audios due to male speakers are 53927 and the audios due to speakers with an unknown gender information are 4677.", "### Other Known Limitations\n\"Samrómur Children: Icelandic Speech 21.09\" by the Language and Voice Laboratory (LVL) at the Reykjavik University is licensed under a Creative Commons Attribution 4.0 International (CC BY 4.0) License with the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.", "## Additional Information", "### Dataset Curators\n\nThe corpus is a result of the crowd-sourcing effort run by the Language and Voice Lab (LVL) at the Reykjavik University, in cooperation with Almannarómur, Center for Language Technology. The recording process has started in October 2019 and continues to this day (Spetember 2021). The corpus was curated by Carlos Daniel Hernández Mena in 2021.", "### Licensing Information\nCC-BY-4.0", "### Contributions\nThis project was funded by the Language Technology Programme for Icelandic 2019-2023. The programme, which is managed and coordinated by Almannarómur, is funded by the Icelandic Ministry of Education, Science and Culture.\n\nThe verification for the dataset was funded by the the Icelandic Directorate of Labour's Student Summer Job Program in 2020 and 2021.\n\nSpecial thanks for the summer students for all the hard work." ]
60a8b7231ebf8bf13c179ddebfdfe03d3b5e8e13
# Dataset Card for XBMU-AMDO31 ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** https://github.com/sendream/xbmu_amdo31 - **Repository:** https://github.com/sendream/xbmu_amdo31 - **Paper:** - **Leaderboard:** https://github.com/sendream/xbmu_amdo31#leaderboard - **Point of Contact:** [[email protected]](mailto:[email protected]) ### Dataset Summary The XBMU-AMDO31 dataset is a speech recognition corpus of the Amdo Tibetan dialect. The open source corpus contains 31 hours of speech data and resources for building speech recognition systems, including transcribed texts and a Tibetan pronunciation dictionary. ### Supported Tasks and Leaderboards automatic-speech-recognition: The dataset can be used to train a model for Amdo Tibetan Automatic Speech Recognition (ASR). It was recorded by 66 native speakers of Amdo Tibetan, and the recorded audio was processed and manually inspected. The most common evaluation metric is the word error rate (WER). The task has an active leaderboard which can be found at https://github.com/sendream/xbmu_amdo31#leaderboard and ranks models based on their WER. ### Languages XBMU-AMDO31 contains audio, a Tibetan pronunciation dictionary and transcription data in Amdo Tibetan. ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits The dataset has three splits: train, evaluation (dev) and test. Each speaker had approximately 450 sentences, with a small number of individuals having fewer than 200 sentences. | Subset | Hours | Male | Female | Remarks | | ------ | ----- | ---- | ------ | --------------------------------------- | | Train | 25.41 | 27 | 27 | 18539 sentences recorded by 54 speakers | | Dev | 2.81 | 2 | 4 | 2050 sentences recorded by 6 speakers | | Test | 2.85 | 3 | 3 | 2041 sentences recorded by 6 speakers | ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? 
[More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information This dataset is distributed under CC BY-SA 4.0. ### Citation Information [More Information Needed] ### Contributions Thanks to [@speechless-z](https://github.com/speechless-z) for adding this dataset.
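Because systems on the leaderboard above are ranked by word error rate (WER), a small, self-contained sketch of computing WER with the `evaluate` library may be useful. The hypothesis/reference strings below are toy placeholders; in practice they would be the ASR outputs and the transcriptions of the 2041-sentence test split.

```python
import evaluate  # pip install evaluate jiwer

# Word error rate is the metric used on the XBMU-AMDO31 leaderboard.
wer_metric = evaluate.load("wer")

# Toy placeholders standing in for decoded hypotheses and reference transcriptions.
predictions = ["a toy hypothesis", "another decoded utterance"]
references = ["a toy reference", "another decoded utterance"]

score = wer_metric.compute(predictions=predictions, references=references)
print(f"WER: {score:.3f}")  # lower is better
```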
syzym/xbmu_amdo31
[ "task_categories:automatic-speech-recognition", "annotations_creators:expert-generated", "multilinguality:monolingual", "size_categories:10K<n<100K", "language:tib", "license:cc-by-sa-4.0", "region:us" ]
2022-11-26T04:35:03+00:00
{"annotations_creators": ["expert-generated"], "language": ["tib"], "license": ["cc-by-sa-4.0"], "multilinguality": ["monolingual"], "size_categories": ["10K<n<100K"], "task_categories": ["automatic-speech-recognition"], "pretty_name": "XBMU-AMDO31", "language_creatosr": ["expert-generated"]}
2022-11-28T12:02:16+00:00
[]
[ "tib" ]
TAGS #task_categories-automatic-speech-recognition #annotations_creators-expert-generated #multilinguality-monolingual #size_categories-10K<n<100K #language-Tibetan #license-cc-by-sa-4.0 #region-us
Dataset Card for [XBMU-AMDO31] ============================== Table of Contents ----------------- * Table of Contents * Dataset Description + Dataset Summary + Supported Tasks and Leaderboards + Languages * Dataset Structure + Data Instances + Data Fields + Data Splits * Dataset Creation + Curation Rationale + Source Data + Annotations + Personal and Sensitive Information * Considerations for Using the Data + Social Impact of Dataset + Discussion of Biases + Other Known Limitations * Additional Information + Dataset Curators + Licensing Information + Citation Information + Contributions Dataset Description ------------------- * Homepage:URL * Repository:URL * Paper: * Leaderboard:URL * Point of Contact:xxlgy@URL ### Dataset Summary XBMU-AMDO31 dataset is a speech recognition corpus of Amdo Tibetan dialect. The open source corpus contains 31 hours of speech data and resources related to build speech recognition systems, including transcribed texts and a Tibetan pronunciation dictionary. ### Supported Tasks and Leaderboards automatic-speech-recognition: The dataset can be used to train a model for Amdo Tibetan Automatic Speech Recognition (ASR). It was recorded by 66 native speakers of Amdo Tibetan, and the recorded audio was processed and manually inspected. The most common evaluation metric is the word error rate (WER). The task has an active leaderboard which can be found at URL and ranks models based on their WER. ### Languages XBMU-AMDO31 contains audio, a Tibetan pronunciation dictionary and transcription data in Amdo Tibetan. Dataset Structure ----------------- ### Data Instances ### Data Fields ### Data Splits The dataset has three splits: train, evaluation (dev) and test.Each speaker had approximately 450 sentences, with a small number of individuals having fewer than 200 sen Dataset Creation ---------------- ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information Considerations for Using the Data --------------------------------- ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations Additional Information ---------------------- ### Dataset Curators ### Licensing Information This dataset is distributed under CC BY-SA 4.0. ### Contributions Thanks to @speechless-z for adding this dataset.
[ "### Dataset Summary\n\n\nXBMU-AMDO31 dataset is a speech recognition corpus of Amdo Tibetan dialect. The open source corpus contains 31 hours of speech data and resources related to build speech recognition systems, including transcribed texts and a Tibetan pronunciation dictionary.", "### Supported Tasks and Leaderboards\n\n\nautomatic-speech-recognition: The dataset can be used to train a model for Amdo Tibetan Automatic Speech Recognition (ASR). It was recorded by 66 native speakers of Amdo Tibetan, and the recorded audio was processed and manually inspected. The most common evaluation metric is the word error rate (WER). The task has an active leaderboard which can be found at URL and ranks models based on their WER.", "### Languages\n\n\nXBMU-AMDO31 contains audio, a Tibetan pronunciation dictionary and transcription data in Amdo Tibetan.\n\n\nDataset Structure\n-----------------", "### Data Instances", "### Data Fields", "### Data Splits\n\n\nThe dataset has three splits: train, evaluation (dev) and test.Each speaker had approximately 450 sentences, with a small number of individuals having fewer than 200 sen\n\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information\n\n\nThis dataset is distributed under CC BY-SA 4.0.", "### Contributions\n\n\nThanks to @speechless-z for adding this dataset." ]
[ "TAGS\n#task_categories-automatic-speech-recognition #annotations_creators-expert-generated #multilinguality-monolingual #size_categories-10K<n<100K #language-Tibetan #license-cc-by-sa-4.0 #region-us \n", "### Dataset Summary\n\n\nXBMU-AMDO31 dataset is a speech recognition corpus of Amdo Tibetan dialect. The open source corpus contains 31 hours of speech data and resources related to build speech recognition systems, including transcribed texts and a Tibetan pronunciation dictionary.", "### Supported Tasks and Leaderboards\n\n\nautomatic-speech-recognition: The dataset can be used to train a model for Amdo Tibetan Automatic Speech Recognition (ASR). It was recorded by 66 native speakers of Amdo Tibetan, and the recorded audio was processed and manually inspected. The most common evaluation metric is the word error rate (WER). The task has an active leaderboard which can be found at URL and ranks models based on their WER.", "### Languages\n\n\nXBMU-AMDO31 contains audio, a Tibetan pronunciation dictionary and transcription data in Amdo Tibetan.\n\n\nDataset Structure\n-----------------", "### Data Instances", "### Data Fields", "### Data Splits\n\n\nThe dataset has three splits: train, evaluation (dev) and test.Each speaker had approximately 450 sentences, with a small number of individuals having fewer than 200 sen\n\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information\n\n\nThis dataset is distributed under CC BY-SA 4.0.", "### Contributions\n\n\nThanks to @speechless-z for adding this dataset." ]
77b4260b39a3845b953b92e9718473bbc032a263
# Dataset Card for "stackoverflow-unified-text-open-status-classification" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
reubenjohn/stackoverflow-unified-text-open-status-classification
[ "region:us" ]
2022-11-26T05:01:19+00:00
{"dataset_info": {"features": [{"name": "PostId", "dtype": "int64"}, {"name": "PostCreationDate", "dtype": "string"}, {"name": "OwnerUserId", "dtype": "int64"}, {"name": "OwnerCreationDate", "dtype": "string"}, {"name": "ReputationAtPostCreation", "dtype": "int64"}, {"name": "OwnerUndeletedAnswerCountAtPostTime", "dtype": "int64"}, {"name": "Title", "dtype": "string"}, {"name": "BodyMarkdown", "dtype": "string"}, {"name": "Tag1", "dtype": "string"}, {"name": "Tag2", "dtype": "string"}, {"name": "Tag3", "dtype": "string"}, {"name": "Tag4", "dtype": "string"}, {"name": "Tag5", "dtype": "string"}, {"name": "PostClosedDate", "dtype": "string"}, {"name": "OpenStatus", "dtype": "string"}, {"name": "unified_texts", "dtype": "string"}, {"name": "OpenStatus_id", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 7201490555, "num_examples": 3336822}, {"name": "valid", "num_bytes": 67095345, "num_examples": 26964}, {"name": "test", "num_bytes": 17104151, "num_examples": 6742}], "download_size": 3883040160, "dataset_size": 7285690051}}
2022-11-26T05:53:19+00:00
[]
[]
TAGS #region-us
# Dataset Card for "stackoverflow-unified-text-open-status-classification" More Information needed
[ "# Dataset Card for \"stackoverflow-unified-text-open-status-classification\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"stackoverflow-unified-text-open-status-classification\"\n\nMore Information needed" ]
cc5b516792181f391699da31895091d21175c09a
# Dataset Card for "vocal_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
plphuc017/vocal_dataset
[ "region:us" ]
2022-11-26T05:41:13+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 679382539.464, "num_examples": 1057}, {"name": "test", "num_bytes": 167054773.0, "num_examples": 264}], "download_size": 832476390, "dataset_size": 846437312.464}}
2022-11-26T05:41:49+00:00
[]
[]
TAGS #region-us
# Dataset Card for "vocal_dataset" More Information needed
[ "# Dataset Card for \"vocal_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"vocal_dataset\"\n\nMore Information needed" ]
f215ff884df9be70a68fb6bca52a1775fef7e8f6
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: Artifact-AI/led_large_16384_billsum_summarization * Dataset: billsum * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@Artifact-AI](https://huggingface.co/Artifact-AI) for evaluating this model.
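For readers who want to approximate this evaluation locally, the following is a rough sketch using `transformers` and `evaluate` on a small sample of the billsum test split. The sample size and generation settings are assumptions for illustration and do not reproduce the exact AutoTrain job.

```python
from datasets import load_dataset
from transformers import pipeline
import evaluate

# A small sample of the billsum test split used in this evaluation job.
billsum_test = load_dataset("billsum", split="test[:8]")

summarizer = pipeline(
    "summarization",
    model="Artifact-AI/led_large_16384_billsum_summarization",
)

# Truncation keeps inputs within the model's maximum sequence length.
predictions = [
    out["summary_text"]
    for out in summarizer(billsum_test["text"], truncation=True)
]

rouge = evaluate.load("rouge")
print(rouge.compute(predictions=predictions, references=billsum_test["summary"]))
```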
autoevaluate/autoeval-eval-billsum-default-e7f679-2243071585
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T06:24:56+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["billsum"], "eval_info": {"task": "summarization", "model": "Artifact-AI/led_large_16384_billsum_summarization", "metrics": [], "dataset_name": "billsum", "dataset_config": "default", "dataset_split": "test", "col_mapping": {"text": "text", "target": "summary"}}}
2022-11-26T13:53:22+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: Artifact-AI/led_large_16384_billsum_summarization * Dataset: billsum * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @Artifact-AI for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: Artifact-AI/led_large_16384_billsum_summarization\n* Dataset: billsum\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @Artifact-AI for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: Artifact-AI/led_large_16384_billsum_summarization\n* Dataset: billsum\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @Artifact-AI for evaluating this model." ]
ab460b557fa4f950c19f96d8a32dc0a6a3386a1e
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: Artifact-AI/led_large_16384_billsum_summarization * Dataset: billsum * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@Artifact-AI](https://huggingface.co/Artifact-AI) for evaluating this model.
autoevaluate/autoeval-eval-billsum-default-7dc88f-2243171586
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T06:25:01+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["billsum"], "eval_info": {"task": "summarization", "model": "Artifact-AI/led_large_16384_billsum_summarization", "metrics": [], "dataset_name": "billsum", "dataset_config": "default", "dataset_split": "test", "col_mapping": {"text": "text", "target": "summary"}}}
2022-11-26T13:56:39+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: Artifact-AI/led_large_16384_billsum_summarization * Dataset: billsum * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @Artifact-AI for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: Artifact-AI/led_large_16384_billsum_summarization\n* Dataset: billsum\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @Artifact-AI for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: Artifact-AI/led_large_16384_billsum_summarization\n* Dataset: billsum\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @Artifact-AI for evaluating this model." ]
f0bb9f037cd0f88fb14af20e9f1dd25cb27312e6
# Dataset Card for "stackoverflow-open-status-classification-albert-tokenized" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
reubenjohn/stackoverflow-open-status-classification-albert-tokenized
[ "region:us" ]
2022-11-26T06:41:43+00:00
{"dataset_info": {"features": [{"name": "unified_texts", "dtype": "string"}, {"name": "OpenStatus_id", "dtype": "int64"}, {"name": "input_ids", "sequence": "int32"}, {"name": "token_type_ids", "sequence": "int8"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 13686390757, "num_examples": 3336822}, {"name": "valid", "num_bytes": 115029917, "num_examples": 26964}, {"name": "test", "num_bytes": 28926524, "num_examples": 6742}], "download_size": 3218793793, "dataset_size": 13830347198}}
2022-11-26T06:45:54+00:00
[]
[]
TAGS #region-us
# Dataset Card for "stackoverflow-open-status-classification-albert-tokenized" More Information needed
[ "# Dataset Card for \"stackoverflow-open-status-classification-albert-tokenized\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"stackoverflow-open-status-classification-albert-tokenized\"\n\nMore Information needed" ]
545992e81576e69fcea1e00b4417ee7c3b05260b
# Dataset Card for "step_proofs" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
shreyasharma/step_proofs
[ "region:us" ]
2022-11-26T08:07:49+00:00
{"dataset_info": {"features": [{"name": "sentences", "sequence": "string"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 2580049, "num_examples": 3939}, {"name": "dev", "num_bytes": 377139, "num_examples": 561}, {"name": "test", "num_bytes": 683199, "num_examples": 1020}], "download_size": 1138168, "dataset_size": 3640387}}
2022-11-26T08:07:59+00:00
[]
[]
TAGS #region-us
# Dataset Card for "step_proofs" More Information needed
[ "# Dataset Card for \"step_proofs\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"step_proofs\"\n\nMore Information needed" ]
821ed9871bb305cbda4173f991dd9e411c44ec40
# Dataset Card for "libri-augmented-test-prepared" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
DTU54DL/libri-augmented-test-prepared
[ "region:us" ]
2022-11-26T08:12:50+00:00
{"dataset_info": {"features": [{"name": "input_features", "sequence": {"sequence": "float32"}}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 2517004616, "num_examples": 2620}], "download_size": 712936594, "dataset_size": 2517004616}}
2022-11-26T08:16:05+00:00
[]
[]
TAGS #region-us
# Dataset Card for "libri-augmented-test-prepared" More Information needed
[ "# Dataset Card for \"libri-augmented-test-prepared\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"libri-augmented-test-prepared\"\n\nMore Information needed" ]
e6bec3c030e74da7c8c1b517d0e5bca72c5e11c3
# Dataset Card for "step_proofs2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
shreyasharma/step_proofs2
[ "region:us" ]
2022-11-26T08:58:10+00:00
{"dataset_info": {"features": [{"name": "sentences", "dtype": "string"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 2614964, "num_examples": 12525}, {"name": "dev", "num_bytes": 382036, "num_examples": 1791}, {"name": "test", "num_bytes": 692593, "num_examples": 3327}], "download_size": 1160241, "dataset_size": 3689593}}
2022-11-26T08:58:26+00:00
[]
[]
TAGS #region-us
# Dataset Card for "step_proofs2" More Information needed
[ "# Dataset Card for \"step_proofs2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"step_proofs2\"\n\nMore Information needed" ]
8f8f2ab494b285a3ac070d456c76192c82ed4347
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: arogyaGurkha/kobert-finetuned-squad_kor_v1 * Dataset: squad_kor_v1 * Config: squad_kor_v1 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@Kdogs](https://huggingface.co/Kdogs) for evaluating this model.
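A rough, illustrative sketch of running the same kind of extractive QA evaluation locally with the `question-answering` pipeline and the SQuAD metric is given below; the sample size is arbitrary and the setup does not reproduce the exact AutoTrain job.

```python
from datasets import load_dataset
from transformers import pipeline
import evaluate

# A few validation examples from the Korean SQuAD v1.0 dataset used in this job.
korquad_val = load_dataset("squad_kor_v1", split="validation[:20]")

# Depending on the model, a matching tokenizer may need to be passed explicitly.
qa = pipeline(
    "question-answering",
    model="arogyaGurkha/kobert-finetuned-squad_kor_v1",
)

squad_metric = evaluate.load("squad")

predictions, references = [], []
for ex in korquad_val:
    answer = qa(question=ex["question"], context=ex["context"])
    predictions.append({"id": ex["id"], "prediction_text": answer["answer"]})
    references.append({"id": ex["id"], "answers": ex["answers"]})

# Reports exact match and F1, the standard metrics for extractive QA.
print(squad_metric.compute(predictions=predictions, references=references))
```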
autoevaluate/autoeval-eval-squad_kor_v1-squad_kor_v1-7a81b4-2244371597
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T10:49:35+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["squad_kor_v1"], "eval_info": {"task": "extractive_question_answering", "model": "arogyaGurkha/kobert-finetuned-squad_kor_v1", "metrics": [], "dataset_name": "squad_kor_v1", "dataset_config": "squad_kor_v1", "dataset_split": "validation", "col_mapping": {"context": "context", "question": "question", "answers-text": "answers.text", "answers-answer_start": "answers.answer_start"}}}
2022-11-26T10:51:20+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Question Answering * Model: arogyaGurkha/kobert-finetuned-squad_kor_v1 * Dataset: squad_kor_v1 * Config: squad_kor_v1 * Split: validation To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @Kdogs for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Question Answering\n* Model: arogyaGurkha/kobert-finetuned-squad_kor_v1\n* Dataset: squad_kor_v1\n* Config: squad_kor_v1\n* Split: validation\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @Kdogs for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Question Answering\n* Model: arogyaGurkha/kobert-finetuned-squad_kor_v1\n* Dataset: squad_kor_v1\n* Config: squad_kor_v1\n* Split: validation\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @Kdogs for evaluating this model." ]
23a2ba6cc16b6cddc6de7951468244b068220840
# Dataset Card for Collocations Dictionary of Modern Slovene KSSS 1.0 Also known as "Kolokacije 1.0". Available in application form online: https://viri.cjvt.si/kolokacije/eng/. ### Dataset Summary The database of the Collocations Dictionary of Modern Slovene 1.0 contains entries for 35,862 headwords (18,043 nouns, 5,148 verbs, 10,259 adjectives and 2,412 adverbs) and 7,310,983 collocations that were automatically extracted from the Gigafida 1.0 corpus. For the automatic extraction via the Sketch Engine API, the authors used a specially adapted Sketch grammar for Slovene, and, based on manual evaluation, a set of parameters that determined: maximum number of collocates per grammatical relation, minimum frequency of a collocate, minimum frequency of a grammatical relation, minimum salience (logDice) score of a collocate, and minimum salience of a grammatical relation. The procedure of automatic extraction, which produced a list of collocates (lemmas) in a particular relation, was followed by a set of post-processing steps: - removal of collocations that were represented by repetitions of the same sentence - preparation of full collocations by the addition of the headword, and, if needed, the third element in the grammatical relation (such as a preposition). The headwords/collocates were also put in the correct case, depending on the grammatical relation. - addition of IDs from the Slovenian morphological lexicon [Sloleks](http://hdl.handle.net/11356/1230) to every element in the collocation. For a detailed description of the data, please see the paper Kosem et al. (2018). ### Supported Tasks and Leaderboards Other (the data is a knowledge base). ### Languages Slovenian. ## Dataset Structure ### Data Instances The structure of the original data is flattened, meaning that each collocation is its own instance. The following example shows the entry for the collocation `"idealizirati preteklost"` (*to idealize the past*), which is a collocation of the lexical unit `"idealizirati"` (*to idealize*). ``` { 'collocation': 'idealizirati preteklost', 'cluster': 1, 'words': ['idealizirati', 'preteklost'], 'sloleks_ids': ['LE_08e2de61d9f23f949a21f37639afdff2', 'LE_92b3b802fe9baeff25bdd6deafde10ca'], 'gramrel': 'GBZ sbz4', 'sense': 0, 'id_lex_unit': '1372', 'lex_unit': 'idealizirati', 'lex_unit_category': 'verb' } ``` ### Data Fields - `collocation`: the string form of the collocation; - `cluster`: cluster of the collocation - sometimes, but not always, corresponds to the sense; - `words`: tokenized collocation; - `sloleks_ids`: [Sloleks](http://hdl.handle.net/11356/1230) IDs of collocation words; - `gramrel`: grammatical relation; - `sense`: sense of the collocation - currently constant (see `cluster` for a slightly better approximate division); - `id_lex_unit`: ID of the lexical unit that the collocation belongs to; - `lex_unit`: lexical unit; - `lex_unit_category`: category of the lexical unit. ## Additional Information ### Dataset Curators Iztok Kosem; et al. (please see http://hdl.handle.net/11356/1250 for the full list). 
### Licensing Information CC BY-SA 4.0 ### Citation Information ``` @inproceedings{kosem2018collocations, title={Collocations dictionary of modern Slovene}, author={Kosem, Iztok and Krek, Simon and Gantar, Polona and Arhar Holdt, {\v{S}}pela and {\v{C}}ibej, Jaka and Laskowski, Cyprian}, booktitle={Proceedings of the XVIII EURALEX International Congress: Lexicography in Global Contexts}, pages={989--997}, year={2018}, organization={Znanstvena zalo{\v{z}}ba Filozofske fakultete Univerze v Ljubljani} } ``` ### Contributions Thanks to [@matejklemen](https://github.com/matejklemen) for adding this dataset.
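A minimal usage sketch for the card above (an illustration, not part of the official documentation): it assumes the data is loadable directly with `datasets.load_dataset` under the repository id shown below (`cjvt/slo_collocations`) and that it is exposed as a single `train` split; it uses only the fields documented in the Data Fields section.

```python
from datasets import load_dataset

# Assumption: a single flattened "train" split, one collocation per row.
collocations = load_dataset("cjvt/slo_collocations", split="train")

# Gather all collocations of the verb "idealizirati" from the example above.
subset = collocations.filter(lambda row: row["lex_unit"] == "idealizirati")

# Print a few of them together with their grammatical relation and cluster.
for row in subset.select(range(min(5, len(subset)))):
    print(row["collocation"], "|", row["gramrel"], "| cluster", row["cluster"])
```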
cjvt/slo_collocations
[ "task_categories:other", "annotations_creators:expert-generated", "annotations_creators:machine-generated", "language_creators:found", "language_creators:machine-generated", "multilinguality:monolingual", "size_categories:1M<n<10M", "language:sl", "license:cc-by-sa-4.0", "kolokacije", "gigafida", "region:us" ]
2022-11-26T11:44:47+00:00
{"annotations_creators": ["expert-generated", "machine-generated"], "language_creators": ["found", "machine-generated"], "language": ["sl"], "license": ["cc-by-sa-4.0"], "multilinguality": ["monolingual"], "size_categories": ["1M<n<10M"], "source_datasets": [], "task_categories": ["other"], "task_ids": [], "pretty_name": "Collocations Dictionary of Modern Slovene 1.0", "tags": ["kolokacije", "gigafida"]}
2022-11-27T11:09:16+00:00
[]
[ "sl" ]
TAGS #task_categories-other #annotations_creators-expert-generated #annotations_creators-machine-generated #language_creators-found #language_creators-machine-generated #multilinguality-monolingual #size_categories-1M<n<10M #language-Slovenian #license-cc-by-sa-4.0 #kolokacije #gigafida #region-us
# Dataset Card for Collocations Dictionary of Modern Slovene KSSS 1.0 Also known as "Kolokacije 1.0". Available in application form online: URL ### Dataset Summary The database of the Collocations Dictionary of Modern Slovene 1.0 contains entries for 35,862 headwords (18,043 nouns, 5,148 verbs, 10,259 adjectives and 2,412 adverbs) and 7,310,983 collocations that were automatically extracted from the Gigafida 1.0 corpus. For the automatic extraction via the Sketch Engine API the authors used a specially adapted Sketch grammar for Slovene, and, based on manual evaluation, a set of parameters that determined: maximum number of collocates per grammatical relation, minimum frequency of a collocate, minimum frequency of a grammatical relation, minimum salience (logDice) score of a collocate, and minimum salience of a grammatical relation. The procedure of automatic extraction, which produced a list of collocates (lemmas) in a particular relation, was followed by a set of post-processing steps: - removal of collocations that were represented by repetitions of the same sentence - preparation of full collocations by the addition of the headword, and, if needed, the third element in the grammatical relation (such as preposition). The headwords/collocates were also put in the correct case, depending on the grammatical relation. - addition of IDs from the Slovenian morphological lexicon Sloleks to every element in the collocation. For a detailed description of the data, please see the paper Kosem et al. (2018). ### Supported Tasks and Leaderboards Other (the data is a knowledge base). ### Languages Slovenian. ## Dataset Structure ### Data Instances The structure of the original data is flattened, meaning that each collocation is its own instance. The following example shows the entry for collocation '"idealizirati preteklost"' (*to idealize the past*), which is a collocation of the lexical unit '"idealizirati"' (*to idealize*). ### Data Fields - 'collocation': the string form of the collocation; - 'cluster': cluster of the collocation - sometimes, but not always, corresponds to the sense; - 'words': tokenized collocation; - 'sloleks_ids': Sloleks IDs of collocation words; - 'gramrel': grammatical relation; - 'sense': sense of the collocation - curently constant (see 'cluster' for a slightly better approximate division); - 'id_lex_unit': ID of the lexical unit that the collocation belongs to; - 'lex_unit': lexical unit; - 'lex_unit_category': category of the lexical unit. ## Additional Information ### Dataset Curators Iztok Kosem; et al. (please see URL for the full list). ### Licensing Information CC BY-SA 4.0 ### Contributions Thanks to @matejklemen for adding this dataset.
[ "# Dataset Card for Collocations Dictionary of Modern Slovene KSSS 1.0\n\nAlso known as \"Kolokacije 1.0\". Available in application form online: URL", "### Dataset Summary\n\nThe database of the Collocations Dictionary of Modern Slovene 1.0 contains entries for 35,862 headwords (18,043 nouns, 5,148 verbs, 10,259 adjectives and 2,412 adverbs) and 7,310,983 collocations that were automatically extracted from the Gigafida 1.0 corpus. \nFor the automatic extraction via the Sketch Engine API the authors used a specially adapted Sketch grammar for Slovene, and, based on manual evaluation, a set of parameters that determined: maximum number of collocates per grammatical relation, \nminimum frequency of a collocate, minimum frequency of a grammatical relation, minimum salience (logDice) score of a collocate, and minimum salience of a grammatical relation. \n \nThe procedure of automatic extraction, which produced a list of collocates (lemmas) in a particular relation, was followed by a set of post-processing steps:\n- removal of collocations that were represented by repetitions of the same sentence\n- preparation of full collocations by the addition of the headword, and, if needed, the third element in the grammatical relation (such as preposition). The headwords/collocates were also put in the correct case, depending on the grammatical relation.\n- addition of IDs from the Slovenian morphological lexicon Sloleks to every element in the collocation.\n\nFor a detailed description of the data, please see the paper Kosem et al. (2018).", "### Supported Tasks and Leaderboards\n\nOther (the data is a knowledge base).", "### Languages\n\nSlovenian.", "## Dataset Structure", "### Data Instances\n\nThe structure of the original data is flattened, meaning that each collocation is its own instance.\nThe following example shows the entry for collocation '\"idealizirati preteklost\"' (*to idealize the past*), which is a collocation of the lexical unit '\"idealizirati\"' (*to idealize*).", "### Data Fields\n\n- 'collocation': the string form of the collocation;\n- 'cluster': cluster of the collocation - sometimes, but not always, corresponds to the sense;\n- 'words': tokenized collocation;\n- 'sloleks_ids': Sloleks IDs of collocation words;\n- 'gramrel': grammatical relation; \n- 'sense': sense of the collocation - curently constant (see 'cluster' for a slightly better approximate division);\n- 'id_lex_unit': ID of the lexical unit that the collocation belongs to;\n- 'lex_unit': lexical unit;\n- 'lex_unit_category': category of the lexical unit.", "## Additional Information", "### Dataset Curators\n\nIztok Kosem; et al. (please see URL for the full list).", "### Licensing Information\n\nCC BY-SA 4.0", "### Contributions\n\nThanks to @matejklemen for adding this dataset." ]
[ "TAGS\n#task_categories-other #annotations_creators-expert-generated #annotations_creators-machine-generated #language_creators-found #language_creators-machine-generated #multilinguality-monolingual #size_categories-1M<n<10M #language-Slovenian #license-cc-by-sa-4.0 #kolokacije #gigafida #region-us \n", "# Dataset Card for Collocations Dictionary of Modern Slovene KSSS 1.0\n\nAlso known as \"Kolokacije 1.0\". Available in application form online: URL", "### Dataset Summary\n\nThe database of the Collocations Dictionary of Modern Slovene 1.0 contains entries for 35,862 headwords (18,043 nouns, 5,148 verbs, 10,259 adjectives and 2,412 adverbs) and 7,310,983 collocations that were automatically extracted from the Gigafida 1.0 corpus. \nFor the automatic extraction via the Sketch Engine API the authors used a specially adapted Sketch grammar for Slovene, and, based on manual evaluation, a set of parameters that determined: maximum number of collocates per grammatical relation, \nminimum frequency of a collocate, minimum frequency of a grammatical relation, minimum salience (logDice) score of a collocate, and minimum salience of a grammatical relation. \n \nThe procedure of automatic extraction, which produced a list of collocates (lemmas) in a particular relation, was followed by a set of post-processing steps:\n- removal of collocations that were represented by repetitions of the same sentence\n- preparation of full collocations by the addition of the headword, and, if needed, the third element in the grammatical relation (such as preposition). The headwords/collocates were also put in the correct case, depending on the grammatical relation.\n- addition of IDs from the Slovenian morphological lexicon Sloleks to every element in the collocation.\n\nFor a detailed description of the data, please see the paper Kosem et al. (2018).", "### Supported Tasks and Leaderboards\n\nOther (the data is a knowledge base).", "### Languages\n\nSlovenian.", "## Dataset Structure", "### Data Instances\n\nThe structure of the original data is flattened, meaning that each collocation is its own instance.\nThe following example shows the entry for collocation '\"idealizirati preteklost\"' (*to idealize the past*), which is a collocation of the lexical unit '\"idealizirati\"' (*to idealize*).", "### Data Fields\n\n- 'collocation': the string form of the collocation;\n- 'cluster': cluster of the collocation - sometimes, but not always, corresponds to the sense;\n- 'words': tokenized collocation;\n- 'sloleks_ids': Sloleks IDs of collocation words;\n- 'gramrel': grammatical relation; \n- 'sense': sense of the collocation - curently constant (see 'cluster' for a slightly better approximate division);\n- 'id_lex_unit': ID of the lexical unit that the collocation belongs to;\n- 'lex_unit': lexical unit;\n- 'lex_unit_category': category of the lexical unit.", "## Additional Information", "### Dataset Curators\n\nIztok Kosem; et al. (please see URL for the full list).", "### Licensing Information\n\nCC BY-SA 4.0", "### Contributions\n\nThanks to @matejklemen for adding this dataset." ]
f6dcb49e183fe6e164c646cd62cb6d3aac0a1c88
# Dataset Card for "masked_step_label" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
shreyasharma/masked_step_label
[ "region:us" ]
2022-11-26T12:17:14+00:00
{"dataset_info": {"features": [{"name": "step", "dtype": "string"}, {"name": "label", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4985662, "num_examples": 23330}, {"name": "test", "num_bytes": 1319178, "num_examples": 6216}, {"name": "dev", "num_bytes": 739704, "num_examples": 3403}], "download_size": 1123248, "dataset_size": 7044544}}
2022-11-26T13:03:34+00:00
[]
[]
TAGS #region-us
# Dataset Card for "masked_step_label" More Information needed
[ "# Dataset Card for \"masked_step_label\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"masked_step_label\"\n\nMore Information needed" ]
ced92b9d6522b3555e7744e6d2acd332367070c6
# Dataset Card for "masked_step_label2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
shreyasharma/masked_step_label2
[ "region:us" ]
2022-11-26T13:03:49+00:00
{"dataset_info": {"features": [{"name": "step", "dtype": "string"}, {"name": "label", "dtype": "string"}, {"name": "transformed_sentence", "dtype": "string"}, {"name": "token_strs", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2636233, "num_examples": 6216}], "download_size": 432737, "dataset_size": 2636233}}
2022-11-26T13:04:00+00:00
[]
[]
TAGS #region-us
# Dataset Card for "masked_step_label2" More Information needed
[ "# Dataset Card for \"masked_step_label2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"masked_step_label2\"\n\nMore Information needed" ]
dd48e953270ec70400aa49d1ebb398a63190c800
# Dataset Card for "gal_yair_8300_256x256_fixed" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
galman33/gal_yair_8300_256x256_fixed
[ "region:us" ]
2022-11-26T13:18:03+00:00
{"dataset_info": {"features": [{"name": "lat", "dtype": "float64"}, {"name": "lon", "dtype": "float64"}, {"name": "country_code", "dtype": {"class_label": {"names": {"0": "ad", "1": "ae", "2": "al", "3": "aq", "4": "ar", "5": "au", "6": "bd", "7": "be", "8": "bg", "9": "bm", "10": "bo", "11": "br", "12": "bt", "13": "bw", "14": "ca", "15": "ch", "16": "cl", "17": "co", "18": "cz", "19": "de", "20": "dk", "21": "ec", "22": "ee", "23": "es", "24": "fi", "25": "fr", "26": "gb", "27": "gh", "28": "gl", "29": "gr", "30": "gt", "31": "hk", "32": "hr", "33": "hu", "34": "id", "35": "ie", "36": "il", "37": "is", "38": "it", "39": "ix", "40": "jp", "41": "kg", "42": "kh", "43": "kr", "44": "la", "45": "lk", "46": "ls", "47": "lt", "48": "lu", "49": "lv", "50": "me", "51": "mg", "52": "mk", "53": "mn", "54": "mo", "55": "mt", "56": "mx", "57": "my", "58": "nl", "59": "no", "60": "nz", "61": "pe", "62": "ph", "63": "pl", "64": "pt", "65": "ro", "66": "rs", "67": "ru", "68": "se", "69": "sg", "70": "si", "71": "sk", "72": "sn", "73": "sz", "74": "th", "75": "tn", "76": "tr", "77": "tw", "78": "ua", "79": "ug", "80": "us", "81": "uy", "82": "za"}}}}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 805028017.5, "num_examples": 8300}], "download_size": 804437967, "dataset_size": 805028017.5}}
2022-11-26T13:18:25+00:00
[]
[]
TAGS #region-us
# Dataset Card for "gal_yair_8300_256x256_fixed" More Information needed
[ "# Dataset Card for \"gal_yair_8300_256x256_fixed\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"gal_yair_8300_256x256_fixed\"\n\nMore Information needed" ]
6b179a916468d7eee0d320007781ae92b3580eb8
# Dataset Card for "gal_yair_83000_100x100_fixed" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
galman33/gal_yair_83000_100x100_fixed
[ "region:us" ]
2022-11-26T13:21:03+00:00
{"dataset_info": {"features": [{"name": "lat", "dtype": "float64"}, {"name": "lon", "dtype": "float64"}, {"name": "country_code", "dtype": {"class_label": {"names": {"0": "ad", "1": "ae", "2": "al", "3": "aq", "4": "ar", "5": "au", "6": "bd", "7": "be", "8": "bg", "9": "bm", "10": "bo", "11": "br", "12": "bt", "13": "bw", "14": "ca", "15": "ch", "16": "cl", "17": "co", "18": "cz", "19": "de", "20": "dk", "21": "ec", "22": "ee", "23": "es", "24": "fi", "25": "fr", "26": "gb", "27": "gh", "28": "gl", "29": "gr", "30": "gt", "31": "hk", "32": "hr", "33": "hu", "34": "id", "35": "ie", "36": "il", "37": "is", "38": "it", "39": "ix", "40": "jp", "41": "kg", "42": "kh", "43": "kr", "44": "la", "45": "lk", "46": "ls", "47": "lt", "48": "lu", "49": "lv", "50": "me", "51": "mg", "52": "mk", "53": "mn", "54": "mo", "55": "mt", "56": "mx", "57": "my", "58": "nl", "59": "no", "60": "nz", "61": "pe", "62": "ph", "63": "pl", "64": "pt", "65": "ro", "66": "rs", "67": "ru", "68": "se", "69": "sg", "70": "si", "71": "sk", "72": "sn", "73": "sz", "74": "th", "75": "tn", "76": "tr", "77": "tw", "78": "ua", "79": "ug", "80": "us", "81": "uy", "82": "za"}}}}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 1423392222.0, "num_examples": 83000}], "download_size": 1416409951, "dataset_size": 1423392222.0}}
2022-11-26T13:21:40+00:00
[]
[]
TAGS #region-us
# Dataset Card for "gal_yair_83000_100x100_fixed" More Information needed
[ "# Dataset Card for \"gal_yair_83000_100x100_fixed\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"gal_yair_83000_100x100_fixed\"\n\nMore Information needed" ]
f62e0962b88b1568594351815a9ff17799d238ca
# Dataset Card for "gal_yair_8300_1664x832_fixed" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
galman33/gal_yair_8300_1664x832_fixed
[ "region:us" ]
2022-11-26T13:23:47+00:00
{"dataset_info": {"features": [{"name": "lat", "dtype": "float64"}, {"name": "lon", "dtype": "float64"}, {"name": "country_code", "dtype": {"class_label": {"names": {"0": "ad", "1": "ae", "2": "al", "3": "aq", "4": "ar", "5": "au", "6": "bd", "7": "be", "8": "bg", "9": "bm", "10": "bo", "11": "br", "12": "bt", "13": "bw", "14": "ca", "15": "ch", "16": "cl", "17": "co", "18": "cz", "19": "de", "20": "dk", "21": "ec", "22": "ee", "23": "es", "24": "fi", "25": "fr", "26": "gb", "27": "gh", "28": "gl", "29": "gr", "30": "gt", "31": "hk", "32": "hr", "33": "hu", "34": "id", "35": "ie", "36": "il", "37": "is", "38": "it", "39": "ix", "40": "jp", "41": "kg", "42": "kh", "43": "kr", "44": "la", "45": "lk", "46": "ls", "47": "lt", "48": "lu", "49": "lv", "50": "me", "51": "mg", "52": "mk", "53": "mn", "54": "mo", "55": "mt", "56": "mx", "57": "my", "58": "nl", "59": "no", "60": "nz", "61": "pe", "62": "ph", "63": "pl", "64": "pt", "65": "ro", "66": "rs", "67": "ru", "68": "se", "69": "sg", "70": "si", "71": "sk", "72": "sn", "73": "sz", "74": "th", "75": "tn", "76": "tr", "77": "tw", "78": "ua", "79": "ug", "80": "us", "81": "uy", "82": "za"}}}}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 1412450526.5, "num_examples": 8300}], "download_size": 1411162723, "dataset_size": 1412450526.5}}
2022-11-26T13:24:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "gal_yair_8300_1664x832_fixed" More Information needed
[ "# Dataset Card for \"gal_yair_8300_1664x832_fixed\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"gal_yair_8300_1664x832_fixed\"\n\nMore Information needed" ]
8dd3c0236ee1fc5e97610c67d4ecee8e988e24d0
# Dataset Card for "gal_yair_83000_256x256_fixed" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
galman33/gal_yair_83000_256x256_fixed
[ "region:us" ]
2022-11-26T13:33:24+00:00
{"dataset_info": {"features": [{"name": "lat", "dtype": "float64"}, {"name": "lon", "dtype": "float64"}, {"name": "country_code", "dtype": {"class_label": {"names": {"0": "ad", "1": "ae", "2": "al", "3": "aq", "4": "ar", "5": "au", "6": "bd", "7": "be", "8": "bg", "9": "bm", "10": "bo", "11": "br", "12": "bt", "13": "bw", "14": "ca", "15": "ch", "16": "cl", "17": "co", "18": "cz", "19": "de", "20": "dk", "21": "ec", "22": "ee", "23": "es", "24": "fi", "25": "fr", "26": "gb", "27": "gh", "28": "gl", "29": "gr", "30": "gt", "31": "hk", "32": "hr", "33": "hu", "34": "id", "35": "ie", "36": "il", "37": "is", "38": "it", "39": "ix", "40": "jp", "41": "kg", "42": "kh", "43": "kr", "44": "la", "45": "lk", "46": "ls", "47": "lt", "48": "lu", "49": "lv", "50": "me", "51": "mg", "52": "mk", "53": "mn", "54": "mo", "55": "mt", "56": "mx", "57": "my", "58": "nl", "59": "no", "60": "nz", "61": "pe", "62": "ph", "63": "pl", "64": "pt", "65": "ro", "66": "rs", "67": "ru", "68": "se", "69": "sg", "70": "si", "71": "sk", "72": "sn", "73": "sz", "74": "th", "75": "tn", "76": "tr", "77": "tw", "78": "ua", "79": "ug", "80": "us", "81": "uy", "82": "za"}}}}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 8075723633.0, "num_examples": 83000}], "download_size": 8055991198, "dataset_size": 8075723633.0}}
2022-11-26T13:37:24+00:00
[]
[]
TAGS #region-us
# Dataset Card for "gal_yair_83000_256x256_fixed" More Information needed
[ "# Dataset Card for \"gal_yair_83000_256x256_fixed\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"gal_yair_83000_256x256_fixed\"\n\nMore Information needed" ]
d4bd85d679f899a3a1cecd1c3b5733489884fe97
# Dataset Card for "Godardv1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Transform21/Godardv1
[ "region:us" ]
2022-11-26T13:39:02+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 228793812.668, "num_examples": 9623}], "download_size": 1122951385, "dataset_size": 228793812.668}}
2022-11-26T13:39:44+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Godardv1" More Information needed
[ "# Dataset Card for \"Godardv1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Godardv1\"\n\nMore Information needed" ]
410663e7391d2781672d1c9c7775d997e8eba5ec
ye
Bauyrjan/kkcorp
[ "region:us" ]
2022-11-26T13:40:51+00:00
{}
2022-11-26T18:22:18+00:00
[]
[]
TAGS #region-us
ye
[]
[ "TAGS\n#region-us \n" ]
7e5562d0d65270688f05e7b406dd1e74aea196a7
# Dataset Card for "lmqg/qa_squad" ## Dataset Description - **Repository:** [https://github.com/asahi417/lm-question-generation](https://github.com/asahi417/lm-question-generation) - **Paper:** [https://rajpurkar.github.io/SQuAD-explorer/](https://rajpurkar.github.io/SQuAD-explorer/) - **Point of Contact:** [Asahi Ushio](http://asahiushio.com/) ### Dataset Summary This is the SQuAD v1 dataset with the train/validatio/test split used in [qg_squad](https://huggingface.co/datasets/lmqg/qg_squad). ### Supported Tasks and Leaderboards * `question-answering` ### Languages English (en) ## Dataset Structure ### Data Fields The data fields are the same among all splits. #### plain_text - `id`: a `string` feature of id - `title`: a `string` feature of title of the paragraph - `context`: a `string` feature of paragraph - `question`: a `string` feature of question - `answers`: a `json` feature of answers ### Data Splits |train |validation|test | |--------:|---------:|-------:| | 75,722| 10,570| 11,877| ## Citation Information ``` @article{2016arXiv160605250R, author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev}, Konstantin and {Liang}, Percy}, title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}", journal = {arXiv e-prints}, year = 2016, eid = {arXiv:1606.05250}, pages = {arXiv:1606.05250}, archivePrefix = {arXiv}, eprint = {1606.05250}, } ```
lmqg/qa_squad
[ "task_categories:question-answering", "task_ids:extractive-qa", "multilinguality:monolingual", "size_categories:1M<", "source_datasets:extended|wikipedia", "language:en", "license:cc-by-4.0", "arxiv:1606.05250", "region:us" ]
2022-11-26T13:47:18+00:00
{"language": "en", "license": "cc-by-4.0", "multilinguality": "monolingual", "size_categories": "1M<", "source_datasets": ["extended|wikipedia"], "task_categories": ["question-answering"], "task_ids": ["extractive-qa"], "pretty_name": "SQuAD with QG split."}
2022-11-26T14:24:44+00:00
[ "1606.05250" ]
[ "en" ]
TAGS #task_categories-question-answering #task_ids-extractive-qa #multilinguality-monolingual #size_categories-1M< #source_datasets-extended|wikipedia #language-English #license-cc-by-4.0 #arxiv-1606.05250 #region-us
Dataset Card for "lmqg/qa\_squad" ================================= Dataset Description ------------------- * Repository: URL * Paper: URL * Point of Contact: Asahi Ushio ### Dataset Summary This is the SQuAD v1 dataset with the train/validatio/test split used in qg\_squad. ### Supported Tasks and Leaderboards * 'question-answering' ### Languages English (en) Dataset Structure ----------------- ### Data Fields The data fields are the same among all splits. #### plain\_text * 'id': a 'string' feature of id * 'title': a 'string' feature of title of the paragraph * 'context': a 'string' feature of paragraph * 'question': a 'string' feature of question * 'answers': a 'json' feature of answers ### Data Splits
[ "### Dataset Summary\n\n\nThis is the SQuAD v1 dataset with the train/validatio/test split used in qg\\_squad.", "### Supported Tasks and Leaderboards\n\n\n* 'question-answering'", "### Languages\n\n\nEnglish (en)\n\n\nDataset Structure\n-----------------", "### Data Fields\n\n\nThe data fields are the same among all splits.", "#### plain\\_text\n\n\n* 'id': a 'string' feature of id\n* 'title': a 'string' feature of title of the paragraph\n* 'context': a 'string' feature of paragraph\n* 'question': a 'string' feature of question\n* 'answers': a 'json' feature of answers", "### Data Splits" ]
[ "TAGS\n#task_categories-question-answering #task_ids-extractive-qa #multilinguality-monolingual #size_categories-1M< #source_datasets-extended|wikipedia #language-English #license-cc-by-4.0 #arxiv-1606.05250 #region-us \n", "### Dataset Summary\n\n\nThis is the SQuAD v1 dataset with the train/validatio/test split used in qg\\_squad.", "### Supported Tasks and Leaderboards\n\n\n* 'question-answering'", "### Languages\n\n\nEnglish (en)\n\n\nDataset Structure\n-----------------", "### Data Fields\n\n\nThe data fields are the same among all splits.", "#### plain\\_text\n\n\n* 'id': a 'string' feature of id\n* 'title': a 'string' feature of title of the paragraph\n* 'context': a 'string' feature of paragraph\n* 'question': a 'string' feature of question\n* 'answers': a 'json' feature of answers", "### Data Splits" ]
1179f081da50d81c2f0f07f50529bdcaaae6bba1
# Dataset Card for "gal_yair_83000_1664x832_fixed" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
galman33/gal_yair_83000_1664x832_fixed
[ "region:us" ]
2022-11-26T14:10:15+00:00
{"dataset_info": {"features": [{"name": "lat", "dtype": "float64"}, {"name": "lon", "dtype": "float64"}, {"name": "country_code", "dtype": {"class_label": {"names": {"0": "ad", "1": "ae", "2": "al", "3": "aq", "4": "ar", "5": "au", "6": "bd", "7": "be", "8": "bg", "9": "bm", "10": "bo", "11": "br", "12": "bt", "13": "bw", "14": "ca", "15": "ch", "16": "cl", "17": "co", "18": "cz", "19": "de", "20": "dk", "21": "ec", "22": "ee", "23": "es", "24": "fi", "25": "fr", "26": "gb", "27": "gh", "28": "gl", "29": "gr", "30": "gt", "31": "hk", "32": "hr", "33": "hu", "34": "id", "35": "ie", "36": "il", "37": "is", "38": "it", "39": "ix", "40": "jp", "41": "kg", "42": "kh", "43": "kr", "44": "la", "45": "lk", "46": "ls", "47": "lt", "48": "lu", "49": "lv", "50": "me", "51": "mg", "52": "mk", "53": "mn", "54": "mo", "55": "mt", "56": "mx", "57": "my", "58": "nl", "59": "no", "60": "nz", "61": "pe", "62": "ph", "63": "pl", "64": "pt", "65": "ro", "66": "rs", "67": "ru", "68": "se", "69": "sg", "70": "si", "71": "sk", "72": "sn", "73": "sz", "74": "th", "75": "tn", "76": "tr", "77": "tw", "78": "ua", "79": "ug", "80": "us", "81": "uy", "82": "za"}}}}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 14180076638.0, "num_examples": 83000}], "download_size": 13657100476, "dataset_size": 14180076638.0}}
2022-11-26T14:18:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "gal_yair_83000_1664x832_fixed" More Information needed
[ "# Dataset Card for \"gal_yair_83000_1664x832_fixed\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"gal_yair_83000_1664x832_fixed\"\n\nMore Information needed" ]
99a7721348cb53e655959396203ce1353abf31c3
# Dataset Card for "cat-toy" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
lewtun/cat-toy
[ "region:us" ]
2022-11-26T14:19:31+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 1322754.0, "num_examples": 4}], "download_size": 1265258, "dataset_size": 1322754.0}}
2022-11-26T14:20:24+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cat-toy" More Information needed
[ "# Dataset Card for \"cat-toy\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cat-toy\"\n\nMore Information needed" ]
db99c2d161774858876b933c9347a4dd57308099
# Dataset Card for raddromur_asr ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** Raddrómur Icelandic Speech 22.09 - **Repository:** [Clarin.is](http://hdl.handle.net/20.500.12537/286) - **Point of Contact:** [Carlos Mena](mailto:[email protected]), [Jón Guðnason](mailto:[email protected]) ### Dataset Summary The "Raddrómur Icelandic Speech 22.09" ("Raddrómur Corpus" for short) is an Icelandic corpus created by the Language and Voice Laboratory (LVL) at Reykjavík University (RU) in 2022. It is made out of radio podcasts mostly taken from RÚV (ruv.is). ### Example Usage The Raddrómur Corpus includes only the train split. To load the training split, pass its name as a config name: ```python from datasets import load_dataset raddromur_asr = load_dataset("language-and-voice-lab/raddromur_asr") ``` To load the specific "train" split do: ```python from datasets import load_dataset raddromur_asr = load_dataset("language-and-voice-lab/raddromur_asr",split="train") ``` ### Supported Tasks automatic-speech-recognition: The dataset can be used to train a model for Automatic Speech Recognition (ASR). The model is presented with an audio file and asked to transcribe the audio file to written text. The most common evaluation metric is the word error rate (WER). ### Languages The audio is in Icelandic. ## Dataset Structure ### Data Instances ```python { 'audio_id': 'leikfangavelin_007-0066-00:18:5686-00:00:0392', 'audio': { 'path': '/home/carlos/.cache/HuggingFace/datasets/downloads/extracted/f9a8b6e2ea4539571f6e88659a63aa485daa99d47d9c1c95e968fce7ab96664a/train/leikfangavelin/leikfangavelin_007/leikfangavelin_007-0066-00:18:5686-00:00:0392.flac', 'array': array([-0.03311157, -0.08340454, -0.11801147, ..., 0. , 0.00033569, 0.00054932], dtype=float32), 'sampling_rate': 16000 }, 'podcast_id': 'leikfangavelin_007', 'segment_num': 66, 'start_time': '00:18:56.86', 'duration': 3.9679999351501465, 'mafia_score': 0.0, 'normalized_text': 'hætti í bandinu skömmu eftir að platan sem ekki kom út var tekin upp' } ``` ### Data Fields * `audio_id` (string) - id of audio segment * `audio` (datasets.Audio) - a dictionary containing the path to the audio, the decoded audio array, and the sampling rate. In non-streaming mode (default), the path points to the locally extracted audio. In streaming mode, the path is the relative path of an audio inside its archive (as files are not downloaded and extracted locally). 
* `podcast_id` (string) - id of the podcast * `segment_num` (int32) - integer identifying the segment number * `duration` (float32) - duration of the audio file in seconds. * `mafia_score` (float32) - In order to distinguish the transcriptions with fewer expected mistakes, a quality measure called "MAFIA Score" was added. A MAFIA Score close to zero implies a better quality transcription. * `normalized_text` (string) - normalized audio segment transcription. ### Data Splits The corpus is split into train only. The length of the train portion is 49h09m in 13030 utterances. ## Dataset Creation ### Curation Rationale * The corpus was automatically segmented using the tool [inaSpeechSegmenter](https://pypi.org/project/inaSpeechSegmenter/). * The forced alignment was performed using the tool [MAFIA aligner](http://hdl.handle.net/20.500.12537/215). * The corpus comes with a metadata file which is in TSV format. This file contains the normalized transcription of the corpus and the filenames among other relevant information. * The corpus contains 13030 utterances, totalling 49 hours and 09 minutes. * The corpus is not split into train/dev/test portions. * The corpus is distributed in the following format: flac, 16kHz@16bits mono. * The column "mafia_score" in the metadata file indicates the expected precision of the transcription. Zero is the highest precision. ### Source Data #### Initial Data Collection and Normalization The Raddrómur Corpus is composed of different radio podcasts in Icelandic. More information about the origin of these podcasts is as follows: * Rokkland | Author: Ólafur Páll Gunnarsson | Podcast/Radio show hosted by RUV. * A Tonsvidinu | Author: Una Margrét Jónsdóttir | Podcast/Radio show hosted by RUV. * I ljosu Sogunnar | Author: Vera Illugadóttir | Podcast/Radio show hosted by RUV. * Nedanmals | Authors: Elísabet Rún Þorsteinsdóttir and Marta Eir Sigurðardóttir. | Elísabet Rún Þorsteinsdóttir og Marta Eir Sigurðardóttir. * Leikfangavelin | Author: Atla Hergeirssonar | Independent Podcast/Radio show. ### Annotations #### Annotation process The podcasts from https://www.ruv.is/ were selected because they come with a text script that matches with certain fidelity what is said during the show. After automatic segmentation of the episodes, the transcriptions were inferred using the scripts along with a forced alignment technique. #### Who are the annotators? The corpus was automatically segmented and aligned by the [MAFIA aligner](http://hdl.handle.net/20.500.12537/215). The MAFIA aligner is designed to take a podcast episode along with a text script reflecting what is spoken in the podcast, then segment the podcast and find a transcription that better fits what is in the script. When the script is not accurate, MAFIA is able to infer a transcription using Automatic Speech Recognition. ### Personal and Sensitive Information The corpus is comprised of speech utterances from professional podcasters. Nevertheless, you agree to not attempt to determine the identity of speakers in this dataset. ## Considerations for Using the Data ### Social Impact of Dataset This ASR corpus is one of the few available Icelandic corpora with spontaneous speech. ### Discussion of Biases In order to distinguish the transcriptions with fewer expected mistakes, a quality measure called "MAFIA Score" was added in the metadata file included with the corpus. A MAFIA Score close to zero implies a better quality transcription. 
To infer a transcription using the vocabulary of the text script, MAFIA creates a 3-gram language model with SRILM [4] using the text of all the podcasts available at the moment of running it. After this, MAFIA transcribes all the segments using a speech recognizer based on [NVIDIA-NeMo](https://developer.nvidia.com/nvidia-nemo). In order to calculate the MAFIA Score, a second round of speech recognition is run on all the segments, this time using a much more robust [6-gram language model](http://hdl.handle.net/20.500.12537/226) with a size of 5GB. The MAFIA score is then obtained by measuring the Word Error Rate between the first pass transcriptions (reference) and the second pass transcriptions (hypothesis). According to this, a MAFIA score of 0 reflects a transcription that is equal in both passes and is therefore a high quality transcription. ### Other Known Limitations "Raddrómur Icelandic Speech 22.09" by the Language and Voice Laboratory (LVL) from Reykjavík University (RU) is licensed under a Creative Commons Attribution 4.0 International (CC BY 4.0) License with the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ## Additional Information ### Dataset Curators The corpus was curated by Carlos Daniel Hernández Mena in 2022. ### Licensing Information [CC-BY-4.0](https://creativecommons.org/licenses/by/4.0/) ### Citation Information ``` @misc{carlosmenaraddromur2022, title={Raddrómur Icelandic Speech 22.09}, author={Hernández Mena, Carlos Daniel and Hedström, Staffan and Þórhallsdóttir, Ragnheiður and Fong, Judy Y. and Gunnarsson, Þorsteinn Daði and Sigurðardóttir, Helga Svala and Þorsteinsdóttir, Helga Lára and Guðnason, Jón}, year={2022}, url={http://hdl.handle.net/20.500.12537/286}, } ``` ### Contributions This project was funded by the Language Technology Programme for Icelandic 2019-2022. The programme, which is managed and coordinated by Almannarómur, is funded by the Icelandic Ministry of Education, Science and Culture. Special thanks to the podcasters and to Aron Berg from RÚV.
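As a complement to the Example Usage and the MAFIA Score discussion above, a small sketch of how one might keep only the highest-confidence segments (illustrative only; the 0.0 threshold is an assumption, not a recommendation by the corpus authors):

```python
from datasets import load_dataset

raddromur_asr = load_dataset("language-and-voice-lab/raddromur_asr", split="train")

# A MAFIA Score of 0.0 means the first-pass and second-pass transcriptions
# agreed exactly, so these segments should carry the most reliable text.
high_quality = raddromur_asr.filter(lambda row: row["mafia_score"] == 0.0)

print(f"kept {len(high_quality)} of {len(raddromur_asr)} segments")
print(high_quality[0]["normalized_text"])
```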
language-and-voice-lab/raddromur_asr
[ "task_categories:automatic-speech-recognition", "annotations_creators:machine-generated", "language_creators:machine-generated", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:original", "language:is", "license:cc-by-4.0", "icelandic podcasts", "spontaneous icelandic", "forced-aligned", "ruv.is", "mafia aligner", "region:us" ]
2022-11-26T14:25:29+00:00
{"annotations_creators": ["machine-generated"], "language_creators": ["machine-generated"], "language": ["is"], "license": ["cc-by-4.0"], "multilinguality": ["monolingual"], "size_categories": ["10K<n<100K"], "source_datasets": ["original"], "task_categories": ["automatic-speech-recognition"], "task_ids": [], "pretty_name": "Raddr\u00f3mur Icelandic Speech 22.09", "tags": ["icelandic podcasts", "spontaneous icelandic", "forced-aligned", "ruv.is", "mafia aligner"]}
2023-02-24T22:16:23+00:00
[]
[ "is" ]
TAGS #task_categories-automatic-speech-recognition #annotations_creators-machine-generated #language_creators-machine-generated #multilinguality-monolingual #size_categories-10K<n<100K #source_datasets-original #language-Icelandic #license-cc-by-4.0 #icelandic podcasts #spontaneous icelandic #forced-aligned #ruv.is #mafia aligner #region-us
# Dataset Card for raddromur_asr ## Table of Contents - Dataset Description - Dataset Summary - Supported Tasks - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: Raddrómur Icelandic Speech 22.09 - Repository: URL - Point of Contact: Carlos Mena, Jón Guðnason ### Dataset Summary The "Raddrómur Icelandic Speech 22.09" ("Raddrómur Corpus" for short) is an Icelandic corpus created by the Language and Voice Laboratory (LVL) at Reykjavík University (RU) in 2022. It is made out of radio podcasts mostly taken from RÚV (URL). ### Example Usage The Raddrómur Corpus counts with the train split only. To load the training split pass its name as a config name: To load the specific "train" split do: ### Supported Tasks automatic-speech-recognition: The dataset can be used to train a model for Automatic Speech Recognition (ASR). The model is presented with an audio file and asked to transcribe the audio file to written text. The most common evaluation metric is the word error rate (WER). ### Languages The audio is in Icelandic. ## Dataset Structure ### Data Instances ### Data Fields * 'audio_id' (string) - id of audio segment * 'audio' (datasets.Audio) - a dictionary containing the path to the audio, the decoded audio array, and the sampling rate. In non-streaming mode (default), the path points to the locally extracted audio. In streaming mode, the path is the relative path of an audio inside its archive (as files are not downloaded and extracted locally). * 'podcast_id' (string) - id of the podcast * 'segment_num' (int32) - integer identifing the number of segment * 'duration' (float32) - duration of the audio file in seconds. * 'mafia_score' (float32) - In order to distinguish the transcriptions with fewer expected mistakes, a quality measure called "MAFIA Score" was added. A MAFIA Score close to zero implies a better quality transcription. * 'normalized_text' (string) - normalized audio segment transcription. ### Data Splits The corpus is split into train only. The lenght of the train portion is 49h09m in 13030 utterances. ## Dataset Creation ### Curation Rationale * The corpus was automatically segmented using the tool inaSpeechSegmenter. * The forced alignment was performed using the tool MAFIA aligner. * The corpus comes with a metadata file wich is in TSV format. This file contain the normalized transcription of the corpus and the filenames among other relevant information. * The corpus contains 13030 utterances, totalling 49 hours and 09 minutes. * The corpus is not split into train/dev/test portions. * The corpus is distrubuted in the following format: flac, 16kHz@16bits mono. * The column "mafia_score" in the metadata file indicates the expected precision of the transcription. Zero is the highest precision. ### Source Data #### Initial Data Collection and Normalization The Raddrómur Corpus is composed of different radio podcasts in Icelandic. More information about the origin of these podcasts comes as follows: * Rokkland | Author: Ólafur Páll Gunnarsson | Podcast/Radio show hosted by RUV. * A Tonsvidinu | Author: Una Margrét Jónsdóttir | Podcast/Radio show hosted by RUV. 
* I ljosu Sogunnar | Author: Vera Illugadóttir | Podcast/Radio show hosted by RUV. * Nedanmals | Authors: Elísabet Rún Þorsteinsdóttir and Marta Eir Sigurðardóttir. | Elísabet Rún Þorsteinsdóttir og Marta Eir Sigurðardóttir. * Leikfangavelin | Author: Atla Hergeirssonar | Independent Podcast/Radio show. ### Annotations #### Annotation process The podcasts from URL were selected because they count with a text script that matches with certain fidelity what is said during the show. After automatic segmentation of the episodes, the transcriptions were inferred using the scripts along with a forced alignment technique. #### Who are the annotators? The corpus was automatically segmented and aligned by the MAFIA aligner. The MAFIA aligner is designed to take a podcast episode along with a text script reflecting what is spoken in the podcast, then segment the podcast and find a transcription that better fits what is in the script. When the script is not accurate, MAFIA is able to infer a transcription using Automatic Speech Recognition. ### Personal and Sensitive Information The corpus is comprised of speech utterances from professional podcasters. Nevertheless, you agree to not attempt to determine the identity of speakers in this dataset. ## Considerations for Using the Data ### Social Impact of Dataset This ASR corpus is one of the few available Icelandic copora with spontaneous speech. ### Discussion of Biases In order to distinguish the transcriptions with fewer expected mistakes, a quality measure called "MAFIA Score" was added in the metadata file included with the corpus. A MAFIA Score close to zero implies a better quality transcription. To infer a transcription using the vocabulary of the text script, MAFIA creates a 3-gram language model with SRILM [4] using the text of all the podcasts available at the moment of running it. After this, MAFIA transcribes all the segments using a speech recognizer based on NVIDIA-NeMo. In order to calculate the MAFIA Score, a second round of speech recognition is passed to all the segments but using a way more robust 6-gram language model with a size of 5GB. The MAFIA score is then obtained by measuring the Word Error Rate bewteen the first pass transcriptions (reference) and the second pass transcriptions (hyphotesis). According to this, a MAFIA score of 0 reflects a transcription that is equal in both passes and therefore, it is a high quality transcription. ### Other Known Limitations "Raddrómur Icelandic Speech 22.09" by the Language and Voice Laboratory (LVL) from Reykjavík University (RU) is licensed under a Creative Commons Attribution 4.0 International (CC BY 4.0) License with the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ## Additional Information ### Dataset Curators The corpus was curated by Carlos Daniel Hernández Mena in 2022. ### Licensing Information CC-BY-4.0 ### Contributions This project was funded by the Language Technology Programme for Icelandic 2019-2022. The programme, which is managed and coordinated by Almannarómur, is funded by the Icelandic Ministry of Education, Science and Culture. Special thanks to the podcasters and to Aron Berg from RÚV.
[ "# Dataset Card for raddromur_asr", "## Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n- Homepage: Raddrómur Icelandic Speech 22.09\n- Repository: URL\n- Point of Contact: Carlos Mena, Jón Guðnason", "### Dataset Summary\n\nThe \"Raddrómur Icelandic Speech 22.09\" (\"Raddrómur Corpus\" for short) is an Icelandic corpus created by the Language and Voice Laboratory (LVL) at Reykjavík University (RU) in 2022. It is made out of radio podcasts mostly taken from RÚV (URL).", "### Example Usage\nThe Raddrómur Corpus counts with the train split only. To load the training split pass its name as a config name:\n\nTo load the specific \"train\" split do:", "### Supported Tasks\nautomatic-speech-recognition: The dataset can be used to train a model for Automatic Speech Recognition (ASR). The model is presented with an audio file and asked to transcribe the audio file to written text. The most common evaluation metric is the word error rate (WER).", "### Languages\nThe audio is in Icelandic.", "## Dataset Structure", "### Data Instances", "### Data Fields\n* 'audio_id' (string) - id of audio segment\n* 'audio' (datasets.Audio) - a dictionary containing the path to the audio, the decoded audio array, and the sampling rate. In non-streaming mode (default), the path points to the locally extracted audio. In streaming mode, the path is the relative path of an audio inside its archive (as files are not downloaded and extracted locally).\n* 'podcast_id' (string) - id of the podcast\n* 'segment_num' (int32) - integer identifing the number of segment\n* 'duration' (float32) - duration of the audio file in seconds.\n* 'mafia_score' (float32) - In order to distinguish the transcriptions with fewer expected mistakes, a quality measure called \"MAFIA Score\" was added. A MAFIA Score close to zero implies a better quality transcription.\n* 'normalized_text' (string) - normalized audio segment transcription.", "### Data Splits\nThe corpus is split into train only. The lenght of the train portion is 49h09m in 13030 utterances.", "## Dataset Creation", "### Curation Rationale\n\n* The corpus was automatically segmented using the tool inaSpeechSegmenter.\n \n* The forced alignment was performed using the tool MAFIA aligner.\n \n* The corpus comes with a metadata file wich is in TSV format. This file contain the normalized transcription of the corpus and the filenames among other relevant information.\n \n* The corpus contains 13030 utterances, totalling 49 hours and 09 minutes.\n\n* The corpus is not split into train/dev/test portions.\n\n* The corpus is distrubuted in the following format: flac, 16kHz@16bits mono.\n\n* The column \"mafia_score\" in the metadata file indicates the expected precision of the transcription. Zero is the highest precision.", "### Source Data", "#### Initial Data Collection and Normalization\n\nThe Raddrómur Corpus is composed of different radio podcasts in Icelandic. 
More information about the origin of these podcasts comes as follows:\n\n* Rokkland | Author: Ólafur Páll Gunnarsson | Podcast/Radio show hosted by RUV.\n\n* A Tonsvidinu | Author: Una Margrét Jónsdóttir | Podcast/Radio show hosted by RUV.\n\n* I ljosu Sogunnar | Author: Vera Illugadóttir | Podcast/Radio show hosted by RUV.\n\n* Nedanmals | Authors: Elísabet Rún Þorsteinsdóttir and Marta Eir Sigurðardóttir. | Elísabet Rún Þorsteinsdóttir og Marta Eir Sigurðardóttir.\n\n* Leikfangavelin | Author: Atla Hergeirssonar | Independent Podcast/Radio show.", "### Annotations", "#### Annotation process\n\nThe podcasts from URL were selected because they count with a text script that matches with certain \nfidelity what is said during the show. After automatic segmentation of the episodes, the transcriptions were inferred using the scripts along with a forced alignment technique.", "#### Who are the annotators?\n\nThe corpus was automatically segmented and aligned by the MAFIA aligner.\n\nThe MAFIA aligner is designed to take a podcast episode along with a text script reflecting what is spoken in the podcast, then segment the podcast and find a transcription that better fits what is in the script. When the script is not accurate, MAFIA is able to infer a transcription using Automatic Speech Recognition.", "### Personal and Sensitive Information\nThe corpus is comprised of speech utterances from professional podcasters. Nevertheless, you agree to not attempt to determine the identity of speakers in this dataset.", "## Considerations for Using the Data", "### Social Impact of Dataset\nThis ASR corpus is one of the few available Icelandic copora with spontaneous speech.", "### Discussion of Biases\n\nIn order to distinguish the transcriptions with fewer expected mistakes, a quality measure called \"MAFIA Score\" was added in the metadata file included with the corpus. A MAFIA Score close to zero implies a better quality transcription.\n\nTo infer a transcription using the vocabulary of the text script, MAFIA creates a 3-gram language model with SRILM [4] using the text of all the podcasts available at the moment of running it. After this, MAFIA transcribes all the segments using a speech recognizer based on NVIDIA-NeMo.\n\nIn order to calculate the MAFIA Score, a second round of speech recognition is passed to all the segments but using a way more robust 6-gram language model with a size of 5GB. The MAFIA score is then obtained by measuring the Word Error Rate bewteen the first pass transcriptions (reference) and the second pass transcriptions (hyphotesis). According to this, a MAFIA score of 0 reflects a transcription that is equal in both passes and therefore, it is a high quality transcription.", "### Other Known Limitations\n\"Raddrómur Icelandic Speech 22.09\" by the Language and Voice Laboratory (LVL) from Reykjavík University (RU) is licensed under a Creative Commons Attribution 4.0 International (CC BY 4.0) License with the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.", "## Additional Information", "### Dataset Curators\n\nThe corpus was curated by Carlos Daniel Hernández Mena in 2022.", "### Licensing Information\nCC-BY-4.0", "### Contributions\n\nThis project was funded by the Language Technology Programme for Icelandic 2019-2022. 
The programme, which is managed and coordinated by Almannarómur, is funded by the Icelandic Ministry of Education, Science and Culture.\n\nSpecial thanks to the podcasters and to Aron Berg from RÚV." ]
[ "TAGS\n#task_categories-automatic-speech-recognition #annotations_creators-machine-generated #language_creators-machine-generated #multilinguality-monolingual #size_categories-10K<n<100K #source_datasets-original #language-Icelandic #license-cc-by-4.0 #icelandic podcasts #spontaneous icelandic #forced-aligned #ruv.is #mafia aligner #region-us \n", "# Dataset Card for raddromur_asr", "## Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n- Homepage: Raddrómur Icelandic Speech 22.09\n- Repository: URL\n- Point of Contact: Carlos Mena, Jón Guðnason", "### Dataset Summary\n\nThe \"Raddrómur Icelandic Speech 22.09\" (\"Raddrómur Corpus\" for short) is an Icelandic corpus created by the Language and Voice Laboratory (LVL) at Reykjavík University (RU) in 2022. It is made out of radio podcasts mostly taken from RÚV (URL).", "### Example Usage\nThe Raddrómur Corpus counts with the train split only. To load the training split pass its name as a config name:\n\nTo load the specific \"train\" split do:", "### Supported Tasks\nautomatic-speech-recognition: The dataset can be used to train a model for Automatic Speech Recognition (ASR). The model is presented with an audio file and asked to transcribe the audio file to written text. The most common evaluation metric is the word error rate (WER).", "### Languages\nThe audio is in Icelandic.", "## Dataset Structure", "### Data Instances", "### Data Fields\n* 'audio_id' (string) - id of audio segment\n* 'audio' (datasets.Audio) - a dictionary containing the path to the audio, the decoded audio array, and the sampling rate. In non-streaming mode (default), the path points to the locally extracted audio. In streaming mode, the path is the relative path of an audio inside its archive (as files are not downloaded and extracted locally).\n* 'podcast_id' (string) - id of the podcast\n* 'segment_num' (int32) - integer identifing the number of segment\n* 'duration' (float32) - duration of the audio file in seconds.\n* 'mafia_score' (float32) - In order to distinguish the transcriptions with fewer expected mistakes, a quality measure called \"MAFIA Score\" was added. A MAFIA Score close to zero implies a better quality transcription.\n* 'normalized_text' (string) - normalized audio segment transcription.", "### Data Splits\nThe corpus is split into train only. The lenght of the train portion is 49h09m in 13030 utterances.", "## Dataset Creation", "### Curation Rationale\n\n* The corpus was automatically segmented using the tool inaSpeechSegmenter.\n \n* The forced alignment was performed using the tool MAFIA aligner.\n \n* The corpus comes with a metadata file wich is in TSV format. 
This file contain the normalized transcription of the corpus and the filenames among other relevant information.\n \n* The corpus contains 13030 utterances, totalling 49 hours and 09 minutes.\n\n* The corpus is not split into train/dev/test portions.\n\n* The corpus is distrubuted in the following format: flac, 16kHz@16bits mono.\n\n* The column \"mafia_score\" in the metadata file indicates the expected precision of the transcription. Zero is the highest precision.", "### Source Data", "#### Initial Data Collection and Normalization\n\nThe Raddrómur Corpus is composed of different radio podcasts in Icelandic. More information about the origin of these podcasts comes as follows:\n\n* Rokkland | Author: Ólafur Páll Gunnarsson | Podcast/Radio show hosted by RUV.\n\n* A Tonsvidinu | Author: Una Margrét Jónsdóttir | Podcast/Radio show hosted by RUV.\n\n* I ljosu Sogunnar | Author: Vera Illugadóttir | Podcast/Radio show hosted by RUV.\n\n* Nedanmals | Authors: Elísabet Rún Þorsteinsdóttir and Marta Eir Sigurðardóttir. | Elísabet Rún Þorsteinsdóttir og Marta Eir Sigurðardóttir.\n\n* Leikfangavelin | Author: Atla Hergeirssonar | Independent Podcast/Radio show.", "### Annotations", "#### Annotation process\n\nThe podcasts from URL were selected because they count with a text script that matches with certain \nfidelity what is said during the show. After automatic segmentation of the episodes, the transcriptions were inferred using the scripts along with a forced alignment technique.", "#### Who are the annotators?\n\nThe corpus was automatically segmented and aligned by the MAFIA aligner.\n\nThe MAFIA aligner is designed to take a podcast episode along with a text script reflecting what is spoken in the podcast, then segment the podcast and find a transcription that better fits what is in the script. When the script is not accurate, MAFIA is able to infer a transcription using Automatic Speech Recognition.", "### Personal and Sensitive Information\nThe corpus is comprised of speech utterances from professional podcasters. Nevertheless, you agree to not attempt to determine the identity of speakers in this dataset.", "## Considerations for Using the Data", "### Social Impact of Dataset\nThis ASR corpus is one of the few available Icelandic copora with spontaneous speech.", "### Discussion of Biases\n\nIn order to distinguish the transcriptions with fewer expected mistakes, a quality measure called \"MAFIA Score\" was added in the metadata file included with the corpus. A MAFIA Score close to zero implies a better quality transcription.\n\nTo infer a transcription using the vocabulary of the text script, MAFIA creates a 3-gram language model with SRILM [4] using the text of all the podcasts available at the moment of running it. After this, MAFIA transcribes all the segments using a speech recognizer based on NVIDIA-NeMo.\n\nIn order to calculate the MAFIA Score, a second round of speech recognition is passed to all the segments but using a way more robust 6-gram language model with a size of 5GB. The MAFIA score is then obtained by measuring the Word Error Rate bewteen the first pass transcriptions (reference) and the second pass transcriptions (hyphotesis). 
According to this, a MAFIA score of 0 reflects a transcription that is equal in both passes and therefore, it is a high quality transcription.", "### Other Known Limitations\n\"Raddrómur Icelandic Speech 22.09\" by the Language and Voice Laboratory (LVL) from Reykjavík University (RU) is licensed under a Creative Commons Attribution 4.0 International (CC BY 4.0) License with the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.", "## Additional Information", "### Dataset Curators\n\nThe corpus was curated by Carlos Daniel Hernández Mena in 2022.", "### Licensing Information\nCC-BY-4.0", "### Contributions\n\nThis project was funded by the Language Technology Programme for Icelandic 2019-2022. The programme, which is managed and coordinated by Almannarómur, is funded by the Icelandic Ministry of Education, Science and Culture.\n\nSpecial thanks to the podcasters and to Aron Berg from RÚV." ]
7d770d8b2cfff2fe23cc2710422abe42deaf5a8c
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-1b7 * Dataset: futin/feed * Config: top_vi_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-top_vi_-7f787f-2245771646
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T15:11:55+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "bigscience/bloom-1b7", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "top_vi_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-26T16:04:36+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-1b7 * Dataset: futin/feed * Config: top_vi_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-1b7\n* Dataset: futin/feed\n* Config: top_vi_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-1b7\n* Dataset: futin/feed\n* Config: top_vi_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
d814632be9a689b101c0d1ce39b29fd313d8fd3d
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-560m * Dataset: futin/feed * Config: sen_vi_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-sen_vi_-0f1239-2245871652
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T15:11:56+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "bigscience/bloom-560m", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "sen_vi_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-26T15:30:12+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-560m * Dataset: futin/feed * Config: sen_vi_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-560m\n* Dataset: futin/feed\n* Config: sen_vi_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-560m\n* Dataset: futin/feed\n* Config: sen_vi_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
7a7459b8e989696ceafea727e7dc5822cd7fa3f4
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-560m * Dataset: futin/feed * Config: top_vi_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-top_vi_-7f787f-2245771643
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T15:11:56+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "bigscience/bloom-560m", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "top_vi_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-26T15:36:28+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-560m * Dataset: futin/feed * Config: top_vi_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-560m\n* Dataset: futin/feed\n* Config: top_vi_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-560m\n* Dataset: futin/feed\n* Config: top_vi_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
dca1c43deea4cf0bca649fcca07c0ea974b15f36
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-30b * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-sen_en_-7dbe88-2245971654
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T15:11:56+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-30b", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "sen_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-27T00:26:39+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-30b * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-30b\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-30b\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
b5593f92d4ebec14395a86c6edebf6536851c1ab
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-3b * Dataset: futin/feed * Config: top_vi_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-top_vi_-7f787f-2245771644
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T15:11:56+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "bigscience/bloom-3b", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "top_vi_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-26T16:27:46+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-3b * Dataset: futin/feed * Config: top_vi_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-3b\n* Dataset: futin/feed\n* Config: top_vi_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-3b\n* Dataset: futin/feed\n* Config: top_vi_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
a0711c9ff90bd78a6c5f0d47f5ba01b438f1b7eb
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-1b1 * Dataset: futin/feed * Config: sen_vi_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-sen_vi_-0f1239-2245871651
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T15:11:57+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "bigscience/bloom-1b1", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "sen_vi_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-26T15:36:28+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-1b1 * Dataset: futin/feed * Config: sen_vi_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-1b1\n* Dataset: futin/feed\n* Config: sen_vi_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-1b1\n* Dataset: futin/feed\n* Config: sen_vi_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
6674551952fffac4b3acda432f4699925d329e82
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-1b1 * Dataset: futin/feed * Config: top_vi_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-top_vi_-7f787f-2245771647
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T15:11:57+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "bigscience/bloom-1b1", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "top_vi_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-26T15:43:53+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-1b1 * Dataset: futin/feed * Config: top_vi_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-1b1\n* Dataset: futin/feed\n* Config: top_vi_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-1b1\n* Dataset: futin/feed\n* Config: top_vi_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
2651bbd88c6edc74a3bf0410b2d02118c171997d
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-66b * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-sen_en_-7dbe88-2245971653
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T15:11:57+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-66b", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "sen_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-27T11:35:07+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-66b * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-66b\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-66b\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
1f29a57bafc1bc30d78de11c1c1c9a9cb67a71bf
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-3b * Dataset: futin/feed * Config: sen_vi_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-sen_vi_-0f1239-2245871648
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T15:11:57+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "bigscience/bloom-3b", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "sen_vi_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-26T16:09:48+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-3b * Dataset: futin/feed * Config: sen_vi_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-3b\n* Dataset: futin/feed\n* Config: sen_vi_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-3b\n* Dataset: futin/feed\n* Config: sen_vi_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
e498dc31acbef026a852cfbe1a333889f33ded4a
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-7b1 * Dataset: futin/feed * Config: sen_vi_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-sen_vi_-0f1239-2245871649
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T15:11:57+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "bigscience/bloom-7b1", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "sen_vi_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-26T17:56:25+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-7b1 * Dataset: futin/feed * Config: sen_vi_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-7b1\n* Dataset: futin/feed\n* Config: sen_vi_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-7b1\n* Dataset: futin/feed\n* Config: sen_vi_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
bb0335fe6fa58fdbba7b41b5169c25de8a602ad3
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-13b * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-sen_en_-7dbe88-2245971655
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T15:11:57+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-13b", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "sen_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-26T19:05:49+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-13b * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-13b\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-13b\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
388c2e8dced0501f7e4a0b9c418d56393542fc95
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-6.7b * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-sen_en_-7dbe88-2245971656
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T15:11:58+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-6.7b", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "sen_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-26T17:30:38+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-6.7b * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-6.7b\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-6.7b\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
a573e9d64082775c48ff1c9e4c93c08961de76cc
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-1b7 * Dataset: futin/feed * Config: sen_vi_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-sen_vi_-0f1239-2245871650
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T15:12:03+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "bigscience/bloom-1b7", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "sen_vi_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-26T15:51:24+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-1b7 * Dataset: futin/feed * Config: sen_vi_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-1b7\n* Dataset: futin/feed\n* Config: sen_vi_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-1b7\n* Dataset: futin/feed\n* Config: sen_vi_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
1dc006ceaca814b15e845cc969e27ce150287ba9
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-7b1 * Dataset: futin/feed * Config: top_vi_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-top_vi_-7f787f-2245771645
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T15:12:07+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "bigscience/bloom-7b1", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "top_vi_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-26T18:55:42+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-7b1 * Dataset: futin/feed * Config: top_vi_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-7b1\n* Dataset: futin/feed\n* Config: top_vi_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-7b1\n* Dataset: futin/feed\n* Config: top_vi_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
28a5185e1cf2bb88fc8ce999ee3b6f2d40b4eaa1
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-350m * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-sen_en_-7dbe88-2245971657
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T15:12:11+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-350m", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "sen_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-26T15:21:24+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-350m * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-350m\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-350m\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
3d4cbd76f1eb49595dd62ab6db0104eff56f8601
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-2.7b * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-sen_en_-7dbe88-2245971658
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T15:29:04+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-2.7b", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "sen_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-26T16:11:40+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-2.7b * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-2.7b\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-2.7b\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
0f9a248807fc6216bdb3cb87a0cee913ef92cc0f
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-125m * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-sen_en_-7dbe88-2245971659
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T15:37:40+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-125m", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "sen_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-26T15:42:20+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-125m * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-125m\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-125m\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
9b725b0b2bcdc6b26bad0a320e7ffac1b15ca21f
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-1.3b * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-sen_en_-7dbe88-2245971660
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T15:43:09+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-1.3b", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "sen_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-26T16:10:12+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-1.3b * Dataset: futin/feed * Config: sen_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-1.3b\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-1.3b\n* Dataset: futin/feed\n* Config: sen_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
e14a59f7fabc5b86329ffe87ae94a2f0f1b2dafa
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-66b * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-top_en_-3f631c-2246071661
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T15:43:11+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-66b", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "top_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-27T22:21:46+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-66b * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-66b\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-66b\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
5ecaf1f07b865f2e36013c7422772bc73a5d0377
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-30b * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-top_en_-3f631c-2246071662
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T15:48:55+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-30b", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "top_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-27T05:37:13+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-30b * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-30b\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-30b\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
7837801979d6677cfbd6eec3bae3714914808f0b
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-13b * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-top_en_-3f631c-2246071663
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T15:49:31+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-13b", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "top_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-26T21:41:46+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-13b * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-13b\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-13b\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
340e7ab0468b85fbe241b141c058177b1d64d301
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-6.7b * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-top_en_-3f631c-2246071664
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T15:57:52+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-6.7b", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "top_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-26T19:08:46+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-6.7b * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-6.7b\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-6.7b\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
5577de5e9f7205124dff1f68af860398694f58a7
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-350m * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-top_en_-3f631c-2246071665
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T16:11:49+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-350m", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "top_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-26T16:23:01+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-350m * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-350m\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-350m\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
078236a29f3a373fd649654ba5787607b79e5ad8
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-125m * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-top_en_-3f631c-2246071667
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T16:16:33+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-125m", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "top_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-26T16:22:25+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-125m * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-125m\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-125m\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
2bddaffa4ae5035f7cbe0ba6eb25a80c8e28688e
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-2.7b * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-top_en_-3f631c-2246071666
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T16:16:35+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-2.7b", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "top_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-26T17:19:48+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-2.7b * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-2.7b\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-2.7b\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
09462af6e9304b634d99b81cda9873a8adcfe95b
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-1.3b * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@futin](https://huggingface.co/futin) for evaluating this model.
autoevaluate/autoeval-eval-futin__feed-top_en_-3f631c-2246071668
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T16:17:16+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["futin/feed"], "eval_info": {"task": "text_zero_shot_classification", "model": "facebook/opt-1.3b", "metrics": [], "dataset_name": "futin/feed", "dataset_config": "top_en_", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-11-26T16:51:25+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: facebook/opt-1.3b * Dataset: futin/feed * Config: top_en_ * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @futin for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-1.3b\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: facebook/opt-1.3b\n* Dataset: futin/feed\n* Config: top_en_\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @futin for evaluating this model." ]
dba68926976671325b6aca0a328ba4db6b0dc0bc
# Dataset Card for "librispeech-augmentated-train-prepared" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
DTU54DL/librispeech-augmentated-train-prepared
[ "region:us" ]
2022-11-26T21:55:59+00:00
{"dataset_info": {"features": [{"name": "file", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "text", "dtype": "string"}, {"name": "speaker_id", "dtype": "int64"}, {"name": "chapter_id", "dtype": "int64"}, {"name": "id", "dtype": "string"}, {"name": "input_features", "sequence": {"sequence": "float32"}}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train.360", "num_bytes": 6796928865.0, "num_examples": 5000}], "download_size": 3988873165, "dataset_size": 6796928865.0}}
2022-12-05T14:21:47+00:00
[]
[]
TAGS #region-us
# Dataset Card for "librispeech-augmentated-train-prepared" More Information needed
[ "# Dataset Card for \"librispeech-augmentated-train-prepared\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"librispeech-augmentated-train-prepared\"\n\nMore Information needed" ]
bda61255e36907752ae817d9dabc6dca06921cee
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: Artifact-AI/led_base_16384_billsum_summarization * Dataset: billsum * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@Artifact-AI](https://huggingface.co/Artifact-AI) for evaluating this model.
autoevaluate/autoeval-eval-billsum-default-8a9925-2249471725
[ "autotrain", "evaluation", "region:us" ]
2022-11-26T22:24:31+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["billsum"], "eval_info": {"task": "summarization", "model": "Artifact-AI/led_base_16384_billsum_summarization", "metrics": [], "dataset_name": "billsum", "dataset_config": "default", "dataset_split": "test", "col_mapping": {"text": "text", "target": "summary"}}}
2022-11-27T04:01:08+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: Artifact-AI/led_base_16384_billsum_summarization * Dataset: billsum * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @Artifact-AI for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: Artifact-AI/led_base_16384_billsum_summarization\n* Dataset: billsum\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @Artifact-AI for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: Artifact-AI/led_base_16384_billsum_summarization\n* Dataset: billsum\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @Artifact-AI for evaluating this model." ]
502758da08386a4e19bebfe2cbf0b81e1c25674c
# Dataset Card for [Dataset Name] ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary [More Information Needed] ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@github-username](https://github.com/<github-username>) for adding this dataset.
DTU54DL/common-proc-whisper
[ "task_categories:token-classification", "annotations_creators:expert-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:original", "language:en", "license:mit", "region:us" ]
2022-11-26T22:26:38+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["found"], "language": ["en"], "license": ["mit"], "multilinguality": ["monolingual"], "size_categories": ["10K<n<100K"], "source_datasets": ["original"], "task_categories": ["token-classification"], "task_ids": ["token-classification-other-acronym-identification"], "paperswithcode_id": "acronym-identification", "pretty_name": "Acronym Identification Dataset", "train-eval-index": [{"col_mapping": {"labels": "tags", "tokens": "tokens"}, "config": "default", "splits": {"eval_split": "test"}, "task": "token-classification", "task_id": "entity_extraction"}]}
2022-11-26T23:32:29+00:00
[]
[ "en" ]
TAGS #task_categories-token-classification #annotations_creators-expert-generated #language_creators-found #multilinguality-monolingual #size_categories-10K<n<100K #source_datasets-original #language-English #license-mit #region-us
# Dataset Card for [Dataset Name] ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: - Repository: - Paper: - Leaderboard: - Point of Contact: ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions Thanks to @github-username for adding this dataset.
[ "# Dataset Card for [Dataset Name]", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @github-username for adding this dataset." ]
[ "TAGS\n#task_categories-token-classification #annotations_creators-expert-generated #language_creators-found #multilinguality-monolingual #size_categories-10K<n<100K #source_datasets-original #language-English #license-mit #region-us \n", "# Dataset Card for [Dataset Name]", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @github-username for adding this dataset." ]