| column | stat | min | max |
|---|---|---|---|
| sha | string length | 40 | 40 |
| text | string length | 1 | 13.4M |
| id | string length | 2 | 117 |
| tags | sequence length | 1 | 7.91k |
| created_at | string length | 25 | 25 |
| metadata | string length | 2 | 875k |
| last_modified | string length | 25 | 25 |
| arxiv | sequence length | 0 | 25 |
| languages | sequence length | 0 | 7.91k |
| tags_str | string length | 17 | 159k |
| text_str | string length | 1 | 447k |
| text_lists | sequence length | 0 | 352 |
| processed_texts | sequence length | 1 | 353 |
4483d3730d71ab1f7700b2e80b97d31e75997d3b
# Dataset Card for "eclassQuery" This Dataset consists of paraphrases of ECLASS-standard pump-properties. It can be used to evaluate models on the task of matching these paraphrases to the actual ECLASS-standard pump-properties based on their semantics.
gart-labor/eclassQuery
[ "task_categories:sentence-similarity", "size_categories:1K<n<10K", "language:en", "doi:10.57967/hf/0409", "region:us" ]
2023-01-07T12:38:27+00:00
{"language": ["en"], "size_categories": ["1K<n<10K"], "task_categories": ["sentence-similarity"], "dataset_info": {"features": [{"name": "did", "dtype": "int64"}, {"name": "query", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "duplicate_id", "dtype": "int64"}, {"name": "metalabel", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 147176, "num_examples": 1040}, {"name": "eval", "num_bytes": 100846, "num_examples": 671}], "download_size": 113268, "dataset_size": 248022}}
2023-01-07T12:42:40+00:00
[]
[ "en" ]
TAGS #task_categories-sentence-similarity #size_categories-1K<n<10K #language-English #doi-10.57967/hf/0409 #region-us
# Dataset Card for "eclassQuery" This Dataset consists of paraphrases of ECLASS-standard pump-properties. It can be used to evaluate models on the task of matching these paraphrases to the actual ECLASS-standard pump-properties based on their semantics.
[ "# Dataset Card for \"eclassQuery\"\n\nThis Dataset consists of paraphrases of ECLASS-standard pump-properties. It can be used to evaluate models on the task of matching these paraphrases to the actual ECLASS-standard pump-properties based on their semantics." ]
[ "TAGS\n#task_categories-sentence-similarity #size_categories-1K<n<10K #language-English #doi-10.57967/hf/0409 #region-us \n", "# Dataset Card for \"eclassQuery\"\n\nThis Dataset consists of paraphrases of ECLASS-standard pump-properties. It can be used to evaluate models on the task of matching these paraphrases to the actual ECLASS-standard pump-properties based on their semantics." ]
d15b89deb2186aaaae788ae9efe261daebd28839
# Dataset Card for vada-sambhar ## Dataset Description The dataset consists of images of my favorite South Indian dish, Vada Sambhar. ### Dataset Curators The data has been downloaded from Google Images. ### Licensing Information The vada-sambhar dataset version 1.0.0 is released under the Apache-2.0 License.
Ashish08/vada-sambhar
[ "size_categories:n<1K", "source_datasets:google", "language:en", "license:apache-2.0", "images ", "food", "vada sambhar", "dreambooth-hackathon", "region:us" ]
2023-01-07T12:51:40+00:00
{"language": ["en"], "license": "apache-2.0", "size_categories": ["n<1K"], "source_datasets": ["google"], "pretty_name": "vada sambhar", "tags": ["images ", "food", "vada sambhar", "dreambooth-hackathon"]}
2023-01-07T12:59:47+00:00
[]
[ "en" ]
TAGS #size_categories-n<1K #source_datasets-google #language-English #license-apache-2.0 #images #food #vada sambhar #dreambooth-hackathon #region-us
# Dataset Card for vada-sambhar ## Dataset Description The dataset consists of images of my favorite South Indian dish, Vada Sambhar. ### Dataset Curators The data has been downloaded from Google Images. ### Licensing Information The vada-sambhar dataset version 1.0.0 is released under the Apache-2.0 License.
[ "# Dataset Card for vada-sambhar", "## Dataset Description\n\n The dataset contains of images of my favorite south indian dish - Vada Sambhar.", "### Dataset Curators\n\nThe data has been downloaded from Google images.", "### Licensing Information\n\nThe vada-sambhar dataset version 1.0.0 is released under the Apache-2.0 License." ]
[ "TAGS\n#size_categories-n<1K #source_datasets-google #language-English #license-apache-2.0 #images #food #vada sambhar #dreambooth-hackathon #region-us \n", "# Dataset Card for vada-sambhar", "## Dataset Description\n\n The dataset contains of images of my favorite south indian dish - Vada Sambhar.", "### Dataset Curators\n\nThe data has been downloaded from Google images.", "### Licensing Information\n\nThe vada-sambhar dataset version 1.0.0 is released under the Apache-2.0 License." ]
37ded182545688883979ac19aa4175cf71f9be85
<b>Dataset Description</b>:- MIS Farm Pond Change Detection Dataset consists of a total of 694 images of size 1024 x 768 pixels at zoom level 18 with a very high resolution up to 1 meter) were collected from Google Earth images. The region of Indian state of Maharashtra was chosen for the dataset. The villages collected have timestamps in months of Jan-April and the minimum year difference is 2 years and the maximum year difference is 9 years, oldest being 2007 and latest being 2021. The types of farm ponds being covered in the dataset are Wet Farm Pond - Lined, Wet Farm Pond - Unlined, Dry Farm Pond - Lined, Dry Farm Pond - Unlined. The change classes are mainly - Farm Pond Constructed, Farm Pond Demolished, Farm Pond Dried and Farm Pond Wetted. Most of the changes are from the farm pond constructed class showing that there is an increase in farm pond construction across villages in Maharashtra in past 8-9 years. <b>T0.zip</b> : Consists of images of time T0 i.e. initial image <br> <b>T1.zip</b> : Consists of images of time T1 i.e. changed image <br> <b>task_1_masks.zip</b> : Consists of binary masks of task_1 i.e. Farm Pond Constructed and Farm Pond Demolished <br> <b>task_2_masks.zip</b> : Consists of binary masks of task_2 i.e. Farm Pond Dried and Farm Pond Wetted <br> <b>task_3_masks.zip</b> : Consists of binary masks of task_3 i.e. All 4 classes combined: Farm Pond Constructed, Farm Pond Demolished, Farm Pond Dried and Farm Pond Wetted <br> <b>multi_class_masks.zip(new)</b>: Consists of indexed masks for multi class change detection. Each mask consists of pixels with values as an integer in the range 0-4, 0 - Background, 1 - Farm Pond Constructed, 2 - Farm Pond Demolished, 3 - Farm Pond Dried and 4 - Farm Pond Wetted <br> <b>cd_dataset_train.txt</b> : Contains file_names of train set to be taken from T0, T1 and masks of one of the tasks(task_1, task_2, task_3) <br> <b>cd_dataset_test.txt</b> : Contains file_names of test set to be taken from T0, T1 and masks of one of the tasks(task_1, task_2, task_3) <br> <b>object_annotations_train_coco.json</b> : Contains positive images (having annotations) taken from both T0 and T1 in coco format to be used for training - Total 499 <br> <b>object_annotations_test_coco.json</b> : Contains positive images (having annotations) taken from both T0 and T1 in coco format to be used for testing - Total 92 <br>
ctundia/FPCD
[ "license:cc-by-sa-4.0", "region:us" ]
2023-01-07T13:16:22+00:00
{"license": "cc-by-sa-4.0"}
2023-06-20T15:55:24+00:00
[]
[]
TAGS #license-cc-by-sa-4.0 #region-us
<b>Dataset Description</b>:- MIS Farm Pond Change Detection Dataset consists of a total of 694 images of size 1024 x 768 pixels at zoom level 18 with a very high resolution up to 1 meter) were collected from Google Earth images. The region of Indian state of Maharashtra was chosen for the dataset. The villages collected have timestamps in months of Jan-April and the minimum year difference is 2 years and the maximum year difference is 9 years, oldest being 2007 and latest being 2021. The types of farm ponds being covered in the dataset are Wet Farm Pond - Lined, Wet Farm Pond - Unlined, Dry Farm Pond - Lined, Dry Farm Pond - Unlined. The change classes are mainly - Farm Pond Constructed, Farm Pond Demolished, Farm Pond Dried and Farm Pond Wetted. Most of the changes are from the farm pond constructed class showing that there is an increase in farm pond construction across villages in Maharashtra in past 8-9 years. <b>URL</b> : Consists of images of time T0 i.e. initial image <br> <b>URL</b> : Consists of images of time T1 i.e. changed image <br> <b>task_1_masks.zip</b> : Consists of binary masks of task_1 i.e. Farm Pond Constructed and Farm Pond Demolished <br> <b>task_2_masks.zip</b> : Consists of binary masks of task_2 i.e. Farm Pond Dried and Farm Pond Wetted <br> <b>task_3_masks.zip</b> : Consists of binary masks of task_3 i.e. All 4 classes combined: Farm Pond Constructed, Farm Pond Demolished, Farm Pond Dried and Farm Pond Wetted <br> <b>multi_class_masks.zip(new)</b>: Consists of indexed masks for multi class change detection. Each mask consists of pixels with values as an integer in the range 0-4, 0 - Background, 1 - Farm Pond Constructed, 2 - Farm Pond Demolished, 3 - Farm Pond Dried and 4 - Farm Pond Wetted <br> <b>cd_dataset_train.txt</b> : Contains file_names of train set to be taken from T0, T1 and masks of one of the tasks(task_1, task_2, task_3) <br> <b>cd_dataset_test.txt</b> : Contains file_names of test set to be taken from T0, T1 and masks of one of the tasks(task_1, task_2, task_3) <br> <b>object_annotations_train_coco.json</b> : Contains positive images (having annotations) taken from both T0 and T1 in coco format to be used for training - Total 499 <br> <b>object_annotations_test_coco.json</b> : Contains positive images (having annotations) taken from both T0 and T1 in coco format to be used for testing - Total 92 <br>
[]
[ "TAGS\n#license-cc-by-sa-4.0 #region-us \n" ]
50c1109fe617f75a7c0b67e696a99cb1599ea91e
### FrenchHateSpeechSuperset This dataset is a superset of multiple datasets including hateful, harassing, sexist, racist, and otherwise abusive messages from various platforms. Included datasets: - MLMA dataset - CAA dataset - FTR dataset - "An Annotated Corpus for Sexism Detection in French Tweets" dataset - UC-Berkeley-Measuring-Hate-Speech dataset (translated from English*) #### References ``` @inproceedings{chiril2020annotated, title={An Annotated Corpus for Sexism Detection in French Tweets}, author={Chiril, Patricia and Moriceau, V{\'e}ronique and Benamara, Farah and Mari, Alda and Origgi, Gloria and Coulomb-Gully, Marl{\`e}ne}, booktitle={Proceedings of The 12th Language Resources and Evaluation Conference}, pages={1397--1403}, year={2020} } ``` ``` @inproceedings{ousidhoum-etal-multilingual-hate-speech-2019, title = "Multilingual and Multi-Aspect Hate Speech Analysis", author = "Ousidhoum, Nedjma and Lin, Zizheng and Zhang, Hongming and Song, Yangqiu and Yeung, Dit-Yan", booktitle = "Proceedings of EMNLP", year = "2019", publisher = "Association for Computational Linguistics", } ``` ``` Vanetik, N.; Mimoun, E. Detection of Racist Language in French Tweets. Information 2022, 13, 318. https://doi.org/10.3390/info13070318 ``` ``` @article{kennedy2020constructing, title={Constructing interval variables via faceted Rasch measurement and multitask deep learning: a hate speech application}, author={Kennedy, Chris J and Bacon, Geoff and Sahn, Alexander and von Vacano, Claudia}, journal={arXiv preprint arXiv:2009.10277}, year={2020} } ``` ``` Anaïs Ollagnier, Elena Cabrio, Serena Villata, Catherine Blaya. CyberAgressionAdo-v1: a Dataset of Annotated Online Aggressions in French Collected through a Role-playing Game. Language Resources and Evaluation Conference, Jun 2022, Marseille, France. ⟨hal-03765860⟩ ``` ### Translation French datasets for hate speech are quite rare. To augment the current dataset, messages from other languages (English only for now) have been integrated. To integrate datasets in other languages, MT models were used and manually selected for each dataset. - UC-Berkeley-Measuring-Hate-Speech dataset : Abelll/marian-finetuned-kde4-en-to-fr ### Language verification Since MT models are not perfect, some messages are not entirely translated or not translated at all. To check for obvious errors in the pipeline, a general language detection model is used to prune non-French texts. Language detection model : papluca/xlm-roberta-base-language-detection ### Annotation Since the "hate speech" dimension is highly subjective and the datasets come with different annotation types, a common labeling strategy is required. Each sample is annotated with "0" if it is a negative sample and "1" if it is a positive sample. ### Filtering rules : - FTR dataset : [wip] - MLMA dataset : [wip] - CAA dataset : [wip] - "Annotated Corpus" dataset : [wip] - UC-Berkeley Measuring Hate Speech dataset : average hate_speech_score > 0 -> 1
Poulpidot/FrenchHateSpeechSuperset
[ "license:unknown", "doi:10.57967/hf/0284", "region:us" ]
2023-01-07T13:19:59+00:00
{"license": "unknown"}
2023-02-04T21:17:04+00:00
[]
[]
TAGS #license-unknown #doi-10.57967/hf/0284 #region-us
### FrenchHateSpeechSuperset This dataset is a superset of multiple datasets including hateful, harassing, sexist, racist, and otherwise abusive messages from various platforms. Included datasets: - MLMA dataset - CAA dataset - FTR dataset - "An Annotated Corpus for Sexism Detection in French Tweets" dataset - UC-Berkeley-Measuring-Hate-Speech dataset (translated from English*) #### References ### Translation French datasets for hate speech are quite rare. To augment the current dataset, messages from other languages (English only for now) have been integrated. To integrate datasets in other languages, MT models were used and manually selected for each dataset. - UC-Berkeley-Measuring-Hate-Speech dataset : Abelll/marian-finetuned-kde4-en-to-fr ### Language verification Since MT models are not perfect, some messages are not entirely translated or not translated at all. To check for obvious errors in the pipeline, a general language detection model is used to prune non-French texts. Language detection model : papluca/xlm-roberta-base-language-detection ### Annotation Since the "hate speech" dimension is highly subjective and the datasets come with different annotation types, a common labeling strategy is required. Each sample is annotated with "0" if it is a negative sample and "1" if it is a positive sample. ### Filtering rules : - FTR dataset : [wip] - MLMA dataset : [wip] - CAA dataset : [wip] - "Annotated Corpus" dataset : [wip] - UC-Berkeley Measuring Hate Speech dataset : average hate_speech_score > 0 -> 1
[ "### FrenchHateSpeechSuperset\n\nThis dataset is a superset of multiple datasets including hate speech, harasment, sexist, racist, etc...messages from various platforms.\n\nIncluded datasets :\n\n- MLMA dataset\n- CAA dataset\n- FTR dataset\n- \"An Annotated Corpus for Sexism Detection in French Tweets\" dataset\n- UC-Berkeley-Measuring-Hate-Speech dataset (translated from english*)", "#### References", "### Translation\n\nFrench datasets for hate speech are quite rare. To augment current dataset, messages from other languages (english only for now) have been integrated.\nTo integrate other languages dataset, MT model were used and manually selected for each dataset.\n\n- UC-Berkeley-Measuring-Hate-Speech dataset : Abelll/marian-finetuned-kde4-en-to-fr", "### Language verification\n\nSince MT models are not perfect, some messages are not entirely translated or not translated at all.\nTo check for obvious errors in pipeline, a general language detection model is used to prune non french texts.\n\nLanguage detection model : papluca/xlm-roberta-base-language-detection", "### Annotation\n\nSince \"hate speech\" dimension is highly subjective, and datasets comes with different annotations types, a conventional labeling stategy is required.\n\nEach sample is annotated with \"0\" if negative sample and \"1\" if positive sample.", "### Filtering rules :\n\n- FTR dataset : [wip]\n- MLMA dataset : [wip]\n- CAA dataset : [wip]\n- \"Annotated Corpus\" dataset : [wip]\n- UC-Berkeley Measuring Hate Speech dataset : average hate_speech_score > 0 -> 1" ]
[ "TAGS\n#license-unknown #doi-10.57967/hf/0284 #region-us \n", "### FrenchHateSpeechSuperset\n\nThis dataset is a superset of multiple datasets including hate speech, harasment, sexist, racist, etc...messages from various platforms.\n\nIncluded datasets :\n\n- MLMA dataset\n- CAA dataset\n- FTR dataset\n- \"An Annotated Corpus for Sexism Detection in French Tweets\" dataset\n- UC-Berkeley-Measuring-Hate-Speech dataset (translated from english*)", "#### References", "### Translation\n\nFrench datasets for hate speech are quite rare. To augment current dataset, messages from other languages (english only for now) have been integrated.\nTo integrate other languages dataset, MT model were used and manually selected for each dataset.\n\n- UC-Berkeley-Measuring-Hate-Speech dataset : Abelll/marian-finetuned-kde4-en-to-fr", "### Language verification\n\nSince MT models are not perfect, some messages are not entirely translated or not translated at all.\nTo check for obvious errors in pipeline, a general language detection model is used to prune non french texts.\n\nLanguage detection model : papluca/xlm-roberta-base-language-detection", "### Annotation\n\nSince \"hate speech\" dimension is highly subjective, and datasets comes with different annotations types, a conventional labeling stategy is required.\n\nEach sample is annotated with \"0\" if negative sample and \"1\" if positive sample.", "### Filtering rules :\n\n- FTR dataset : [wip]\n- MLMA dataset : [wip]\n- CAA dataset : [wip]\n- \"Annotated Corpus\" dataset : [wip]\n- UC-Berkeley Measuring Hate Speech dataset : average hate_speech_score > 0 -> 1" ]
114709884276379a01e0722d71cd590c8ad3a05d
# Dataset Card for "ArASL_Database_Grayscale" ## Dataset Description - **Homepage:** https://data.mendeley.com/datasets/y7pckrw6z2/1 - **Paper:** [ArASL: Arabic Alphabets Sign Language Dataset](https://www.sciencedirect.com/science/article/pii/S2352340919301283) ### Dataset Summary A new dataset consists of 54,049 images of ArSL alphabets performed by more than 40 people for 32 standard Arabic signs and alphabets. The number of images per class differs from one class to another. Sample image of all Arabic Language Signs is also attached. The CSV file contains the Label of each corresponding Arabic Sign Language Image based on the image file name. ### Supported Tasks and Leaderboards - `image-classification`: The goal of this task is to classify a given image into one of 32 classes. ### Languages Arabic ### Data Instances A sample from the training set is provided below: ``` { 'img': <PIL.PngImagePlugin.PngImageFile image mode=RGB size=32x32 at 0x201FA6EE748>, 'label': 0 } ``` ### Citation Information ``` @article{LATIF2019103777, title = {ArASL: Arabic Alphabets Sign Language Dataset}, journal = {Data in Brief}, volume = {23}, pages = {103777}, year = {2019}, issn = {2352-3409}, doi = {https://doi.org/10.1016/j.dib.2019.103777}, url = {https://www.sciencedirect.com/science/article/pii/S2352340919301283}, author = {Ghazanfar Latif and Nazeeruddin Mohammad and Jaafar Alghazo and Roaa AlKhalaf and Rawan AlKhalaf}, abstract = {A fully-labelled dataset of Arabic Sign Language (ArSL) images is developed for research related to sign language recognition. The dataset will provide researcher the opportunity to investigate and develop automated systems for the deaf and hard of hearing people using machine learning, computer vision and deep learning algorithms. The contribution is a large fully-labelled dataset for Arabic Sign Language (ArSL) which is made publically available and free for all researchers. The dataset which is named ArSL2018 consists of 54,049 images for the 32 Arabic sign language sign and alphabets collected from 40 participants in different age groups. Different dimensions and different variations were present in images which can be cleared using pre-processing techniques to remove noise, center the image, etc. The dataset is made available publicly at https://data.mendeley.com/datasets/y7pckrw6z2/1.} } ``` ### Contributions Thanks to [MOHAMMAD ALBARHAM](https://github.com/PAIN-BARHAM) for adding this dataset to huggingface hub.
pain/ArASL_Database_Grayscale
[ "task_categories:image-classification", "language:ar", "license:cc-by-4.0", "image_classification", "Arabic_Sign_Language", "region:us" ]
2023-01-07T14:04:43+00:00
{"language": ["ar"], "license": "cc-by-4.0", "task_categories": ["image-classification"], "splits": [{"name": "train", "num_bytes": 41355564.009, "num_examples": 54049}], "download_size": 30479019, "dataset_size": 41355564.009, "tags": ["image_classification", "Arabic_Sign_Language"], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "ain", "1": "al", "2": "aleff", "3": "bb", "4": "dal", "5": "dha", "6": "dhad", "7": "fa", "8": "gaaf", "9": "ghain", "10": "ha", "11": "haa", "12": "jeem", "13": "kaaf", "14": "khaa", "15": "la", "16": "laam", "17": "meem", "18": "nun", "19": "ra", "20": "saad", "21": "seen", "22": "sheen", "23": "ta", "24": "taa", "25": "thaa", "26": "thal", "27": "toot", "28": "waw", "29": "ya", "30": "yaa", "31": "zay"}}}}]}}
2023-01-07T14:44:35+00:00
[]
[ "ar" ]
TAGS #task_categories-image-classification #language-Arabic #license-cc-by-4.0 #image_classification #Arabic_Sign_Language #region-us
# Dataset Card for "ArASL_Database_Grayscale" ## Dataset Description - Homepage: URL - Paper: ArASL: Arabic Alphabets Sign Language Dataset ### Dataset Summary A new dataset consists of 54,049 images of ArSL alphabets performed by more than 40 people for 32 standard Arabic signs and alphabets. The number of images per class differs from one class to another. Sample image of all Arabic Language Signs is also attached. The CSV file contains the Label of each corresponding Arabic Sign Language Image based on the image file name. ### Supported Tasks and Leaderboards - 'image-classification': The goal of this task is to classify a given image into one of 32 classes. ### Languages Arabic ### Data Instances A sample from the training set is provided below: ### Contributions Thanks to MOHAMMAD ALBARHAM for adding this dataset to huggingface hub.
[ "# Dataset Card for \"ArASL_Database_Grayscale\"", "## Dataset Description\n\n- Homepage: URL\n- Paper: ArASL: Arabic Alphabets Sign Language Dataset", "### Dataset Summary\n\nA new dataset consists of 54,049 images of ArSL alphabets performed by more than 40 people for 32 standard Arabic signs and alphabets.\nThe number of images per class differs from one class to another. Sample image of all Arabic Language Signs is also attached. The CSV file contains the Label of each corresponding Arabic Sign Language Image based on the image file name.", "### Supported Tasks and Leaderboards\n\n- 'image-classification': The goal of this task is to classify a given image into one of 32 classes.", "### Languages\n\nArabic", "### Data Instances\n\nA sample from the training set is provided below:", "### Contributions\n\nThanks to MOHAMMAD ALBARHAM for adding this dataset to huggingface hub." ]
[ "TAGS\n#task_categories-image-classification #language-Arabic #license-cc-by-4.0 #image_classification #Arabic_Sign_Language #region-us \n", "# Dataset Card for \"ArASL_Database_Grayscale\"", "## Dataset Description\n\n- Homepage: URL\n- Paper: ArASL: Arabic Alphabets Sign Language Dataset", "### Dataset Summary\n\nA new dataset consists of 54,049 images of ArSL alphabets performed by more than 40 people for 32 standard Arabic signs and alphabets.\nThe number of images per class differs from one class to another. Sample image of all Arabic Language Signs is also attached. The CSV file contains the Label of each corresponding Arabic Sign Language Image based on the image file name.", "### Supported Tasks and Leaderboards\n\n- 'image-classification': The goal of this task is to classify a given image into one of 32 classes.", "### Languages\n\nArabic", "### Data Instances\n\nA sample from the training set is provided below:", "### Contributions\n\nThanks to MOHAMMAD ALBARHAM for adding this dataset to huggingface hub." ]
c04ae93c910764acae01047490b4e5aa88e7fb03
# Dataset Card for old-trafford ## Dataset Description The dataset contains images of Old Trafford - a football stadium that belongs to Manchester United Football Club. ### Dataset Curators The data has been downloaded from Google Images. ### Licensing Information The old-trafford dataset version 1.0.0 is released under the creativeml-openrail-m License.
Ashish08/old-trafford
[ "size_categories:n<1K", "source_datasets:original", "language:en", "license:creativeml-openrail-m", "images", "football stadium", "Manchester United", "Old Trafford", "dreambooth-hackathon", "region:us" ]
2023-01-07T14:49:23+00:00
{"language": ["en"], "license": "creativeml-openrail-m", "size_categories": ["n<1K"], "source_datasets": ["original"], "pretty_name": "Old Trafford", "tags": ["images", "football stadium", "Manchester United", "Old Trafford", "dreambooth-hackathon"]}
2023-01-07T14:53:27+00:00
[]
[ "en" ]
TAGS #size_categories-n<1K #source_datasets-original #language-English #license-creativeml-openrail-m #images #football stadium #Manchester United #Old Trafford #dreambooth-hackathon #region-us
# Dataset Card for old-trafford ## Dataset Description The dataset contains images of Old Trafford - a football stadium that belongs to Manchester United Football Club. ### Dataset Curators The data has been downloaded from Google Images. ### Licensing Information The old-trafford dataset version 1.0.0 is released under the creativeml-openrail-m License.
[ "# Dataset Card for old-trafford", "## Dataset Description\n\n The dataset contains images of Old Trafford - a football stadium that belongs to Manchester United Football Club.", "### Dataset Curators\n\nThe data has been downloaded from Google Images.", "### Licensing Information\n\nThe old-trafford dataset version 1.0.0 is released under the creativeml-openrail-m License." ]
[ "TAGS\n#size_categories-n<1K #source_datasets-original #language-English #license-creativeml-openrail-m #images #football stadium #Manchester United #Old Trafford #dreambooth-hackathon #region-us \n", "# Dataset Card for old-trafford", "## Dataset Description\n\n The dataset contains images of Old Trafford - a football stadium that belongs to Manchester United Football Club.", "### Dataset Curators\n\nThe data has been downloaded from Google Images.", "### Licensing Information\n\nThe old-trafford dataset version 1.0.0 is released under the creativeml-openrail-m License." ]
569c0268b7bf4bea85c83f1718569ca035928682
# Dataset Card for "untitled_goose_game" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Arch4ngel/untitled_goose_game
[ "region:us" ]
2023-01-07T19:53:41+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 1487961.0, "num_examples": 15}], "download_size": 1461841, "dataset_size": 1487961.0}}
2023-01-07T20:00:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for "untitled_goose_game" More Information needed
[ "# Dataset Card for \"untitled_goose_game\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"untitled_goose_game\"\n\nMore Information needed" ]
fe779e44d3b8b228b23945e787788d48a22e2414
Dataset which can be loaded using this: https://github.com/mathyouf/ranked-aesthetic-scorer/blob/main/data/processURS.py
MathYouF/reddit-urs-sfw-nature
[ "license:openrail", "region:us" ]
2023-01-07T20:55:13+00:00
{"license": "openrail"}
2023-01-07T21:43:42+00:00
[]
[]
TAGS #license-openrail #region-us
Dataset which can be loaded using this: URL
[]
[ "TAGS\n#license-openrail #region-us \n" ]
d0adbb57d80bc283a34cc527bad190ff10fceb5c
# Dataset Card for "alphafold_issues" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tux/alphafold_issues
[ "region:us" ]
2023-01-07T21:28:07+00:00
{"dataset_info": {"features": [{"name": "url", "dtype": "string"}, {"name": "repository_url", "dtype": "string"}, {"name": "labels_url", "dtype": "string"}, {"name": "comments_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "node_id", "dtype": "string"}, {"name": "number", "dtype": "int64"}, {"name": "title", "dtype": "string"}, {"name": "user", "struct": [{"name": "avatar_url", "dtype": "string"}, {"name": "events_url", "dtype": "string"}, {"name": "followers_url", "dtype": "string"}, {"name": "following_url", "dtype": "string"}, {"name": "gists_url", "dtype": "string"}, {"name": "gravatar_id", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "login", "dtype": "string"}, {"name": "node_id", "dtype": "string"}, {"name": "organizations_url", "dtype": "string"}, {"name": "received_events_url", "dtype": "string"}, {"name": "repos_url", "dtype": "string"}, {"name": "site_admin", "dtype": "bool"}, {"name": "starred_url", "dtype": "string"}, {"name": "subscriptions_url", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "url", "dtype": "string"}]}, {"name": "labels", "list": [{"name": "color", "dtype": "string"}, {"name": "default", "dtype": "bool"}, {"name": "description", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "name", "dtype": "string"}, {"name": "node_id", "dtype": "string"}, {"name": "url", "dtype": "string"}]}, {"name": "state", "dtype": "string"}, {"name": "locked", "dtype": "bool"}, {"name": "assignee", "dtype": "float64"}, {"name": "assignees", "sequence": "null"}, {"name": "milestone", "dtype": "float64"}, {"name": "comments", "sequence": "string"}, {"name": "created_at", "dtype": "timestamp[ns, tz=UTC]"}, {"name": "updated_at", "dtype": "timestamp[ns, tz=UTC]"}, {"name": "closed_at", "dtype": "timestamp[ns, tz=UTC]"}, {"name": "author_association", "dtype": "string"}, {"name": "active_lock_reason", "dtype": "float64"}, {"name": "body", "dtype": "string"}, {"name": "reactions", "struct": [{"name": "+1", "dtype": "int64"}, {"name": "-1", "dtype": "int64"}, {"name": "confused", "dtype": "int64"}, {"name": "eyes", "dtype": "int64"}, {"name": "heart", "dtype": "int64"}, {"name": "hooray", "dtype": "int64"}, {"name": "laugh", "dtype": "int64"}, {"name": "rocket", "dtype": "int64"}, {"name": "total_count", "dtype": "int64"}, {"name": "url", "dtype": "string"}]}, {"name": "timeline_url", "dtype": "string"}, {"name": "performed_via_github_app", "dtype": "float64"}, {"name": "state_reason", "dtype": "string"}, {"name": "draft", "dtype": "float64"}, {"name": "pull_request", "struct": [{"name": "diff_url", "dtype": "string"}, {"name": "html_url", "dtype": "string"}, {"name": "merged_at", "dtype": "null"}, {"name": "patch_url", "dtype": "string"}, {"name": "url", "dtype": "string"}]}, {"name": "is_pull_request", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 838906, "num_examples": 200}], "download_size": 195220, "dataset_size": 838906}}
2023-01-07T21:28:19+00:00
[]
[]
TAGS #region-us
# Dataset Card for "alphafold_issues" More Information needed
[ "# Dataset Card for \"alphafold_issues\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"alphafold_issues\"\n\nMore Information needed" ]
ef30f6a046230c843d79822b928267efd9453d5b
# Dataset Card for IMDb Movie Reviews ## Dataset Description - **Homepage:** [http://ai.stanford.edu/~amaas/data/sentiment/](http://ai.stanford.edu/~amaas/data/sentiment/) - **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Paper:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Size of downloaded dataset files:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Size of the generated dataset:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Total amount of disk used:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Dataset Summary This is a custom train/test/validation split of the IMDb Large Movie Review Dataset available from [http://ai.stanford.edu/~amaas/data/sentiment/](http://ai.stanford.edu/~amaas/data/sentiment/). ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure #### IMDb_movie_reviews An example of 'train': ``` { "text": "Beautifully photographed and ably acted, generally, but the writing is very slipshod. There are scenes of such unbelievability that there is no joy in the watching. The fact that the young lover has a twin brother, for instance, is so contrived that I groaned out loud. And the "emotion-light bulb connection" seems gimmicky, too.<br /><br />I don\'t know, though. If you have a few glasses of wine and feel like relaxing with something pretty to look at with a few flaccid comedic scenes, this is a pretty good movie. No major effort on the part of the viewer required. But Italian film, especially Italian comedy, is usually much, much better than this." "label": 0, } ``` ### Data Fields The data fields are the same among all splits. #### IMDb_movie_reviews - `text`: a `string` feature. - `label`: a classification label, with values `neg` (0), `pos` (1). ### Data Splits | name | train | validation | test | |------------------|------:|-----------:|------:| |IMDb_movie_reviews| 36000 | 4000 | 10000 | ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information ``` @InProceedings{maas-EtAl:2011:ACL-HLT2011, author = {Maas, Andrew L. and Daly, Raymond E. and Pham, Peter T. and Huang, Dan and Ng, Andrew Y. 
and Potts, Christopher}, title = {Learning Word Vectors for Sentiment Analysis}, booktitle = {Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies}, month = {June}, year = {2011}, address = {Portland, Oregon, USA}, publisher = {Association for Computational Linguistics}, pages = {142--150}, url = {http://www.aclweb.org/anthology/P11-1015} } ``` ### Contributions [More Information Needed]
jahjinx/IMDb_movie_reviews
[ "task_categories:text-classification", "task_ids:sentiment-classification", "multilinguality:monolingual", "size_categories:10K<n<100K", "language:en", "license:other", "region:us" ]
2023-01-07T22:36:33+00:00
{"language": ["en"], "license": ["other"], "multilinguality": ["monolingual"], "size_categories": ["10K<n<100K"], "task_categories": ["text-classification"], "task_ids": ["sentiment-classification"], "pretty_name": "IMDb"}
2023-01-08T15:47:19+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #task_ids-sentiment-classification #multilinguality-monolingual #size_categories-10K<n<100K #language-English #license-other #region-us
Dataset Card for IMDb Movie Reviews =================================== Dataset Description ------------------- * Homepage: URL * Repository: * Paper: * Point of Contact: * Size of downloaded dataset files: * Size of the generated dataset: * Total amount of disk used: ### Dataset Summary This is a custom train/test/validation split of the IMDb Large Movie Review Dataset available from URL ### Supported Tasks and Leaderboards ### Languages Dataset Structure ----------------- #### IMDb\_movie\_reviews An example of 'train': ### Data Fields The data fields are the same among all splits. #### IMDb\_movie\_reviews * 'text': a 'string' feature. * 'label': a classification label, with values 'neg' (0), 'pos' (1). ### Data Splits Dataset Creation ---------------- ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information Considerations for Using the Data --------------------------------- ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations Additional Information ---------------------- ### Dataset Curators ### Licensing Information ### Contributions
[ "### Dataset Summary\n\n\nThis is a custom train/test/validation split of the IMDb Large Movie Review Dataset available from URL", "### Supported Tasks and Leaderboards", "### Languages\n\n\nDataset Structure\n-----------------", "#### IMDb\\_movie\\_reviews\n\n\nAn example of 'train':", "### Data Fields\n\n\nThe data fields are the same among all splits.", "#### IMDb\\_movie\\_reviews\n\n\n* 'text': a 'string' feature.\n* 'label': a classification label, with values 'neg' (0), 'pos' (1).", "### Data Splits\n\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#task_categories-text-classification #task_ids-sentiment-classification #multilinguality-monolingual #size_categories-10K<n<100K #language-English #license-other #region-us \n", "### Dataset Summary\n\n\nThis is a custom train/test/validation split of the IMDb Large Movie Review Dataset available from URL", "### Supported Tasks and Leaderboards", "### Languages\n\n\nDataset Structure\n-----------------", "#### IMDb\\_movie\\_reviews\n\n\nAn example of 'train':", "### Data Fields\n\n\nThe data fields are the same among all splits.", "#### IMDb\\_movie\\_reviews\n\n\n* 'text': a 'string' feature.\n* 'label': a classification label, with values 'neg' (0), 'pos' (1).", "### Data Splits\n\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
a2f7f35c36a4d551625a0607c7759ae7916fc6be
# Dataset Card for Superheroes ## Dataset Description 1400+ superheroes' history and powers descriptions for applying text mining and NLP [Original source](https://www.kaggle.com/datasets/jonathanbesomi/superheroes-nlp-dataset/code?resource=download) ## Context The aim of this dataset is to make text analytics and NLP even more fun. All of us have dreamed of being a superhero and saving the world, yet we are still on Kaggle figuring out how Python works. Then, why not improve our NLP competences by analyzing superheroes' history and powers? The particularity of this dataset is that it contains categorical and numerical features such as overall_score, intelligence_score, creator, alignment, gender, eye_color, but also the text features history_text and powers_text. By combining the two, a lot of interesting insights can be gathered! ## Content We collected all data from superherodb and cooked it for you in a nice and clean tabular format. The dataset contains 1447 different superheroes. Each superhero row has: * overall_score - derived by superherodb from the power stats features. Can you find the relationship? * history_text - History of the superhero (text feature) * powers_text - Description of the superhero's powers (text feature) * intelligence_score, strength_score, speed_score, durability_score, power_score and combat_score (power stats features) * "Origin" (full_name, alter_egos, …) * "Connections" (occupation, base, teams, …) * "Appearance" (gender, type_race, height, weight, eye_color, …) ## Acknowledgements The following [Github repository](https://github.com/jbesomi/texthero/tree/master/dataset/Superheroes%20NLP%20Dataset) contains the code used to scrape this dataset.
jrtec/Superheroes
[ "task_categories:summarization", "size_categories:1K<n<10K", "language:en", "license:cc0-1.0", "superheroes", "heroes", "anime", "manga", "marvel", "region:us" ]
2023-01-08T01:38:39+00:00
{"language": ["en"], "license": "cc0-1.0", "size_categories": ["1K<n<10K"], "task_categories": ["summarization"], "tags": ["superheroes", "heroes", "anime", "manga", "marvel"]}
2023-01-08T06:18:48+00:00
[]
[ "en" ]
TAGS #task_categories-summarization #size_categories-1K<n<10K #language-English #license-cc0-1.0 #superheroes #heroes #anime #manga #marvel #region-us
# Dataset Card for Superheroes ## Dataset Description 1400+ superheroes' history and powers descriptions for applying text mining and NLP Original source ## Context The aim of this dataset is to make text analytics and NLP even more fun. All of us have dreamed of being a superhero and saving the world, yet we are still on Kaggle figuring out how Python works. Then, why not improve our NLP competences by analyzing superheroes' history and powers? The particularity of this dataset is that it contains categorical and numerical features such as overall_score, intelligence_score, creator, alignment, gender, eye_color, but also the text features history_text and powers_text. By combining the two, a lot of interesting insights can be gathered! ## Content We collected all data from superherodb and cooked it for you in a nice and clean tabular format. The dataset contains 1447 different superheroes. Each superhero row has: * overall_score - derived by superherodb from the power stats features. Can you find the relationship? * history_text - History of the superhero (text feature) * powers_text - Description of the superhero's powers (text feature) * intelligence_score, strength_score, speed_score, durability_score, power_score and combat_score (power stats features) * "Origin" (full_name, alter_egos, …) * "Connections" (occupation, base, teams, …) * "Appearance" (gender, type_race, height, weight, eye_color, …) ## Acknowledgements The following Github repository contains the code used to scrape this dataset.
[ "# Dataset Card for Superheroes", "## Dataset Description\n1400+ Superheroes history and powers description to apply text mining and NLP Original source", "## Context\nThe aim of this dataset is to make text analytics and NLP even funnier. All of us have dreamed to be like a superhero and save the world, yet we are still on Kaggle figuring out how python works. Then, why not improve our NLP competences by analyzing Superheros' history and powers?\n\nThe particularity of this dataset is that it contains categorical and numerical features such as overall_score, intelligence_score, creator, alignment, gender, eye_color but also text features history_text and powers_text. By combining the two, a lot of interesting insights can be gathered!", "## Content\nWe collected all data from superherodb and cooked for you in a nice and clean tabular format.\n\nThe dataset contains 1447 different Superheroes. Each superhero row has:\n\n* overall_score - derivated by superherodb from the power stats features. Can you find the relationship?\n* history_text - History of the Superhero (text features)\n* powers_text - Description of Superheros' powers (text features)\n* intelligence_score, strength_score, speed_score, durability_score, power_score and combat_score. (power stats features)\n* \"Origin\" (full_name, alter_egos, …)\n* \"Connections\" (occupation, base, teams, …)\n* \"Appareance\" (gender, type_race, height, weight, eye_color, …)", "## Acknowledgements\nThe following Github repository contains the code used to scrape this Dataset." ]
[ "TAGS\n#task_categories-summarization #size_categories-1K<n<10K #language-English #license-cc0-1.0 #superheroes #heroes #anime #manga #marvel #region-us \n", "# Dataset Card for Superheroes", "## Dataset Description\n1400+ Superheroes history and powers description to apply text mining and NLP Original source", "## Context\nThe aim of this dataset is to make text analytics and NLP even funnier. All of us have dreamed to be like a superhero and save the world, yet we are still on Kaggle figuring out how python works. Then, why not improve our NLP competences by analyzing Superheros' history and powers?\n\nThe particularity of this dataset is that it contains categorical and numerical features such as overall_score, intelligence_score, creator, alignment, gender, eye_color but also text features history_text and powers_text. By combining the two, a lot of interesting insights can be gathered!", "## Content\nWe collected all data from superherodb and cooked for you in a nice and clean tabular format.\n\nThe dataset contains 1447 different Superheroes. Each superhero row has:\n\n* overall_score - derivated by superherodb from the power stats features. Can you find the relationship?\n* history_text - History of the Superhero (text features)\n* powers_text - Description of Superheros' powers (text features)\n* intelligence_score, strength_score, speed_score, durability_score, power_score and combat_score. (power stats features)\n* \"Origin\" (full_name, alter_egos, …)\n* \"Connections\" (occupation, base, teams, …)\n* \"Appareance\" (gender, type_race, height, weight, eye_color, …)", "## Acknowledgements\nThe following Github repository contains the code used to scrape this Dataset." ]
1a6e9bdf6f54e6d3df5480eb66d11aafe4b354e3
[Needs More Information] # Dataset Card for virus_dna_dataset ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** [Needs More Information] - **Repository:** [Needs More Information] - **Paper:** [Needs More Information] - **Leaderboard:** [Needs More Information] - **Point of Contact:** [Needs More Information] ### Dataset Summary A collection of full virus genome dna, the dataset was built from NCBI data ### Supported Tasks and Leaderboards [Needs More Information] ### Languages DNA ## Dataset Structure ### Data Instances { 'Description' : 'NC_030848.1 Haloarcula californiae icosahedral...', 'dna_sequence' : 'TCATCTC TCTCTCT CTCTCTT GTTCCCG CGCCCGC CCGCCC...', 'sequence_length':'35787', 'organism_id':' AB063393.2'} ### Data Fields { 'Description' : 'this contains the description about the DNA sequence contained in the NCBI dataset', 'dna_sequence' : 'this contains the dna sequence grouped by 7 nucleotides', 'sequence_length':'this contains the length of the dna sequence'} ### Data Splits [Needs More Information] ## Dataset Creation ### Curation Rationale The goal of this dataset was to make it easier to train an LLM on virus DNA ### Source Data #### Initial Data Collection and Normalization DNA sequences were grouped by 7 nucleotides to make it easier to tokenize. Only full genomes were selected #### Who are the source language producers? Viruses :) ### Annotations #### Annotation process NCBI #### Who are the annotators? NCBI ### Personal and Sensitive Information N/A ## Considerations for Using the Data ### Social Impact of Dataset Make it easier to train LLMs on virus DNA ### Discussion of Biases Only virus data that has been sequenced and upload into NCBI is contained in here ### Other Known Limitations [Needs More Information] ## Additional Information ### Dataset Curators Hassan Ahmed ### Licensing Information [Needs More Information] ### Citation Information [Needs More Information]
Hack90/virus_dna_dataset
[ "region:us" ]
2023-01-08T02:21:44+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "sequence", "dtype": "string"}, {"name": "name", "dtype": "string"}, {"name": "description", "dtype": "string"}, {"name": "features", "dtype": "int64"}, {"name": "seq_length", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 6621468623, "num_examples": 2602437}], "download_size": 2319826398, "dataset_size": 6621468623}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2023-08-26T12:07:54+00:00
[]
[]
TAGS #region-us
# Dataset Card for virus_dna_dataset ## Table of Contents - Dataset Description - Dataset Summary - Supported Tasks - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information ## Dataset Description - Homepage: - Repository: - Paper: - Leaderboard: - Point of Contact: ### Dataset Summary A collection of full virus genome DNA; the dataset was built from NCBI data ### Supported Tasks and Leaderboards ### Languages DNA ## Dataset Structure ### Data Instances { 'Description' : 'NC_030848.1 Haloarcula californiae icosahedral...', 'dna_sequence' : 'TCATCTC TCTCTCT CTCTCTT GTTCCCG CGCCCGC CCGCCC...', 'sequence_length':'35787', 'organism_id':' AB063393.2'} ### Data Fields { 'Description' : 'the description of the DNA sequence from the NCBI record', 'dna_sequence' : 'the DNA sequence grouped by 7 nucleotides', 'sequence_length':'the length of the DNA sequence'} ### Data Splits ## Dataset Creation ### Curation Rationale The goal of this dataset was to make it easier to train an LLM on virus DNA ### Source Data #### Initial Data Collection and Normalization DNA sequences were grouped by 7 nucleotides to make them easier to tokenize. Only full genomes were selected. #### Who are the source language producers? Viruses :) ### Annotations #### Annotation process NCBI #### Who are the annotators? NCBI ### Personal and Sensitive Information N/A ## Considerations for Using the Data ### Social Impact of Dataset Makes it easier to train LLMs on virus DNA ### Discussion of Biases Only virus data that has been sequenced and uploaded to NCBI is contained here ### Other Known Limitations ## Additional Information ### Dataset Curators Hassan Ahmed ### Licensing Information
[ "# Dataset Card for virus_dna_dataset", "## Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information", "## Dataset Description\n\n- Homepage: \n- Repository: \n- Paper: \n- Leaderboard: \n- Point of Contact:", "### Dataset Summary\n\nA collection of full virus genome dna, the dataset was built from NCBI data", "### Supported Tasks and Leaderboards", "### Languages\n\nDNA", "## Dataset Structure", "### Data Instances\n\n{ 'Description' : 'NC_030848.1 Haloarcula californiae icosahedral...', 'dna_sequence' : 'TCATCTC TCTCTCT CTCTCTT GTTCCCG CGCCCGC CCGCCC...', \n'sequence_length':'35787', 'organism_id':' AB063393.2'}", "### Data Fields\n\n{ 'Description' : 'this contains the description about the DNA sequence contained in the NCBI dataset', 'dna_sequence' : 'this contains the dna sequence grouped by 7 nucleotides', \n'sequence_length':'this contains the length of the dna sequence'}", "### Data Splits", "## Dataset Creation", "### Curation Rationale\n\nThe goal of this dataset was to make it easier to train an LLM on virus DNA", "### Source Data", "#### Initial Data Collection and Normalization\n\nDNA sequences were grouped by 7 nucleotides to make it easier to tokenize. Only full genomes were selected", "#### Who are the source language producers?\n\nViruses :)", "### Annotations", "#### Annotation process\n\nNCBI", "#### Who are the annotators?\n\nNCBI", "### Personal and Sensitive Information\n\nN/A", "## Considerations for Using the Data", "### Social Impact of Dataset\n\nMake it easier to train LLMs on virus DNA", "### Discussion of Biases\n\nOnly virus data that has been sequenced and upload into NCBI is contained in here", "### Other Known Limitations", "## Additional Information", "### Dataset Curators\n\nHassan Ahmed", "### Licensing Information" ]
[ "TAGS\n#region-us \n", "# Dataset Card for virus_dna_dataset", "## Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information", "## Dataset Description\n\n- Homepage: \n- Repository: \n- Paper: \n- Leaderboard: \n- Point of Contact:", "### Dataset Summary\n\nA collection of full virus genome dna, the dataset was built from NCBI data", "### Supported Tasks and Leaderboards", "### Languages\n\nDNA", "## Dataset Structure", "### Data Instances\n\n{ 'Description' : 'NC_030848.1 Haloarcula californiae icosahedral...', 'dna_sequence' : 'TCATCTC TCTCTCT CTCTCTT GTTCCCG CGCCCGC CCGCCC...', \n'sequence_length':'35787', 'organism_id':' AB063393.2'}", "### Data Fields\n\n{ 'Description' : 'this contains the description about the DNA sequence contained in the NCBI dataset', 'dna_sequence' : 'this contains the dna sequence grouped by 7 nucleotides', \n'sequence_length':'this contains the length of the dna sequence'}", "### Data Splits", "## Dataset Creation", "### Curation Rationale\n\nThe goal of this dataset was to make it easier to train an LLM on virus DNA", "### Source Data", "#### Initial Data Collection and Normalization\n\nDNA sequences were grouped by 7 nucleotides to make it easier to tokenize. Only full genomes were selected", "#### Who are the source language producers?\n\nViruses :)", "### Annotations", "#### Annotation process\n\nNCBI", "#### Who are the annotators?\n\nNCBI", "### Personal and Sensitive Information\n\nN/A", "## Considerations for Using the Data", "### Social Impact of Dataset\n\nMake it easier to train LLMs on virus DNA", "### Discussion of Biases\n\nOnly virus data that has been sequenced and upload into NCBI is contained in here", "### Other Known Limitations", "## Additional Information", "### Dataset Curators\n\nHassan Ahmed", "### Licensing Information" ]
38a258997cb5e6dd9b973534d3f860e76a6936a5
# Dataset Card for "ui_refexp_saved_Jan2023" This is a saved snapshot of the dynamically generated [UI Bert](https://huggingface.co/datasets/ivelin/ui_refexp) dataset. Much faster download time than the dynamic version which pulls and filters large data files from remote sources.
ivelin/ui_refexp_saved
[ "task_categories:image-to-text", "size_categories:10K<n<100K", "language:en", "license:cc-by-4.0", "region:us" ]
2023-01-08T03:10:23+00:00
{"language": ["en"], "license": "cc-by-4.0", "size_categories": ["10K<n<100K"], "task_categories": ["image-to-text"], "pretty_name": "UIBert Referring Expressions Dataset", "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "image_id", "dtype": "string"}, {"name": "image_file_path", "dtype": "string"}, {"name": "prompt", "dtype": "string"}, {"name": "target_bounding_box", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1910805137.216, "num_examples": 15624}, {"name": "validation", "num_bytes": 60403386, "num_examples": 471}, {"name": "test", "num_bytes": 69078983, "num_examples": 565}], "download_size": 1246541216, "dataset_size": 2040287506.216}}
2023-01-08T03:35:06+00:00
[]
[ "en" ]
TAGS #task_categories-image-to-text #size_categories-10K<n<100K #language-English #license-cc-by-4.0 #region-us
# Dataset Card for "ui_refexp_saved_Jan2023" This is a saved snapshot of the dynamically generated UI Bert dataset. Much faster download time than the dynamic version which pulls and filters large data files from remote sources.
[ "# Dataset Card for \"ui_refexp_saved_Jan2023\"\n\nThis is a saved snapshot of the dynamically generated UI Bert dataset. \nMuch faster download time than the dynamic version which pulls and filters large data files from remote sources." ]
[ "TAGS\n#task_categories-image-to-text #size_categories-10K<n<100K #language-English #license-cc-by-4.0 #region-us \n", "# Dataset Card for \"ui_refexp_saved_Jan2023\"\n\nThis is a saved snapshot of the dynamically generated UI Bert dataset. \nMuch faster download time than the dynamic version which pulls and filters large data files from remote sources." ]
a6a7e98320d20544b1d92ac27028496a5e7047cf
# Dataset Card for "bookcorpus_compact_1024_shard5" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
saibo/bookcorpus_compact_1024_shard5_of_10
[ "region:us" ]
2023-01-08T03:28:36+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "concept_with_offset", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 739992156, "num_examples": 61605}], "download_size": 372896291, "dataset_size": 739992156}}
2023-01-08T03:29:05+00:00
[]
[]
TAGS #region-us
# Dataset Card for "bookcorpus_compact_1024_shard5" More Information needed
[ "# Dataset Card for \"bookcorpus_compact_1024_shard5\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"bookcorpus_compact_1024_shard5\"\n\nMore Information needed" ]
7447513fe17ef531e005ea8ccc8f3b60f60324ed
# Dataset Card for "chocolate-captioned-dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Umal-exvc/chocolate-captioned-dataset
[ "region:us" ]
2023-01-08T04:58:42+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 78434533.0, "num_examples": 500}], "download_size": 76921151, "dataset_size": 78434533.0}}
2023-01-08T04:58:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for "chocolate-captioned-dataset" More Information needed
[ "# Dataset Card for \"chocolate-captioned-dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"chocolate-captioned-dataset\"\n\nMore Information needed" ]
7fbf579be37189f452803ff19cc15f2b4e4ef0cf
# Dataset Card for "embedding_dataset_distilbert_base_uncased_ad_subwords" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sanjin7/embedding_dataset_distilbert_base_uncased_ad_subwords
[ "region:us" ]
2023-01-08T07:54:44+00:00
{"dataset_info": {"features": [{"name": "ad_id", "dtype": "int64"}, {"name": "shop_id", "dtype": "int64"}, {"name": "account_id", "dtype": "int64"}, {"name": "mean_embedding", "sequence": "float32"}, {"name": "cls_embedding", "sequence": "float32"}], "splits": [{"name": "test", "num_bytes": 5725152, "num_examples": 927}, {"name": "train", "num_bytes": 43769312, "num_examples": 7087}, {"name": "val", "num_bytes": 7726176, "num_examples": 1251}], "download_size": 69324552, "dataset_size": 57220640}}
2023-01-16T11:12:24+00:00
[]
[]
TAGS #region-us
# Dataset Card for "embedding_dataset_distilbert_base_uncased_ad_subwords" More Information needed
[ "# Dataset Card for \"embedding_dataset_distilbert_base_uncased_ad_subwords\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"embedding_dataset_distilbert_base_uncased_ad_subwords\"\n\nMore Information needed" ]
15104b7bbcaf1e2a8ca0613d2b3e73957a0eb8cc
# Dataset Card for "RedditProject" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
nillo36/RedditProject
[ "region:us" ]
2023-01-08T10:56:19+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "movies", "1": "news", "2": "nfl", "3": "pcmasterrace", "4": "relationship_advice"}}}}], "splits": [{"name": "train", "num_bytes": 567429, "num_examples": 800}, {"name": "validation", "num_bytes": 67565, "num_examples": 100}, {"name": "test", "num_bytes": 89805, "num_examples": 100}], "download_size": 443894, "dataset_size": 724799}}
2023-01-09T17:06:29+00:00
[]
[]
TAGS #region-us
# Dataset Card for "RedditProject" More Information needed
[ "# Dataset Card for \"RedditProject\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"RedditProject\"\n\nMore Information needed" ]
2494a8c69e77c0c8284fe604456054c5975c6490
# Dataset Card for "tagesschau" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
nillo36/tagesschau
[ "region:us" ]
2023-01-08T12:51:21+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "amerika", "1": "asien", "2": "finanzen", "3": "innenpolitik", "4": "sportschau", "5": "unternehmen", "6": "verbraucher"}}}}], "splits": [{"name": "train", "num_bytes": 4400114, "num_examples": 1200}, {"name": "validation", "num_bytes": 555716, "num_examples": 150}, {"name": "test", "num_bytes": 555716, "num_examples": 150}], "download_size": 3412290, "dataset_size": 5511546}}
2023-01-08T12:51:41+00:00
[]
[]
TAGS #region-us
# Dataset Card for "tagesschau" More Information needed
[ "# Dataset Card for \"tagesschau\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"tagesschau\"\n\nMore Information needed" ]
dfd4949be36ebfa7b9b9ec469046c64a2da9a7c9
# What is this dataset? This dataset is a collection of Pull Requests **that contain comments** from the [Accelerate](https://github.com/huggingface/accelerate) repository. It contains the full contextual comments as well as the code suggestions that exist inside a code review.
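A minimal, hedged sketch of how one might pull this collection down for inspection; the card does not document the file format or column names, so the code assumes only that the repository's files are in a format `datasets` can auto-detect (CSV/JSON/Parquet) and prints whatever schema it finds.

```python
# Sketch: load the pull-request/comment collection and discover its schema.
# The column names are not documented above, so nothing beyond a default
# "train" split is assumed here.
from datasets import load_dataset

prs = load_dataset("muellerzr/github-pr-history", split="train")

print(prs.column_names)  # actual fields exposed by the files
print(prs[0])            # one pull-request record, whatever its shape is
```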
muellerzr/github-pr-history
[ "size_categories:n<1K", "language:en", "license:mit", "region:us" ]
2023-01-08T13:34:38+00:00
{"language": ["en"], "license": "mit", "size_categories": ["n<1K"], "pretty_name": "Github Pull Request History"}
2023-01-08T15:29:01+00:00
[]
[ "en" ]
TAGS #size_categories-n<1K #language-English #license-mit #region-us
# What is this dataset? This dataset is a collection of Pull Requests that contain comments from the Accelerate repository. It contains the full contextual comments as well as the code suggestions that exist inside a code review.
[ "# What is this dataset?\n\nThis dataset is a collection of Pull Requests that contain comments from the Accelerate repository.\nIt contains the full contextual comments as well as the code suggestions that exist inside a code review." ]
[ "TAGS\n#size_categories-n<1K #language-English #license-mit #region-us \n", "# What is this dataset?\n\nThis dataset is a collection of Pull Requests that contain comments from the Accelerate repository.\nIt contains the full contextual comments as well as the code suggestions that exist inside a code review." ]
db101520da67e35a276c55402a6b8f543c700d39
# Dataset Card for BrWaC ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [BrWaC homepage](https://www.inf.ufrgs.br/pln/wiki/index.php?title=BrWaC) - **Repository:** [BrWaC repository](https://www.inf.ufrgs.br/pln/wiki/index.php?title=BrWaC) - **Paper:** [The brWaC Corpus: A New Open Resource for Brazilian Portuguese](https://www.aclweb.org/anthology/L18-1686/) - **Point of Contact:** [Jorge A. Wagner Filho](mailto:[email protected]) ### Dataset Summary The BrWaC (Brazilian Portuguese Web as Corpus) is a large corpus constructed following the Wacky framework, which was made public for research purposes. The current corpus version, released in January 2017, is composed by 3.53 million documents, 2.68 billion tokens and 5.79 million types. Please note that this resource is available solely for academic research purposes, and you agreed not to use it for any commercial applications. No need to manually download external sources. ### Supported Tasks and Leaderboards [More Information Needed] ### Languages Portuguese ## Dataset Structure ### Data Instances An example from the BrWaC dataset looks as follows: ``` { "doc_id": "netg-1afc73", "text": { "paragraphs": [ [ "Conteúdo recente" ], [ "ESPUMA MARROM CHAMADA \"NINGUÉM MERECE\"" ], [ "31 de Agosto de 2015, 7:07 , por paulo soavinski - | No one following this article yet." ], [ "Visualizado 202 vezes" ], [ "JORNAL ELETRÔNICO DA ILHA DO MEL" ], [ "Uma espuma marrom escuro tem aparecido com frequência na Praia de Fora.", "Na faixa de areia ela aparece disseminada e não chama muito a atenção.", "No Buraco do Aipo, com muitas pedras, ela aparece concentrada.", "É fácil saber que esta espuma estranha está lá, quando venta.", "Pequenos algodões de espuma começam a flutuar no espaço, pertinho da Praia do Saquinho.", "Quem pode ajudar na coleta deste material, envio a laboratório renomado e pagamento de análises, favor entrar em contato com o site." ] ] }, "title": "ESPUMA MARROM CHAMADA ‟NINGUÉM MERECE‟ - paulo soavinski", "uri": "http://blogoosfero.cc/ilhadomel/pousadasilhadomel.com.br/espuma-marrom-chamada-ninguem-merece" } ``` ### Data Fields - `doc_id`: The document ID - `title`: The document title - `uri`: URI where the document was extracted from - `text`: A list of document paragraphs (with a list of sentences in it as a list of strings) ### Data Splits The data is only split into train set with size of 3530796 samples. 
## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information ``` @inproceedings{wagner2018brwac, title={The brwac corpus: A new open resource for brazilian portuguese}, author={Wagner Filho, Jorge A and Wilkens, Rodrigo and Idiart, Marco and Villavicencio, Aline}, booktitle={Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)}, year={2018} } ```
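Because the corpus is large (roughly 18 GB of text), streaming is the natural way to sample it; the sketch below is a hedged example that rebuilds one document's running text from the nested `text.paragraphs` structure shown under Data Instances. Streaming support is an assumption about how the hosted files behave rather than something the card states.

```python
# Sketch: stream BrWaC and rebuild one document's text from its
# paragraph structure (text -> paragraphs -> list of sentence strings).
from datasets import load_dataset

brwac = load_dataset("dominguesm/brwac", split="train", streaming=True)

doc = next(iter(brwac))
print(doc["title"], "-", doc["uri"])

# Each document holds a list of paragraphs; each paragraph is a list of sentences.
paragraphs = doc["text"]["paragraphs"]
full_text = "\n\n".join(" ".join(sentences) for sentences in paragraphs)
print(full_text[:500])
```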
dominguesm/brwac
[ "task_categories:text-generation", "task_categories:fill-mask", "task_ids:language-modeling", "task_ids:masked-language-modeling", "annotations_creators:no-annotation", "language_creators:found", "multilinguality:monolingual", "size_categories:1M<n<10M", "source_datasets:original", "language:pt", "license:unknown", "region:us" ]
2023-01-08T14:08:57+00:00
{"annotations_creators": ["no-annotation"], "language_creators": ["found"], "language": ["pt"], "license": ["unknown"], "multilinguality": ["monolingual"], "size_categories": ["1M<n<10M"], "source_datasets": ["original"], "task_categories": ["text-generation", "fill-mask"], "task_ids": ["language-modeling", "masked-language-modeling"], "paperswithcode_id": "brwac", "pretty_name": "BrWaC", "dataset_info": {"features": [{"name": "doc_id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "uri", "dtype": "string"}, {"name": "text", "sequence": [{"name": "paragraphs", "sequence": "string"}]}], "splits": [{"name": "train", "num_bytes": 18828412956, "num_examples": 3530796}], "download_size": 11616550261, "dataset_size": 18828412956}}
2023-01-08T14:28:10+00:00
[]
[ "pt" ]
TAGS #task_categories-text-generation #task_categories-fill-mask #task_ids-language-modeling #task_ids-masked-language-modeling #annotations_creators-no-annotation #language_creators-found #multilinguality-monolingual #size_categories-1M<n<10M #source_datasets-original #language-Portuguese #license-unknown #region-us
# Dataset Card for BrWaC ## Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: BrWaC homepage - Repository: BrWaC repository - Paper: The brWaC Corpus: A New Open Resource for Brazilian Portuguese - Point of Contact: Jorge A. Wagner Filho ### Dataset Summary The BrWaC (Brazilian Portuguese Web as Corpus) is a large corpus constructed following the Wacky framework, which was made public for research purposes. The current corpus version, released in January 2017, is composed by 3.53 million documents, 2.68 billion tokens and 5.79 million types. Please note that this resource is available solely for academic research purposes, and you agreed not to use it for any commercial applications. No need to manually download external sources. ### Supported Tasks and Leaderboards ### Languages Portuguese ## Dataset Structure ### Data Instances An example from the BrWaC dataset looks as follows: ### Data Fields - 'doc_id': The document ID - 'title': The document title - 'uri': URI where the document was extracted from - 'text': A list of document paragraphs (with a list of sentences in it as a list of strings) ### Data Splits The data is only split into train set with size of 3530796 samples. ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information
[ "# Dataset Card for BrWaC", "## Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: BrWaC homepage\n- Repository: BrWaC repository\n- Paper: The brWaC Corpus: A New Open Resource for Brazilian Portuguese\n- Point of Contact: Jorge A. Wagner Filho", "### Dataset Summary\n\nThe BrWaC (Brazilian Portuguese Web as Corpus) is a large corpus constructed following the Wacky framework, \nwhich was made public for research purposes. The current corpus version, released in January 2017, is composed by \n3.53 million documents, 2.68 billion tokens and 5.79 million types. Please note that this resource is available \nsolely for academic research purposes, and you agreed not to use it for any commercial applications. No need to manually download external sources.", "### Supported Tasks and Leaderboards", "### Languages\n\nPortuguese", "## Dataset Structure", "### Data Instances\n\nAn example from the BrWaC dataset looks as follows:", "### Data Fields\n\n- 'doc_id': The document ID\n- 'title': The document title\n- 'uri': URI where the document was extracted from\n- 'text': A list of document paragraphs (with a list of sentences in it as a list of strings)", "### Data Splits\n\nThe data is only split into train set with size of 3530796 samples.", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information" ]
[ "TAGS\n#task_categories-text-generation #task_categories-fill-mask #task_ids-language-modeling #task_ids-masked-language-modeling #annotations_creators-no-annotation #language_creators-found #multilinguality-monolingual #size_categories-1M<n<10M #source_datasets-original #language-Portuguese #license-unknown #region-us \n", "# Dataset Card for BrWaC", "## Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: BrWaC homepage\n- Repository: BrWaC repository\n- Paper: The brWaC Corpus: A New Open Resource for Brazilian Portuguese\n- Point of Contact: Jorge A. Wagner Filho", "### Dataset Summary\n\nThe BrWaC (Brazilian Portuguese Web as Corpus) is a large corpus constructed following the Wacky framework, \nwhich was made public for research purposes. The current corpus version, released in January 2017, is composed by \n3.53 million documents, 2.68 billion tokens and 5.79 million types. Please note that this resource is available \nsolely for academic research purposes, and you agreed not to use it for any commercial applications. No need to manually download external sources.", "### Supported Tasks and Leaderboards", "### Languages\n\nPortuguese", "## Dataset Structure", "### Data Instances\n\nAn example from the BrWaC dataset looks as follows:", "### Data Fields\n\n- 'doc_id': The document ID\n- 'title': The document title\n- 'uri': URI where the document was extracted from\n- 'text': A list of document paragraphs (with a list of sentences in it as a list of strings)", "### Data Splits\n\nThe data is only split into train set with size of 3530796 samples.", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information" ]
e4781339d325dcce5080239ebfd43d1aa02484d5
# Dataset Card for Dataset Name titulos_noticias_rcn_clasificadas ## Dataset Description The news items were taken from the RCN website and the headlines were classified into ['salud' 'tecnologia' 'colombia' 'economia' 'deportes'] salud= 1805 items, tecnologia= 1805 items, colombia= 1805 items, economia= 1805 items, deportes= 1805 items, For a total of 9030 rows. page: https://www.noticiasrcn.com/ - **Homepage:** - **Repository:** - **Point of Contact:** ### Languages Spanish ## Dataset Structure text, label, url
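As a hedged sketch (not part of the original card), the class balance described above can be checked after loading the headlines; the `text` and `label` column names follow the Dataset Structure note, and whether `label` arrives as a plain string or a class id is left open.

```python
# Sketch: load the RCN headline dataset and count examples per category.
# Column names ("text", "label") follow the card; the label type is not
# guaranteed, so the counter works for either strings or class ids.
from collections import Counter

from datasets import load_dataset

headlines = load_dataset("Nicky0007/titulos_noticias_rcn_clasificadas", split="train")

print(Counter(headlines["label"]))  # expected: roughly balanced five-way split
print(headlines[0]["text"])         # one example headline
```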
Nicky0007/titulos_noticias_rcn_clasificadas
[ "task_categories:token-classification", "size_categories:1K<n<10K", "language:es", "region:us" ]
2023-01-08T14:29:50+00:00
{"language": ["es"], "size_categories": ["1K<n<10K"], "task_categories": ["token-classification"]}
2023-01-08T21:38:51+00:00
[]
[ "es" ]
TAGS #task_categories-token-classification #size_categories-1K<n<10K #language-Spanish #region-us
# Dataset Card for Dataset Name titulos_noticias_rcn_clasificadas ## Dataset Description The news items were taken from the RCN website and the headlines were classified into ['salud' 'tecnologia' 'colombia' 'economia' 'deportes'] salud= 1805 items, tecnologia= 1805 items, colombia= 1805 items, economia= 1805 items, deportes= 1805 items, For a total of 9030 rows. page: URL - Homepage: - Repository: - Point of Contact: ### Languages Spanish ## Dataset Structure text, label, url
[ "# Dataset Card for Dataset Name\ntitulos_noticias_rcn_clasificadas", "## Dataset Description\nThe news items were taken from the RCN website and the headlines were classified into ['salud' 'tecnologia' 'colombia' 'economia' 'deportes']\n\nsalud= 1805 items, \ntecnologia= 1805 items, \ncolombia= 1805 items, \neconomia= 1805 items, \ndeportes= 1805 items, \n\n\nFor a total of 9030 rows.\n\npage: URL\n\n- Homepage: \n- Repository: \n- Point of Contact:", "### Languages\nSpanish", "## Dataset Structure\ntext, label, url" ]
[ "TAGS\n#task_categories-token-classification #size_categories-1K<n<10K #language-Spanish #region-us \n", "# Dataset Card for Dataset Name\ntitulos_noticias_rcn_clasificadas", "## Dataset Description\nThe news items were taken from the RCN website and the headlines were classified into ['salud' 'tecnologia' 'colombia' 'economia' 'deportes']\n\nsalud= 1805 items, \ntecnologia= 1805 items, \ncolombia= 1805 items, \neconomia= 1805 items, \ndeportes= 1805 items, \n\n\nFor a total of 9030 rows.\n\npage: URL\n\n- Homepage: \n- Repository: \n- Point of Contact:", "### Languages\nSpanish", "## Dataset Structure\ntext, label, url" ]
b60f7964a8e708f9215d5c0f9a409397301cba20
A dataset for translation.
Jour/Translation
[ "task_categories:translation", "size_categories:100K<n<1M", "region:us" ]
2023-01-08T15:06:05+00:00
{"size_categories": ["100K<n<1M"], "task_categories": ["translation"]}
2023-01-08T15:32:13+00:00
[]
[]
TAGS #task_categories-translation #size_categories-100K<n<1M #region-us
A dataset for translation.
[]
[ "TAGS\n#task_categories-translation #size_categories-100K<n<1M #region-us \n" ]
d4837a0c0fdba1f3a4ad3783234f6c7f961eb9b5
# Dataset Card for "open-cm" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
michaelb1225/open-cm
[ "region:us" ]
2023-01-08T15:09:26+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}], "splits": [{"name": "train", "num_bytes": 6129427551.0, "num_examples": 671}], "download_size": 6071742068, "dataset_size": 6129427551.0}}
2023-01-08T15:23:21+00:00
[]
[]
TAGS #region-us
# Dataset Card for "open-cm" More Information needed
[ "# Dataset Card for \"open-cm\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"open-cm\"\n\nMore Information needed" ]
2875a61d509ab3012817736f5c7ba8898e9e6689
# About the Speech Corpus `luxembourgish-asr-rtl-lu` dataset is a speech corpus for the under-resourced Luxembourgish language. The audio-transcription pairs were collected from [RTL.lu](http://www.rtl.lu/). We used forced alignment to segment the audio files. The transcriptions were validated with the help of language experts at the [Center for the Luxembourgish Language](https://portal.education.lu/zls). # Citation ``` @misc{lb-wav2vec2, author = {Nguyen, Le Minh and Nayak, Shekhar and Coler, Matt.}, keywords = {Luxembourgish, multilingual speech recognition, language modelling, wav2vec 2.0 XLSR-53, under-resourced language}, title = {IMPROVING LUXEMBOURGISH SPEECH RECOGNITION WITH CROSS-LINGUAL SPEECH REPRESENTATIONS}, year = {2022}, copyright = {2023 IEEE} } ``` # Copyright notice Copyright © 2022 RTL.lu. All rights reserved.
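A hedged sketch of reading the corpus for ASR experiments; the card does not document split or column names, so the code only assumes a `train` split, and resampling to 16 kHz (the rate wav2vec 2.0 XLSR models expect, per the citation) is applied only if an `audio` column turns out to exist.

```python
# Sketch: load the Luxembourgish speech corpus and, if it exposes an "audio"
# column, resample it to 16 kHz for wav2vec 2.0-style models. Split and column
# names are assumptions, not guarantees from the card.
from datasets import Audio, load_dataset

corpus = load_dataset("Lemswasabi/luxembourgish-asr-rtl-lu", split="train")
print(corpus.column_names)

if "audio" in corpus.column_names:
    corpus = corpus.cast_column("audio", Audio(sampling_rate=16_000))
    print(corpus[0]["audio"]["sampling_rate"])  # -> 16000 after casting
```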
Lemswasabi/luxembourgish-asr-rtl-lu
[ "language:lb", "license:cc-by-nc-nd-4.0", "region:us" ]
2023-01-08T15:29:50+00:00
{"language": ["lb"], "license": "cc-by-nc-nd-4.0"}
2023-01-08T15:44:54+00:00
[]
[ "lb" ]
TAGS #language-Luxembourgish #license-cc-by-nc-nd-4.0 #region-us
# About the Speech Corpus 'luxembourgish-asr-rtl-lu' dataset is a speech corpus for the under-resourced Luxembourgish language. The audio-transcription pairs were collected from URL. We used forced alignment to segment the audio files. The transcriptions were validated with the help of language experts at the Center for the Luxembourgish Language. # Copyright notice Copyright © 2022 URL. All rights reserved.
[ "# About the Speech Corpus\n\n'luxembourgish-asr-rtl-lu' dataset is a speech corpus for the under-resourced Luxembourgish language. The audio-transcription pairs were collected from URL.\nWe used forced alignment to segment the audio files. The transcriptions were validated with the help of language experts at the Center for the Luxembourgish Language.", "# Copyright notice\n\nCopyright © 2022 URL. All rights reserved." ]
[ "TAGS\n#language-Luxembourgish #license-cc-by-nc-nd-4.0 #region-us \n", "# About the Speech Corpus\n\n'luxembourgish-asr-rtl-lu' dataset is a speech corpus for the under-resourced Luxembourgish language. The audio-transcription pairs were collected from URL.\nWe used forced alignment to segment the audio files. The transcriptions were validated with the help of language experts at the Center for the Luxembourgish Language.", "# Copyright notice\n\nCopyright © 2022 URL. All rights reserved." ]
14eb3a0f95145852082058962bd968c62d754a34
# Dataset Card for "bookcorpus_compact_1024_shard4" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
saibo/bookcorpus_compact_1024_shard4_of_10
[ "region:us" ]
2023-01-08T19:31:58+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "concept_with_offset", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 767978582, "num_examples": 61605}], "download_size": 389198129, "dataset_size": 767978582}}
2023-01-08T19:32:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for "bookcorpus_compact_1024_shard4" More Information needed
[ "# Dataset Card for \"bookcorpus_compact_1024_shard4\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"bookcorpus_compact_1024_shard4\"\n\nMore Information needed" ]
e6507d491fd697c11d11f95ef443eab5ecdfe5c6
# Note Captcha images are presented as base64 strings. All csv files have a "\t" separator. # Dataset consists of several files ## fssp_*.csv I am publishing an updated version of the archive of 40,310 pictures, which I have divided into 4 categories: - 4 symbols on the picture - 6 747 pcs. - 5 symbols - 18 403 pcs. - 6 characters - 7,038 pcs. - 7 characters - 7 589 pcs. Symbols used in captcha 'б','в','г','д','ж','к','л','м','н','п','р','с','т','2','4','5','6','7','8','9' ## fms.csv About 15 thousand captcha images, each consisting of 6 digits. ## rosreestr.csv About 10 thousand captchas, each consisting of English characters and digits, 5 characters long. ## vk.csv About 19 thousand captchas, each consisting of Russian characters and digits, 5 to 6 characters long. The images come from the social network vk.com # Kaggle This dataset is an updated version of the previous one, which I published on [Kaggle](https://www.kaggle.com/datasets/mrdaniilak/russian-captcha-images-base64) ### Citation ``` @misc{ russian_captcha_dataset, title = { Russian Captcha Dataset }, type = { Open Source Dataset }, author = { Daniil Agniashvili }, url = { https://huggingface.co/datasets/daniilak/russian_captcha_images/ }, note = { visited on 2023-02-24 }, } ``` ### License Public Domain
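Since the images are shipped as base64 strings inside tab-separated CSV files, the main thing a user needs is to decode a row back into an image; the sketch below is hedged: the column names `image` and `label` are placeholders, so check the real headers of whichever file you open.

```python
# Sketch: read one tab-separated captcha file and decode a base64 image.
# "image" and "label" are placeholder column names; replace them with the
# actual headers found in the file.
import base64
import io

import pandas as pd
from PIL import Image

df = pd.read_csv("fms.csv", sep="\t")   # any of the captcha CSVs listed above
print(df.columns.tolist())              # check the real column names first

row = df.iloc[0]
captcha = Image.open(io.BytesIO(base64.b64decode(row["image"])))
captcha.save("captcha_0.png")
print("answer:", row["label"])
```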
daniilak/russian_captcha_images
[ "language:ru", "license:cc", "image", "captcha", "region:us" ]
2023-01-08T19:37:34+00:00
{"language": ["ru"], "license": "cc", "tags": ["image", "captcha"]}
2023-02-24T15:20:17+00:00
[]
[ "ru" ]
TAGS #language-Russian #license-cc #image #captcha #region-us
# Note Captcha images are presented as base64 strings. All csv files have a "\t" separator. # Dataset consists of several files ## fssp_*.csv I am publishing an updated version of the archive of 40,310 pictures, which I have divided into 4 categories: - 4 symbols on the picture - 6 747 pcs. - 5 symbols - 18 403 pcs. - 6 characters - 7,038 pcs. - 7 characters - 7 589 pcs. Symbols used in captcha 'б','в','г','д','ж','к','л','м','н','п','р','с','т','2','4','5','6','7','8','9' ## URL About 15 thousand captcha images, each consisting of 6 digits. ## URL About 10 thousand captchas, each consisting of English characters and digits, 5 characters long. ## URL About 19 thousand captchas, each consisting of Russian characters and digits, 5 to 6 characters long. The images come from the social network URL # Kaggle This dataset is an updated version of the previous one, which I published on Kaggle ### License Public Domain
[ "# Note\nCaptcha images are presented as base64 string.\nAll csv files have a \"\\t\" separator.", "# Dataset consists of several files", "## fssp_*.csv\n\nI am publishing an updated version of the archive of 40,310 pictures, which I have divided into 4 categories:\n- 4 symbols on the picture - 6 747 pcs.\n- 5 symbols - 18 403 pcs.\n- 6 characters - 7,038 pcs.\n- 7 characters - 7 589 pcs.\n\nSymbols used in captcha\n'б','в','г','д','ж','к','л','м','н','п','р','с','т','2','4','5','6','7','8','9'", "## URL\n\nAbout 15 thousand captcha imgs, which consists of 6 numbers.", "## URL\n\nAbout 10 thousand captcha, which consists of English characters and numbers with a length of 5 elements.", "## URL\n\nAbout 19 thousand captcha, which consists of Russian characters and numbers from 5 to 6 elements long. Images from social network URL", "# Kaggle\n\nThis Dataset is updated by the previous one, which I published on Kaggle", "### License\nPublic Domain" ]
[ "TAGS\n#language-Russian #license-cc #image #captcha #region-us \n", "# Note\nCaptcha images are presented as base64 string.\nAll csv files have a \"\\t\" separator.", "# Dataset consists of several files", "## fssp_*.csv\n\nI am publishing an updated version of the archive of 40,310 pictures, which I have divided into 4 categories:\n- 4 symbols on the picture - 6 747 pcs.\n- 5 symbols - 18 403 pcs.\n- 6 characters - 7,038 pcs.\n- 7 characters - 7 589 pcs.\n\nSymbols used in captcha\n'б','в','г','д','ж','к','л','м','н','п','р','с','т','2','4','5','6','7','8','9'", "## URL\n\nAbout 15 thousand captcha imgs, which consists of 6 numbers.", "## URL\n\nAbout 10 thousand captcha, which consists of English characters and numbers with a length of 5 elements.", "## URL\n\nAbout 19 thousand captcha, which consists of Russian characters and numbers from 5 to 6 elements long. Images from social network URL", "# Kaggle\n\nThis Dataset is updated by the previous one, which I published on Kaggle", "### License\nPublic Domain" ]
52ae1c1fb3c3195ae7d69dc2bd1fad58c8131add
# Dataset Card for Dataset Name ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary [More Information Needed] ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
sajjadrauf/VQA
[ "task_categories:image-segmentation", "task_categories:image-classification", "task_categories:question-answering", "size_categories:10K<n<100K", "language:am", "license:afl-3.0", "region:us" ]
2023-01-08T20:17:26+00:00
{"language": ["am"], "license": "afl-3.0", "size_categories": ["10K<n<100K"], "task_categories": ["image-segmentation", "image-classification", "question-answering"]}
2023-01-08T20:19:36+00:00
[]
[ "am" ]
TAGS #task_categories-image-segmentation #task_categories-image-classification #task_categories-question-answering #size_categories-10K<n<100K #language-Amharic #license-afl-3.0 #region-us
# Dataset Card for Dataset Name ## Dataset Description - Homepage: - Repository: - Paper: - Leaderboard: - Point of Contact: ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# Dataset Card for Dataset Name", "## Dataset Description\n\n- Homepage: \n- Repository: \n- Paper: \n- Leaderboard: \n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#task_categories-image-segmentation #task_categories-image-classification #task_categories-question-answering #size_categories-10K<n<100K #language-Amharic #license-afl-3.0 #region-us \n", "# Dataset Card for Dataset Name", "## Dataset Description\n\n- Homepage: \n- Repository: \n- Paper: \n- Leaderboard: \n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
be9a03fde01d9f05107b14941af1ad99897691cf
# Dataset Card for FrenchMedMCQA : A French Multiple-Choice Question Answering Corpus for Medical domain ## Table of Contents - [Dataset Card for FrenchMedMCQA : A French Multiple-Choice Question Answering Corpus for Medical domain](#dataset-card-for-frenchmedmcqa--a-french-multiple-choice-question-answering-corpus-for-medical-domain) - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Source Data](#source-data) - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contact](#contact) ## Dataset Description - **Homepage:** https://deft2023.univ-avignon.fr/ - **Repository:** https://deft2023.univ-avignon.fr/ - **Paper:** [FrenchMedMCQA: A French Multiple-Choice Question Answering Dataset for Medical domain](https://hal.science/hal-03824241/document) - **Leaderboard:** Coming soon - **Point of Contact:** [Yanis LABRAK](mailto:[email protected]) ### Dataset Summary This paper introduces FrenchMedMCQA, the first publicly available Multiple-Choice Question Answering (MCQA) dataset in French for medical domain. It is composed of 3,105 questions taken from real exams of the French medical specialization diploma in pharmacy, mixing single and multiple answers. Each instance of the dataset contains an identifier, a question, five possible answers and their manual correction(s). We also propose first baseline models to automatically process this MCQA task in order to report on the current performances and to highlight the difficulty of the task. A detailed analysis of the results showed that it is necessary to have representations adapted to the medical domain or to the MCQA task: in our case, English specialized models yielded better results than generic French ones, even though FrenchMedMCQA is in French. Corpus, models and tools are available online. ### Supported Tasks and Leaderboards Multiple-Choice Question Answering (MCQA) ### Languages The questions and answers are available in French. ## Dataset Structure ### Data Instances ```json { "id": "1863462668476003678", "question": "Parmi les propositions suivantes, laquelle (lesquelles) est (sont) exacte(s) ? Les chylomicrons plasmatiques :", "answers": { "a": "Sont plus riches en cholestérol estérifié qu'en triglycérides", "b": "Sont synthétisés par le foie", "c": "Contiennent de l'apolipoprotéine B48", "d": "Contiennent de l'apolipoprotéine E", "e": "Sont transformés par action de la lipoprotéine lipase" }, "correct_answers": [ "c", "d", "e" ], "subject_name": "pharmacie", "type": "multiple" } ``` ### Data Fields - `id` : a string question identifier for each example - `question` : question text (a string) - `answer_a` : Option A - `answer_b` : Option B - `answer_c` : Option C - `answer_d` : Option D - `answer_e` : Option E - `correct_answers` : Correct options, i.e., A, D and E - `choice_type` ({"single", "multiple"}): Question choice type. 
- "single": Single-choice question, where each choice contains a single option. - "multiple": Multi-choice question, where each choice contains a combination of multiple options. ### Data Splits | # Answers | Training | Validation | Test | Total | |:---------:|:--------:|:----------:|:----:|:-----:| | 1 | 595 | 164 | 321 | 1,080 | | 2 | 528 | 45 | 97 | 670 | | 3 | 718 | 71 | 141 | 930 | | 4 | 296 | 30 | 56 | 382 | | 5 | 34 | 2 | 7 | 43 | | Total | 2171 | 312 | 622 | 3,105 | ## Dataset Creation ### Source Data #### Initial Data Collection and Normalization The questions and their associated candidate answer(s) were collected from real French pharmacy exams on the remede website. Questions and answers were manually created by medical experts and used during examinations. The dataset is composed of 2,025 questions with multiple answers and 1,080 with a single one, for a total of 3,105 questions. Each instance of the dataset contains an identifier, a question, five options (labeled from A to E) and correct answer(s). The average question length is 14.17 tokens and the average answer length is 6.44 tokens. The vocabulary size is of 13k words, of which 3.8k are estimated medical domain-specific words (i.e. a word related to the medical field). We find an average of 2.49 medical domain-specific words in each question (17 % of the words) and 2 in each answer (36 % of the words). On average, a medical domain-specific word is present in 2 questions and in 8 answers. ### Personal and Sensitive Information The corpora is free of personal or sensitive information. ## Additional Information ### Dataset Curators The dataset was created by Labrak Yanis and Bazoge Adrien and Dufour Richard and Daille Béatrice and Gourraud Pierre-Antoine and Morin Emmanuel and Rouvier Mickael. ### Licensing Information Apache 2.0 ### Citation Information If you find this useful in your research, please consider citing the dataset paper : ```latex @inproceedings{labrak-etal-2022-frenchmedmcqa, title = "{F}rench{M}ed{MCQA}: A {F}rench Multiple-Choice Question Answering Dataset for Medical domain", author = "Labrak, Yanis and Bazoge, Adrien and Dufour, Richard and Daille, Beatrice and Gourraud, Pierre-Antoine and Morin, Emmanuel and Rouvier, Mickael", booktitle = "Proceedings of the 13th International Workshop on Health Text Mining and Information Analysis (LOUHI)", month = dec, year = "2022", address = "Abu Dhabi, United Arab Emirates (Hybrid)", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2022.louhi-1.5", pages = "41--46", abstract = "This paper introduces FrenchMedMCQA, the first publicly available Multiple-Choice Question Answering (MCQA) dataset in French for medical domain. It is composed of 3,105 questions taken from real exams of the French medical specialization diploma in pharmacy, mixing single and multiple answers. Each instance of the dataset contains an identifier, a question, five possible answers and their manual correction(s). We also propose first baseline models to automatically process this MCQA task in order to report on the current performances and to highlight the difficulty of the task. A detailed analysis of the results showed that it is necessary to have representations adapted to the medical domain or to the MCQA task: in our case, English specialized models yielded better results than generic French ones, even though FrenchMedMCQA is in French. 
Corpus, models and tools are available online.", } ``` ### Contact Thanks to contact [Yanis LABRAK](https://github.com/qanastek) for more information about this dataset.
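Using the fields documented above, a common preprocessing step is flattening each record into a single multiple-choice prompt; the sketch below assumes the corpus loads directly under this repository id and uses the flat `answer_a` to `answer_e` columns from the Data Fields section (the example instance nests them under `answers`, so adjust if the loaded schema differs).

```python
# Sketch: load FrenchMedMCQA and render one question as a multiple-choice prompt.
# Field names follow the Data Fields section; the "train" split matches the
# splits table. Adjust the field access if the loaded schema nests the options.
from datasets import load_dataset

mcqa = load_dataset("qanastek/frenchmedmcqa", split="train")

ex = mcqa[0]
options = "\n".join(f"({letter.upper()}) {ex['answer_' + letter]}" for letter in "abcde")
print(f"{ex['question']}\n{options}")
print("Correct:", ", ".join(ex["correct_answers"]))
```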
qanastek/frenchmedmcqa
[ "task_categories:question-answering", "task_categories:multiple-choice", "task_ids:multiple-choice-qa", "task_ids:open-domain-qa", "annotations_creators:no-annotation", "language_creators:expert-generated", "multilinguality:monolingual", "size_categories:1k<n<10k", "source_datasets:original", "language:fr", "license:apache-2.0", "region:us" ]
2023-01-08T20:22:47+00:00
{"annotations_creators": ["no-annotation"], "language_creators": ["expert-generated"], "language": ["fr"], "license": ["apache-2.0"], "multilinguality": ["monolingual"], "size_categories": ["1k<n<10k"], "source_datasets": ["original"], "task_categories": ["question-answering", "multiple-choice"], "task_ids": ["multiple-choice-qa", "open-domain-qa"], "paperswithcode_id": "frenchmedmcqa", "pretty_name": "FrenchMedMCQA"}
2023-06-08T11:39:22+00:00
[]
[ "fr" ]
TAGS #task_categories-question-answering #task_categories-multiple-choice #task_ids-multiple-choice-qa #task_ids-open-domain-qa #annotations_creators-no-annotation #language_creators-expert-generated #multilinguality-monolingual #size_categories-1k<n<10k #source_datasets-original #language-French #license-apache-2.0 #region-us
Dataset Card for FrenchMedMCQA : A French Multiple-Choice Question Answering Corpus for Medical domain ====================================================================================================== Table of Contents ----------------- * Dataset Card for FrenchMedMCQA : A French Multiple-Choice Question Answering Corpus for Medical domain + Table of Contents + Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages + Dataset Structure - Data Instances - Data Fields - Data Splits + Dataset Creation - Source Data * Initial Data Collection and Normalization - Personal and Sensitive Information + Additional Information - Dataset Curators - Licensing Information - Citation Information - Contact Dataset Description ------------------- * Homepage: URL * Repository: URL * Paper: FrenchMedMCQA: A French Multiple-Choice Question Answering Dataset for Medical domain * Leaderboard: Coming soon * Point of Contact: Yanis LABRAK ### Dataset Summary This paper introduces FrenchMedMCQA, the first publicly available Multiple-Choice Question Answering (MCQA) dataset in French for medical domain. It is composed of 3,105 questions taken from real exams of the French medical specialization diploma in pharmacy, mixing single and multiple answers. Each instance of the dataset contains an identifier, a question, five possible answers and their manual correction(s). We also propose first baseline models to automatically process this MCQA task in order to report on the current performances and to highlight the difficulty of the task. A detailed analysis of the results showed that it is necessary to have representations adapted to the medical domain or to the MCQA task: in our case, English specialized models yielded better results than generic French ones, even though FrenchMedMCQA is in French. Corpus, models and tools are available online. ### Supported Tasks and Leaderboards Multiple-Choice Question Answering (MCQA) ### Languages The questions and answers are available in French. Dataset Structure ----------------- ### Data Instances ### Data Fields * 'id' : a string question identifier for each example * 'question' : question text (a string) * 'answer\_a' : Option A * 'answer\_b' : Option B * 'answer\_c' : Option C * 'answer\_d' : Option D * 'answer\_e' : Option E * 'correct\_answers' : Correct options, i.e., A, D and E * 'choice\_type' ({"single", "multiple"}): Question choice type. + "single": Single-choice question, where each choice contains a single option. + "multiple": Multi-choice question, where each choice contains a combination of multiple options. ### Data Splits Dataset Creation ---------------- ### Source Data #### Initial Data Collection and Normalization The questions and their associated candidate answer(s) were collected from real French pharmacy exams on the remede website. Questions and answers were manually created by medical experts and used during examinations. The dataset is composed of 2,025 questions with multiple answers and 1,080 with a single one, for a total of 3,105 questions. Each instance of the dataset contains an identifier, a question, five options (labeled from A to E) and correct answer(s). The average question length is 14.17 tokens and the average answer length is 6.44 tokens. The vocabulary size is of 13k words, of which 3.8k are estimated medical domain-specific words (i.e. a word related to the medical field). 
We find an average of 2.49 medical domain-specific words in each question (17 % of the words) and 2 in each answer (36 % of the words). On average, a medical domain-specific word is present in 2 questions and in 8 answers. ### Personal and Sensitive Information The corpora is free of personal or sensitive information. Additional Information ---------------------- ### Dataset Curators The dataset was created by Labrak Yanis and Bazoge Adrien and Dufour Richard and Daille Béatrice and Gourraud Pierre-Antoine and Morin Emmanuel and Rouvier Mickael. ### Licensing Information Apache 2.0 If you find this useful in your research, please consider citing the dataset paper : ### Contact Thanks to contact Yanis LABRAK for more information about this dataset.
[ "### Dataset Summary\n\n\nThis paper introduces FrenchMedMCQA, the first publicly available Multiple-Choice Question Answering (MCQA) dataset in French for medical domain. It is composed of 3,105 questions taken from real exams of the French medical specialization diploma in pharmacy, mixing single and multiple answers.\n\n\nEach instance of the dataset contains an identifier, a question, five possible answers and their manual correction(s).\n\n\nWe also propose first baseline models to automatically process this MCQA task in order to report on the current performances and to highlight the difficulty of the task. A detailed analysis of the results showed that it is necessary to have representations adapted to the medical domain or to the MCQA task: in our case, English specialized models yielded better results than generic French ones, even though FrenchMedMCQA is in French. Corpus, models and tools are available online.", "### Supported Tasks and Leaderboards\n\n\nMultiple-Choice Question Answering (MCQA)", "### Languages\n\n\nThe questions and answers are available in French.\n\n\nDataset Structure\n-----------------", "### Data Instances", "### Data Fields\n\n\n* 'id' : a string question identifier for each example\n* 'question' : question text (a string)\n* 'answer\\_a' : Option A\n* 'answer\\_b' : Option B\n* 'answer\\_c' : Option C\n* 'answer\\_d' : Option D\n* 'answer\\_e' : Option E\n* 'correct\\_answers' : Correct options, i.e., A, D and E\n* 'choice\\_type' ({\"single\", \"multiple\"}): Question choice type.\n\t+ \"single\": Single-choice question, where each choice contains a single option.\n\t+ \"multiple\": Multi-choice question, where each choice contains a combination of multiple options.", "### Data Splits\n\n\n\nDataset Creation\n----------------", "### Source Data", "#### Initial Data Collection and Normalization\n\n\nThe questions and their associated candidate answer(s) were collected from real French pharmacy exams on the remede website. Questions and answers were manually created by medical experts and used during examinations. The dataset is composed of 2,025 questions with multiple answers and 1,080 with a single one, for a total of 3,105 questions. Each instance of the dataset contains an identifier, a question, five options (labeled from A to E) and correct answer(s). The average question length is 14.17 tokens and the average answer length is 6.44 tokens. The vocabulary size is of 13k words, of which 3.8k are estimated medical domain-specific words (i.e. a word related to the medical field). We find an average of 2.49 medical domain-specific words in each question (17 % of the words) and 2 in each answer (36 % of the words). On average, a medical domain-specific word is present in 2 questions and in 8 answers.", "### Personal and Sensitive Information\n\n\nThe corpora is free of personal or sensitive information.\n\n\nAdditional Information\n----------------------", "### Dataset Curators\n\n\nThe dataset was created by Labrak Yanis and Bazoge Adrien and Dufour Richard and Daille Béatrice and Gourraud Pierre-Antoine and Morin Emmanuel and Rouvier Mickael.", "### Licensing Information\n\n\nApache 2.0\n\n\nIf you find this useful in your research, please consider citing the dataset paper :", "### Contact\n\n\nThanks to contact Yanis LABRAK for more information about this dataset." ]
[ "TAGS\n#task_categories-question-answering #task_categories-multiple-choice #task_ids-multiple-choice-qa #task_ids-open-domain-qa #annotations_creators-no-annotation #language_creators-expert-generated #multilinguality-monolingual #size_categories-1k<n<10k #source_datasets-original #language-French #license-apache-2.0 #region-us \n", "### Dataset Summary\n\n\nThis paper introduces FrenchMedMCQA, the first publicly available Multiple-Choice Question Answering (MCQA) dataset in French for medical domain. It is composed of 3,105 questions taken from real exams of the French medical specialization diploma in pharmacy, mixing single and multiple answers.\n\n\nEach instance of the dataset contains an identifier, a question, five possible answers and their manual correction(s).\n\n\nWe also propose first baseline models to automatically process this MCQA task in order to report on the current performances and to highlight the difficulty of the task. A detailed analysis of the results showed that it is necessary to have representations adapted to the medical domain or to the MCQA task: in our case, English specialized models yielded better results than generic French ones, even though FrenchMedMCQA is in French. Corpus, models and tools are available online.", "### Supported Tasks and Leaderboards\n\n\nMultiple-Choice Question Answering (MCQA)", "### Languages\n\n\nThe questions and answers are available in French.\n\n\nDataset Structure\n-----------------", "### Data Instances", "### Data Fields\n\n\n* 'id' : a string question identifier for each example\n* 'question' : question text (a string)\n* 'answer\\_a' : Option A\n* 'answer\\_b' : Option B\n* 'answer\\_c' : Option C\n* 'answer\\_d' : Option D\n* 'answer\\_e' : Option E\n* 'correct\\_answers' : Correct options, i.e., A, D and E\n* 'choice\\_type' ({\"single\", \"multiple\"}): Question choice type.\n\t+ \"single\": Single-choice question, where each choice contains a single option.\n\t+ \"multiple\": Multi-choice question, where each choice contains a combination of multiple options.", "### Data Splits\n\n\n\nDataset Creation\n----------------", "### Source Data", "#### Initial Data Collection and Normalization\n\n\nThe questions and their associated candidate answer(s) were collected from real French pharmacy exams on the remede website. Questions and answers were manually created by medical experts and used during examinations. The dataset is composed of 2,025 questions with multiple answers and 1,080 with a single one, for a total of 3,105 questions. Each instance of the dataset contains an identifier, a question, five options (labeled from A to E) and correct answer(s). The average question length is 14.17 tokens and the average answer length is 6.44 tokens. The vocabulary size is of 13k words, of which 3.8k are estimated medical domain-specific words (i.e. a word related to the medical field). We find an average of 2.49 medical domain-specific words in each question (17 % of the words) and 2 in each answer (36 % of the words). 
On average, a medical domain-specific word is present in 2 questions and in 8 answers.", "### Personal and Sensitive Information\n\n\nThe corpora is free of personal or sensitive information.\n\n\nAdditional Information\n----------------------", "### Dataset Curators\n\n\nThe dataset was created by Labrak Yanis and Bazoge Adrien and Dufour Richard and Daille Béatrice and Gourraud Pierre-Antoine and Morin Emmanuel and Rouvier Mickael.", "### Licensing Information\n\n\nApache 2.0\n\n\nIf you find this useful in your research, please consider citing the dataset paper :", "### Contact\n\n\nThanks to contact Yanis LABRAK for more information about this dataset." ]
a3c51840a1c4bc170b5ca483254b30f192d65ce0
# Dataset cointelegraph English ## Dataset Description It is a dataset where information about the title, description, author, etc. is collected. approx. 10,041 rows page: https://cointelegraph.com/ categories: #cryptocurrency, #Bitcoin, #Ethereum ...
Nicky0007/cointelegraph_news_English
[ "task_categories:token-classification", "task_categories:question-answering", "size_categories:10K<n<100K", "language:en", "region:us" ]
2023-01-08T21:27:13+00:00
{"language": ["en"], "size_categories": ["10K<n<100K"], "task_categories": ["token-classification", "question-answering"]}
2023-01-08T22:07:31+00:00
[]
[ "en" ]
TAGS #task_categories-token-classification #task_categories-question-answering #size_categories-10K<n<100K #language-English #region-us
# Dataset cointelegraph English ## Dataset Description It is a dataset where information about the title, description, author, etc. is collected. approx. 10,041 rows page: URL categories: #cryptocurrency, #Bitcoin, #Ethereum ...
[ "# Dataset cointelegraph English", "## Dataset Description\n\nIt is a dataset where information about the title, description, author, etc. is collected.\napprox: 10041 row\npage: URL\n\n\n\ncategorie: #cryptocurrency, #Bitcoin, #Ethereum ..." ]
[ "TAGS\n#task_categories-token-classification #task_categories-question-answering #size_categories-10K<n<100K #language-English #region-us \n", "# Dataset cointelegraph English", "## Dataset Description\n\nIt is a dataset where information about the title, description, author, etc. is collected.\napprox: 10041 row\npage: URL\n\n\n\ncategorie: #cryptocurrency, #Bitcoin, #Ethereum ..." ]
3a4dc90ec7626657c448ed74b44bcc98fac3acc9
# Dataset cointelegraph español Dataset Description It is a dataset where information about the title, description, author, etc. is collected. It has approx. 10,738 rows. page: https://cointelegraph.com/ categories: #cryptocurrency, #Bitcoin, #Ethereum ...
Nicky0007/cointelegraph_noticias_Es
[ "task_categories:token-classification", "task_categories:question-answering", "size_categories:10K<n<100K", "language:es", "region:us" ]
2023-01-08T21:32:57+00:00
{"language": ["es"], "size_categories": ["10K<n<100K"], "task_categories": ["token-classification", "question-answering"]}
2023-01-08T22:19:07+00:00
[]
[ "es" ]
TAGS #task_categories-token-classification #task_categories-question-answering #size_categories-10K<n<100K #language-Spanish #region-us
# Dataset cointelegraph español Dataset Description It is a dataset where information about the title, description, author, etc. is collected. It has approx. 10,738 rows. page: URL categories: #cryptocurrency, #Bitcoin, #Ethereum ...
[ "# Dataset cointelegraph español\n\n\nDataset Description\n\n\nes un dataset donde se recopila informacion del titulo , descripcion , autor, etc.\n\n\ntiene aprox: 10738 fila \n\n\npagina: URL\n\n\n\ncategorie: #cryptocurrency, #Bitcoin, #Ethereum ..." ]
[ "TAGS\n#task_categories-token-classification #task_categories-question-answering #size_categories-10K<n<100K #language-Spanish #region-us \n", "# Dataset cointelegraph español\n\n\nDataset Description\n\n\nes un dataset donde se recopila informacion del titulo , descripcion , autor, etc.\n\n\ntiene aprox: 10738 fila \n\n\npagina: URL\n\n\n\ncategorie: #cryptocurrency, #Bitcoin, #Ethereum ..." ]
6949be6f8fb7b070b68edc654f6bba91530a6ac9
# Dataset Card for "sd_filtered_2m" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Ar4ikov/sd_filtered_2m
[ "region:us" ]
2023-01-08T21:43:39+00:00
{"dataset_info": {"features": [{"name": "Prompt", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 427667829.2266251, "num_examples": 2672923}, {"name": "test", "num_bytes": 47018271.06645638, "num_examples": 296922}], "download_size": 364684829, "dataset_size": 474686100.29308146}}
2023-01-08T21:52:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sd_filtered_2m" More Information needed
[ "# Dataset Card for \"sd_filtered_2m\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sd_filtered_2m\"\n\nMore Information needed" ]
7532ef2aaf044d80dae5bc0c2b4d39305e9cbb48
# Dataset Card for "bookcorpus_compact_1024_shard7" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
saibo/bookcorpus_compact_1024_shard7_of_10
[ "region:us" ]
2023-01-08T22:53:19+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "concept_with_offset", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 777388315, "num_examples": 61605}], "download_size": 394101383, "dataset_size": 777388315}}
2023-01-08T22:55:59+00:00
[]
[]
TAGS #region-us
# Dataset Card for "bookcorpus_compact_1024_shard7" More Information needed
[ "# Dataset Card for \"bookcorpus_compact_1024_shard7\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"bookcorpus_compact_1024_shard7\"\n\nMore Information needed" ]
3e785315f005a5dc97e56c281727c2eeef0050b2
This dataset is intended for use with ERNIE-layout.
skywalkerzhang19/DVQA
[ "region:us" ]
2023-01-09T06:48:57+00:00
{}
2023-01-20T06:19:21+00:00
[]
[]
TAGS #region-us
This dataset is intended for use with ERNIE-layout.
[]
[ "TAGS\n#region-us \n" ]
051a0ec1c92882f9ce8867a5732827c156177ff3
# KPWr & CEN
clarin-knext/kpwr_and_cen
[ "task_categories:token-classification", "task_ids:named-entity-recognition", "annotations_creators:expert-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:18K", "size_categories:10K<n<100K", "source_datasets:original", "language:pl", "license:cc-by-3.0", "region:us" ]
2023-01-09T10:22:33+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["found"], "language": ["pl"], "license": ["cc-by-3.0"], "multilinguality": ["monolingual"], "size_categories": ["18K", "10K<n<100K"], "source_datasets": ["original"], "task_categories": ["token-classification"], "task_ids": ["named-entity-recognition"], "pretty_name": "KPWr 1.27 & CEN"}
2023-01-09T11:37:59+00:00
[]
[ "pl" ]
TAGS #task_categories-token-classification #task_ids-named-entity-recognition #annotations_creators-expert-generated #language_creators-found #multilinguality-monolingual #size_categories-18K #size_categories-10K<n<100K #source_datasets-original #language-Polish #license-cc-by-3.0 #region-us
# KPWr & CEN
[ "# KPWr & CEN" ]
[ "TAGS\n#task_categories-token-classification #task_ids-named-entity-recognition #annotations_creators-expert-generated #language_creators-found #multilinguality-monolingual #size_categories-18K #size_categories-10K<n<100K #source_datasets-original #language-Polish #license-cc-by-3.0 #region-us \n", "# KPWr & CEN" ]
a436f7087749389b1681938402c8196b7b7d8340
# Dataset Card for "pairwise-code-review-instruct-critique-revision-python" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
reshinthadith/pairwise-code-review-instruct-critique-revision-python
[ "region:us" ]
2023-01-09T10:24:43+00:00
{"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "chosen", "dtype": "string"}, {"name": "rejected", "dtype": "string"}, {"name": "chosen_score", "dtype": "string"}, {"name": "rejected_score", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 35079153, "num_examples": 5236}], "download_size": 9344129, "dataset_size": 35079153}}
2023-01-09T11:25:43+00:00
[]
[]
TAGS #region-us
# Dataset Card for "pairwise-code-review-instruct-critique-revision-python" More Information needed
[ "# Dataset Card for \"pairwise-code-review-instruct-critique-revision-python\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"pairwise-code-review-instruct-critique-revision-python\"\n\nMore Information needed" ]
deee75de37aaa80b345951859ea103465967db3e
# Dataset Card for Dataset Name RexTheToy ### Dataset Summary Images of Rex the toy from Toy Story, stored for the Hugging Face DreamBooth hackathon.
sooolee/rexthetoy
[ "size_categories:n<1K", "region:us" ]
2023-01-09T11:09:37+00:00
{"size_categories": ["n<1K"]}
2023-01-10T09:21:23+00:00
[]
[]
TAGS #size_categories-n<1K #region-us
# Dataset Card for Dataset Name RexTheToy ### Dataset Summary Images of Rex the toy from Toy Story, stored for the Hugging Face DreamBooth hackathon.
[ "# Dataset Card for Dataset Name\n\nRexTheToy", "### Dataset Summary\n\nImages of the rex the toy from Toy Story, stored for huggingface dreambooth hackathon." ]
[ "TAGS\n#size_categories-n<1K #region-us \n", "# Dataset Card for Dataset Name\n\nRexTheToy", "### Dataset Summary\n\nImages of the rex the toy from Toy Story, stored for huggingface dreambooth hackathon." ]
4f6665728570c5c3c625684dbbae0d0e0bca12f5
# SpellGram ## Dataset consisting of grammatical and spelling errors - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary [More Information Needed] ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [train.csv] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
vishnun/SpellGram
[ "task_categories:text2text-generation", "size_categories:10K<n<100K", "language:en", "license:mit", "NLP", "Text2Text", "region:us" ]
2023-01-09T13:39:23+00:00
{"language": ["en"], "license": "mit", "size_categories": ["10K<n<100K"], "task_categories": ["text2text-generation"], "pretty_name": "Dataset consisting of grammatical and spelling errors", "tags": ["NLP", "Text2Text"]}
2023-01-09T13:43:11+00:00
[]
[ "en" ]
TAGS #task_categories-text2text-generation #size_categories-10K<n<100K #language-English #license-mit #NLP #Text2Text #region-us
# SpellGram ## Dataset consisting of grammatical and spelling errors - Homepage: - Repository: - Paper: - Leaderboard: - Point of Contact: ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances [URL] ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# SpellGram", "## Dataset consisting of grammatical and spelling errors\n\n- Homepage: \n- Repository: \n- Paper: \n- Leaderboard: \n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances\n\n[URL]", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#task_categories-text2text-generation #size_categories-10K<n<100K #language-English #license-mit #NLP #Text2Text #region-us \n", "# SpellGram", "## Dataset consisting of grammatical and spelling errors\n\n- Homepage: \n- Repository: \n- Paper: \n- Leaderboard: \n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances\n\n[URL]", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
1b1d662c0d7af12fff0ffc3d34f1427148aa981d
# Dataset Card for "datasets-test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
cahya/datasets-test
[ "region:us" ]
2023-01-09T14:46:57+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 51686701.30956695, "num_examples": 24336}, {"name": "test", "num_bytes": 5745090.690433046, "num_examples": 2705}], "download_size": 33849787, "dataset_size": 57431792.0}}
2023-01-09T14:48:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "datasets-test" More Information needed
[ "# Dataset Card for \"datasets-test\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"datasets-test\"\n\nMore Information needed" ]
63e62b943b92c1f5626045b050de996e179d1e46
This is a dataset for translation.
amineT/translt4
[ "region:us" ]
2023-01-09T15:17:38+00:00
{}
2023-01-09T15:18:27+00:00
[]
[]
TAGS #region-us
This is a dataset for translation.
[]
[ "TAGS\n#region-us \n" ]
80ae97254b39cc08f1a617fa5b3b0c8875371235
# Dataset Card for "OxfordPets_train" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Multimodal-Fatima/OxfordPets_train
[ "region:us" ]
2023-01-09T16:56:48+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "abyssinian", "1": "american bulldog", "2": "american pit bull terrier", "3": "basset hound", "4": "beagle", "5": "bengal", "6": "birman", "7": "bombay", "8": "boxer", "9": "british shorthair", "10": "chihuahua", "11": "egyptian mau", "12": "english cocker spaniel", "13": "english setter", "14": "german shorthaired", "15": "great pyrenees", "16": "havanese", "17": "japanese chin", "18": "keeshond", "19": "leonberger", "20": "maine coon", "21": "miniature pinscher", "22": "newfoundland", "23": "persian", "24": "pomeranian", "25": "pug", "26": "ragdoll", "27": "russian blue", "28": "saint bernard", "29": "samoyed", "30": "scottish terrier", "31": "shiba inu", "32": "siamese", "33": "sphynx", "34": "staffordshire bull terrier", "35": "wheaten terrier", "36": "yorkshire terrier"}}}}, {"name": "species", "dtype": {"class_label": {"names": {"0": "Cat", "1": "Dog"}}}}, {"name": "id", "dtype": "int64"}, {"name": "clip_tags_ViT_L_14", "sequence": "string"}, {"name": "blip_caption", "dtype": "string"}, {"name": "LLM_Description_opt175b_downstream_tasks_ViT_L_14", "sequence": "string"}, {"name": "LLM_Description_gpt3_downstream_tasks_ViT_L_14", "sequence": "string"}, {"name": "clip_tags_ViT_L_14_ensemble_specific", "dtype": "string"}, {"name": "clip_tags_ViT_L_14_simple_specific", "dtype": "string"}, {"name": "LLM_Description_gpt3_downstream_tasks_visual_genome_ViT_L_14", "sequence": "string"}, {"name": "clip_tags_ViT_L_14with_openai_classes", "sequence": "string"}, {"name": "clip_tags_ViT_L_14_wo_openai_classes", "sequence": "string"}, {"name": "clip_tags_ViT_L_14_with_openai_classes", "sequence": "string"}, {"name": "Attributes_ViT_L_14_text_davinci_003", "sequence": "string"}, {"name": "Attributes_ViT_L_14_text_davinci_003_full", "sequence": "string"}, {"name": "Attributes_ViT_L_14_text_davinci_003_oxfordpets", "sequence": "string"}, {"name": "clip_tags_ViT_B_16_simple_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_16_ensemble_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_32_simple_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_32_ensemble_specific", "dtype": "string"}, {"name": "Attributes_ViT_B_16_descriptors_text_davinci_003_full", "sequence": "string"}, {"name": "Attributes_LAION_ViT_H_14_2B_descriptors_text_davinci_003_full", "sequence": "string"}, {"name": "clip_tags_LAION_ViT_H_14_2B_simple_specific", "dtype": "string"}, {"name": "clip_tags_LAION_ViT_H_14_2B_ensemble_specific", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 386730161.36, "num_examples": 3680}], "download_size": 378295172, "dataset_size": 386730161.36}}
2023-05-04T03:54:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for "OxfordPets_train" More Information needed
[ "# Dataset Card for \"OxfordPets_train\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"OxfordPets_train\"\n\nMore Information needed" ]
a97a01c39799efad09522ed8f7dff13f8b86770d
# Dataset Card for "OxfordPets_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Multimodal-Fatima/OxfordPets_test
[ "region:us" ]
2023-01-09T16:59:18+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "abyssinian", "1": "american bulldog", "2": "american pit bull terrier", "3": "basset hound", "4": "beagle", "5": "bengal", "6": "birman", "7": "bombay", "8": "boxer", "9": "british shorthair", "10": "chihuahua", "11": "egyptian mau", "12": "english cocker spaniel", "13": "english setter", "14": "german shorthaired", "15": "great pyrenees", "16": "havanese", "17": "japanese chin", "18": "keeshond", "19": "leonberger", "20": "maine coon", "21": "miniature pinscher", "22": "newfoundland", "23": "persian", "24": "pomeranian", "25": "pug", "26": "ragdoll", "27": "russian blue", "28": "saint bernard", "29": "samoyed", "30": "scottish terrier", "31": "shiba inu", "32": "siamese", "33": "sphynx", "34": "staffordshire bull terrier", "35": "wheaten terrier", "36": "yorkshire terrier"}}}}, {"name": "species", "dtype": {"class_label": {"names": {"0": "Cat", "1": "Dog"}}}}, {"name": "id", "dtype": "int64"}, {"name": "clip_tags_ViT_L_14", "sequence": "string"}, {"name": "blip_caption", "dtype": "string"}, {"name": "LLM_Description_gpt3_downstream_tasks_ViT_L_14", "sequence": "string"}, {"name": "clip_tag_ViT_L_14_specific", "dtype": "string"}, {"name": "clip_tags_ViT_L_14_ensemble_specific", "dtype": "string"}, {"name": "clip_tags_ViT_L_14_simple_specific", "dtype": "string"}, {"name": "LLM_Description_gpt3_downstream_tasks_visual_genome_ViT_L_14", "sequence": "string"}, {"name": "clip_tags_ViT_L_14_with_openai_classes", "sequence": "string"}, {"name": "clip_tags_ViT_L_14_wo_openai_classes", "sequence": "string"}, {"name": "Attributes_ViT_L_14_text_davinci_003", "sequence": "string"}, {"name": "Attributes_ViT_L_14_text_davinci_003_full", "sequence": "string"}, {"name": "Attributes_ViT_L_14_text_davinci_003_oxfordpets", "sequence": "string"}, {"name": "clip_tags_ViT_B_16_simple_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_16_ensemble_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_32_simple_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_32_ensemble_specific", "dtype": "string"}, {"name": "Attributes_ViT_L_14_descriptors_text_davinci_003_full_validate", "sequence": "string"}, {"name": "Attributes_ViT_B_16_descriptors_text_davinci_003_full", "sequence": "string"}, {"name": "Attributes_LAION_ViT_H_14_2B_descriptors_text_davinci_003_full", "sequence": "string"}, {"name": "clip_tags_LAION_ViT_H_14_2B_simple_specific", "dtype": "string"}, {"name": "clip_tags_LAION_ViT_H_14_2B_ensemble_specific", "dtype": "string"}, {"name": "blip_caption_beam_5_Salesforce_blip2_opt_6.7b", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 421721560.0, "num_examples": 3669}], "download_size": 413176127, "dataset_size": 421721560.0}}
2023-08-15T04:11:14+00:00
[]
[]
TAGS #region-us
# Dataset Card for "OxfordPets_test" More Information needed
[ "# Dataset Card for \"OxfordPets_test\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"OxfordPets_test\"\n\nMore Information needed" ]
617b623475a1a0f7fa10fce249beacf7e747117b
# Dataset Card for "Caltech101_not_background_train" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Multimodal-Fatima/Caltech101_not_background_train
[ "region:us" ]
2023-01-09T17:50:41+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "accordion", "1": "airplanes", "2": "anchor", "3": "ant", "4": "background google", "5": "barrel", "6": "bass", "7": "beaver", "8": "binocular", "9": "bonsai", "10": "brain", "11": "brontosaurus", "12": "buddha", "13": "butterfly", "14": "camera", "15": "cannon", "16": "car side", "17": "ceiling fan", "18": "cellphone", "19": "chair", "20": "chandelier", "21": "cougar body", "22": "cougar face", "23": "crab", "24": "crayfish", "25": "crocodile", "26": "crocodile head", "27": "cup", "28": "dalmatian", "29": "dollar bill", "30": "dolphin", "31": "dragonfly", "32": "electric guitar", "33": "elephant", "34": "emu", "35": "euphonium", "36": "ewer", "37": "faces", "38": "faces easy", "39": "ferry", "40": "flamingo", "41": "flamingo head", "42": "garfield", "43": "gerenuk", "44": "gramophone", "45": "grand piano", "46": "hawksbill", "47": "headphone", "48": "hedgehog", "49": "helicopter", "50": "ibis", "51": "inline skate", "52": "joshua tree", "53": "kangaroo", "54": "ketch", "55": "lamp", "56": "laptop", "57": "leopards", "58": "llama", "59": "lobster", "60": "lotus", "61": "mandolin", "62": "mayfly", "63": "menorah", "64": "metronome", "65": "minaret", "66": "motorbikes", "67": "nautilus", "68": "octopus", "69": "okapi", "70": "pagoda", "71": "panda", "72": "pigeon", "73": "pizza", "74": "platypus", "75": "pyramid", "76": "revolver", "77": "rhino", "78": "rooster", "79": "saxophone", "80": "schooner", "81": "scissors", "82": "scorpion", "83": "sea horse", "84": "snoopy", "85": "soccer ball", "86": "stapler", "87": "starfish", "88": "stegosaurus", "89": "stop sign", "90": "strawberry", "91": "sunflower", "92": "tick", "93": "trilobite", "94": "umbrella", "95": "watch", "96": "water lilly", "97": "wheelchair", "98": "wild cat", "99": "windsor chair", "100": "wrench", "101": "yin yang"}}}}, {"name": "annotation", "struct": [{"name": "obj_contour", "dtype": {"array2_d": {"shape": [2], "dtype": "float64"}}}, {"name": "box_coord", "dtype": {"array2_d": {"shape": [1, 4], "dtype": "int64"}}}]}, {"name": "id", "dtype": "int64"}, {"name": "clip_tags_ViT_L_14", "sequence": "string"}, {"name": "blip_caption", "dtype": "string"}, {"name": "LLM_Description_gpt3_downstream_tasks_ViT_L_14", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 46675197.0, "num_examples": 3030}], "download_size": 45748181, "dataset_size": 46675197.0}}
2023-01-28T20:22:40+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Caltech101_not_background_train" More Information needed
[ "# Dataset Card for \"Caltech101_not_background_train\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Caltech101_not_background_train\"\n\nMore Information needed" ]
93e6ee1710cdd4019a336ffca12b399d8287ea7d
# Dataset Card for "Caltech101_not_background_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Multimodal-Fatima/Caltech101_not_background_test
[ "region:us" ]
2023-01-09T17:52:55+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "accordion", "1": "airplanes", "2": "anchor", "3": "ant", "4": "background google", "5": "barrel", "6": "bass", "7": "beaver", "8": "binocular", "9": "bonsai", "10": "brain", "11": "brontosaurus", "12": "buddha", "13": "butterfly", "14": "camera", "15": "cannon", "16": "car side", "17": "ceiling fan", "18": "cellphone", "19": "chair", "20": "chandelier", "21": "cougar body", "22": "cougar face", "23": "crab", "24": "crayfish", "25": "crocodile", "26": "crocodile head", "27": "cup", "28": "dalmatian", "29": "dollar bill", "30": "dolphin", "31": "dragonfly", "32": "electric guitar", "33": "elephant", "34": "emu", "35": "euphonium", "36": "ewer", "37": "faces", "38": "faces easy", "39": "ferry", "40": "flamingo", "41": "flamingo head", "42": "garfield", "43": "gerenuk", "44": "gramophone", "45": "grand piano", "46": "hawksbill", "47": "headphone", "48": "hedgehog", "49": "helicopter", "50": "ibis", "51": "inline skate", "52": "joshua tree", "53": "kangaroo", "54": "ketch", "55": "lamp", "56": "laptop", "57": "leopards", "58": "llama", "59": "lobster", "60": "lotus", "61": "mandolin", "62": "mayfly", "63": "menorah", "64": "metronome", "65": "minaret", "66": "motorbikes", "67": "nautilus", "68": "octopus", "69": "okapi", "70": "pagoda", "71": "panda", "72": "pigeon", "73": "pizza", "74": "platypus", "75": "pyramid", "76": "revolver", "77": "rhino", "78": "rooster", "79": "saxophone", "80": "schooner", "81": "scissors", "82": "scorpion", "83": "sea horse", "84": "snoopy", "85": "soccer ball", "86": "stapler", "87": "starfish", "88": "stegosaurus", "89": "stop sign", "90": "strawberry", "91": "sunflower", "92": "tick", "93": "trilobite", "94": "umbrella", "95": "watch", "96": "water lilly", "97": "wheelchair", "98": "wild cat", "99": "windsor chair", "100": "wrench", "101": "yin yang"}}}}, {"name": "id", "dtype": "int64"}, {"name": "clip_tags_ViT_L_14", "sequence": "string"}, {"name": "blip_caption", "dtype": "string"}, {"name": "LLM_Description_gpt3_downstream_tasks_ViT_L_14", "sequence": "string"}, {"name": "LLM_Description_opt175b_downstream_tasks_ViT_L_14", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 81047146.0, "num_examples": 5647}], "download_size": 78304363, "dataset_size": 81047146.0}}
2023-01-28T20:23:42+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Caltech101_not_background_test" More Information needed
[ "# Dataset Card for \"Caltech101_not_background_test\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Caltech101_not_background_test\"\n\nMore Information needed" ]
d1d29bfb39747ec33f0ac0daeef78e6331edf1a6
# Dataset Card for "OxfordFlowers_train" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Multimodal-Fatima/OxfordFlowers_train
[ "region:us" ]
2023-01-09T17:56:51+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "pink primrose", "1": "hard-leaved pocket orchid", "2": "canterbury bells", "3": "sweet pea", "4": "english marigold", "5": "tiger lily", "6": "moon orchid", "7": "bird of paradise", "8": "monkshood", "9": "globe thistle", "10": "snapdragon", "11": "colt's foot", "12": "king protea", "13": "spear thistle", "14": "yellow iris", "15": "globe-flower", "16": "purple coneflower", "17": "peruvian lily", "18": "balloon flower", "19": "giant white arum lily", "20": "fire lily", "21": "pincushion flower", "22": "fritillary", "23": "red ginger", "24": "grape hyacinth", "25": "corn poppy", "26": "prince of wales feathers", "27": "stemless gentian", "28": "artichoke", "29": "sweet william", "30": "carnation", "31": "garden phlox", "32": "love in the mist", "33": "mexican aster", "34": "alpine sea holly", "35": "ruby-lipped cattleya", "36": "cape flower", "37": "great masterwort", "38": "siam tulip", "39": "lenten rose", "40": "barbeton daisy", "41": "daffodil", "42": "sword lily", "43": "poinsettia", "44": "bolero deep blue", "45": "wallflower", "46": "marigold", "47": "buttercup", "48": "oxeye daisy", "49": "common dandelion", "50": "petunia", "51": "wild pansy", "52": "primula", "53": "sunflower", "54": "pelargonium", "55": "bishop of llandaff", "56": "gaura", "57": "geranium", "58": "orange dahlia", "59": "pink-yellow dahlia?", "60": "cautleya spicata", "61": "japanese anemone", "62": "black-eyed susan", "63": "silverbush", "64": "californian poppy", "65": "osteospermum", "66": "spring crocus", "67": "bearded iris", "68": "windflower", "69": "tree poppy", "70": "gazania", "71": "azalea", "72": "water lily", "73": "rose", "74": "thorn apple", "75": "morning glory", "76": "passion flower", "77": "lotus", "78": "toad lily", "79": "anthurium", "80": "frangipani", "81": "clematis", "82": "hibiscus", "83": "columbine", "84": "desert-rose", "85": "tree mallow", "86": "magnolia", "87": "cyclamen", "88": "watercress", "89": "canna lily", "90": "hippeastrum", "91": "bee balm", "92": "ball moss", "93": "foxglove", "94": "bougainvillea", "95": "camellia", "96": "mallow", "97": "mexican petunia", "98": "bromelia", "99": "blanket flower", "100": "trumpet creeper", "101": "blackberry lily"}}}}, {"name": "id", "dtype": "int64"}, {"name": "clip_tags_ViT_L_14", "sequence": "string"}, {"name": "blip_caption", "dtype": "string"}, {"name": "LLM_Description_gpt3_downstream_tasks_ViT_L_14", "sequence": "string"}, {"name": "clip_tags_ViT_L_14_ensemble_specific", "dtype": "string"}, {"name": "LLM_Description_gpt3_downstream_tasks_visual_genome_ViT_L_14", "sequence": "string"}, {"name": "clip_tags_ViT_L_14_with_openai_classes", "sequence": "string"}, {"name": "clip_tags_ViT_L_14_wo_openai_classes", "sequence": "string"}, {"name": "Attributes_ViT_L_14_text_davinci_003", "sequence": "string"}, {"name": "Attributes_ViT_L_14_text_davinci_003_full", "sequence": "string"}, {"name": "Attributes_ViT_L_14_text_davinci_003_oxfordflowers", "sequence": "string"}, {"name": "clip_tags_ViT_L_14_simple_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_16_simple_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_16_ensemble_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_32_simple_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_32_ensemble_specific", "dtype": "string"}, {"name": "Attributes_ViT_B_16_descriptors_text_davinci_003_full", "sequence": "string"}, {"name": 
"Attributes_LAION_ViT_H_14_2B_descriptors_text_davinci_003_full", "sequence": "string"}, {"name": "clip_tags_LAION_ViT_H_14_2B_simple_specific", "dtype": "string"}, {"name": "clip_tags_LAION_ViT_H_14_2B_ensemble_specific", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 45649356.0, "num_examples": 1020}], "download_size": 43625002, "dataset_size": 45649356.0}}
2023-05-04T04:37:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "OxfordFlowers_train" More Information needed
[ "# Dataset Card for \"OxfordFlowers_train\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"OxfordFlowers_train\"\n\nMore Information needed" ]
03293f7ccbfe329dc38d5f0f57b4546a93b044d5
# Dataset Card for "OxfordFlowers_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Multimodal-Fatima/OxfordFlowers_test
[ "region:us" ]
2023-01-09T17:57:03+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "pink primrose", "1": "hard-leaved pocket orchid", "2": "canterbury bells", "3": "sweet pea", "4": "english marigold", "5": "tiger lily", "6": "moon orchid", "7": "bird of paradise", "8": "monkshood", "9": "globe thistle", "10": "snapdragon", "11": "colt's foot", "12": "king protea", "13": "spear thistle", "14": "yellow iris", "15": "globe-flower", "16": "purple coneflower", "17": "peruvian lily", "18": "balloon flower", "19": "giant white arum lily", "20": "fire lily", "21": "pincushion flower", "22": "fritillary", "23": "red ginger", "24": "grape hyacinth", "25": "corn poppy", "26": "prince of wales feathers", "27": "stemless gentian", "28": "artichoke", "29": "sweet william", "30": "carnation", "31": "garden phlox", "32": "love in the mist", "33": "mexican aster", "34": "alpine sea holly", "35": "ruby-lipped cattleya", "36": "cape flower", "37": "great masterwort", "38": "siam tulip", "39": "lenten rose", "40": "barbeton daisy", "41": "daffodil", "42": "sword lily", "43": "poinsettia", "44": "bolero deep blue", "45": "wallflower", "46": "marigold", "47": "buttercup", "48": "oxeye daisy", "49": "common dandelion", "50": "petunia", "51": "wild pansy", "52": "primula", "53": "sunflower", "54": "pelargonium", "55": "bishop of llandaff", "56": "gaura", "57": "geranium", "58": "orange dahlia", "59": "pink-yellow dahlia?", "60": "cautleya spicata", "61": "japanese anemone", "62": "black-eyed susan", "63": "silverbush", "64": "californian poppy", "65": "osteospermum", "66": "spring crocus", "67": "bearded iris", "68": "windflower", "69": "tree poppy", "70": "gazania", "71": "azalea", "72": "water lily", "73": "rose", "74": "thorn apple", "75": "morning glory", "76": "passion flower", "77": "lotus", "78": "toad lily", "79": "anthurium", "80": "frangipani", "81": "clematis", "82": "hibiscus", "83": "columbine", "84": "desert-rose", "85": "tree mallow", "86": "magnolia", "87": "cyclamen", "88": "watercress", "89": "canna lily", "90": "hippeastrum", "91": "bee balm", "92": "ball moss", "93": "foxglove", "94": "bougainvillea", "95": "camellia", "96": "mallow", "97": "mexican petunia", "98": "bromelia", "99": "blanket flower", "100": "trumpet creeper", "101": "blackberry lily"}}}}, {"name": "id", "dtype": "int64"}, {"name": "clip_tags_ViT_L_14", "sequence": "string"}, {"name": "blip_caption", "dtype": "string"}, {"name": "LLM_Description_gpt3_downstream_tasks_ViT_L_14", "sequence": "string"}, {"name": "LLM_Description_opt175b_downstream_tasks_ViT_L_14", "sequence": "string"}, {"name": "LLM_Description_gpt3_downstream_tasks_visual_genome_ViT_L_14", "sequence": "string"}, {"name": "clip_tags_ViT_L_14_with_openai_classes", "sequence": "string"}, {"name": "clip_tags_ViT_L_14_wo_openai_classes", "sequence": "string"}, {"name": "Attributes_ViT_L_14_text_davinci_003", "sequence": "string"}, {"name": "Attributes_ViT_L_14_text_davinci_003_full", "sequence": "string"}, {"name": "Attributes_ViT_L_14_text_davinci_003_oxfordflowers", "sequence": "string"}, {"name": "clip_tags_ViT_L_14_simple_specific", "dtype": "string"}, {"name": "clip_tags_ViT_L_14_ensemble_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_16_simple_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_16_ensemble_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_32_simple_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_32_ensemble_specific", "dtype": "string"}, {"name": 
"Attributes_ViT_B_16_descriptors_text_davinci_003_full", "sequence": "string"}, {"name": "Attributes_LAION_ViT_H_14_2B_descriptors_text_davinci_003_full", "sequence": "string"}, {"name": "clip_tags_LAION_ViT_H_14_2B_simple_specific", "dtype": "string"}, {"name": "clip_tags_LAION_ViT_H_14_2B_ensemble_specific", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 275107541.0, "num_examples": 6149}], "download_size": 261098161, "dataset_size": 275107541.0}}
2023-06-02T01:11:11+00:00
[]
[]
TAGS #region-us
# Dataset Card for "OxfordFlowers_test" More Information needed
[ "# Dataset Card for \"OxfordFlowers_test\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"OxfordFlowers_test\"\n\nMore Information needed" ]
6eb63befc666151866d1277f5b9777f7620bb308
# Dataset Card for "DTD_parition1_train" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Multimodal-Fatima/DTD_parition1_train
[ "region:us" ]
2023-01-09T18:01:34+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "banded", "1": "blotchy", "2": "braided", "3": "bubbly", "4": "bumpy", "5": "chequered", "6": "cobwebbed", "7": "cracked", "8": "crosshatched", "9": "crystalline", "10": "dotted", "11": "fibrous", "12": "flecked", "13": "freckled", "14": "frilly", "15": "gauzy", "16": "grid", "17": "grooved", "18": "honeycombed", "19": "interlaced", "20": "knitted", "21": "lacelike", "22": "lined", "23": "marbled", "24": "matted", "25": "meshed", "26": "paisley", "27": "perforated", "28": "pitted", "29": "pleated", "30": "polka-dotted", "31": "porous", "32": "potholed", "33": "scaly", "34": "smeared", "35": "spiralled", "36": "sprinkled", "37": "stained", "38": "stratified", "39": "striped", "40": "studded", "41": "swirly", "42": "veined", "43": "waffled", "44": "woven", "45": "wrinkled", "46": "zigzagged"}}}}, {"name": "id", "dtype": "int64"}, {"name": "clip_tags_ViT_L_14", "sequence": "string"}, {"name": "LLM_Description_opt175b_downstream_tasks_ViT_L_14", "sequence": "string"}, {"name": "LLM_Description_gpt3_downstream_tasks_ViT_L_14", "sequence": "string"}, {"name": "blip_caption", "dtype": "string"}, {"name": "clip_tags_ViT_L_14_ensemble_specific", "dtype": "string"}, {"name": "clip_tags_ViT_L_14_simple_specific", "dtype": "string"}, {"name": "LLM_Description_gpt3_downstream_tasks_visual_genome_ViT_L_14", "sequence": "string"}, {"name": "Attributes_ViT_L_14_text_davinci_003", "sequence": "string"}, {"name": "Attributes_ViT_L_14_text_davinci_003_dtd", "sequence": "string"}, {"name": "Attributes_ViT_L_14_text_davinci_003_full", "sequence": "string"}, {"name": "clip_tags_ViT_L_14_with_openai_classes", "sequence": "string"}, {"name": "clip_tags_ViT_L_14_wo_openai_classes", "sequence": "string"}, {"name": "clip_tags_ViT_B_16_simple_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_16_ensemble_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_32_simple_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_32_ensemble_specific", "dtype": "string"}, {"name": "Attributes_ViT_B_16_descriptors_text_davinci_003_full", "sequence": "string"}, {"name": "Attributes_LAION_ViT_H_14_2B_descriptors_text_davinci_003_full", "sequence": "string"}, {"name": "clip_tags_LAION_ViT_H_14_2B_simple_specific", "dtype": "string"}, {"name": "clip_tags_LAION_ViT_H_14_2B_ensemble_specific", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 235001213.4, "num_examples": 1880}], "download_size": 230863096, "dataset_size": 235001213.4}}
2023-05-04T04:08:45+00:00
[]
[]
TAGS #region-us
# Dataset Card for "DTD_parition1_train" More Information needed
[ "# Dataset Card for \"DTD_parition1_train\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"DTD_parition1_train\"\n\nMore Information needed" ]
37173e61d183af9da740ba4ceaccbf560f903a5a
# Dataset Card for "DTD_parition1_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Multimodal-Fatima/DTD_parition1_test
[ "region:us" ]
2023-01-09T18:02:00+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "banded", "1": "blotchy", "2": "braided", "3": "bubbly", "4": "bumpy", "5": "chequered", "6": "cobwebbed", "7": "cracked", "8": "crosshatched", "9": "crystalline", "10": "dotted", "11": "fibrous", "12": "flecked", "13": "freckled", "14": "frilly", "15": "gauzy", "16": "grid", "17": "grooved", "18": "honeycombed", "19": "interlaced", "20": "knitted", "21": "lacelike", "22": "lined", "23": "marbled", "24": "matted", "25": "meshed", "26": "paisley", "27": "perforated", "28": "pitted", "29": "pleated", "30": "polka-dotted", "31": "porous", "32": "potholed", "33": "scaly", "34": "smeared", "35": "spiralled", "36": "sprinkled", "37": "stained", "38": "stratified", "39": "striped", "40": "studded", "41": "swirly", "42": "veined", "43": "waffled", "44": "woven", "45": "wrinkled", "46": "zigzagged"}}}}, {"name": "id", "dtype": "int64"}, {"name": "clip_tags_ViT_L_14", "sequence": "string"}, {"name": "LLM_Description_opt175b_downstream_tasks_ViT_L_14", "sequence": "string"}, {"name": "LLM_Description_gpt3_downstream_tasks_ViT_L_14", "sequence": "string"}, {"name": "blip_caption", "dtype": "string"}, {"name": "clip_tag_ViT_L_14_specific", "dtype": "string"}, {"name": "clip_tags_ViT_L_14_ensemble_specific", "dtype": "string"}, {"name": "clip_tags_ViT_L_14_simple_specific", "dtype": "string"}, {"name": "LLM_Description_gpt3_downstream_tasks_visual_genome_ViT_L_14", "sequence": "string"}, {"name": "Attributes_ViT_L_14_text_davinci_003", "sequence": "string"}, {"name": "Attributes_ViT_L_14_text_davinci_003_dtd", "sequence": "string"}, {"name": "Attributes_ViT_L_14_text_davinci_003_full", "sequence": "string"}, {"name": "clip_tags_ViT_L_14_with_openai_classes", "sequence": "string"}, {"name": "clip_tags_ViT_L_14_wo_openai_classes", "sequence": "string"}, {"name": "clip_tags_ViT_B_16_simple_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_16_ensemble_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_32_simple_specific", "dtype": "string"}, {"name": "clip_tags_ViT_B_32_ensemble_specific", "dtype": "string"}, {"name": "Attributes_ViT_B_16_descriptors_text_davinci_003_full", "sequence": "string"}, {"name": "Attributes_LAION_ViT_H_14_2B_descriptors_text_davinci_003_full", "sequence": "string"}, {"name": "clip_tags_LAION_ViT_H_14_2B_simple_specific", "dtype": "string"}, {"name": "clip_tags_LAION_ViT_H_14_2B_ensemble_specific", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 184279525.4, "num_examples": 1880}], "download_size": 180002375, "dataset_size": 184279525.4}}
2023-06-02T01:05:59+00:00
[]
[]
TAGS #region-us
# Dataset Card for "DTD_parition1_test" More Information needed
[ "# Dataset Card for \"DTD_parition1_test\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"DTD_parition1_test\"\n\nMore Information needed" ]
faf6b834d42218ad07650ff6cbb624c7e92f6d73
--- TODO: Add YAML tags here. Copy-paste the tags obtained with the online tagging app: https://huggingface.co/spaces/huggingface/datasets-tagging --- # Dataset Card for Testing Stock Data ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This is a test dataset ### Supported Tasks and Leaderboards BERT MARKET STOCK ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@github-username](https://github.com/<github-username>) for adding this dataset.
Lord-Goku/testing_1
[ "license:afl-3.0", "region:us" ]
2023-01-09T18:28:35+00:00
{"license": "afl-3.0"}
2023-01-11T18:16:39+00:00
[]
[]
TAGS #license-afl-3.0 #region-us
--- TODO: Add YAML tags here. Copy-paste the tags obtained with the online tagging app: URL --- # Dataset Card for Testing Stock Data ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: - Repository: - Paper: - Leaderboard: - Point of Contact: ### Dataset Summary This is a test dataset ### Supported Tasks and Leaderboards BERT MARKET STOCK ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions Thanks to @github-username for adding this dataset.
[ "# Dataset Card for Testing Stock Data", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary\n\nThis is a test dataset", "### Supported Tasks and Leaderboards\n\nBERT\nMARKET\nSTOCK", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @github-username for adding this dataset." ]
[ "TAGS\n#license-afl-3.0 #region-us \n", "# Dataset Card for Testing Stock Data", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary\n\nThis is a test dataset", "### Supported Tasks and Leaderboards\n\nBERT\nMARKET\nSTOCK", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @github-username for adding this dataset." ]
184d16b8e92489e53a96400fa7316d38ddf40661
# Dataset Card for "AbduRozik" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
matallanas/AbduRozik
[ "region:us" ]
2023-01-09T19:08:58+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 4418800.0, "num_examples": 22}], "download_size": 4418930, "dataset_size": 4418800.0}}
2023-01-09T19:09:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "AbduRozik" More Information needed
[ "# Dataset Card for \"AbduRozik\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"AbduRozik\"\n\nMore Information needed" ]
6a542c0150bb9ae4daa5ce0781bdee65266d2a29
# Dataset Card for "eee543" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ilhanemirhan/eee543
[ "region:us" ]
2023-01-09T19:12:04+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3303838079.792, "num_examples": 70568}, {"name": "test", "num_bytes": 1349710759.272, "num_examples": 28558}], "download_size": 4792902415, "dataset_size": 4653548839.064}}
2023-01-10T01:08:42+00:00
[]
[]
TAGS #region-us
# Dataset Card for "eee543" More Information needed
[ "# Dataset Card for \"eee543\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"eee543\"\n\nMore Information needed" ]
81e85e4f4e06a6f9b02a2f4afd47267c9d0c6cd6
# Dataset Card for "OxfordPets_test_embeddings" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Multimodal-Fatima/OxfordPets_test_embeddings
[ "region:us" ]
2023-01-09T22:36:34+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "id", "dtype": "int64"}, {"name": "vision_embeddings", "sequence": "float32"}], "splits": [{"name": "openai_clip_vit_large_patch14", "num_bytes": 424231302.0, "num_examples": 3669}], "download_size": 426276832, "dataset_size": 424231302.0}}
2023-01-09T22:36:52+00:00
[]
[]
TAGS #region-us
# Dataset Card for "OxfordPets_test_embeddings" More Information needed
[ "# Dataset Card for \"OxfordPets_test_embeddings\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"OxfordPets_test_embeddings\"\n\nMore Information needed" ]
dcd49332e43a8838bc326fb6ba33b6981883dcfe
# Dataset Card for "OxfordPets_train_embeddings" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Multimodal-Fatima/OxfordPets_train_embeddings
[ "region:us" ]
2023-01-09T22:41:17+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "id", "dtype": "int64"}, {"name": "vision_embeddings", "sequence": "float32"}], "splits": [{"name": "openai_clip_vit_large_patch14", "num_bytes": 389325271.36, "num_examples": 3680}], "download_size": 391341260, "dataset_size": 389325271.36}}
2023-01-09T22:41:40+00:00
[]
[]
TAGS #region-us
# Dataset Card for "OxfordPets_train_embeddings" More Information needed
[ "# Dataset Card for \"OxfordPets_train_embeddings\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"OxfordPets_train_embeddings\"\n\nMore Information needed" ]
b2fa6fa0df2f85031538f4c179517bf59250068c
# Dataset Card for "Caltech101_not_background_test_embeddings" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Multimodal-Fatima/Caltech101_not_background_test_embeddings
[ "region:us" ]
2023-01-09T22:44:42+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "id", "dtype": "int64"}, {"name": "vision_embeddings", "sequence": "float32"}], "splits": [{"name": "openai_clip_vit_large_patch14", "num_bytes": 95535909.0, "num_examples": 5647}], "download_size": 98967583, "dataset_size": 95535909.0}}
2023-01-09T22:44:54+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Caltech101_not_background_test_embeddings" More Information needed
[ "# Dataset Card for \"Caltech101_not_background_test_embeddings\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Caltech101_not_background_test_embeddings\"\n\nMore Information needed" ]
7b440d373203f6198aec05d9f49d46fd1bda9797
# Dataset Card for "Caltech101_not_background_train_embeddings" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Multimodal-Fatima/Caltech101_not_background_train_embeddings
[ "region:us" ]
2023-01-09T22:45:58+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "id", "dtype": "int64"}, {"name": "vision_embeddings", "sequence": "float32"}], "splits": [{"name": "openai_clip_vit_large_patch14", "num_bytes": 52727578.0, "num_examples": 3030}], "download_size": 54496531, "dataset_size": 52727578.0}}
2023-01-09T22:46:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Caltech101_not_background_train_embeddings" More Information needed
[ "# Dataset Card for \"Caltech101_not_background_train_embeddings\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Caltech101_not_background_train_embeddings\"\n\nMore Information needed" ]
ee914c3c04c5eda8278b124434c68f28e3bd7e83
# Dataset Card for "OxfordFlowers_test_embeddings" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Multimodal-Fatima/OxfordFlowers_test_embeddings
[ "region:us" ]
2023-01-09T22:48:18+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "id", "dtype": "int64"}, {"name": "vision_embeddings", "sequence": "float32"}], "splits": [{"name": "openai_clip_vit_large_patch14", "num_bytes": 279299546.0, "num_examples": 6149}], "download_size": 283131238, "dataset_size": 279299546.0}}
2023-01-29T01:32:53+00:00
[]
[]
TAGS #region-us
# Dataset Card for "OxfordFlowers_test_embeddings" More Information needed
[ "# Dataset Card for \"OxfordFlowers_test_embeddings\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"OxfordFlowers_test_embeddings\"\n\nMore Information needed" ]
c35d055781b535e872cde32943e25e1ebe65e44f
# Dataset Card for "OxfordFlowers_train_embeddings" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Multimodal-Fatima/OxfordFlowers_train_embeddings
[ "region:us" ]
2023-01-09T22:49:47+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "id", "dtype": "int64"}, {"name": "vision_embeddings", "sequence": "float32"}], "splits": [{"name": "openai_clip_vit_large_patch14", "num_bytes": 46545819.0, "num_examples": 1020}], "download_size": 47189831, "dataset_size": 46545819.0}}
2023-01-29T01:41:58+00:00
[]
[]
TAGS #region-us
# Dataset Card for "OxfordFlowers_train_embeddings" More Information needed
[ "# Dataset Card for \"OxfordFlowers_train_embeddings\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"OxfordFlowers_train_embeddings\"\n\nMore Information needed" ]
70ba1691b7ded13a4a7d98dc95fb9dd3817a1bc1
# CMP Facade Database

We present a dataset of facade images assembled at the Center for Machine Perception. It includes 606 rectified images of facades from various sources that have been manually annotated. The facades come from different cities around the world and represent diverse architectural styles.

## Documentation

Data origin, format and processing, and the annotation principles for the 12 classes are specified in the report:

- facade
- molding
- cornice
- pillar
- window
- door
- sill
- blind
- balcony
- shop
- deco
- background

Link to the original website: https://cmp.felk.cvut.cz/~tylecr1/facade/

## Citation

Please use the following reference to cite the dataset:

```latex
@INPROCEEDINGS{Tylecek13,
  author = {Radim Tyle{\v c}ek and Radim {\v S}{\' a}ra},
  title = {Spatial Pattern Templates for Recognition of Objects with Regular Structure},
  booktitle = {Proc. GCPR},
  year = {2013},
  address = {Saarbrucken, Germany},
}
```
Xpitfire/cmp_facade
[ "task_categories:image-segmentation", "language:en", "license:mit", "building", "facade", "region:us" ]
2023-01-09T22:51:59+00:00
{"language": ["en"], "license": "mit", "task_categories": ["image-segmentation"], "tags": ["building", "facade"]}
2023-01-15T01:43:17+00:00
[]
[ "en" ]
TAGS #task_categories-image-segmentation #language-English #license-mit #building #facade #region-us
# CMP Facade Database We present a dataset of facade images assembled at the Center for Machine Perception, which includes 606 rectified images of facades from various sources, which have been manually annotated. The facades are from different cities around the world and diverse architectural styles. Documentation Data origin, format and processing, annotation principles for 12 classes are specified in the report. - facade - molding - cornice - pillar - window - door - sill - blind - balcony - shop - deco - background Link to original website: URL Citation Please use the following reference to cite the dataset:
[ "# CMP Facade Database\nWe present a dataset of facade images assembled at the Center for Machine Perception, which includes 606 rectified images of facades from various sources, which have been manually annotated. The facades are from different cities around the world and diverse architectural styles.\nDocumentation\n\nData origin, format and processing, annotation principles for 12 classes are specified in the report.\n\n- facade \n- molding\n- cornice\n- pillar\n- window\n- door\n- sill\n- blind\n- balcony\n- shop\n- deco\n- background\n\nLink to original website:\nURL\n\nCitation\nPlease use the following reference to cite the dataset:" ]
[ "TAGS\n#task_categories-image-segmentation #language-English #license-mit #building #facade #region-us \n", "# CMP Facade Database\nWe present a dataset of facade images assembled at the Center for Machine Perception, which includes 606 rectified images of facades from various sources, which have been manually annotated. The facades are from different cities around the world and diverse architectural styles.\nDocumentation\n\nData origin, format and processing, annotation principles for 12 classes are specified in the report.\n\n- facade \n- molding\n- cornice\n- pillar\n- window\n- door\n- sill\n- blind\n- balcony\n- shop\n- deco\n- background\n\nLink to original website:\nURL\n\nCitation\nPlease use the following reference to cite the dataset:" ]
e03a13d392a888ab4b908cf2c60234fb2eea2eb5
# Dataset Card for "DTD_parition1_test_embeddings" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Multimodal-Fatima/DTD_parition1_test_embeddings
[ "region:us" ]
2023-01-09T22:53:19+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "id", "dtype": "int64"}, {"name": "vision_embeddings", "sequence": "float32"}], "splits": [{"name": "openai_clip_vit_large_patch14", "num_bytes": 185806556.4, "num_examples": 1880}], "download_size": 186181373, "dataset_size": 185806556.4}}
2023-01-29T01:33:44+00:00
[]
[]
TAGS #region-us
# Dataset Card for "DTD_parition1_test_embeddings" More Information needed
[ "# Dataset Card for \"DTD_parition1_test_embeddings\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"DTD_parition1_test_embeddings\"\n\nMore Information needed" ]
08cb6bef3f7f9608911431df8c82c7e4007ab051
# Dataset Card for "DTD_parition1_train_embeddings" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Multimodal-Fatima/DTD_parition1_train_embeddings
[ "region:us" ]
2023-01-09T22:55:09+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "id", "dtype": "int64"}, {"name": "vision_embeddings", "sequence": "float32"}], "splits": [{"name": "openai_clip_vit_large_patch14", "num_bytes": 236557256.4, "num_examples": 1880}], "download_size": 237044519, "dataset_size": 236557256.4}}
2023-01-29T01:42:48+00:00
[]
[]
TAGS #region-us
# Dataset Card for "DTD_parition1_train_embeddings" More Information needed
[ "# Dataset Card for \"DTD_parition1_train_embeddings\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"DTD_parition1_train_embeddings\"\n\nMore Information needed" ]
198d3c4bce5f3b8ac529e85364c3242a4a3ec1d9
# Dataset Card for "M2BD" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
nourlachtar/M2BD
[ "region:us" ]
2023-01-09T23:24:22+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "tokens", "sequence": "string"}, {"name": "ner_tags", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 1643326, "num_examples": 758}, {"name": "validation", "num_bytes": 82025, "num_examples": 42}, {"name": "test", "num_bytes": 83279, "num_examples": 43}], "download_size": 314857, "dataset_size": 1808630}}
2023-01-10T12:43:59+00:00
[]
[]
TAGS #region-us
# Dataset Card for "M2BD" More Information needed
[ "# Dataset Card for \"M2BD\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"M2BD\"\n\nMore Information needed" ]
3370987343b39cf8f9d374c182e309608335804a
# Dataset Card for "rick-and-morty-s5" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
juliaturc/rick-and-morty-s5
[ "region:us" ]
2023-01-09T23:55:21+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}, {"name": "caption", "dtype": "string"}, {"name": "subtitle", "dtype": "string"}, {"name": "special_token", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 173445606.66373935, "num_examples": 2513}, {"name": "test", "num_bytes": 19233393.70426065, "num_examples": 280}], "download_size": 192257158, "dataset_size": 192679000.368}}
2023-01-09T23:55:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rick-and-morty-s5" More Information needed
[ "# Dataset Card for \"rick-and-morty-s5\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rick-and-morty-s5\"\n\nMore Information needed" ]
4c15ffcc724c4804ecda677e6a63d2d0741f5d09
# Dataset Card for "openai_summarize_tldr" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
CarperAI/openai_summarize_tldr
[ "region:us" ]
2023-01-10T02:53:30+00:00
{"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "label", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 181260841, "num_examples": 116722}, {"name": "valid", "num_bytes": 10018338, "num_examples": 6447}, {"name": "test", "num_bytes": 10198128, "num_examples": 6553}], "download_size": 122973500, "dataset_size": 201477307}}
2023-01-10T02:53:40+00:00
[]
[]
TAGS #region-us
# Dataset Card for "openai_summarize_tldr" More Information needed
[ "# Dataset Card for \"openai_summarize_tldr\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"openai_summarize_tldr\"\n\nMore Information needed" ]
c92352d1f45757876c694ac1f85f9e9e74834347
# Dataset Card for "pingu-images-dreambooth" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
parinzee/pingu-images-dreambooth
[ "region:us" ]
2023-01-10T03:48:19+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 11360169.0, "num_examples": 14}], "download_size": 11358375, "dataset_size": 11360169.0}}
2023-01-10T03:48:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "pingu-images-dreambooth" More Information needed
[ "# Dataset Card for \"pingu-images-dreambooth\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"pingu-images-dreambooth\"\n\nMore Information needed" ]
c98c54bc90c6423318034a3f9095337c2073fb5f
# Dataset Card for "boostcamp-docvqa" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Ssunbell/boostcamp-docvqa
[ "region:us" ]
2023-01-10T06:21:38+00:00
{"dataset_info": {"features": [{"name": "questionId", "dtype": "int64"}, {"name": "question", "dtype": "string"}, {"name": "image", "sequence": {"sequence": {"sequence": {"sequence": "uint8"}}}}, {"name": "docId", "dtype": "int64"}, {"name": "ucsf_document_id", "dtype": "string"}, {"name": "ucsf_document_page_no", "dtype": "string"}, {"name": "answers", "sequence": "string"}, {"name": "data_split", "dtype": "string"}, {"name": "words", "sequence": "string"}, {"name": "boxes", "sequence": {"sequence": "int64"}}], "splits": [{"name": "train", "num_bytes": 6387690838, "num_examples": 39463}, {"name": "val", "num_bytes": 869953677, "num_examples": 5349}], "download_size": 2583317804, "dataset_size": 7257644515}}
2023-01-10T06:32:33+00:00
[]
[]
TAGS #region-us
# Dataset Card for "boostcamp-docvqa" More Information needed
[ "# Dataset Card for \"boostcamp-docvqa\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"boostcamp-docvqa\"\n\nMore Information needed" ]
f3f8fa8b762c7038411a9ec8a1073804f1764bc1
# Dataset Card for "boostcamp-docvqa-test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Ssunbell/boostcamp-docvqa-test
[ "region:us" ]
2023-01-10T06:57:08+00:00
{"dataset_info": {"features": [{"name": "questionId", "dtype": "int64"}, {"name": "question", "dtype": "string"}, {"name": "image", "sequence": {"sequence": {"sequence": {"sequence": "uint8"}}}}, {"name": "docId", "dtype": "int64"}, {"name": "ucsf_document_id", "dtype": "string"}, {"name": "ucsf_document_page_no", "dtype": "string"}, {"name": "data_split", "dtype": "string"}, {"name": "words", "sequence": "string"}, {"name": "boxes", "sequence": {"sequence": "int64"}}], "splits": [{"name": "test", "num_bytes": 843659556, "num_examples": 5188}], "download_size": 297328696, "dataset_size": 843659556}}
2023-01-10T06:59:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "boostcamp-docvqa-test" More Information needed
[ "# Dataset Card for \"boostcamp-docvqa-test\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"boostcamp-docvqa-test\"\n\nMore Information needed" ]
14d15972231aaae5b11175eded10477d4e67a2a9
# Dataset Card for "patents_green_plastics_10k" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
cwinkler/patents_green_plastics_10k
[ "region:us" ]
2023-01-10T08:51:55+00:00
{"dataset_info": {"features": [{"name": "abstract", "dtype": "string"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 7413030.0, "num_examples": 10282}], "download_size": 3678031, "dataset_size": 7413030.0}}
2023-01-10T08:53:56+00:00
[]
[]
TAGS #region-us
# Dataset Card for "patents_green_plastics_10k" More Information needed
[ "# Dataset Card for \"patents_green_plastics_10k\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"patents_green_plastics_10k\"\n\nMore Information needed" ]
4f1114880da57a4d7c53d53189d5fa7f19c57527
# Dataset Card for "images_first_day" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yuvalkirstain/images_first_day
[ "region:us" ]
2023-01-10T09:37:57+00:00
{"dataset_info": {"features": [{"name": "image_id", "dtype": "int64"}, {"name": "created_at", "dtype": "timestamp[ns]"}, {"name": "image_hash", "dtype": "string"}, {"name": "user_id", "dtype": "int64"}, {"name": "prompt", "dtype": "string"}, {"name": "negative_prompt", "dtype": "string"}, {"name": "seed", "dtype": "int64"}, {"name": "gs", "dtype": "float64"}, {"name": "steps", "dtype": "int64"}, {"name": "idx", "dtype": "int64"}, {"name": "num_generated", "dtype": "int64"}, {"name": "scheduler_cls", "dtype": "string"}, {"name": "model_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 5027572586.584, "num_examples": 6916}], "download_size": 5024119623, "dataset_size": 5027572586.584}}
2023-01-10T09:44:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for "images_first_day" More Information needed
[ "# Dataset Card for \"images_first_day\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"images_first_day\"\n\nMore Information needed" ]
1a4a3add5b7f154526c6d76406b20765a63d0d10
# Dataset Card for "EnglishLM_Chars_removed" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
kabir5297/EnglishLM_Chars_removed
[ "region:us" ]
2023-01-10T09:52:26+00:00
{"dataset_info": {"features": [{"name": "translation", "dtype": {"translation": {"languages": ["en", "es"]}}}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 947789376, "num_examples": 2009073}], "download_size": 531597761, "dataset_size": 947789376}}
2023-01-10T10:22:09+00:00
[]
[]
TAGS #region-us
# Dataset Card for "EnglishLM_Chars_removed" More Information needed
[ "# Dataset Card for \"EnglishLM_Chars_removed\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"EnglishLM_Chars_removed\"\n\nMore Information needed" ]
2f14274b2a5eb4b66c0207b6d009586a288c647f
# Dataset Card for "pick_a_pic_ranked_images_first_day" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yuvalkirstain/pick_a_pic_ranked_images_first_day
[ "region:us" ]
2023-01-10T10:15:32+00:00
{"dataset_info": {"features": [{"name": "image_id", "dtype": "int64"}, {"name": "created_at", "dtype": "timestamp[ns]"}, {"name": "image_uid", "dtype": "string"}, {"name": "user_id", "dtype": "int64"}, {"name": "prompt", "dtype": "string"}, {"name": "negative_prompt", "dtype": "string"}, {"name": "seed", "dtype": "int64"}, {"name": "gs", "dtype": "float64"}, {"name": "steps", "dtype": "int64"}, {"name": "idx", "dtype": "int64"}, {"name": "num_generated", "dtype": "int64"}, {"name": "scheduler_cls", "dtype": "string"}, {"name": "model_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 592439420.0, "num_examples": 859}], "download_size": 592037316, "dataset_size": 592439420.0}}
2023-01-10T10:16:33+00:00
[]
[]
TAGS #region-us
# Dataset Card for "pick_a_pic_ranked_images_first_day" More Information needed
[ "# Dataset Card for \"pick_a_pic_ranked_images_first_day\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"pick_a_pic_ranked_images_first_day\"\n\nMore Information needed" ]
36fd9d3bce05bde438e2a170520d5ec20c246da5
### Dataset Summary A Hebrew Deduplicated and Cleaned Common Crawl Corpus. A thoroughly cleaned and approximately deduplicated dataset for unsupervised learning. ### Citing If you use HeDC4 in your research, please cite [HeRo: RoBERTa and Longformer Hebrew Language Models](http://arxiv.org/abs/2304.11077). ``` @article{shalumov2023hero, title={HeRo: RoBERTa and Longformer Hebrew Language Models}, author={Vitaly Shalumov and Harel Haskey}, year={2023}, journal={arXiv:2304.11077}, } ```
HeNLP/HeDC4
[ "task_categories:fill-mask", "size_categories:1B<n<10B", "language:he", "arxiv:2304.11077", "region:us" ]
2023-01-10T10:28:22+00:00
{"language": ["he"], "size_categories": ["1B<n<10B"], "task_categories": ["fill-mask"]}
2023-04-24T05:04:29+00:00
[ "2304.11077" ]
[ "he" ]
TAGS #task_categories-fill-mask #size_categories-1B<n<10B #language-Hebrew #arxiv-2304.11077 #region-us
### Dataset Summary A Hebrew Deduplicated and Cleaned Common Crawl Corpus. A thoroughly cleaned and approximately deduplicated dataset for unsupervised learning. ### Citing If you use HeDC4 in your research, please cite HeRo: RoBERTa and Longformer Hebrew Language Models.
[ "### Dataset Summary\n\nA Hebrew Deduplicated and Cleaned Common Crawl Corpus. A thoroughly cleaned and\napproximately deduplicated dataset for unsupervised learning.", "### Citing\n\nIf you use HeDC4 in your research, please cite HeRo: RoBERTa and Longformer Hebrew Language Models." ]
[ "TAGS\n#task_categories-fill-mask #size_categories-1B<n<10B #language-Hebrew #arxiv-2304.11077 #region-us \n", "### Dataset Summary\n\nA Hebrew Deduplicated and Cleaned Common Crawl Corpus. A thoroughly cleaned and\napproximately deduplicated dataset for unsupervised learning.", "### Citing\n\nIf you use HeDC4 in your research, please cite HeRo: RoBERTa and Longformer Hebrew Language Models." ]
ac947a019834dd15c41ebee1c2f9cc727a4cf56a
> 《 License 》
>
> 1. Whenever this AI data is used, it must be clearly stated that it is a result of a project of the National Information Society Agency of Korea (NIA), and the same statement must appear in any derivative works that make use of this AI data.
>
> 2. For a corporation, organization, or individual located outside the Republic of Korea to use this AI data, a separate agreement with the performing institutions and the National Information Society Agency is required.
>
> 3. Taking this AI data outside the Republic of Korea requires a separate agreement with the performing institutions and the National Information Society Agency.
>
> 4. This AI data may be used only for training artificial intelligence models. The National Information Society Agency may refuse to provide the AI data if it judges that the purpose, method, or content of its use is unlawful or inappropriate, and where the data has already been provided it may demand that use be discontinued and that the AI data be returned or destroyed.
>
> 5. The AI data received must not be shown, provided, transferred, lent, or sold to any other corporation, organization, or individual that has not been approved by the performing institutions and the National Information Society Agency.
>
> 6. All civil and criminal liability arising from use of the AI data beyond the purpose stated in Clause 4, or from unauthorized viewing, provision, transfer, lending, or sale under Clause 5, rests with the corporation, organization, or individual that used the AI data.
>
> 7. If a user finds that personal information is contained in a dataset provided by AI Hub, the user must immediately report this to AI Hub and delete the downloaded dataset.
>
> 8. De-identified information (including reproduced/synthetic information) received from AI Hub must be used safely for purposes such as developing artificial intelligence services, and no act aimed at re-identifying individuals from it is permitted.
>
> 9. If the National Information Society Agency later conducts a survey on use cases and outcomes, the user must cooperate with it in good faith.
Laplace04/KoreanSummarizeAiHub
[ "license:other", "region:us" ]
2023-01-10T10:29:19+00:00
{"license": "other"}
2023-01-10T10:33:39+00:00
[]
[]
TAGS #license-other #region-us
> 《 License 》
>
> 1. Whenever this AI data is used, it must be clearly stated that it is a result of a project of the National Information Society Agency of Korea (NIA), and the same statement must appear in any derivative works that make use of this AI data.
>
> 2. For a corporation, organization, or individual located outside the Republic of Korea to use this AI data, a separate agreement with the performing institutions and the National Information Society Agency is required.
>
> 3. Taking this AI data outside the Republic of Korea requires a separate agreement with the performing institutions and the National Information Society Agency.
>
> 4. This AI data may be used only for training artificial intelligence models. The National Information Society Agency may refuse to provide the AI data if it judges that the purpose, method, or content of its use is unlawful or inappropriate, and where the data has already been provided it may demand that use be discontinued and that the AI data be returned or destroyed.
>
> 5. The AI data received must not be shown, provided, transferred, lent, or sold to any other corporation, organization, or individual that has not been approved by the performing institutions and the National Information Society Agency.
>
> 6. All civil and criminal liability arising from use of the AI data beyond the purpose stated in Clause 4, or from unauthorized viewing, provision, transfer, lending, or sale under Clause 5, rests with the corporation, organization, or individual that used the AI data.
>
> 7. If a user finds that personal information is contained in a dataset provided by AI Hub, the user must immediately report this to AI Hub and delete the downloaded dataset.
>
> 8. De-identified information (including reproduced/synthetic information) received from AI Hub must be used safely for purposes such as developing artificial intelligence services, and no act aimed at re-identifying individuals from it is permitted.
>
> 9. If the National Information Society Agency later conducts a survey on use cases and outcomes, the user must cooperate with it in good faith.
[]
[ "TAGS\n#license-other #region-us \n" ]
965e122c69e6c0c166f954a9a3fc5f7b375b81cd
# Dataset Card for "dreambooth-hackathon-images" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
SuSung-boy/dreambooth-hackathon-images
[ "region:us" ]
2023-01-10T10:44:31+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 4714141.0, "num_examples": 31}], "download_size": 4715444, "dataset_size": 4714141.0}}
2023-01-10T10:44:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for "dreambooth-hackathon-images" More Information needed
[ "# Dataset Card for \"dreambooth-hackathon-images\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"dreambooth-hackathon-images\"\n\nMore Information needed" ]
e18b665eeba6ad0833f85899f9ce81532311544d
# Dataset Card for "tr-wikihow-summ" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ardauzunoglu/tr-wikihow-summ
[ "region:us" ]
2023-01-10T12:24:11+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 279070558, "num_examples": 113356}, {"name": "validation", "num_bytes": 15174147, "num_examples": 6082}, {"name": "test", "num_bytes": 14888006, "num_examples": 5984}], "download_size": 166588788, "dataset_size": 309132711}}
2023-01-10T12:27:29+00:00
[]
[]
TAGS #region-us
# Dataset Card for "tr-wikihow-summ" More Information needed
[ "# Dataset Card for \"tr-wikihow-summ\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"tr-wikihow-summ\"\n\nMore Information needed" ]
bc524372259ede43ad08b11d0929bf5385e42d12
# AutoTrain Dataset for project: hannah-demo ## Dataset Description This dataset has been automatically processed by AutoTrain for project hannah-demo. ### Languages The BCP-47 code for the dataset's language is unk. ## Dataset Structure ### Data Instances A sample from this dataset looks as follows: ```json [ { "image": "<11744x7026 RGBA PIL image>", "target": 0 }, { "image": "<11744x7026 RGBA PIL image>", "target": 0 } ] ``` ### Dataset Fields The dataset has the following fields (also called "features"): ```json { "image": "Image(decode=True, id=None)", "target": "ClassLabel(names=['hannah'], id=None)" } ``` ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follow: | Split name | Num samples | | ------------ | ------------------- | | train | 7 | | valid | 7 |
slushily/autotrain-data-hannah-demo
[ "task_categories:image-classification", "region:us" ]
2023-01-10T12:29:00+00:00
{"task_categories": ["image-classification"]}
2023-01-11T03:18:33+00:00
[]
[]
TAGS #task_categories-image-classification #region-us
AutoTrain Dataset for project: hannah-demo ========================================== Dataset Description ------------------- This dataset has been automatically processed by AutoTrain for project hannah-demo. ### Languages The BCP-47 code for the dataset's language is unk. Dataset Structure ----------------- ### Data Instances A sample from this dataset looks as follows: ### Dataset Fields The dataset has the following fields (also called "features"): ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follow:
[ "### Languages\n\n\nThe BCP-47 code for the dataset's language is unk.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
[ "TAGS\n#task_categories-image-classification #region-us \n", "### Languages\n\n\nThe BCP-47 code for the dataset's language is unk.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
1c2c6699d58d1ac01fb4177fb65fb1a82b2bd37f
# Dataset Card for "hdb0110" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
cestwc/hdb0110
[ "region:us" ]
2023-01-10T13:23:29+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "labels", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 16067.0, "num_examples": 110}], "download_size": 13149, "dataset_size": 16067.0}}
2023-01-10T13:37:20+00:00
[]
[]
TAGS #region-us
# Dataset Card for "hdb0110" More Information needed
[ "# Dataset Card for \"hdb0110\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"hdb0110\"\n\nMore Information needed" ]
ac945c3619838e4e990cee54af9f4e788565e8f6
# Dataset Card for "lfqa_preprocessed" ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Additional Information](#additional-information) - [Licensing Information](#licensing-information) ## Dataset Description - **Homepage:** [https://towardsdatascience.com/long-form-qa-beyond-eli5-an-updated-dataset-and-approach-319cb841aabb](https://towardsdatascience.com/long-form-qa-beyond-eli5-an-updated-dataset-and-approach-319cb841aabb) ### Dataset Summary This is a simplified version of [vblagoje's](https://huggingface.co/vblagoje) *[lfqa_support_docs](https://huggingface.co/datasets/vblagoje/lfqa_support_docs)* and *[lfqa](https://huggingface.co/datasets/vblagoje/lfqa)* datasets. It was generated by me to have a more straight forward way to train Seq2Seq models on context based long form question answering tasks. ## Dataset Structure ### Data Instances An example of 'train' looks as follows. ```json { "question": "what's the difference between a forest and a wood?", "answer": "They're used interchangeably a lot. You'll get different answers from different resources, but the ...", "context": [ "Wood is divided, according to its botanical origin, into two kinds: softwoods, ...", "Processing and products differs especially with regard to the distinction between softwood and hardwood ..." ] } ``` ### Data Fields The data fields are the same among all splits. - `question`: a `string` feature. - `answer`: a `string` feature. - `context`: a list feature containing `string` features. ### Data Splits | name |train|validation| |----------|----:|---------:| | |226147| 3020| ## Additional Information ### Licensing Information This dataset is distributed under the MIT licence.
LLukas22/lfqa_preprocessed
[ "task_categories:question-answering", "task_categories:sentence-similarity", "size_categories:100K<n<1M", "language:en", "license:mit", "region:us" ]
2023-01-10T13:30:52+00:00
{"language": ["en"], "license": "mit", "size_categories": ["100K<n<1M"], "task_categories": ["question-answering", "sentence-similarity"]}
2023-01-10T14:21:56+00:00
[]
[ "en" ]
TAGS #task_categories-question-answering #task_categories-sentence-similarity #size_categories-100K<n<1M #language-English #license-mit #region-us
Dataset Card for "lfqa\_preprocessed" ===================================== Table of Contents ----------------- * Table of Contents * Dataset Description + Dataset Summary * Dataset Structure + Data Instances + Data Fields + Data Splits * Additional Information + Licensing Information Dataset Description ------------------- * Homepage: URL ### Dataset Summary This is a simplified version of vblagoje's *lfqa\_support\_docs* and *lfqa* datasets. It was generated by me to have a more straight forward way to train Seq2Seq models on context based long form question answering tasks. Dataset Structure ----------------- ### Data Instances An example of 'train' looks as follows. ### Data Fields The data fields are the same among all splits. * 'question': a 'string' feature. * 'answer': a 'string' feature. * 'context': a list feature containing 'string' features. ### Data Splits Additional Information ---------------------- ### Licensing Information This dataset is distributed under the MIT licence.
[ "### Dataset Summary\n\n\nThis is a simplified version of vblagoje's *lfqa\\_support\\_docs* and *lfqa* datasets.\nIt was generated by me to have a more straight forward way to train Seq2Seq models on context based long form question answering tasks.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nAn example of 'train' looks as follows.", "### Data Fields\n\n\nThe data fields are the same among all splits.\n\n\n* 'question': a 'string' feature.\n* 'answer': a 'string' feature.\n* 'context': a list feature containing 'string' features.", "### Data Splits\n\n\n\nAdditional Information\n----------------------", "### Licensing Information\n\n\nThis dataset is distributed under the MIT licence." ]
[ "TAGS\n#task_categories-question-answering #task_categories-sentence-similarity #size_categories-100K<n<1M #language-English #license-mit #region-us \n", "### Dataset Summary\n\n\nThis is a simplified version of vblagoje's *lfqa\\_support\\_docs* and *lfqa* datasets.\nIt was generated by me to have a more straight forward way to train Seq2Seq models on context based long form question answering tasks.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nAn example of 'train' looks as follows.", "### Data Fields\n\n\nThe data fields are the same among all splits.\n\n\n* 'question': a 'string' feature.\n* 'answer': a 'string' feature.\n* 'context': a list feature containing 'string' features.", "### Data Splits\n\n\n\nAdditional Information\n----------------------", "### Licensing Information\n\n\nThis dataset is distributed under the MIT licence." ]
43a7c5638a086dc7071d2428c2bc9fcd89231dd8
# Dataset Card for "pick_a_pic_preferred_images_first_day" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yuvalkirstain/pick_a_pic_preferred_images_first_day
[ "region:us" ]
2023-01-10T14:17:23+00:00
{"dataset_info": {"features": [{"name": "image_id", "dtype": "int64"}, {"name": "created_at", "dtype": "timestamp[ns]"}, {"name": "image_uid", "dtype": "string"}, {"name": "user_id", "dtype": "int64"}, {"name": "prompt", "dtype": "string"}, {"name": "negative_prompt", "dtype": "string"}, {"name": "seed", "dtype": "int64"}, {"name": "gs", "dtype": "float64"}, {"name": "steps", "dtype": "int64"}, {"name": "idx", "dtype": "int64"}, {"name": "num_generated", "dtype": "int64"}, {"name": "scheduler_cls", "dtype": "string"}, {"name": "model_id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 686322947.851, "num_examples": 1001}], "download_size": 685855336, "dataset_size": 686322947.851}}
2023-01-10T14:18:22+00:00
[]
[]
TAGS #region-us
# Dataset Card for "pick_a_pic_preferred_images_first_day" More Information needed
[ "# Dataset Card for \"pick_a_pic_preferred_images_first_day\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"pick_a_pic_preferred_images_first_day\"\n\nMore Information needed" ]
ee1ef6dbfd88c3aea0a8e9b59bb464706d516ea9
# Dataset Card for "IL2223_project" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tilos/IL2223_project
[ "region:us" ]
2023-01-10T14:58:19+00:00
{"dataset_info": {"features": [{"name": "referenceTime", "dtype": "string"}, {"name": "t", "dtype": "float64"}, {"name": "ws", "dtype": "float64"}, {"name": "prec1h", "dtype": "float64"}, {"name": "fesn1h", "dtype": "float64"}, {"name": "vis", "dtype": "float64"}, {"name": "confidence", "dtype": "float64"}, {"name": "congestionLevel", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 99680, "num_examples": 1246}], "download_size": 18777, "dataset_size": 99680}}
2023-01-21T21:09:42+00:00
[]
[]
TAGS #region-us
# Dataset Card for "IL2223_project" More Information needed
[ "# Dataset Card for \"IL2223_project\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"IL2223_project\"\n\nMore Information needed" ]
f776edeed36d302ce89e1e8c47316e77a47b6f41
# Word Sense Disambiguation Corpora for Polish ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** https://link.springer.com/chapter/10.1007/978-3-031-08754-7_70 - **Point of Contact:** [email protected] ### Dataset Summary `WSD Polish Datasets` is a comprehensive benchmark for word sense disambiguation (WSD) classification task in Polish language. It consists of 7 distinct datasets, manually annotated with senses from plWordNet-4.5 sense inventory. The following datasets were annotated and included into our benchmark: - KPWr - KPWr-100 - Sherlock (SPEC) - Skladnica - WikiGlex (a subset of GLEX corpus) - EmoGlex (a subset of GLEX corpus) - Walenty For more details, please check the following publication: ``` @InProceedings{10.1007/978-3-031-08754-7_70, author="Janz, Arkadiusz and Dziob, Agnieszka and Oleksy, Marcin and Baran, Joanna", editor="Groen, Derek and de Mulatier, Cl{\'e}llia and Paszynski, Maciej and Krzhizhanovskaya, Valeria V. and Dongarra, Jack J. and Sloot, Peter M. A.", title="A Unified Sense Inventory for Word Sense Disambiguation in Polish", booktitle="Computational Science -- ICCS 2022", year="2022", publisher="Springer International Publishing", address="Cham", pages="682--689", isbn="978-3-031-08754-7" } ``` **A new publication on Polish WSD corpora will be available soon** ### Supported Tasks and Leaderboards Word sense disambiguation task. We do not provide a leaderboard. However, we provide an example evaluation script for evaluating WSD models. ### Languages Polish language, PL ## Dataset Structure ### Data Instances Data are structured in JSONL format, each single text sample is divided by sentence. 
``` { "text": "Wpierw pani Hudson została zerwana z łóżka, po czym odegrała się na mnie, a ja - na tobie.", "tokens": [ { "index": 0, "position": [ 0, 6 ], "orth": "Wpierw", "lemma": "wpierw", "pos": "adv", "ctag": "adv" }, { "index": 1, "position": [ 7, 11 ], "orth": "pani", "lemma": "pani", "pos": "noun", "ctag": "subst:nom:f:sg" }, { "index": 2, "position": [ 12, 18 ], "orth": "Hudson", "lemma": "Hudson", "pos": "noun", "ctag": "subst:nom:f:sg" }, { "index": 3, "position": [ 19, 26 ], "orth": "została", "lemma": "zostać", "pos": "verb", "ctag": "praet:perf:f:sg" }, { "index": 4, "position": [ 27, 34 ], "orth": "zerwana", "lemma": "zerwać", "pos": "verb", "ctag": "ppas:perf:nom:f:aff:sg" }, <...> ], "phrases": [ { "indices": [ 10, 11 ], "head": 10, "lemma": "odegrać się" } ], "wsd": [ { "index": 0, "pl_sense": "wpierw.1.r", "plWN_syn_id": "01a4a067-aac5-11ed-aae5-0242ac130002", "plWN_lex_id": "f2757c30-aac4-11ed-aae5-0242ac130002", "plWN_syn_legacy_id": "477654", "plWN_lex_legacy_id": "718454", "PWN_syn_id": "00102736-r", "bn_syn_id": "bn:00115376r", "mapping_relation": "synonymy" }, { "index": 1, "pl_sense": "pani.2.n", "plWN_syn_id": "f35fb1ed-aac4-11ed-aae5-0242ac130002", "plWN_lex_id": "d5145565-aac4-11ed-aae5-0242ac130002", "plWN_syn_legacy_id": "129", "plWN_lex_legacy_id": "20695", "PWN_syn_id": "10787470-n", "bn_syn_id": "bn:00001530n", "mapping_relation": "synonymy" }, <...> ] } ``` ### Data Fields Description of json keys: - `text`: text of the sentence - `tokens`: list of tokens made by tokenization process - `index`: token order index in sentence - `position`: token chars span indices <included, excluded> - `orth`: word - `lemma`: lemmatised word - `pos`: part of speech - `ctag`: morphosyntactic tag - `phrases`: list of multi-word - `wsd`: annotation labels for the WSD task ### Data Splits We do not specify an exact data split for training and evaluation. However, we suggest to use GLEX and Składnica for training and other datasets for testing. ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection, Normalization and Post-processing Source corpora were initially pre-processed using morphosyntactic tagging and multi-word expression recognition tools. To tokenize and tag the datasets we used [MorphoDiTa](https://clarin-pl.eu/dspace/handle/11321/425) adapted to Polish language. To recognize multi-word expressions we applied pattern-based matching tool [Corpus2-MWE](https://clarin-pl.eu/dspace/handle/11321/533) - only MWEs from plWordNet were included. After manual annotation, sense indices of plWordNet 4.5 were mapped automatically to Princeton WordNet 3.0 and BabelNet 4.0 indices using plWordNet's interlingual mapping. ### Annotations #### Annotation process * 2+1 annotation process with inter-annotator agreement score over 0.6 PSA * annotated with [plWordNet 4.5](http://plwordnet.pwr.wroc.pl/wordnet/) * software: [WordNet-Loom](https://clarin-pl.eu/dspace/handle/11321/275) and [Inforex](https://clarin-pl.eu/dspace/handle/11321/13) * both single-word and multi-word expressions annotated * full-text sense annotation (excluding KPWr) #### Who are the annotators? - professional linguists from CLARIN-PL project ### Personal and Sensitive Information The datasets do not contain any personal or sensitive information. ## Considerations for Using the Data ### Discussion of Biases Some datasets are biased towards most frequent senses. No information about other biases - needs further analysis. 
### Other Known Limitations * sense inventories are usually incomplete therefore some word senses might be missing in plWordNet * single-word and multi-word terms expressing novel senses (missing in plWordNet) were not marked ## Additional Information ### Dataset Curators Arkadiusz Janz ([email protected]) ### Licensing Information KPWR-100 [CC-BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/) KPWR [CC-BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/) Walenty [CC-BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/) Sherlock [CC-BY 4.0](https://creativecommons.org/licenses/by/4.0/) Skladnica [GNU GPL 3](http://www.gnu.org/licenses/gpl-3.0.en.html) GLEX [plWordNet License](http://plwordnet.pwr.wroc.pl/wordnet/licence) ### Citation Information Main source (all corpora as a unified benchmark) and published here on HuggingFace: ```` @InProceedings{10.1007/978-3-031-08754-7_70, author="Janz, Arkadiusz and Dziob, Agnieszka and Oleksy, Marcin and Baran, Joanna", editor="Groen, Derek and de Mulatier, Cl{\'e}llia and Paszynski, Maciej and Krzhizhanovskaya, Valeria V. and Dongarra, Jack J. and Sloot, Peter M. A.", title="A Unified Sense Inventory for Word Sense Disambiguation in Polish", booktitle="Computational Science -- ICCS 2022", year="2022", publisher="Springer International Publishing", address="Cham", pages="682--689", isbn="978-3-031-08754-7" } ```` Related work ------------ KPWr-100, Składnica, SPEC ```` @article{janzresults, title={Results of the PolEval 2020 Shared Task 3: Word Sense Disambiguation}, author={Janz, Arkadiusz and Chlebus, Joanna and Dziob, Agnieszka and Piasecki, Maciej}, journal={Proceedings of the PolEval 2020 Workshop}, pages={65--77}, year={2020} } ```` GLEX (EmoGLEX) ```` @article{janz2017plwordnet, title={{plWordNet} as a basis for large emotive lexicons of Polish}, author={Janz, Arkadiusz and Kocon, Jan and Piasecki, Maciej and Zasko-Zielinska, Monika}, journal={Proceedings of Human Language Technologies as a Challenge for Computer Science and Linguistics Poznan: Fundacja Uniwersytetu im. 
Adama Mickiewicza w Poznaniu}, pages={189--193}, year={2017} } ```` KPWr ```` @conference{broda2012, address = {Istanbul, Turkey}, author = {Bartosz Broda and Micha{\l} Marci{\'n}czuk and Marek Maziarz and Adam Radziszewski and Adam Wardy{\'n}ski}, booktitle = {Proceedings of LREC'12}, owner = {Marlena}, publisher = {ELRA}, timestamp = {2014.06.20}, title = {KPWr: Towards a Free Corpus of Polish}, year = {2012} } ```` Składnica ```` @inproceedings{hajnicz-2014-lexico, title = "Lexico-Semantic Annotation of Sk{\l}adnica Treebank by means of {PLWN} Lexical Units", author = "Hajnicz, El{\.z}bieta", booktitle = "Proceedings of the Seventh Global {W}ordnet Conference", month = jan, year = "2014", address = "Tartu, Estonia", publisher = "University of Tartu Press", url = "https://aclanthology.org/W14-0104", pages = "23--31", } ```` Walenty ```` @inproceedings{haj:and:bar:lrec16, author = {Hajnicz, El{\.z}bieta and Andrzejczuk, Anna and Bartosiak, Tomasz}, crossref = {lrec:16}, pages = {2625--2632}, pdf = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/382_Paper.pdf}, title = {Semantic Layer of the Valence Dictionary of {P}olish \emph{{W}alenty}} } ```` Mapping plWordNet onto Princeton WordNet ```` @inproceedings{rudnicka-etal-2021-non, title = "A (Non)-Perfect Match: Mapping pl{W}ord{N}et onto {P}rinceton{W}ord{N}et", author = "Rudnicka, Ewa and Witkowski, Wojciech and Piasecki, Maciej", booktitle = "Proceedings of the 11th Global Wordnet Conference", month = jan, year = "2021", address = "University of South Africa (UNISA)", publisher = "Global Wordnet Association", url = "https://aclanthology.org/2021.gwc-1.16", pages = "137--146" } ````
clarin-knext/wsd_polish_datasets
[ "task_categories:token-classification", "task_ids:word-sense-disambiguation", "annotations_creators:expert-generated", "language_creators:expert-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:1M<n<10M", "source_datasets:original", "language:pl", "license:cc-by-4.0", "region:us" ]
2023-01-10T15:09:52+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["expert-generated", "found"], "language": ["pl"], "license": ["cc-by-4.0"], "multilinguality": ["monolingual"], "size_categories": ["1M<n<10M"], "source_datasets": ["original"], "task_categories": ["token-classification"], "task_ids": ["word-sense-disambiguation"], "pretty_name": "wsd-polish-datasets", "tags": []}
2024-02-11T16:34:17+00:00
[]
[ "pl" ]
TAGS #task_categories-token-classification #task_ids-word-sense-disambiguation #annotations_creators-expert-generated #language_creators-expert-generated #language_creators-found #multilinguality-monolingual #size_categories-1M<n<10M #source_datasets-original #language-Polish #license-cc-by-4.0 #region-us
# Word Sense Disambiguation Corpora for Polish ## Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: - Repository: - Paper: URL - Point of Contact: URL@URL ### Dataset Summary 'WSD Polish Datasets' is a comprehensive benchmark for word sense disambiguation (WSD) classification task in Polish language. It consists of 7 distinct datasets, manually annotated with senses from plWordNet-4.5 sense inventory. The following datasets were annotated and included into our benchmark: - KPWr - KPWr-100 - Sherlock (SPEC) - Skladnica - WikiGlex (a subset of GLEX corpus) - EmoGlex (a subset of GLEX corpus) - Walenty For more details, please check the following publication: A new publication on Polish WSD corpora will be available soon ### Supported Tasks and Leaderboards Word sense disambiguation task. We do not provide a leaderboard. However, we provide an example evaluation script for evaluating WSD models. ### Languages Polish language, PL ## Dataset Structure ### Data Instances Data are structured in JSONL format, each single text sample is divided by sentence. ### Data Fields Description of json keys: - 'text': text of the sentence - 'tokens': list of tokens made by tokenization process - 'index': token order index in sentence - 'position': token chars span indices <included, excluded> - 'orth': word - 'lemma': lemmatised word - 'pos': part of speech - 'ctag': morphosyntactic tag - 'phrases': list of multi-word - 'wsd': annotation labels for the WSD task ### Data Splits We do not specify an exact data split for training and evaluation. However, we suggest to use GLEX and Składnica for training and other datasets for testing. ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection, Normalization and Post-processing Source corpora were initially pre-processed using morphosyntactic tagging and multi-word expression recognition tools. To tokenize and tag the datasets we used MorphoDiTa adapted to Polish language. To recognize multi-word expressions we applied pattern-based matching tool Corpus2-MWE - only MWEs from plWordNet were included. After manual annotation, sense indices of plWordNet 4.5 were mapped automatically to Princeton WordNet 3.0 and BabelNet 4.0 indices using plWordNet's interlingual mapping. ### Annotations #### Annotation process * 2+1 annotation process with inter-annotator agreement score over 0.6 PSA * annotated with plWordNet 4.5 * software: WordNet-Loom and Inforex * both single-word and multi-word expressions annotated * full-text sense annotation (excluding KPWr) #### Who are the annotators? - professional linguists from CLARIN-PL project ### Personal and Sensitive Information The datasets do not contain any personal or sensitive information. ## Considerations for Using the Data ### Discussion of Biases Some datasets are biased towards most frequent senses. No information about other biases - needs further analysis. 
### Other Known Limitations * sense inventories are usually incomplete therefore some word senses might be missing in plWordNet * single-word and multi-word terms expressing novel senses (missing in plWordNet) were not marked ## Additional Information ### Dataset Curators Arkadiusz Janz (URL@URL) ### Licensing Information KPWR-100 CC-BY-SA 4.0 KPWR CC-BY-SA 4.0 Walenty CC-BY-SA 4.0 Sherlock CC-BY 4.0 Skladnica GNU GPL 3 GLEX plWordNet License Main source (all corpora as a unified benchmark) and published here on HuggingFace: ' Related work ------------ KPWr-100, Składnica, SPEC ' GLEX (EmoGLEX) ' KPWr ' Składnica ' Walenty ' Mapping plWordNet onto Princeton WordNet '
[ "# Word Sense Disambiguation Corpora for Polish", "## Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- Repository:\n- Paper: URL\n- Point of Contact: URL@URL", "### Dataset Summary\n\n'WSD Polish Datasets' is a comprehensive benchmark for word sense disambiguation (WSD) classification task in Polish language.\nIt consists of 7 distinct datasets, manually annotated with senses from plWordNet-4.5 sense inventory. The following datasets\nwere annotated and included into our benchmark:\n- KPWr\n- KPWr-100\n- Sherlock (SPEC)\n- Skladnica\n- WikiGlex (a subset of GLEX corpus)\n- EmoGlex (a subset of GLEX corpus)\n- Walenty\n\nFor more details, please check the following publication:\n\n\n\nA new publication on Polish WSD corpora will be available soon", "### Supported Tasks and Leaderboards\n\nWord sense disambiguation task. We do not provide a leaderboard. However, we provide an example evaluation script for evaluating WSD models.", "### Languages\n\nPolish language, PL", "## Dataset Structure", "### Data Instances\n\nData are structured in JSONL format, each single text sample is divided by sentence.", "### Data Fields\n\nDescription of json keys:\n- 'text': text of the sentence\n- 'tokens': list of tokens made by tokenization process\n - 'index': token order index in sentence\n - 'position': token chars span indices <included, excluded>\n - 'orth': word\n - 'lemma': lemmatised word\n - 'pos': part of speech\n - 'ctag': morphosyntactic tag\n- 'phrases': list of multi-word \n- 'wsd': annotation labels for the WSD task", "### Data Splits\n\nWe do not specify an exact data split for training and evaluation. However, we suggest to use GLEX and Składnica for training and other datasets for testing.", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection, Normalization and Post-processing\n\nSource corpora were initially pre-processed using morphosyntactic tagging and multi-word expression recognition tools.\nTo tokenize and tag the datasets we used MorphoDiTa adapted to Polish language. To recognize multi-word expressions\nwe applied pattern-based matching tool Corpus2-MWE - only MWEs from plWordNet were included. After manual annotation, \nsense indices of plWordNet 4.5 were mapped automatically to Princeton WordNet 3.0 and BabelNet 4.0 indices using plWordNet's interlingual mapping.", "### Annotations", "#### Annotation process\n\n* 2+1 annotation process with inter-annotator agreement score over 0.6 PSA\n* annotated with plWordNet 4.5\n* software: WordNet-Loom and Inforex\n* both single-word and multi-word expressions annotated\n* full-text sense annotation (excluding KPWr)", "#### Who are the annotators?\n\n- professional linguists from CLARIN-PL project", "### Personal and Sensitive Information\n\nThe datasets do not contain any personal or sensitive information.", "## Considerations for Using the Data", "### Discussion of Biases\n\nSome datasets are biased towards most frequent senses. 
No information about other biases - needs further analysis.", "### Other Known Limitations\n\n* sense inventories are usually incomplete therefore some word senses might be missing in plWordNet\n* single-word and multi-word terms expressing novel senses (missing in plWordNet) were not marked", "## Additional Information", "### Dataset Curators\n\nArkadiusz Janz (URL@URL)", "### Licensing Information\n\nKPWR-100 CC-BY-SA 4.0 \nKPWR CC-BY-SA 4.0 \nWalenty CC-BY-SA 4.0 \nSherlock CC-BY 4.0 \nSkladnica GNU GPL 3 \nGLEX plWordNet License \n\n\n\n\nMain source (all corpora as a unified benchmark) and published here on HuggingFace:\n\n'\n\nRelated work\n------------\n\nKPWr-100, Składnica, SPEC\n'\n\nGLEX (EmoGLEX)\n\n'\n\nKPWr\n'\n\nSkładnica\n'\n\nWalenty\n'\n\nMapping plWordNet onto Princeton WordNet\n'" ]
[ "TAGS\n#task_categories-token-classification #task_ids-word-sense-disambiguation #annotations_creators-expert-generated #language_creators-expert-generated #language_creators-found #multilinguality-monolingual #size_categories-1M<n<10M #source_datasets-original #language-Polish #license-cc-by-4.0 #region-us \n", "# Word Sense Disambiguation Corpora for Polish", "## Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- Repository:\n- Paper: URL\n- Point of Contact: URL@URL", "### Dataset Summary\n\n'WSD Polish Datasets' is a comprehensive benchmark for word sense disambiguation (WSD) classification task in Polish language.\nIt consists of 7 distinct datasets, manually annotated with senses from plWordNet-4.5 sense inventory. The following datasets\nwere annotated and included into our benchmark:\n- KPWr\n- KPWr-100\n- Sherlock (SPEC)\n- Skladnica\n- WikiGlex (a subset of GLEX corpus)\n- EmoGlex (a subset of GLEX corpus)\n- Walenty\n\nFor more details, please check the following publication:\n\n\n\nA new publication on Polish WSD corpora will be available soon", "### Supported Tasks and Leaderboards\n\nWord sense disambiguation task. We do not provide a leaderboard. However, we provide an example evaluation script for evaluating WSD models.", "### Languages\n\nPolish language, PL", "## Dataset Structure", "### Data Instances\n\nData are structured in JSONL format, each single text sample is divided by sentence.", "### Data Fields\n\nDescription of json keys:\n- 'text': text of the sentence\n- 'tokens': list of tokens made by tokenization process\n - 'index': token order index in sentence\n - 'position': token chars span indices <included, excluded>\n - 'orth': word\n - 'lemma': lemmatised word\n - 'pos': part of speech\n - 'ctag': morphosyntactic tag\n- 'phrases': list of multi-word \n- 'wsd': annotation labels for the WSD task", "### Data Splits\n\nWe do not specify an exact data split for training and evaluation. However, we suggest to use GLEX and Składnica for training and other datasets for testing.", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection, Normalization and Post-processing\n\nSource corpora were initially pre-processed using morphosyntactic tagging and multi-word expression recognition tools.\nTo tokenize and tag the datasets we used MorphoDiTa adapted to Polish language. To recognize multi-word expressions\nwe applied pattern-based matching tool Corpus2-MWE - only MWEs from plWordNet were included. 
After manual annotation, \nsense indices of plWordNet 4.5 were mapped automatically to Princeton WordNet 3.0 and BabelNet 4.0 indices using plWordNet's interlingual mapping.", "### Annotations", "#### Annotation process\n\n* 2+1 annotation process with inter-annotator agreement score over 0.6 PSA\n* annotated with plWordNet 4.5\n* software: WordNet-Loom and Inforex\n* both single-word and multi-word expressions annotated\n* full-text sense annotation (excluding KPWr)", "#### Who are the annotators?\n\n- professional linguists from CLARIN-PL project", "### Personal and Sensitive Information\n\nThe datasets do not contain any personal or sensitive information.", "## Considerations for Using the Data", "### Discussion of Biases\n\nSome datasets are biased towards most frequent senses. No information about other biases - needs further analysis.", "### Other Known Limitations\n\n* sense inventories are usually incomplete therefore some word senses might be missing in plWordNet\n* single-word and multi-word terms expressing novel senses (missing in plWordNet) were not marked", "## Additional Information", "### Dataset Curators\n\nArkadiusz Janz (URL@URL)", "### Licensing Information\n\nKPWR-100 CC-BY-SA 4.0 \nKPWR CC-BY-SA 4.0 \nWalenty CC-BY-SA 4.0 \nSherlock CC-BY 4.0 \nSkladnica GNU GPL 3 \nGLEX plWordNet License \n\n\n\n\nMain source (all corpora as a unified benchmark) and published here on HuggingFace:\n\n'\n\nRelated work\n------------\n\nKPWr-100, Składnica, SPEC\n'\n\nGLEX (EmoGLEX)\n\n'\n\nKPWr\n'\n\nSkładnica\n'\n\nWalenty\n'\n\nMapping plWordNet onto Princeton WordNet\n'" ]
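The wsd_polish_datasets record above describes a JSONL layout in which each sense annotation points back to a token through a shared index field. Below is a small reading sketch under that assumption; the file name is a placeholder, since the card does not state how the shards are named.

```python
import json

# Sketch only: "skladnica.jsonl" is a hypothetical file name; the card does not
# specify shard names. Field names ("tokens", "wsd", "index", "orth",
# "pl_sense", "plWN_syn_id") follow the example record shown in the card.
with open("skladnica.jsonl", encoding="utf-8") as f:
    for line in f:
        record = json.loads(line)
        tokens_by_index = {tok["index"]: tok for tok in record["tokens"]}
        for ann in record["wsd"]:
            tok = tokens_by_index[ann["index"]]
            print(f'{tok["orth"]} -> {ann["pl_sense"]} ({ann["plWN_syn_id"]})')
        break  # first sentence only
```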
b5bf5c945124f673c5be476266889bd819986083
# Dataset Card for Dataset Name

## Dataset Description

- **Homepage:**
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**

### Dataset Summary

This is a multilingual dataset containing ~130k annotated sentence boundaries. It contains laws and court decisions in 6 different languages.

### Supported Tasks and Leaderboards

[More Information Needed]

### Languages

English, French, Italian, German, Portuguese, Spanish

## Dataset Structure

It is structured in the following format: {language}\_{type}\_{shard}.jsonl.xz

type is one of the following:
- laws
- judgements

Use the dataset like this:
```
from datasets import load_dataset
config = 'fr_laws' #{language}_{type} | to load all languages and/or all types, use 'all_all'
dataset = load_dataset('rcds/MultiLegalSBD', config)
```

### Data Instances

[More Information Needed]

### Data Fields

- text: the original text
- spans:
  - start: offset of the first character
  - end: offset of the last character
  - label: One label only -> Sentence
  - token_start: id of the first token
  - token_end: id of the last token
- tokens:
  - text: token text
  - start: offset of the first character
  - end: offset of the last character
  - id: token id
  - ws: whether the token is followed by whitespace

### Data Splits

There is only one split available

## Dataset Creation

### Curation Rationale

[More Information Needed]

### Source Data

#### Initial Data Collection and Normalization

[More Information Needed]

#### Who are the source language producers?

[More Information Needed]

### Annotations

#### Annotation process

[More Information Needed]

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

[More Information Needed]

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

[More Information Needed]

### Citation Information

```
@inproceedings{10.1145/3594536.3595132,
author = {Brugger, Tobias and St\"{u}rmer, Matthias and Niklaus, Joel},
title = {MultiLegalSBD: A Multilingual Legal Sentence Boundary Detection Dataset},
year = {2023},
isbn = {9798400701979},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/3594536.3595132},
doi = {10.1145/3594536.3595132},
abstract = {Sentence Boundary Detection (SBD) is one of the foundational building blocks of Natural Language Processing (NLP), with incorrectly split sentences heavily influencing the output quality of downstream tasks. It is a challenging task for algorithms, especially in the legal domain, considering the complex and different sentence structures used. In this work, we curated a diverse multilingual legal dataset consisting of over 130'000 annotated sentences in 6 languages. Our experimental results indicate that the performance of existing SBD models is subpar on multilingual legal data. We trained and tested monolingual and multilingual models based on CRF, BiLSTM-CRF, and transformers, demonstrating state-of-the-art performance. We also show that our multilingual models outperform all baselines in the zero-shot setting on a Portuguese test set. 
To encourage further research and development by the community, we have made our dataset, models, and code publicly available.}, booktitle = {Proceedings of the Nineteenth International Conference on Artificial Intelligence and Law}, pages = {42–51}, numpages = {10}, keywords = {Natural Language Processing, Sentence Boundary Detection, Text Annotation, Legal Document Analysis, Multilingual}, location = {Braga, Portugal}, series = {ICAIL '23} } ``` ### Contributions [More Information Needed]
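To make the Data Fields section above concrete, here is a minimal sketch that reconstructs sentences from the span offsets. It assumes the `fr_laws` config, that the single split is named `train`, and that each entry in `spans` comes back as a dict with the `start`/`end` character offsets described above:

```
from datasets import load_dataset

# Load one of the {language}_{type} configs, here French laws.
dataset = load_dataset('rcds/MultiLegalSBD', 'fr_laws', split='train')

example = dataset[0]
text = example['text']

# Each span marks one sentence through character offsets into `text`.
sentences = [text[span['start']:span['end']] for span in example['spans']]

print(len(sentences), 'sentences in the first document')
print(sentences[0])
```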
rcds/MultiLegalSBD
[ "task_categories:token-classification", "size_categories:100K<n<1M", "language:en", "language:es", "language:de", "language:it", "language:pt", "language:fr", "region:us" ]
2023-01-10T15:17:41+00:00
{"language": ["en", "es", "de", "it", "pt", "fr"], "size_categories": ["100K<n<1M"], "task_categories": ["token-classification"], "pretty_name": "MultiLegalSBD: A Multilingual Legal Sentence Boundary Detection Dataset", "dataset_info": [{"config_name": "fr_Laws", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 8773683, "num_examples": 2131}], "download_size": 0, "dataset_size": 8773683}, {"config_name": "it_Laws", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 8130577, "num_examples": 2910}], "download_size": 0, "dataset_size": 8130577}, {"config_name": "es_Laws", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 6260211, "num_examples": 677}], "download_size": 0, "dataset_size": 6260211}, {"config_name": "en_Laws", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train"}], "download_size": 0, "dataset_size": 0}, {"config_name": "de_Laws", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 13792836, "num_examples": 13}], "download_size": 0, "dataset_size": 13792836}, {"config_name": "fr_Judgements", 
"features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 8788244, "num_examples": 315}], "download_size": 0, "dataset_size": 8788244}, {"config_name": "fr_all", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 25977816, "num_examples": 2446}], "download_size": 4782672, "dataset_size": 25977816}, {"config_name": "it_Judgements", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 8989061, "num_examples": 243}], "download_size": 0, "dataset_size": 8989061}, {"config_name": "it_all", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 25097560, "num_examples": 3153}], "download_size": 4610540, "dataset_size": 25097560}, {"config_name": "es_Judgements", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9460558, "num_examples": 190}], "download_size": 0, "dataset_size": 9460558}, {"config_name": "es_all", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": 
"token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 23090629, "num_examples": 867}], "download_size": 4438716, "dataset_size": 23090629}, {"config_name": "en_Judgements", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 18401754, "num_examples": 80}], "download_size": 0, "dataset_size": 18401754}, {"config_name": "en_all", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 27363914, "num_examples": 80}], "download_size": 5448700, "dataset_size": 27363914}, {"config_name": "de_Judgements", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 14082173, "num_examples": 131}], "download_size": 0, "dataset_size": 14082173}, {"config_name": "de_all", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 40429185, "num_examples": 144}], "download_size": 7883640, "dataset_size": 40429185}, {"config_name": "fr_laws", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": 
"end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 12924503, "num_examples": 2131}], "download_size": 2201568, "dataset_size": 12924503}, {"config_name": "fr_judgements", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 13053313, "num_examples": 315}], "download_size": 2581104, "dataset_size": 13053313}, {"config_name": "it_laws", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 11869343, "num_examples": 2910}], "download_size": 2048828, "dataset_size": 11869343}, {"config_name": "it_judgements", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 13228218, "num_examples": 243}], "download_size": 2561712, "dataset_size": 13228218}, {"config_name": "es_laws", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9183057, "num_examples": 677}], "download_size": 1753376, "dataset_size": 9183057}, {"config_name": "es_judgements", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", 
"num_bytes": 13907572, "num_examples": 190}], "download_size": 2685340, "dataset_size": 13907572}, {"config_name": "en_laws", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train"}], "download_size": 0, "dataset_size": 0}, {"config_name": "en_judgements", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 27363914, "num_examples": 80}], "download_size": 5448700, "dataset_size": 27363914}, {"config_name": "de_laws", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 19935635, "num_examples": 13}], "download_size": 3745480, "dataset_size": 19935635}, {"config_name": "de_judgements", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 20493550, "num_examples": 131}], "download_size": 4138160, "dataset_size": 20493550}, {"config_name": "pt_laws", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1005902, "num_examples": 58}], "download_size": 209128, "dataset_size": 1005902}, {"config_name": "pt_judgements", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": 
"int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 812282, "num_examples": 10}], "download_size": 173424, "dataset_size": 812282}, {"config_name": "pt_all", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1818184, "num_examples": 68}], "download_size": 382552, "dataset_size": 1818184}, {"config_name": "all_laws", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 54918438, "num_examples": 5789}], "download_size": 9958380, "dataset_size": 54918438}, {"config_name": "all_judgements", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 88858845, "num_examples": 969}], "download_size": 17588440, "dataset_size": 88858845}, {"config_name": "all_all", "features": [{"name": "text", "dtype": "string"}, {"name": "spans", "list": [{"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "token_start", "dtype": "int64"}, {"name": "token_end", "dtype": "int64"}]}, {"name": "tokens", "list": [{"name": "text", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "end", "dtype": "int64"}, {"name": "id", "dtype": "int64"}, {"name": "ws", "dtype": "bool"}]}, {"name": "source", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 143777284, "num_examples": 6758}], "download_size": 27546820, "dataset_size": 143777284}]}
2023-10-23T05:36:36+00:00
[]
[ "en", "es", "de", "it", "pt", "fr" ]
TAGS #task_categories-token-classification #size_categories-100K<n<1M #language-English #language-Spanish #language-German #language-Italian #language-Portuguese #language-French #region-us
# Dataset Card for Dataset Name ## Dataset Description - Homepage: - Repository: - Paper: - Leaderboard: - Point of Contact: ### Dataset Summary This is a multilingual dataset containing ~130k annotated sentence boundaries. It contains laws and court decision in 6 different languages. ### Supported Tasks and Leaderboards ### Languages English, French, Italian, German, Portuguese, Spanish ## Dataset Structure It is structured in the following format: {language}\_{type}\_{shard}.URL type is one of the following: - laws - judgements Use the the dataset like this: ### Data Instances ### Data Fields - text: the original text - spans: - start: offset of the first character - end: offset of the last character - label: One label only -> Sentence - token_start: id of the first token - token_end: id of the last token - tokens: - text: token text - start: offset of the first character - end: offset of the last character - id: token id - ws: whether the token is followed by whitespace ### Data Splits There is only one split available ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# Dataset Card for Dataset Name", "## Dataset Description\n\n- Homepage:\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary\n\nThis is a multilingual dataset containing ~130k annotated sentence boundaries. It contains laws and court decision in 6 different languages.", "### Supported Tasks and Leaderboards", "### Languages\n\nEnglish, French, Italian, German, Portuguese, Spanish", "## Dataset Structure\n\nIt is structured in the following format: {language}\\_{type}\\_{shard}.URL\n\ntype is one of the following:\n- laws\n- judgements\n\nUse the the dataset like this:", "### Data Instances", "### Data Fields\n\n- text: the original text\n- spans:\n - start: offset of the first character\n - end: offset of the last character\n - label: One label only -> Sentence\n - token_start: id of the first token\n - token_end: id of the last token\n- tokens:\n - text: token text\n - start: offset of the first character\n - end: offset of the last character\n - id: token id\n - ws: whether the token is followed by whitespace", "### Data Splits\n\nThere is only one split available", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#task_categories-token-classification #size_categories-100K<n<1M #language-English #language-Spanish #language-German #language-Italian #language-Portuguese #language-French #region-us \n", "# Dataset Card for Dataset Name", "## Dataset Description\n\n- Homepage:\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary\n\nThis is a multilingual dataset containing ~130k annotated sentence boundaries. It contains laws and court decision in 6 different languages.", "### Supported Tasks and Leaderboards", "### Languages\n\nEnglish, French, Italian, German, Portuguese, Spanish", "## Dataset Structure\n\nIt is structured in the following format: {language}\\_{type}\\_{shard}.URL\n\ntype is one of the following:\n- laws\n- judgements\n\nUse the the dataset like this:", "### Data Instances", "### Data Fields\n\n- text: the original text\n- spans:\n - start: offset of the first character\n - end: offset of the last character\n - label: One label only -> Sentence\n - token_start: id of the first token\n - token_end: id of the last token\n- tokens:\n - text: token text\n - start: offset of the first character\n - end: offset of the last character\n - id: token id\n - ws: whether the token is followed by whitespace", "### Data Splits\n\nThere is only one split available", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
4795741637529d511dde1fc6331e97e5087a7a2e
# SUST BANGLA EMOTIONAL SPEECH CORPUS ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** [SUBESCO PAPER](https://doi.org/10.1371/journal.pone.0250173) - **Leaderboard:** - **Point of Contact:** [Sadia Sultana]([email protected]) ### Dataset Summary SUBESCO is an audio-only emotional speech corpus of 7000 sentence-level utterances of the Bangla language. 20 professional actors (10 males and 10 females) participated in the recordings of 10 sentences for 7 target emotions. The emotions are Anger, Disgust, Fear, Happiness, Neutral, Sadness and Surprise. Total duration of the corpus is 7 hours 40 min 40 sec. Total size of the dataset is 2.03 GB. The dataset was evaluated by 50 raters (25 males, 25 females). Human perception test achieved a raw accuracy of 71%. All the details relating to creation, evaluation and analysis of SUBESCO have been described in the corresponding journal paper which has been published in Plos One. https://doi.org/10.1371/journal.pone.0250173 ### Downloading the data ``` from datasets import load_dataset train = load_dataset("sustcsenlp/bn_emotion_speech_corpus",split="train") ``` ### Naming Convention Each audio file in the dataset has a unique name. There are eight parts in the file name where all the parts are connected by underscores. The order of all the parts is organized as: Gender-Speaker's serial number-Speaker's name-Unit of recording-Unit number- Emotion name- Repeating number and the File format. For example, the filename F_02_MONIKA_S_1_NEUTRAL_5.wav refers to: | Symbol | Meaning | | ----------- | ----------- | | F | Speaker Gender | | 02 | Speaker Number | | MONIKA | Speaker Name | | S_1 | Sentence Number | | NEUTRAL | Emotion | | 5 | Take Number | ### Languages This dataset contains Bangla Audio Data. ## Dataset Creation This database was created as a part of PhD thesis project of the author Sadia Sultana. It was designed and developed by the author in the Department of Computer Science and Engineering of Shahjalal University of Science and Technology. Financial grant was supported by the university. If you use the dataset please cite SUBESCO and the corresponding academic journal publication in Plos One. ### Citation Information ``` @dataset{sadia_sultana_2021_4526477, author = {Sadia Sultana}, title = {SUST Bangla Emotional Speech Corpus (SUBESCO)}, month = feb, year = 2021, note = {{This database was created as a part of PhD thesis project of the author Sadia Sultana. It was designed and developed by the author in the Department of Computer Science and Engineering of Shahjalal University of Science and Technology. Financial grant was supported by the university. If you use the dataset please cite SUBESCO and the corresponding academic journal publication in Plos One.}}, publisher = {Zenodo}, version = {version - 1.1}, doi = {10.5281/zenodo.4526477}, url = {https://doi.org/10.5281/zenodo.4526477} } ``` ### Contributors | Name | University | | ----------- | ----------- | | Sadia Sultana | Shahjalal University of Science and Technology | | Dr. M. Zafar Iqbal | Shahjalal University of Science and Technology | | Dr. M. Shahidur Rahman | Shahjalal University of Science and Technology | ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? 
[More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed]
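As an illustration of the naming convention described above, a small hypothetical helper can unpack a file name into its parts. The field names follow the table; the parsing itself is simply an assumption based on the underscore-separated pattern:

```
from pathlib import Path

def parse_subesco_filename(filename):
    """Split a SUBESCO file name into the parts described in the naming convention."""
    stem = Path(filename).stem  # drop the .wav extension
    gender, speaker_no, speaker_name, unit, unit_no, emotion, take = stem.split("_")
    return {
        "gender": gender,                 # F or M
        "speaker_number": speaker_no,
        "speaker_name": speaker_name,
        "unit": f"{unit}_{unit_no}",      # e.g. S_1 (sentence number)
        "emotion": emotion,
        "take": take,
    }

print(parse_subesco_filename("F_02_MONIKA_S_1_NEUTRAL_5.wav"))
# {'gender': 'F', 'speaker_number': '02', 'speaker_name': 'MONIKA',
#  'unit': 'S_1', 'emotion': 'NEUTRAL', 'take': '5'}
```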
sustcsenlp/bn_emotion_speech_corpus
[ "task_categories:audio-classification", "size_categories:1K<n<10K", "language:bn", "license:cc-by-4.0", "region:us" ]
2023-01-10T15:49:12+00:00
{"language": ["bn"], "license": "cc-by-4.0", "size_categories": ["1K<n<10K"], "task_categories": ["audio-classification"], "pretty_name": "SUST BANGLA EMOTIONAL SPEECH CORPUS"}
2023-01-11T09:00:32+00:00
[]
[ "bn" ]
TAGS #task_categories-audio-classification #size_categories-1K<n<10K #language-Bengali #license-cc-by-4.0 #region-us
SUST BANGLA EMOTIONAL SPEECH CORPUS =================================== Dataset Description ------------------- * Homepage: * Repository: * Paper: SUBESCO PAPER * Leaderboard: * Point of Contact: Sadia Sultana ### Dataset Summary SUBESCO is an audio-only emotional speech corpus of 7000 sentence-level utterances of the Bangla language. 20 professional actors (10 males and 10 females) participated in the recordings of 10 sentences for 7 target emotions. The emotions are Anger, Disgust, Fear, Happiness, Neutral, Sadness and Surprise. Total duration of the corpus is 7 hours 40 min 40 sec. Total size of the dataset is 2.03 GB. The dataset was evaluated by 50 raters (25 males, 25 females). Human perception test achieved a raw accuracy of 71%. All the details relating to creation, evaluation and analysis of SUBESCO have been described in the corresponding journal paper which has been published in Plos One. URL ### Downloading the data ### Naming Convention Each audio file in the dataset has a unique name. There are eight parts in the file name where all the parts are connected by underscores. The order of all the parts is organized as: Gender-Speaker's serial number-Speaker's name-Unit of recording-Unit number- Emotion name- Repeating number and the File format. For example, the filename F\_02\_MONIKA\_S\_1\_NEUTRAL\_5.wav refers to: ### Languages This dataset contains Bangla Audio Data. Dataset Creation ---------------- This database was created as a part of PhD thesis project of the author Sadia Sultana. It was designed and developed by the author in the Department of Computer Science and Engineering of Shahjalal University of Science and Technology. Financial grant was supported by the university. If you use the dataset please cite SUBESCO and the corresponding academic journal publication in Plos One. ### Contributors Dataset Structure ----------------- ### Data Instances ### Data Fields ### Data Splits ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information Considerations for Using the Data --------------------------------- ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations Additional Information ---------------------- ### Dataset Curators ### Licensing Information
[ "### Dataset Summary\n\n\nSUBESCO is an audio-only emotional speech corpus of 7000 sentence-level utterances of the Bangla language. 20 professional actors (10 males and 10 females) participated in the recordings of 10 sentences for 7 target emotions. The emotions are Anger, Disgust, Fear, Happiness, Neutral, Sadness and Surprise. Total duration of the corpus is 7 hours 40 min 40 sec. Total size of the dataset is 2.03 GB. The dataset was evaluated by 50 raters (25 males, 25 females). Human perception test achieved a raw accuracy of 71%. All the details relating to creation, evaluation and analysis of SUBESCO have been described in the corresponding journal paper which has been published in Plos One.\n\n\nURL", "### Downloading the data", "### Naming Convention\n\n\nEach audio file in the dataset has a unique name. There are eight parts in the file name where all the parts are connected by underscores. The order of all the parts is organized as: Gender-Speaker's serial number-Speaker's name-Unit of recording-Unit number- Emotion name- Repeating number and the File format.\n\n\nFor example, the filename F\\_02\\_MONIKA\\_S\\_1\\_NEUTRAL\\_5.wav refers to:", "### Languages\n\n\nThis dataset contains Bangla Audio Data.\n\n\nDataset Creation\n----------------\n\n\nThis database was created as a part of PhD thesis project of the author Sadia Sultana. It was designed and developed by the author in the Department of Computer Science and Engineering of Shahjalal University of Science and Technology. Financial grant was supported by the university. If you use the dataset please cite SUBESCO and the corresponding academic journal publication in Plos One.", "### Contributors\n\n\n\nDataset Structure\n-----------------", "### Data Instances", "### Data Fields", "### Data Splits", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information" ]
[ "TAGS\n#task_categories-audio-classification #size_categories-1K<n<10K #language-Bengali #license-cc-by-4.0 #region-us \n", "### Dataset Summary\n\n\nSUBESCO is an audio-only emotional speech corpus of 7000 sentence-level utterances of the Bangla language. 20 professional actors (10 males and 10 females) participated in the recordings of 10 sentences for 7 target emotions. The emotions are Anger, Disgust, Fear, Happiness, Neutral, Sadness and Surprise. Total duration of the corpus is 7 hours 40 min 40 sec. Total size of the dataset is 2.03 GB. The dataset was evaluated by 50 raters (25 males, 25 females). Human perception test achieved a raw accuracy of 71%. All the details relating to creation, evaluation and analysis of SUBESCO have been described in the corresponding journal paper which has been published in Plos One.\n\n\nURL", "### Downloading the data", "### Naming Convention\n\n\nEach audio file in the dataset has a unique name. There are eight parts in the file name where all the parts are connected by underscores. The order of all the parts is organized as: Gender-Speaker's serial number-Speaker's name-Unit of recording-Unit number- Emotion name- Repeating number and the File format.\n\n\nFor example, the filename F\\_02\\_MONIKA\\_S\\_1\\_NEUTRAL\\_5.wav refers to:", "### Languages\n\n\nThis dataset contains Bangla Audio Data.\n\n\nDataset Creation\n----------------\n\n\nThis database was created as a part of PhD thesis project of the author Sadia Sultana. It was designed and developed by the author in the Department of Computer Science and Engineering of Shahjalal University of Science and Technology. Financial grant was supported by the university. If you use the dataset please cite SUBESCO and the corresponding academic journal publication in Plos One.", "### Contributors\n\n\n\nDataset Structure\n-----------------", "### Data Instances", "### Data Fields", "### Data Splits", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information" ]
7782b8adf91665e12404934ca540f2f5dd69452c
# Dataset Card for Dataset Name ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary [More Information Needed] ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
3DJedi/56chevy
[ "region:us" ]
2023-01-10T15:50:12+00:00
{}
2023-02-01T14:49:50+00:00
[]
[]
TAGS #region-us
# Dataset Card for Dataset Name ## Dataset Description - Homepage: - Repository: - Paper: - Leaderboard: - Point of Contact: ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# Dataset Card for Dataset Name", "## Dataset Description\n\n- Homepage: \n- Repository: \n- Paper: \n- Leaderboard: \n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Dataset Name", "## Dataset Description\n\n- Homepage: \n- Repository: \n- Paper: \n- Leaderboard: \n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
6333d780489d0b6d47d73e33acaac260c3d12fdd
# Malicious Smart Contract Classification Dataset This dataset includes malicious and benign smart contracts deployed on Ethereum. Code used to collect this data: [data collection notebook](https://github.com/forta-network/starter-kits/blob/main/malicious-smart-contract-ml-py/data_collection.ipynb) For more details on how this dataset can be used, please check out this blog: [How Forta’s Predictive ML Models Detect Attacks Before Exploitation](https://forta.org/blog/how-fortas-predictive-ml-models-detect-attacks-before-exploitation/)
forta/malicious-smart-contract-dataset
[ "task_categories:token-classification", "size_categories:100K<n<1M", "license:mit", "smart contract", "ethereum", "blockchain", "security", "region:us" ]
2023-01-10T20:17:11+00:00
{"license": "mit", "size_categories": ["100K<n<1M"], "task_categories": ["token-classification"], "pretty_name": "Malicious Smart Contract Classification Dataset", "tags": ["smart contract", "ethereum", "blockchain", "security"]}
2023-01-10T22:03:23+00:00
[]
[]
TAGS #task_categories-token-classification #size_categories-100K<n<1M #license-mit #smart contract #ethereum #blockchain #security #region-us
# Malicious Smart Contract Classification Dataset This dataset includes malicious and benign smart contracts deployed on Ethereum. Code used to collect this data: data collection notebook For more details on how this dataset can be used, please check out this blog: How Forta’s Predictive ML Models Detect Attacks Before Exploitation
[ "# Malicious Smart Contract Classification Dataset\n\nThis dataset includes malicious and benign smart contracts deployed on Ethereum. \n\n\nCode used to collect this data: data collection notebook\n\nFor more details on how this dataset can be used, please check out this blog: How Forta’s Predictive ML Models Detect Attacks Before Exploitation" ]
[ "TAGS\n#task_categories-token-classification #size_categories-100K<n<1M #license-mit #smart contract #ethereum #blockchain #security #region-us \n", "# Malicious Smart Contract Classification Dataset\n\nThis dataset includes malicious and benign smart contracts deployed on Ethereum. \n\n\nCode used to collect this data: data collection notebook\n\nFor more details on how this dataset can be used, please check out this blog: How Forta’s Predictive ML Models Detect Attacks Before Exploitation" ]
b8441b5e8629b8f55540e14b6ded5d895c6b30e3
# Dataset Card for "raw-commit-diffs" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mamiksik/raw-commit-diffs
[ "region:us" ]
2023-01-10T21:38:14+00:00
{"dataset_info": {"features": [{"name": "language", "dtype": "string"}, {"name": "owner", "dtype": "string"}, {"name": "repo", "dtype": "string"}, {"name": "sha", "dtype": "string"}, {"name": "message", "dtype": "string"}, {"name": "path", "dtype": "string"}, {"name": "patch", "dtype": "string"}, {"name": "is_multipart", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 791921294, "num_examples": 399253}], "download_size": 240089156, "dataset_size": 791921294}}
2023-01-17T14:32:41+00:00
[]
[]
TAGS #region-us
# Dataset Card for "raw-commit-diffs" More Information needed
[ "# Dataset Card for \"raw-commit-diffs\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"raw-commit-diffs\"\n\nMore Information needed" ]
74bce39c64cc079ebcd96e2cd0787be80e50c88f
# Dataset Card for "analysed-diff-metadata" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mamiksik/annotated-diff-metadata
[ "region:us" ]
2023-01-10T21:56:25+00:00
{"dataset_info": {"features": [{"name": "sha", "dtype": "string"}, {"name": "author", "dtype": "string"}, {"name": "committer", "dtype": "string"}, {"name": "message", "dtype": "string"}, {"name": "subject", "dtype": "string"}, {"name": "subject_length", "dtype": "float64"}, {"name": "is_chore", "dtype": "bool"}, {"name": "is_bot", "dtype": "bool"}, {"name": "subject_word_count", "dtype": "float64"}, {"name": "verb_object_spacy", "dtype": "bool"}, {"name": "verb_object_stanza", "dtype": "bool"}, {"name": "fits_requirements", "dtype": "bool"}, {"name": "owner", "dtype": "string"}, {"name": "repo", "dtype": "string"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 221089223, "num_examples": 668743}], "download_size": 0, "dataset_size": 221089223}}
2023-01-10T22:04:53+00:00
[]
[]
TAGS #region-us
# Dataset Card for "analysed-diff-metadata" More Information needed
[ "# Dataset Card for \"analysed-diff-metadata\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"analysed-diff-metadata\"\n\nMore Information needed" ]
50caa25433c1c98d894fb153d7e53cc49b3310b7
# Dataset Card for "bookcorpus_compact_1024_shard9_of_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
saibo/bookcorpus_compact_1024_shard9_of_10
[ "region:us" ]
2023-01-10T22:07:44+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "concept_with_offset", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 754029555, "num_examples": 61605}], "download_size": 379859859, "dataset_size": 754029555}}
2023-01-10T22:08:25+00:00
[]
[]
TAGS #region-us
# Dataset Card for "bookcorpus_compact_1024_shard9_of_10" More Information needed
[ "# Dataset Card for \"bookcorpus_compact_1024_shard9_of_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"bookcorpus_compact_1024_shard9_of_10\"\n\nMore Information needed" ]
c9d8830c08ec15269b2ef700f1cfbf5c0ce11c8a
# Dataset Card for "talkrl-podcast" This dataset is sourced from the [TalkRL Podcast website](https://www.talkrl.com/) and contains English transcripts of wonderful TalkRL podcast episodes. The transcripts were generated using OpenAI's base Whisper model
RamAnanth1/talkrl-podcast
[ "task_categories:text-classification", "task_categories:text-generation", "task_categories:summarization", "size_categories:n<1K", "language:en", "region:us" ]
2023-01-10T23:09:01+00:00
{"language": ["en"], "size_categories": ["n<1K"], "task_categories": ["text-classification", "text-generation", "summarization"], "pretty_name": "TalkRL Podcast", "dataset_info": {"features": [{"name": "title", "dtype": "string"}, {"name": "summary", "dtype": "string"}, {"name": "link", "dtype": "string"}, {"name": "transcript", "dtype": "string"}, {"name": "segments", "list": [{"name": "end", "dtype": "float64"}, {"name": "start", "dtype": "float64"}, {"name": "text", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 4845076, "num_examples": 39}], "download_size": 2633561, "dataset_size": 4845076}}
2023-01-12T20:46:26+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #task_categories-text-generation #task_categories-summarization #size_categories-n<1K #language-English #region-us
# Dataset Card for "talkrl-podcast" This dataset is sourced from the TalkRL Podcast website and contains English transcripts of wonderful TalkRL podcast episodes. The transcripts were generated using OpenAI's base Whisper model
[ "# Dataset Card for \"talkrl-podcast\"\n\nThis dataset is sourced from the TalkRL Podcast website and contains English transcripts of wonderful TalkRL podcast episodes. The transcripts were generated using OpenAI's base Whisper model" ]
[ "TAGS\n#task_categories-text-classification #task_categories-text-generation #task_categories-summarization #size_categories-n<1K #language-English #region-us \n", "# Dataset Card for \"talkrl-podcast\"\n\nThis dataset is sourced from the TalkRL Podcast website and contains English transcripts of wonderful TalkRL podcast episodes. The transcripts were generated using OpenAI's base Whisper model" ]
9e8a752bcb4c44ee767c0a31d65668691414fe83
If you prefer MIDI or MusicXML, download [IrishMAN-MIDI](https://huggingface.co/datasets/sander-wood/irishman/resolve/main/irishman-midi.zip) or [IrishMAN-XML](https://huggingface.co/datasets/sander-wood/irishman/resolve/main/irishman-xml.zip). For better use of structural info in control codes, consider ABC notation. ## ABC Notation ABC notation is an ASCII-based plain text musical notation system that is commonly used for transcribing traditional music and sharing sheet music online. It provides a simple and concise way to represent musical elements such as notes, rhythms, chords, and more. For those looking to interact with ABC notation in various ways, there are several tools available: 1. **[Online ABC Player](https://abc.rectanglered.com/):** This web-based tool allows you to input ABC notation and hear the corresponding audio playback. By pasting your ABC code into the player, you can instantly listen to the tune as it would sound when played. 2. **[ABC Sheet Music Editor - EasyABC](https://easyabc.sourceforge.net/):** EasyABC is a user-friendly software application designed for creating, editing, and formatting ABC notation. Its graphical interface enables you to input your ABC code, preview the sheet music, and make adjustments as necessary. ## Dataset Summary The **Irish Massive ABC Notation (IrishMAN)** dataset includes 216,284 Irish tunes in ABC notation, divided into 99\% (214,122 tunes) for training and 1\% (2,162 tunes) for validation. These tunes were collected from thesession.org and abcnotation.com, both renowned for sharing traditional music. To ensure uniformity in formatting, all tunes were converted to XML and then back to ABC using [scripts](https://wim.vree.org/svgParse/), and fields containing natural language (e.g., titles and lyrics) were removed. Each tune is automatically annotated with control codes derived from ABC symbols, as described in the below section. These control codes offer insights into the musical forms and structures of each composition. In the IrishMAN dataset, a [music21](https://web.mit.edu/music21/doc/index.html#)-filtered [subset](https://huggingface.co/datasets/sander-wood/irishman/raw/main/leadsheet_ids.json) includes 34,211 lead sheets, each human-annotated with chord symbols. It is from this very subset that [TunesFormer](https://huggingface.co/sander-wood/tunesformer) developed its capacity to generate melodies with harmonies. A noteworthy aspect is the copyright status. All tunes in the dataset are in the public domain, ensuring ethical and legal usage for research and creative projects. ## Control Codes Inspired by [CTRL](https://huggingface.co/ctrl), we incorporate control codes into TunesFormer to represent musical forms. These codes, positioned ahead of the ABC notation, enable users to specify the structures of the generated tunes. The following control codes are introduced: - **S:number of sections**: determines the number of sections in the entire melody. It counts on several symbols that can be used to represent section boundaries: `[|`, `||`, `|]`, `|:`, `::`, and `:|`. In our dataset, the range is 1 to 8 (e.g., `S:1` for a single-section melody, and `S:8` for a melody with eight sections). - **B:number of bars**: specifies the desired number of bars within a section. It counts on the bar symbol `|`. In our dataset, the range is 1 to 32 (e.g., `B:1` for a one-bar section, and `B:32` for a section with 32 bars). 
- **E:edit distance similarity**: controls the similarity level between the current section $c$ and a previous section $p$ in the melody. It is based on the Levenshtein distance $lev(c,p)$ , quantifying the difference between sections for creating variations or contrasts. Mathematically, it can be expressed as: ``` eds(c,p) = 1 - lev(c,p) / max(|c|,|p|) ``` where $|c|$ and $|p|$ are the string lengths of the two sections. It is discretized into 11 levels, ranging from no match at all to an exact match (e.g., `E:0` for no similarity, and `E:10` for an exact match). ## Copyright Disclaimer This dataset is for research use only and not for commercial purposes. We believe all data in this dataset is in the public domain. If you own the copyright to any musical composition in the IrishMAN dataset and have concerns, please contact us at [email protected]. We will address your concerns and take appropriate action if needed. ## Special Thanks We would like to extend a special thanks to thesession.org and abcnotation.com for their contributions to the development and promotion of ABC notation, as well as their significant impact on the field of music information retrieval. Their platforms have become invaluable resources for the traditional and folk music community. We also wish to express our gratitude to Willem (Wim) for providing the essential conversion tools that enabled the transformation of the tunes into a uniform format. Together, these collaborations have made it possible for researchers like us to create and study extensive datasets like IrishMAN.
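To make the control-code definitions above concrete, here is a minimal Python sketch of how the three codes could be computed from raw ABC text. This is an illustration only, not the script used to annotate IrishMAN: the function names (`section_count`, `bar_count`, `edit_distance_level`) are invented for this example, and the exact bookkeeping (e.g., how a trailing `|]` is counted) may differ from the dataset's own tooling.

```python
import re

# Boundary symbols the card lists for the S: control code.
SECTION_BOUNDARY_RE = re.compile(r"\[\||\|\||\|\]|\|:|::|:\|")


def levenshtein(a: str, b: str) -> int:
    """Plain dynamic-programming Levenshtein distance lev(a, b)."""
    if len(a) < len(b):
        a, b = b, a
    previous = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        current = [i]
        for j, cb in enumerate(b, 1):
            current.append(min(
                previous[j] + 1,               # deletion
                current[j - 1] + 1,            # insertion
                previous[j - 1] + (ca != cb),  # substitution
            ))
        previous = current
    return previous[-1]


def edit_distance_level(current_section: str, previous_section: str) -> int:
    """E: code -- eds(c, p) = 1 - lev(c, p) / max(|c|, |p|), discretized to 0..10."""
    if not current_section and not previous_section:
        return 10  # two empty sections match exactly
    eds = 1 - levenshtein(current_section, previous_section) / max(
        len(current_section), len(previous_section)
    )
    return round(eds * 10)


def section_count(tune_body: str) -> int:
    """S: code -- approximate section count from boundary symbols."""
    boundaries = len(SECTION_BOUNDARY_RE.findall(tune_body))
    return max(boundaries, 1)  # a tune with no boundary symbol is one section


def bar_count(section: str) -> int:
    """B: code -- approximate bar count from '|' symbols in a section."""
    return section.count("|")


# Small demo: an exact repeat maps to E:10, an unrelated section to E:0.
print(edit_distance_level("ABAB", "ABAB"))  # -> 10
print(edit_distance_level("ABAB", "cdef"))  # -> 0
```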
sander-wood/irishman
[ "task_categories:text-generation", "size_categories:100K<n<1M", "license:mit", "music", "region:us" ]
2023-01-10T23:42:04+00:00
{"license": "mit", "size_categories": ["100K<n<1M"], "task_categories": ["text-generation"], "pretty_name": "IrishMAN", "tags": ["music"]}
2023-09-25T14:14:16+00:00
[]
[]
TAGS #task_categories-text-generation #size_categories-100K<n<1M #license-mit #music #region-us
If you prefer MIDI or MusicXML, download IrishMAN-MIDI or IrishMAN-XML. For better use of structural info in control codes, consider ABC notation. ## ABC Notation ABC notation is an ASCII-based plain text musical notation system that is commonly used for transcribing traditional music and sharing sheet music online. It provides a simple and concise way to represent musical elements such as notes, rhythms, chords, and more. For those looking to interact with ABC notation in various ways, there are several tools available: 1. Online ABC Player: This web-based tool allows you to input ABC notation and hear the corresponding audio playback. By pasting your ABC code into the player, you can instantly listen to the tune as it would sound when played. 2. ABC Sheet Music Editor - EasyABC: EasyABC is a user-friendly software application designed for creating, editing, and formatting ABC notation. Its graphical interface enables you to input your ABC code, preview the sheet music, and make adjustments as necessary. ## Dataset Summary The Irish Massive ABC Notation (IrishMAN) dataset includes 216,284 Irish tunes in ABC notation, divided into 99\% (214,122 tunes) for training and 1\% (2,162 tunes) for validation. These tunes were collected from URL and URL, both renowned for sharing traditional music. To ensure uniformity in formatting, all tunes were converted to XML and then back to ABC using scripts, and fields containing natural language (e.g., titles and lyrics) were removed. Each tune is automatically annotated with control codes derived from ABC symbols, as described in the below section. These control codes offer insights into the musical forms and structures of each composition. In the IrishMAN dataset, a music21-filtered subset includes 34,211 lead sheets, each human-annotated with chord symbols. It is from this very subset that TunesFormer developed its capacity to generate melodies with harmonies. A noteworthy aspect is the copyright status. All tunes in the dataset are in the public domain, ensuring ethical and legal usage for research and creative projects. ## Control Codes Inspired by CTRL, we incorporate control codes into TunesFormer to represent musical forms. These codes, positioned ahead of the ABC notation, enable users to specify the structures of the generated tunes. The following control codes are introduced: - S:number of sections: determines the number of sections in the entire melody. It counts on several symbols that can be used to represent section boundaries: '[|', '||', '|]', '|:', '::', and ':|'. In our dataset, the range is 1 to 8 (e.g., 'S:1' for a single-section melody, and 'S:8' for a melody with eight sections). - B:number of bars: specifies the desired number of bars within a section. It counts on the bar symbol '|'. In our dataset, the range is 1 to 32 (e.g., 'B:1' for a one-bar section, and 'B:32' for a section with 32 bars). - E:edit distance similarity: controls the similarity level between the current section $c$ and a previous section $p$ in the melody. It is based on the Levenshtein distance $lev(c,p)$ , quantifying the difference between sections for creating variations or contrasts. Mathematically, it can be expressed as: where $|c|$ and $|p|$ are the string lengths of the two sections. It is discretized into 11 levels, ranging from no match at all to an exact match (e.g., 'E:0' for no similarity, and 'E:10' for an exact match). ## Copyright Disclaimer This dataset is for research use only and not for commercial purposes. 
We believe all data in this dataset is in the public domain. If you own the copyright to any musical composition in the IrishMAN dataset and have concerns, please contact us at shangda@URL. We will address your concerns and take appropriate action if needed. ## Special Thanks We would like to extend a special thanks to URL and URL for their contributions to the development and promotion of ABC notation, as well as their significant impact on the field of music information retrieval. Their platforms have become invaluable resources for the traditional and folk music community. We also wish to express our gratitude to Willem (Wim) for providing the essential conversion tools that enabled the transformation of the tunes into a uniform format. Together, these collaborations have made it possible for researchers like us to create and study extensive datasets like IrishMAN.
[ "## ABC Notation\n\nABC notation is an ASCII-based plain text musical notation system that is commonly used for transcribing traditional music and sharing sheet music online. It provides a simple and concise way to represent musical elements such as notes, rhythms, chords, and more.\n\nFor those looking to interact with ABC notation in various ways, there are several tools available:\n\n1. Online ABC Player: This web-based tool allows you to input ABC notation and hear the corresponding audio playback. By pasting your ABC code into the player, you can instantly listen to the tune as it would sound when played.\n\n2. ABC Sheet Music Editor - EasyABC: EasyABC is a user-friendly software application designed for creating, editing, and formatting ABC notation. Its graphical interface enables you to input your ABC code, preview the sheet music, and make adjustments as necessary.", "## Dataset Summary\n\nThe Irish Massive ABC Notation (IrishMAN) dataset includes 216,284 Irish tunes in ABC notation, divided into 99\\% (214,122 tunes) for training and 1\\% (2,162 tunes) for validation. These tunes were collected from URL and URL, both renowned for sharing traditional music. To ensure uniformity in formatting, all tunes were converted to XML and then back to ABC using scripts, and fields containing natural language (e.g., titles and lyrics) were removed.\n\nEach tune is automatically annotated with control codes derived from ABC symbols, as described in the below section. These control codes offer insights into the musical forms and structures of each composition.\n\nIn the IrishMAN dataset, a music21-filtered subset includes 34,211 lead sheets, each human-annotated with chord symbols. It is from this very subset that TunesFormer developed its capacity to generate melodies with harmonies.\n\nA noteworthy aspect is the copyright status. All tunes in the dataset are in the public domain, ensuring ethical and legal usage for research and creative projects.", "## Control Codes\n\nInspired by CTRL, we incorporate control codes into TunesFormer to represent musical forms. These codes, positioned ahead of the ABC notation, enable users to specify the structures of the generated tunes. The following control codes are introduced:\n\n- S:number of sections: determines the number of sections in the entire melody. It counts on several symbols that can be used to represent section boundaries: '[|', '||', '|]', '|:', '::', and ':|'. In our dataset, the range is 1 to 8 (e.g., 'S:1' for a single-section melody, and 'S:8' for a melody with eight sections).\n\n- B:number of bars: specifies the desired number of bars within a section. It counts on the bar symbol '|'. In our dataset, the range is 1 to 32 (e.g., 'B:1' for a one-bar section, and 'B:32' for a section with 32 bars).\n\n- E:edit distance similarity: controls the similarity level between the current section $c$ and a previous section $p$ in the melody. It is based on the Levenshtein distance $lev(c,p)$ , quantifying the difference between sections for creating variations or contrasts. Mathematically, it can be expressed as:\n \n where $|c|$ and $|p|$ are the string lengths of the two sections. It is discretized into 11 levels, ranging from no match at all to an exact match (e.g., 'E:0' for no similarity, and 'E:10' for an exact match).", "## Copyright Disclaimer\nThis dataset is for research use only and not for commercial purposes. We believe all data in this dataset is in the public domain. 
If you own the copyright to any musical composition in the IrishMAN dataset and have concerns, please contact us at shangda@URL. We will address your concerns and take appropriate action if needed.", "## Special Thanks\nWe would like to extend a special thanks to URL and URL for their contributions to the development and promotion of ABC notation, as well as their significant impact on the field of music information retrieval. Their platforms have become invaluable resources for the traditional and folk music community. We also wish to express our gratitude to Willem (Wim) for providing the essential conversion tools that enabled the transformation of the tunes into a uniform format. Together, these collaborations have made it possible for researchers like us to create and study extensive datasets like IrishMAN." ]
[ "TAGS\n#task_categories-text-generation #size_categories-100K<n<1M #license-mit #music #region-us \n", "## ABC Notation\n\nABC notation is an ASCII-based plain text musical notation system that is commonly used for transcribing traditional music and sharing sheet music online. It provides a simple and concise way to represent musical elements such as notes, rhythms, chords, and more.\n\nFor those looking to interact with ABC notation in various ways, there are several tools available:\n\n1. Online ABC Player: This web-based tool allows you to input ABC notation and hear the corresponding audio playback. By pasting your ABC code into the player, you can instantly listen to the tune as it would sound when played.\n\n2. ABC Sheet Music Editor - EasyABC: EasyABC is a user-friendly software application designed for creating, editing, and formatting ABC notation. Its graphical interface enables you to input your ABC code, preview the sheet music, and make adjustments as necessary.", "## Dataset Summary\n\nThe Irish Massive ABC Notation (IrishMAN) dataset includes 216,284 Irish tunes in ABC notation, divided into 99\\% (214,122 tunes) for training and 1\\% (2,162 tunes) for validation. These tunes were collected from URL and URL, both renowned for sharing traditional music. To ensure uniformity in formatting, all tunes were converted to XML and then back to ABC using scripts, and fields containing natural language (e.g., titles and lyrics) were removed.\n\nEach tune is automatically annotated with control codes derived from ABC symbols, as described in the below section. These control codes offer insights into the musical forms and structures of each composition.\n\nIn the IrishMAN dataset, a music21-filtered subset includes 34,211 lead sheets, each human-annotated with chord symbols. It is from this very subset that TunesFormer developed its capacity to generate melodies with harmonies.\n\nA noteworthy aspect is the copyright status. All tunes in the dataset are in the public domain, ensuring ethical and legal usage for research and creative projects.", "## Control Codes\n\nInspired by CTRL, we incorporate control codes into TunesFormer to represent musical forms. These codes, positioned ahead of the ABC notation, enable users to specify the structures of the generated tunes. The following control codes are introduced:\n\n- S:number of sections: determines the number of sections in the entire melody. It counts on several symbols that can be used to represent section boundaries: '[|', '||', '|]', '|:', '::', and ':|'. In our dataset, the range is 1 to 8 (e.g., 'S:1' for a single-section melody, and 'S:8' for a melody with eight sections).\n\n- B:number of bars: specifies the desired number of bars within a section. It counts on the bar symbol '|'. In our dataset, the range is 1 to 32 (e.g., 'B:1' for a one-bar section, and 'B:32' for a section with 32 bars).\n\n- E:edit distance similarity: controls the similarity level between the current section $c$ and a previous section $p$ in the melody. It is based on the Levenshtein distance $lev(c,p)$ , quantifying the difference between sections for creating variations or contrasts. Mathematically, it can be expressed as:\n \n where $|c|$ and $|p|$ are the string lengths of the two sections. It is discretized into 11 levels, ranging from no match at all to an exact match (e.g., 'E:0' for no similarity, and 'E:10' for an exact match).", "## Copyright Disclaimer\nThis dataset is for research use only and not for commercial purposes. 
We believe all data in this dataset is in the public domain. If you own the copyright to any musical composition in the IrishMAN dataset and have concerns, please contact us at shangda@URL. We will address your concerns and take appropriate action if needed.", "## Special Thanks\nWe would like to extend a special thanks to URL and URL for their contributions to the development and promotion of ABC notation, as well as their significant impact on the field of music information retrieval. Their platforms have become invaluable resources for the traditional and folk music community. We also wish to express our gratitude to Willem (Wim) for providing the essential conversion tools that enabled the transformation of the tunes into a uniform format. Together, these collaborations have made it possible for researchers like us to create and study extensive datasets like IrishMAN." ]
a3fa717ef08fe2cbb8a52bc3ca25925029148538
# Dataset Card for "chocolate-captioned-dataset-100" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Umal-exvc/chocolate-captioned-dataset-100
[ "region:us" ]
2023-01-11T01:49:10+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 26453719.0, "num_examples": 100}], "download_size": 26029410, "dataset_size": 26453719.0}}
2023-01-11T01:49:25+00:00
[]
[]
TAGS #region-us
# Dataset Card for "chocolate-captioned-dataset-100" More Information needed
[ "# Dataset Card for \"chocolate-captioned-dataset-100\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"chocolate-captioned-dataset-100\"\n\nMore Information needed" ]
bb8926f23b50beded63ab70356227fccf1710ba1
# Dataset Card for "chocolate-captioned-dataset-200" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Umal-exvc/chocolate-captioned-dataset-200
[ "region:us" ]
2023-01-11T01:52:08+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 39998461.0, "num_examples": 200}], "download_size": 39150206, "dataset_size": 39998461.0}}
2023-01-11T01:52:24+00:00
[]
[]
TAGS #region-us
# Dataset Card for "chocolate-captioned-dataset-200" More Information needed
[ "# Dataset Card for \"chocolate-captioned-dataset-200\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"chocolate-captioned-dataset-200\"\n\nMore Information needed" ]
d958811dd03a784c15920a9c7c4bc2d51525cd41
# Dataset Card for "chocolate-captioned-dataset-300" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Umal-exvc/chocolate-captioned-dataset-300
[ "region:us" ]
2023-01-11T01:54:22+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 51486341.0, "num_examples": 300}], "download_size": 50401727, "dataset_size": 51486341.0}}
2023-01-11T01:54:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for "chocolate-captioned-dataset-300" More Information needed
[ "# Dataset Card for \"chocolate-captioned-dataset-300\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"chocolate-captioned-dataset-300\"\n\nMore Information needed" ]
a487f2ad4035ef84f49da42ae8029aa6c07d4e1c
# Dataset Card for "chocolate-captioned-dataset-400" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Umal-exvc/chocolate-captioned-dataset-400
[ "region:us" ]
2023-01-11T01:56:49+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 64772495.0, "num_examples": 400}], "download_size": 63382786, "dataset_size": 64772495.0}}
2023-01-11T01:57:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for "chocolate-captioned-dataset-400" More Information needed
[ "# Dataset Card for \"chocolate-captioned-dataset-400\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"chocolate-captioned-dataset-400\"\n\nMore Information needed" ]
70462a61bd0bd7fda53096eed888b50f03d23823
# AutoTrain Dataset for project: hannah-training-demo ## Dataset Description This dataset has been automatically processed by AutoTrain for project hannah-training-demo. ### Languages The BCP-47 code for the dataset's language is unk. ## Dataset Structure ### Data Instances A sample from this dataset looks as follows: ```json [ { "image": "<842x1392 RGBA PIL image>", "target": 0 }, { "image": "<1004x1516 RGBA PIL image>", "target": 0 } ] ``` ### Dataset Fields The dataset has the following fields (also called "features"): ```json { "image": "Image(decode=True, id=None)", "target": "ClassLabel(names=['hannah'], id=None)" } ``` ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follows: | Split name | Num samples | | ------------ | ------------------- | | train | 7 | | valid | 7 |
slushily/autotrain-data-hannah-training-demo
[ "task_categories:image-classification", "region:us" ]
2023-01-11T03:41:05+00:00
{"task_categories": ["image-classification"]}
2023-01-11T03:42:50+00:00
[]
[]
TAGS #task_categories-image-classification #region-us
AutoTrain Dataset for project: hannah-training-demo =================================================== Dataset Description ------------------- This dataset has been automatically processed by AutoTrain for project hannah-training-demo. ### Languages The BCP-47 code for the dataset's language is unk. Dataset Structure ----------------- ### Data Instances A sample from this dataset looks as follows: ### Dataset Fields The dataset has the following fields (also called "features"): ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follows:
[ "### Languages\n\n\nThe BCP-47 code for the dataset's language is unk.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
[ "TAGS\n#task_categories-image-classification #region-us \n", "### Languages\n\n\nThe BCP-47 code for the dataset's language is unk.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
4ce4db8035a9ab9b968ee4a6329f7a724a963cca
# Dataset Card for "Lemon_filter" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
akshaypt7/Lemon_filter
[ "region:us" ]
2023-01-11T04:49:43+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 936008.0, "num_examples": 30}], "download_size": 0, "dataset_size": 936008.0}}
2023-01-18T06:23:48+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Lemon_filter" More Information needed
[ "# Dataset Card for \"Lemon_filter\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Lemon_filter\"\n\nMore Information needed" ]
06eb33548c674fb415dca8a2c116b8818205dcd1
> # Deprecation Notice! > [This dataset has been superseded by v2](https://huggingface.co/datasets/hearmeneigh/e621-rising-v2-raw). Use v2 instead of this dataset. **Warning: THIS dataset is NOT suitable for use by minors. The dataset contains X-rated/NSFW content.** # E621 Rising: Raw Image Dataset v1 **2,905,671** images (~1.1TB) downloaded from `e621.net` with [tags](https://huggingface.co/datasets/hearmeneigh/e621-rising-v1-raw/raw/main/meta/tag-counts.json). This is a raw, uncurated, and largely unprocessed dataset. You likely want to use the curated version, [available here](https://huggingface.co/datasets/hearmeneigh/e621-rising-v1-curated). This dataset contains all kinds of NSFW material. You have been warned. ## Image Processing * Only `jpg` and `png` images were considered * Image width and height have been clamped to `(0, 4096]px`; larger images have been resized to meet the limit * Alpha channels have been removed * All images have been converted to `jpg` format * All images have been converted to TrueColor `RGB` * All images have been verified to load with `Pillow` * Metadata from E621 is [available here](https://huggingface.co/datasets/hearmeneigh/e621-rising-v1-raw/tree/main/meta). ## Tags For a comprehensive list of tags and counts, [see here](https://huggingface.co/datasets/hearmeneigh/e621-rising-v1-raw/raw/main/meta/tag-counts.json). ### Changes From E621 * Symbols have been prefixed with `symbol:`, e.g. `symbol:<3` * Aspect ratio has been prefixed with `aspect_ratio:`, e.g. `aspect_ratio:16_9` * All categories except `general` have been prefixed with the category name, e.g. `artist:somename`. The categories are: * `artist` * `copyright` * `character` * `species` * `invalid` * `meta` * `lore` ### Additional Tags * Image rating * `rating:explicit` * `rating:questionable` * `rating:safe` * Image score * `score:above_250` * `score:above_500` * `score:above_1000` * `score:above_1500` * `score:above_2000` * `score:below_250` * `score:below_100` * `score:below_50` * `score:below_25` * `score:below_0` * Image favorites * `favorites:above_4000` * `favorites:above_3000` * `favorites:above_2000` * `favorites:above_1000` * `favorites:below_1000` * `favorites:below_500` * `favorites:below_250` * `favorites:below_100` * `favorites:below_50` * `favorites:below_25`
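The preprocessing steps listed in the card translate naturally into a short Pillow routine. The sketch below is hypothetical — it is not the pipeline used to build the dataset, and details such as the resampling filter, the JPEG quality, and the helper names (`normalize_image`, `iter_candidates`) are assumptions made for illustration.

```python
from pathlib import Path

from PIL import Image

MAX_SIDE = 4096  # card: width/height clamped to (0, 4096] px


def normalize_image(src: Path, dst_dir: Path) -> Path:
    """Resize, drop alpha, and re-encode one image as a TrueColor RGB JPEG."""
    with Image.open(src) as im:
        im.load()  # forces decoding, so files Pillow cannot read fail here
        if max(im.size) > MAX_SIDE:
            # Scale so both dimensions end up at most MAX_SIDE.
            scale = MAX_SIDE / max(im.size)
            new_size = (max(1, round(im.width * scale)), max(1, round(im.height * scale)))
            im = im.resize(new_size, Image.LANCZOS)
        if im.mode != "RGB":
            im = im.convert("RGB")  # removes alpha channels / palettes
        dst = dst_dir / (src.stem + ".jpg")
        im.save(dst, format="JPEG", quality=95)  # quality value is an assumption
    return dst


def iter_candidates(root: Path):
    """Yield the jpg/png files that the card says were considered."""
    for path in root.rglob("*"):
        if path.suffix.lower() in {".jpg", ".jpeg", ".png"}:
            yield path
```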
hearmeneigh/e621-rising-v1-raw
[ "size_categories:1M<n<10M", "not-for-all-audiences", "region:us" ]
2023-01-11T08:52:10+00:00
{"size_categories": ["1M<n<10M"], "pretty_name": "E621 Rising: Raw Image Dataset v1", "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1192534908282.634, "num_examples": 2905671}], "download_size": 210413447679, "dataset_size": 1192534908282.634}, "viewer": false, "tags": ["not-for-all-audiences"]}
2023-05-12T15:35:09+00:00
[]
[]
TAGS #size_categories-1M<n<10M #not-for-all-audiences #region-us
> # Deprecation Notice! > This dataset has been superseded by v2. Use v2 instead of this dataset. Warning: THIS dataset is NOT suitable for use by minors. The dataset contains X-rated/NFSW content. # E621 Rising: Raw Image Dataset v1 2,905,671 images (~1.1TB) downloaded from 'URL' with tags. This is a raw, uncurated, and largely unprocessed dataset. You likely want to use the curated version, available here. This dataset contains all kinds of NFSW material. You have been warned. ## Image Processing * Only 'jpg' and 'png' images were considered * Image width and height have been clamped to '(0, 4096]px'; larger images have been resized to meet the limit * Alpha channels have been removed * All images have been converted to 'jpg' format * All images have been converted to TrueColor 'RGB' * All images have been verified to load with 'Pillow' * Metadata from E621 is available here. ## Tags For a comprehensive list of tags and counts, see here. ### Changes From E621 * Symbols have been prefixed with 'symbol:', e.g. 'symbol:<3' * Aspect ratio has been prefixed with 'aspect_ratio:', e.g. 'aspect_ratio:16_9' * All categories except 'general' have been prefixed with the category name, e.g. 'artist:somename'. The categories are: * 'artist' * 'copyright' * 'character' * 'species' * 'invalid' * 'meta' * 'lore' ### Additional Tags * Image rating * 'rating:explicit' * 'rating:questionable' * 'rating:safe' * Image score * 'score:above_250' * 'score:above_500' * 'score:above_1000' * 'score:above_1500' * 'score:above_2000' * 'score:below_250' * 'score:below_100' * 'score:below_50' * 'score:below_25' * 'score:below_0' * Image favorites * 'favorites:above_4000' * 'favorites:above_3000' * 'favorites:above_2000' * 'favorites:above_1000' * 'favorites:below_1000' * 'favorites:below_500' * 'favorites:below_250' * 'favorites:below_100' * 'favorites:below_50' * 'favorites:below_25'
[ "# Deprecation Notice!\n> This dataset has been superseded by v2. Use v2 instead of this dataset.\n\n\nWarning: THIS dataset is NOT suitable for use by minors. The dataset contains X-rated/NFSW content.", "# E621 Rising: Raw Image Dataset v1\n\n2,905,671 images (~1.1TB) downloaded from 'URL' with tags.\n\nThis is a raw, uncurated, and largely unprocessed dataset. You likely want to use the curated version, available here. This dataset contains all kinds of NFSW material. You have been warned.", "## Image Processing\n* Only 'jpg' and 'png' images were considered\n* Image width and height have been clamped to '(0, 4096]px'; larger images have been resized to meet the limit\n* Alpha channels have been removed\n* All images have been converted to 'jpg' format\n* All images have been converted to TrueColor 'RGB'\n* All images have been verified to load with 'Pillow'\n* Metadata from E621 is available here.", "## Tags\nFor a comprehensive list of tags and counts, see here.", "### Changes From E621\n* Symbols have been prefixed with 'symbol:', e.g. 'symbol:<3'\n* Aspect ratio has been prefixed with 'aspect_ratio:', e.g. 'aspect_ratio:16_9'\n* All categories except 'general' have been prefixed with the category name, e.g. 'artist:somename'. The categories are:\n * 'artist'\n * 'copyright'\n * 'character'\n * 'species'\n * 'invalid'\n * 'meta'\n * 'lore'", "### Additional Tags\n* Image rating\n * 'rating:explicit'\n * 'rating:questionable'\n * 'rating:safe'\n* Image score\n * 'score:above_250'\n * 'score:above_500'\n * 'score:above_1000'\n * 'score:above_1500'\n * 'score:above_2000'\n * 'score:below_250'\n * 'score:below_100'\n * 'score:below_50'\n * 'score:below_25'\n * 'score:below_0'\n* Image favorites\n * 'favorites:above_4000'\n * 'favorites:above_3000'\n * 'favorites:above_2000'\n * 'favorites:above_1000'\n * 'favorites:below_1000'\n * 'favorites:below_500'\n * 'favorites:below_250'\n * 'favorites:below_100'\n * 'favorites:below_50'\n * 'favorites:below_25'" ]
[ "TAGS\n#size_categories-1M<n<10M #not-for-all-audiences #region-us \n", "# Deprecation Notice!\n> This dataset has been superseded by v2. Use v2 instead of this dataset.\n\n\nWarning: THIS dataset is NOT suitable for use by minors. The dataset contains X-rated/NFSW content.", "# E621 Rising: Raw Image Dataset v1\n\n2,905,671 images (~1.1TB) downloaded from 'URL' with tags.\n\nThis is a raw, uncurated, and largely unprocessed dataset. You likely want to use the curated version, available here. This dataset contains all kinds of NFSW material. You have been warned.", "## Image Processing\n* Only 'jpg' and 'png' images were considered\n* Image width and height have been clamped to '(0, 4096]px'; larger images have been resized to meet the limit\n* Alpha channels have been removed\n* All images have been converted to 'jpg' format\n* All images have been converted to TrueColor 'RGB'\n* All images have been verified to load with 'Pillow'\n* Metadata from E621 is available here.", "## Tags\nFor a comprehensive list of tags and counts, see here.", "### Changes From E621\n* Symbols have been prefixed with 'symbol:', e.g. 'symbol:<3'\n* Aspect ratio has been prefixed with 'aspect_ratio:', e.g. 'aspect_ratio:16_9'\n* All categories except 'general' have been prefixed with the category name, e.g. 'artist:somename'. The categories are:\n * 'artist'\n * 'copyright'\n * 'character'\n * 'species'\n * 'invalid'\n * 'meta'\n * 'lore'", "### Additional Tags\n* Image rating\n * 'rating:explicit'\n * 'rating:questionable'\n * 'rating:safe'\n* Image score\n * 'score:above_250'\n * 'score:above_500'\n * 'score:above_1000'\n * 'score:above_1500'\n * 'score:above_2000'\n * 'score:below_250'\n * 'score:below_100'\n * 'score:below_50'\n * 'score:below_25'\n * 'score:below_0'\n* Image favorites\n * 'favorites:above_4000'\n * 'favorites:above_3000'\n * 'favorites:above_2000'\n * 'favorites:above_1000'\n * 'favorites:below_1000'\n * 'favorites:below_500'\n * 'favorites:below_250'\n * 'favorites:below_100'\n * 'favorites:below_50'\n * 'favorites:below_25'" ]
bdac0343d333c72ea8c64f17298accaae424136c
# Dataset Card for "vul_lines" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EddieChen372/Vudenc_with_norm_vul_lines
[ "region:us" ]
2023-01-11T09:17:49+00:00
{"dataset_info": {"features": [{"name": "lines", "sequence": "string"}, {"name": "raw_lines", "sequence": "string"}, {"name": "label", "sequence": "int64"}, {"name": "type", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 14476057, "num_examples": 12672}, {"name": "test", "num_bytes": 3485317, "num_examples": 3169}], "download_size": 7020615, "dataset_size": 17961374}}
2023-04-01T00:24:45+00:00
[]
[]
TAGS #region-us
# Dataset Card for "vul_lines" More Information needed
[ "# Dataset Card for \"vul_lines\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"vul_lines\"\n\nMore Information needed" ]
7747df46ae4efef9a33af5c877bc9dc9d4c1ba95
See https://github.com/PaulLerner/ViQuAE
PaulLerner/wit_for_mict
[ "region:us" ]
2023-01-11T09:47:56+00:00
{}
2023-01-11T10:01:40+00:00
[]
[]
TAGS #region-us
See URL
[]
[ "TAGS\n#region-us \n" ]
a4045dfafc8f653eff42ae42ecea9167dc6fdb46
AviationQA is introduced in the paper titled "There is No Big Brother or Small Brother: Knowledge Infusion in Language Models for Link Prediction and Question Answering" (https://aclanthology.org/2022.icon-main.26/). The paper was accepted at the main conference of ICON 2022. We create a synthetic dataset, AviationQA, a set of 1 million factoid QA pairs generated with templates from 12,000 National Transportation Safety Board (NTSB) reports. The questions are written so that their answers are entities occurring in the AviationKG (Agarwal et al., 2022). AviationQA will be helpful to researchers in finding insights into aircraft accidents and their prevention. Examples from the dataset: What was the Aircraft Damage of the accident no. ERA22LA162? Answer: Substantial. Where was the Destination of the accident no. ERA22LA162? Answer: Naples, GA (APH)
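The template-based construction described above can be illustrated with a toy sketch. Everything in the snippet below is hypothetical — the record fields and templates are modeled only on the two example questions given in this card, not taken from the actual AviationQA build scripts.

```python
# Hypothetical property -> question templates, modeled on the examples above.
TEMPLATES = {
    "Aircraft Damage": "What was the Aircraft Damage of the accident no. {acc}?",
    "Destination": "Where was the Destination of the accident no. {acc}?",
}

# Hypothetical records standing in for fields extracted from NTSB reports.
accidents = [
    {"id": "ERA22LA162", "Aircraft Damage": "Substantial", "Destination": "Naples, GA (APH)"},
]


def generate_qa_pairs(records, templates):
    """Yield (question, answer) pairs by filling each template per record."""
    for record in records:
        for prop, template in templates.items():
            if prop in record:
                yield template.format(acc=record["id"]), record[prop]


for question, answer in generate_qa_pairs(accidents, TEMPLATES):
    print(question, "->", answer)
```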
sakharamg/AviationQA
[ "task_categories:question-answering", "language:en", "license:cc-by-4.0", "Question Answering", "Aviation", "Knowledge Graphs", "region:us" ]
2023-01-11T09:52:39+00:00
{"language": ["en"], "license": "cc-by-4.0", "task_categories": ["question-answering"], "pretty_name": "AviationQA", "tags": ["Question Answering", "Aviation", "Knowledge Graphs"]}
2023-04-06T18:08:21+00:00
[]
[ "en" ]
TAGS #task_categories-question-answering #language-English #license-cc-by-4.0 #Question Answering #Aviation #Knowledge Graphs #region-us
AviationQA is introduced in the paper titled "There is No Big Brother or Small Brother: Knowledge Infusion in Language Models for Link Prediction and Question Answering" (URL). The paper was accepted at the main conference of ICON 2022. We create a synthetic dataset, AviationQA, a set of 1 million factoid QA pairs generated with templates from 12,000 National Transportation Safety Board (NTSB) reports. The questions are written so that their answers are entities occurring in the AviationKG (Agarwal et al., 2022). AviationQA will be helpful to researchers in finding insights into aircraft accidents and their prevention. Examples from the dataset: What was the Aircraft Damage of the accident no. ERA22LA162? Answer: Substantial. Where was the Destination of the accident no. ERA22LA162? Answer: Naples, GA (APH)
[]
[ "TAGS\n#task_categories-question-answering #language-English #license-cc-by-4.0 #Question Answering #Aviation #Knowledge Graphs #region-us \n" ]
758679ce409e6cc3a496654ebd0198c2fdcda19e
# Dataset Card for "npsc_dataset_tmp" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
perisolb/npsc_dataset_tmp
[ "region:us" ]
2023-01-11T10:32:08+00:00
{"dataset_info": {"features": [{"name": "speaker_id", "dtype": "string"}, {"name": "gender", "dtype": "string"}, {"name": "utterance_id", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "raw_text", "dtype": "string"}, {"name": "full_audio_file", "dtype": "string"}, {"name": "original_data_split", "dtype": "string"}, {"name": "region", "dtype": "string"}, {"name": "duration", "dtype": "float64"}, {"name": "start", "dtype": "float64"}, {"name": "end", "dtype": "float64"}, {"name": "utterance_audio_file", "dtype": "audio"}, {"name": "standardized_text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9050653.0, "num_examples": 50}, {"name": "test", "num_bytes": 1225074.0, "num_examples": 10}, {"name": "validation", "num_bytes": 1225074.0, "num_examples": 10}], "download_size": 11505743, "dataset_size": 11500801.0}}
2023-01-11T10:32:34+00:00
[]
[]
TAGS #region-us
# Dataset Card for "npsc_dataset_tmp" More Information needed
[ "# Dataset Card for \"npsc_dataset_tmp\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"npsc_dataset_tmp\"\n\nMore Information needed" ]
8a51973c55ce233490fe8095b5d3eac5ad1b9438
# Collection of models trained on axis cameras
evo4np/testing
[ "region:us" ]
2023-01-11T12:12:56+00:00
{}
2023-01-12T08:40:59+00:00
[]
[]
TAGS #region-us
# Collection of models trained on axis cameras
[ "# Collection of models trained on axis cameras" ]
[ "TAGS\n#region-us \n", "# Collection of models trained on axis cameras" ]