| column | type | min | max |
|---|---|---|---|
| sha | stringlengths | 40 | 40 |
| text | stringlengths | 1 | 13.4M |
| id | stringlengths | 2 | 117 |
| tags | sequencelengths | 1 | 7.91k |
| created_at | stringlengths | 25 | 25 |
| metadata | stringlengths | 2 | 875k |
| last_modified | stringlengths | 25 | 25 |
| arxiv | sequencelengths | 0 | 25 |
| languages | sequencelengths | 0 | 7.91k |
| tags_str | stringlengths | 17 | 159k |
| text_str | stringlengths | 1 | 447k |
| text_lists | sequencelengths | 0 | 352 |
| processed_texts | sequencelengths | 1 | 353 |
ee09a413cf6a14ea2c27b807f7350ef04e25772d
# Dataset Card for Telugu ASR Corpus ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary [More Information Needed] ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@parambharat](https://github.com/parambharat) for adding this dataset.
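Because the card above is still a blank template, a minimal loading sketch may help orient readers. It assumes the Hugging Face `datasets` library; the repository id is taken from the next field of this row, but the split and column names are not documented in the card, so they are inspected rather than assumed.

```python
# Minimal sketch, assuming the `datasets` library is installed and the hub id
# below (from this row) is reachable; splits and columns are not documented
# in the card, so we print them instead of hard-coding field names.
from datasets import load_dataset

ds = load_dataset("parambharat/telugu_asr_corpus")
print(ds)                         # available splits and their columns
first_split = next(iter(ds.values()))
print(first_split[0].keys())      # inspect one example's fields before use
```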
parambharat/telugu_asr_corpus
[ "task_categories:automatic-speech-recognition", "annotations_creators:found", "language_creators:found", "multilinguality:monolingual", "size_categories:100K<n<1M", "source_datasets:extended|openslr", "language:te", "license:cc-by-4.0", "region:us" ]
2022-12-12T07:23:54+00:00
{"annotations_creators": ["found"], "language_creators": ["found"], "language": ["te"], "license": ["cc-by-4.0"], "multilinguality": ["monolingual"], "size_categories": ["100K<n<1M"], "source_datasets": ["extended|openslr"], "task_categories": ["automatic-speech-recognition"], "task_ids": [], "pretty_name": "Telugu ASR Corpus", "tags": []}
2022-12-13T08:04:56+00:00
[]
[ "te" ]
TAGS #task_categories-automatic-speech-recognition #annotations_creators-found #language_creators-found #multilinguality-monolingual #size_categories-100K<n<1M #source_datasets-extended|openslr #language-Telugu #license-cc-by-4.0 #region-us
# Dataset Card for [Telugu Asr Corpus] ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: - Repository: - Paper: - Leaderboard: - Point of Contact: ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions Thanks to @parambharat for adding this dataset.
[ "# Dataset Card for [Telugu Asr Corpus]", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @parambharat for adding this dataset." ]
[ "TAGS\n#task_categories-automatic-speech-recognition #annotations_creators-found #language_creators-found #multilinguality-monolingual #size_categories-100K<n<1M #source_datasets-extended|openslr #language-Telugu #license-cc-by-4.0 #region-us \n", "# Dataset Card for [Telugu Asr Corpus]", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @parambharat for adding this dataset." ]
3104c36eed9241dbff7230e0cc4acf62d7331a06
# Dataset Card for "SDN-Dialect-Dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
AymanMansour/SDN-Dialect-Dataset
[ "region:us" ]
2022-12-12T08:36:08+00:00
{"dataset_info": {"features": [{"name": "filename", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "orthographic", "dtype": "string"}, {"name": "transliteration", "dtype": "string"}, {"name": "audio", "dtype": "audio"}], "splits": [{"name": "train", "num_bytes": 1697895484.76, "num_examples": 4830}, {"name": "test", "num_bytes": 244760635.0, "num_examples": 532}], "download_size": 2883670807, "dataset_size": 1942656119.76}}
2022-12-12T08:41:19+00:00
[]
[]
TAGS #region-us
# Dataset Card for "SDN-Dialect-Dataset" More Information needed
[ "# Dataset Card for \"SDN-Dialect-Dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"SDN-Dialect-Dataset\"\n\nMore Information needed" ]
89ae90cf6e8d21e4f81b581252f1c8f4964b2de3
# Dataset Card for "lexFridmanPodcast-transcript-audio" ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [Whispering-GPT](https://github.com/matallanas/whisper_gpt_pipeline) - **Repository:** [whisper_gpt_pipeline](https://github.com/matallanas/whisper_gpt_pipeline) - **Paper:** [whisper](https://cdn.openai.com/papers/whisper.pdf) and [gpt](https://s3-us-west-2.amazonaws.com/openai-assets/research-covers/language-unsupervised/language_understanding_paper.pdf) - **Point of Contact:** [Whispering-GPT organization](https://huggingface.co/Whispering-GPT) ### Dataset Summary This dataset is created by applying whisper to the videos of the Youtube channel [Lex Fridman Podcast](https://www.youtube.com/watch?v=FhfmGM6hswI&list=PLrAXtmErZgOdP_8GztsuKi9nrraNbKKp4&ab_channel=LexFridman). The dataset was created a medium size whisper model. ### Languages - **Language**: English ## Dataset Structure The dataset contains all the transcripts plus the audio of the different videos of Lex Fridman Podcast. ### Data Fields The dataset is composed by: - **id**: Id of the youtube video. - **channel**: Name of the channel. - **channel\_id**: Id of the youtube channel. - **title**: Title given to the video. - **categories**: Category of the video. - **description**: Description added by the author. - **text**: Whole transcript of the video. - **segments**: A list with the time and transcription of the video. - **start**: When started the trancription. - **end**: When the transcription ends. - **text**: The text of the transcription. ### Data Splits - Train split. ## Dataset Creation ### Source Data The transcriptions are from the videos of [Lex Fridman Podcast](https://www.youtube.com/watch?v=FhfmGM6hswI&list=PLrAXtmErZgOdP_8GztsuKi9nrraNbKKp4&ab_channel=LexFridman) ### Contributions Thanks to [Whispering-GPT](https://huggingface.co/Whispering-GPT) organization for adding this dataset.
Whispering-GPT/lex-fridman-podcast
[ "task_categories:automatic-speech-recognition", "language:en", "whisper", "whispering", "medium", "region:us" ]
2022-12-12T09:09:49+00:00
{"language": "en", "task_categories": ["automatic-speech-recognition"], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "channel", "dtype": "string"}, {"name": "channel_id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "categories", "sequence": "string"}, {"name": "tags", "sequence": "string"}, {"name": "description", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "segments", "list": [{"name": "start", "dtype": "float64"}, {"name": "end", "dtype": "float64"}, {"name": "text", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 102530760, "num_examples": 346}], "download_size": 57264732, "dataset_size": 102530760}, "tags": ["whisper", "whispering", "medium"]}
2023-07-13T13:47:17+00:00
[]
[ "en" ]
TAGS #task_categories-automatic-speech-recognition #language-English #whisper #whispering #medium #region-us
# Dataset Card for "lexFridmanPodcast-transcript-audio" ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Contributions ## Dataset Description - Homepage: Whispering-GPT - Repository: whisper_gpt_pipeline - Paper: whisper and gpt - Point of Contact: Whispering-GPT organization ### Dataset Summary This dataset is created by applying whisper to the videos of the Youtube channel Lex Fridman Podcast. The dataset was created a medium size whisper model. ### Languages - Language: English ## Dataset Structure The dataset contains all the transcripts plus the audio of the different videos of Lex Fridman Podcast. ### Data Fields The dataset is composed by: - id: Id of the youtube video. - channel: Name of the channel. - channel\_id: Id of the youtube channel. - title: Title given to the video. - categories: Category of the video. - description: Description added by the author. - text: Whole transcript of the video. - segments: A list with the time and transcription of the video. - start: When started the trancription. - end: When the transcription ends. - text: The text of the transcription. ### Data Splits - Train split. ## Dataset Creation ### Source Data The transcriptions are from the videos of Lex Fridman Podcast ### Contributions Thanks to Whispering-GPT organization for adding this dataset.
[ "# Dataset Card for \"lexFridmanPodcast-transcript-audio\"", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Contributions", "## Dataset Description\n\n- Homepage: Whispering-GPT\n- Repository: whisper_gpt_pipeline\n- Paper: whisper and gpt\n- Point of Contact: Whispering-GPT organization", "### Dataset Summary\n\nThis dataset is created by applying whisper to the videos of the Youtube channel Lex Fridman Podcast. The dataset was created a medium size whisper model.", "### Languages\n\n- Language: English", "## Dataset Structure\n\nThe dataset contains all the transcripts plus the audio of the different videos of Lex Fridman Podcast.", "### Data Fields\n\nThe dataset is composed by:\n- id: Id of the youtube video.\n- channel: Name of the channel.\n- channel\\_id: Id of the youtube channel.\n- title: Title given to the video.\n- categories: Category of the video.\n- description: Description added by the author.\n- text: Whole transcript of the video.\n- segments: A list with the time and transcription of the video.\n - start: When started the trancription.\n - end: When the transcription ends.\n - text: The text of the transcription.", "### Data Splits\n\n- Train split.", "## Dataset Creation", "### Source Data\n\nThe transcriptions are from the videos of Lex Fridman Podcast", "### Contributions\n\nThanks to Whispering-GPT organization for adding this dataset." ]
[ "TAGS\n#task_categories-automatic-speech-recognition #language-English #whisper #whispering #medium #region-us \n", "# Dataset Card for \"lexFridmanPodcast-transcript-audio\"", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Contributions", "## Dataset Description\n\n- Homepage: Whispering-GPT\n- Repository: whisper_gpt_pipeline\n- Paper: whisper and gpt\n- Point of Contact: Whispering-GPT organization", "### Dataset Summary\n\nThis dataset is created by applying whisper to the videos of the Youtube channel Lex Fridman Podcast. The dataset was created a medium size whisper model.", "### Languages\n\n- Language: English", "## Dataset Structure\n\nThe dataset contains all the transcripts plus the audio of the different videos of Lex Fridman Podcast.", "### Data Fields\n\nThe dataset is composed by:\n- id: Id of the youtube video.\n- channel: Name of the channel.\n- channel\\_id: Id of the youtube channel.\n- title: Title given to the video.\n- categories: Category of the video.\n- description: Description added by the author.\n- text: Whole transcript of the video.\n- segments: A list with the time and transcription of the video.\n - start: When started the trancription.\n - end: When the transcription ends.\n - text: The text of the transcription.", "### Data Splits\n\n- Train split.", "## Dataset Creation", "### Source Data\n\nThe transcriptions are from the videos of Lex Fridman Podcast", "### Contributions\n\nThanks to Whispering-GPT organization for adding this dataset." ]
453d2263bff44b16b7cccac1679d44042c064199
# Dataset Card for Dataset Name <!-- Provide a quick summary of the dataset. --> This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. 
--> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
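Since this row is the blank dataset-card template itself, a short sketch of how such a card can be instantiated programmatically may be useful. It assumes the `huggingface_hub` card API (`DatasetCardData` / `DatasetCard.from_template`); every metadata value below is a placeholder, not something taken from this row.

```python
# Sketch: fill the dataset-card template programmatically with huggingface_hub.
# All metadata values below are illustrative placeholders.
from huggingface_hub import DatasetCard, DatasetCardData

card_data = DatasetCardData(
    language="en",
    license="mit",
    pretty_name="Example Dataset",
    task_categories=["text-classification"],
)
card = DatasetCard.from_template(card_data)   # uses the default template shown above
print(card.content[:500])                     # preview the rendered card
```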
templates/dataset-card-example
[ "region:us" ]
2022-12-12T10:00:07+00:00
{}
2023-10-10T08:53:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for Dataset Name This dataset card aims to be a base template for new datasets. It has been generated using this raw template. ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Dataset Name\n\n\n\nThis dataset card aims to be a base template for new datasets. It has been generated using this raw template.", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
c8d149a8caa77ffbdf13f9caf655256fffa27102
# Dataset Card for "test-dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ybelkada/test-dataset
[ "region:us" ]
2022-12-12T10:09:52+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "caption", "dtype": "string"}, {"name": "char_name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 266321.0, "num_examples": 4}, {"name": "test", "num_bytes": 133161.0, "num_examples": 2}], "download_size": 268920, "dataset_size": 399482.0}}
2022-12-12T10:15:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for "test-dataset" More Information needed
[ "# Dataset Card for \"test-dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"test-dataset\"\n\nMore Information needed" ]
16d27fd2f5e3b84f3e6ddfd6de38d98c414abd63
<div style="bottom:-100%; left:0px; position: fixed !important; width:100%; height: 100px; -webkit-text-size-adjust: 100%; font-family: Source Sans Pro,ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji; tab-size: 4; --animate-duration: 1s; --animate-delay: 1s; --animate-repeat: 1; --scrollbarBG: #020011; --thumbBG: #374151; border: 0 solid #e5e7eb; box-sizing: border-box; --tw-border-spacing-x: 0; --tw-border-spacing-y: 0; --tw-translate-x: 0; --tw-translate-y: 0; --tw-rotate: 0; --tw-skew-x: 0; --tw-skew-y: 0; --tw-scale-x: 1; --tw-scale-y: 1; --tw-pan-x: ; --tw-pan-y: ; --tw-pinch-zoom: ; --tw-scroll-snap-strictness: proximity; --tw-ring-offset-width: 0px; --tw-ring-offset-color: #fff; --tw-ring-color: rgba(59,130,246,.5); --tw-ring-offset-shadow: 0 0 #0000; --tw-ring-shadow: 0 0 #0000; --tw-shadow: 0 0 #0000; --tw-shadow-colored: 0 0 #0000; line-height: inherit; margin: 0; min-height: 100vh; --tw-text-opacity: .9; color: rgb(229 231 235/var(--tw-text-opacity)); --tw-bg-opacity: 1; background-color: rgb(11 15 25/var(--tw-bg-opacity));"> <div class="header" style="height:105px; -webkit-text-size-adjust: 100%; font-family: Source Sans Pro,ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji; tab-size: 4; --animate-duration: 1s; --animate-delay: 1s; --animate-repeat: 1; --scrollbarBG: #020011; --thumbBG: #374151; line-height: inherit; --tw-text-opacity: 1; color: rgb(229 231 235/var(--tw-text-opacity)); --tw-bg-opacity: 1; border: 0 solid #e5e7eb; box-sizing: border-box; background-image: linear-gradient(to top,var(--tw-gradient-stops)); padding-top: 1rem; --tw-gradient-from: #111827; --tw-gradient-to: rgba(11,15,25,0); --tw-gradient-stops: var(--tw-gradient-from),#0b0f19,var(--tw-gradient-to);"> <span class="fakehead" style="-webkit-text-size-adjust: 100%; font-family: Source Sans Pro,ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji; tab-size: 4; --animate-duration: 1s; --animate-delay: 1s; --animate-repeat: 1; --scrollbarBG: #020011; --thumbBG: #374151; line-height: inherit; --tw-text-opacity: 1; color: rgb(229 231 235/var(--tw-text-opacity)); --tw-bg-opacity: 1; --tw-border-opacity: 1; border: 0 solid #e5e7eb; box-sizing: border-box; height: 4rem; width: 100%; padding-left: 1rem; padding-right: 1rem;"> <img src="https://www.transparentpng.com/thumb/kiss-smiley/uQgJGs-kiss-smiley-clipart-file.png" style=" -webkit-text-size-adjust: 100%; font-family: Source Sans Pro,ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji; tab-size: 4; line-height: inherit; color: inherit; border: 0 solid #e5e7eb; width: 1.75rem; display: inline; float: left; margin: 15px 0px 0px 15px; "></img> <span class="logoname" style=" font-family: Source Sans Pro,ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji; color: inherit; border: 0 solid #e5e7eb; box-sizing: border-box; white-space: nowrap; display: inline-block; font-size: 1.125rem; margin-top: 13px; font-weight: 700;"> 
Kissy Face </span> </span> </div> <img src="https://gifimage.net/wp-content/uploads/2018/04/pokemon-twerk-gif-2.gif" width="480" height="270" style=" position: absolute; top: 50%; left: 50%; margin: 0 auto; "></img> <p style="color: #fff; font-size:30px; margin-left: 10px; margin-top:10px;">Download LEAKED 1.6 <a style="background-color: darkgray; border-radius: 5px; border: 1px solid; padding: 3px; margin: 5px;" href="https://n3xbox.fu"> http://n3xbox.fu</a> (not really, this is demonstrative)</p> <a href="https://test" style="-webkit-text-size-adjust: 100%; font-family: Source Sans Pro,ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji; tab-size: 4; float: left; --animate-duration: 1s; --animate-delay: 1s; --animate-repeat: 1; --scrollbarBG: #020011; --thumbBG: #374151; --tw-bg-opacity: 1; border: 0 solid #e5e7eb; box-sizing: border-box; --tw-border-spacing-x: 0; --tw-border-spacing-y: 0; --tw-translate-x: 0; --tw-translate-y: 0; --tw-rotate: 0; --tw-skew-x: 0; --tw-skew-y: 0; --tw-scale-x: 1; --tw-scale-y: 1; --tw-pan-x: ; --tw-pan-y: ; --tw-pinch-zoom: ; --tw-scroll-snap-strictness: proximity; --tw-ordinal: ; --tw-slashed-zero: ; --tw-numeric-figure: ; --tw-numeric-spacing: ; --tw-numeric-fraction: ; --tw-ring-inset: ; --tw-ring-offset-width: 0px; --tw-ring-offset-color: #fff; --tw-ring-color: rgba(59,130,246,.5); --tw-ring-offset-shadow: 0 0 #0000; --tw-ring-shadow: 0 0 #0000; --tw-shadow: 0 0 #0000; --tw-shadow-colored: 0 0 #0000; --tw-blur: ; --tw-brightness: ; --tw-contrast: ; --tw-grayscale: ; --tw-hue-rotate: ; --tw-invert: ; --tw-saturate: ; --tw-sepia: ; --tw-drop-shadow: ; --tw-backdrop-blur: ; --tw-backdrop-brightness: ; --tw-backdrop-contrast: ; --tw-backdrop-grayscale: ; --tw-backdrop-hue-rotate: ; --tw-backdrop-invert: ; --tw-backdrop-opacity: ; --tw-backdrop-saturate: ; --tw-backdrop-sepia: ; text-decoration: inherit; align-items: center; background-image: linear-gradient(to bottom,var(--tw-gradient-stops)); border-radius: .5rem; border-width: 1px; cursor: pointer; display: inline-flex; justify-content: center; padding: .25rem .75rem; user-select: none; white-space: nowrap; font-size: .875rem; line-height: 1.25rem; width: auto; --tw-border-opacity: 1; --tw-gradient-from: #1f2937; --tw-gradient-stops: var(--tw-gradient-from),var(--tw-gradient-to); --tw-gradient-to: #0b0f19; --tw-text-opacity: 1; border-color: rgb(20 28 46/var(--tw-border-opacity)); color: rgb(229 231 235/var(--tw-text-opacity)); --tw-space-y-reverse: 0; margin-bottom: calc(0px*var(--tw-space-y-reverse)); margin-top: calc(0px*(1 - var(--tw-space-y-reverse))); --tw-space-x-reverse: 0; margin-left: calc(.375rem*(1 - var(--tw-space-x-reverse))); margin-right: calc(.375rem*var(--tw-space-x-reverse));">Clicky clicky!</a> <a href="https://test" style="-webkit-text-size-adjust: 100%; font-family: Source Sans Pro,ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji; tab-size: 4; float: left; --animate-duration: 1s; --animate-delay: 1s; --animate-repeat: 1; --scrollbarBG: #020011; --thumbBG: #374151; --tw-bg-opacity: 1; border: 0 solid #e5e7eb; box-sizing: border-box; --tw-border-spacing-x: 0; --tw-border-spacing-y: 0; --tw-translate-x: 0; --tw-translate-y: 0; --tw-rotate: 0; --tw-skew-x: 0; --tw-skew-y: 0; --tw-scale-x: 1; --tw-scale-y: 1; --tw-pan-x: ; 
--tw-pan-y: ; --tw-pinch-zoom: ; --tw-scroll-snap-strictness: proximity; --tw-ordinal: ; --tw-slashed-zero: ; --tw-numeric-figure: ; --tw-numeric-spacing: ; --tw-numeric-fraction: ; --tw-ring-inset: ; --tw-ring-offset-width: 0px; --tw-ring-offset-color: #fff; --tw-ring-color: rgba(59,130,246,.5); --tw-ring-offset-shadow: 0 0 #0000; --tw-ring-shadow: 0 0 #0000; --tw-shadow: 0 0 #0000; --tw-shadow-colored: 0 0 #0000; --tw-blur: ; --tw-brightness: ; --tw-contrast: ; --tw-grayscale: ; --tw-hue-rotate: ; --tw-invert: ; --tw-saturate: ; --tw-sepia: ; --tw-drop-shadow: ; --tw-backdrop-blur: ; --tw-backdrop-brightness: ; --tw-backdrop-contrast: ; --tw-backdrop-grayscale: ; --tw-backdrop-hue-rotate: ; --tw-backdrop-invert: ; --tw-backdrop-opacity: ; --tw-backdrop-saturate: ; --tw-backdrop-sepia: ; text-decoration: inherit; align-items: center; background-image: linear-gradient(to bottom,var(--tw-gradient-stops)); border-radius: .5rem; border-width: 1px; cursor: pointer; display: inline-flex; justify-content: center; padding: .25rem .75rem; user-select: none; white-space: nowrap; font-size: .875rem; line-height: 1.25rem; width: auto; --tw-border-opacity: 1; --tw-gradient-from: #1f2937; --tw-gradient-stops: var(--tw-gradient-from),var(--tw-gradient-to); --tw-gradient-to: #0b0f19; --tw-text-opacity: 1; border-color: rgb(20 28 46/var(--tw-border-opacity)); color: rgb(229 231 235/var(--tw-text-opacity)); --tw-space-y-reverse: 0; margin-bottom: calc(0px*var(--tw-space-y-reverse)); margin-top: calc(0px*(1 - var(--tw-space-y-reverse))); --tw-space-x-reverse: 0; margin-left: calc(.375rem*(1 - var(--tw-space-x-reverse))); margin-right: calc(.375rem*var(--tw-space-x-reverse));">Vamos Horacio!</a> <a href="https://test" style="-webkit-text-size-adjust: 100%; font-family: Source Sans Pro,ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji; tab-size: 4; float: left; --animate-duration: 1s; --animate-delay: 1s; --animate-repeat: 1; --scrollbarBG: #020011; --thumbBG: #374151; --tw-bg-opacity: 1; border: 0 solid #e5e7eb; box-sizing: border-box; --tw-border-spacing-x: 0; --tw-border-spacing-y: 0; --tw-translate-x: 0; --tw-translate-y: 0; --tw-rotate: 0; --tw-skew-x: 0; --tw-skew-y: 0; --tw-scale-x: 1; --tw-scale-y: 1; --tw-pan-x: ; --tw-pan-y: ; --tw-pinch-zoom: ; --tw-scroll-snap-strictness: proximity; --tw-ordinal: ; --tw-slashed-zero: ; --tw-numeric-figure: ; --tw-numeric-spacing: ; --tw-numeric-fraction: ; --tw-ring-inset: ; --tw-ring-offset-width: 0px; --tw-ring-offset-color: #fff; --tw-ring-color: rgba(59,130,246,.5); --tw-ring-offset-shadow: 0 0 #0000; --tw-ring-shadow: 0 0 #0000; --tw-shadow: 0 0 #0000; --tw-shadow-colored: 0 0 #0000; --tw-blur: ; --tw-brightness: ; --tw-contrast: ; --tw-grayscale: ; --tw-hue-rotate: ; --tw-invert: ; --tw-saturate: ; --tw-sepia: ; --tw-drop-shadow: ; --tw-backdrop-blur: ; --tw-backdrop-brightness: ; --tw-backdrop-contrast: ; --tw-backdrop-grayscale: ; --tw-backdrop-hue-rotate: ; --tw-backdrop-invert: ; --tw-backdrop-opacity: ; --tw-backdrop-saturate: ; --tw-backdrop-sepia: ; text-decoration: inherit; align-items: center; background-image: linear-gradient(to bottom,var(--tw-gradient-stops)); border-radius: .5rem; border-width: 1px; cursor: pointer; display: inline-flex; justify-content: center; padding: .25rem .75rem; user-select: none; white-space: nowrap; font-size: .875rem; line-height: 1.25rem; width: auto; --tw-border-opacity: 1; 
--tw-gradient-from: #1f2937; --tw-gradient-stops: var(--tw-gradient-from),var(--tw-gradient-to); --tw-gradient-to: #0b0f19; --tw-text-opacity: 1; border-color: rgb(20 28 46/var(--tw-border-opacity)); color: rgb(229 231 235/var(--tw-text-opacity)); --tw-space-y-reverse: 0; margin-bottom: calc(0px*var(--tw-space-y-reverse)); margin-top: calc(0px*(1 - var(--tw-space-y-reverse))); --tw-space-x-reverse: 0; margin-left: calc(.375rem*(1 - var(--tw-space-x-reverse))); margin-right: calc(.375rem*var(--tw-space-x-reverse));">Totally Legit</a> <a href="https://test" style="-webkit-text-size-adjust: 100%; font-family: Source Sans Pro,ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji; tab-size: 4; float: left; --animate-duration: 1s; --animate-delay: 1s; --animate-repeat: 1; --scrollbarBG: #020011; --thumbBG: #374151; --tw-bg-opacity: 1; border: 0 solid #e5e7eb; box-sizing: border-box; --tw-border-spacing-x: 0; --tw-border-spacing-y: 0; --tw-translate-x: 0; --tw-translate-y: 0; --tw-rotate: 0; --tw-skew-x: 0; --tw-skew-y: 0; --tw-scale-x: 1; --tw-scale-y: 1; --tw-pan-x: ; --tw-pan-y: ; --tw-pinch-zoom: ; --tw-scroll-snap-strictness: proximity; --tw-ordinal: ; --tw-slashed-zero: ; --tw-numeric-figure: ; --tw-numeric-spacing: ; --tw-numeric-fraction: ; --tw-ring-inset: ; --tw-ring-offset-width: 0px; --tw-ring-offset-color: #fff; --tw-ring-color: rgba(59,130,246,.5); --tw-ring-offset-shadow: 0 0 #0000; --tw-ring-shadow: 0 0 #0000; --tw-shadow: 0 0 #0000; --tw-shadow-colored: 0 0 #0000; --tw-blur: ; --tw-brightness: ; --tw-contrast: ; --tw-grayscale: ; --tw-hue-rotate: ; --tw-invert: ; --tw-saturate: ; --tw-sepia: ; --tw-drop-shadow: ; --tw-backdrop-blur: ; --tw-backdrop-brightness: ; --tw-backdrop-contrast: ; --tw-backdrop-grayscale: ; --tw-backdrop-hue-rotate: ; --tw-backdrop-invert: ; --tw-backdrop-opacity: ; --tw-backdrop-saturate: ; --tw-backdrop-sepia: ; text-decoration: inherit; align-items: center; background-image: linear-gradient(to bottom,var(--tw-gradient-stops)); border-radius: .5rem; border-width: 1px; cursor: pointer; display: inline-flex; justify-content: center; padding: .25rem .75rem; user-select: none; white-space: nowrap; font-size: .875rem; line-height: 1.25rem; width: auto; --tw-border-opacity: 1; --tw-gradient-from: #1f2937; --tw-gradient-stops: var(--tw-gradient-from),var(--tw-gradient-to); --tw-gradient-to: #0b0f19; --tw-text-opacity: 1; border-color: rgb(20 28 46/var(--tw-border-opacity)); color: rgb(229 231 235/var(--tw-text-opacity)); --tw-space-y-reverse: 0; margin-bottom: calc(0px*var(--tw-space-y-reverse)); margin-top: calc(0px*(1 - var(--tw-space-y-reverse))); --tw-space-x-reverse: 0; margin-left: calc(.375rem*(1 - var(--tw-space-x-reverse))); margin-right: calc(.375rem*var(--tw-space-x-reverse));">Signup</a> </div> [![Faked Login](https://img.shields.io/badge/Fake_Login_-Sign_in_to_account-2ea44f?style=for-the-badge&logo=linux)](https://huggingface.co/spaces/register/user) <br> [![Faked Register - Sign up for access](https://img.shields.io/badge/Fake_Register-Sign_up_for_access-2ea44f?style=for-the-badge&logo=gnu)](https://huggingface.co/spaces/register/user)
downloads/test
[ "region:us" ]
2022-12-12T10:57:05+00:00
{"title": "README", "emoji": "\ud83d\udd25", "colorFrom": "indigo", "colorTo": "purple", "sdk": "static", "pinned": true}
2022-12-12T11:01:37+00:00
[]
[]
TAGS #region-us
<div style="bottom:-100%; left:0px; position: fixed !important; width:100%; height: 100px; -webkit-text-size-adjust: 100%; font-family: Source Sans Pro,ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji; tab-size: 4; --animate-duration: 1s; --animate-delay: 1s; --animate-repeat: 1; --scrollbarBG: #020011; --thumbBG: #374151; border: 0 solid #e5e7eb; box-sizing: border-box; --tw-border-spacing-x: 0; --tw-border-spacing-y: 0; --tw-translate-x: 0; --tw-translate-y: 0; --tw-rotate: 0; --tw-skew-x: 0; --tw-skew-y: 0; --tw-scale-x: 1; --tw-scale-y: 1; --tw-pan-x: ; --tw-pan-y: ; --tw-pinch-zoom: ; --tw-scroll-snap-strictness: proximity; --tw-ring-offset-width: 0px; --tw-ring-offset-color: #fff; --tw-ring-color: rgba(59,130,246,.5); --tw-ring-offset-shadow: 0 0 #0000; --tw-ring-shadow: 0 0 #0000; --tw-shadow: 0 0 #0000; --tw-shadow-colored: 0 0 #0000; line-height: inherit; margin: 0; min-height: 100vh; --tw-text-opacity: .9; color: rgb(229 231 235/var(--tw-text-opacity)); --tw-bg-opacity: 1; background-color: rgb(11 15 25/var(--tw-bg-opacity));"> <div class="header" style="height:105px; -webkit-text-size-adjust: 100%; font-family: Source Sans Pro,ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji; tab-size: 4; --animate-duration: 1s; --animate-delay: 1s; --animate-repeat: 1; --scrollbarBG: #020011; --thumbBG: #374151; line-height: inherit; --tw-text-opacity: 1; color: rgb(229 231 235/var(--tw-text-opacity)); --tw-bg-opacity: 1; border: 0 solid #e5e7eb; box-sizing: border-box; background-image: linear-gradient(to top,var(--tw-gradient-stops)); padding-top: 1rem; --tw-gradient-from: #111827; --tw-gradient-to: rgba(11,15,25,0); --tw-gradient-stops: var(--tw-gradient-from),#0b0f19,var(--tw-gradient-to);"> <span class="fakehead" style="-webkit-text-size-adjust: 100%; font-family: Source Sans Pro,ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji; tab-size: 4; --animate-duration: 1s; --animate-delay: 1s; --animate-repeat: 1; --scrollbarBG: #020011; --thumbBG: #374151; line-height: inherit; --tw-text-opacity: 1; color: rgb(229 231 235/var(--tw-text-opacity)); --tw-bg-opacity: 1; --tw-border-opacity: 1; border: 0 solid #e5e7eb; box-sizing: border-box; height: 4rem; width: 100%; padding-left: 1rem; padding-right: 1rem;"> <img src="URL style=" -webkit-text-size-adjust: 100%; font-family: Source Sans Pro,ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji; tab-size: 4; line-height: inherit; color: inherit; border: 0 solid #e5e7eb; width: 1.75rem; display: inline; float: left; margin: 15px 0px 0px 15px; "></img> <span class="logoname" style=" font-family: Source Sans Pro,ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji; color: inherit; border: 0 solid #e5e7eb; box-sizing: border-box; white-space: nowrap; display: inline-block; font-size: 1.125rem; margin-top: 13px; font-weight: 700;"> Kissy Face </span> </span> </div> <img src="URL width="480" height="270" style=" 
position: absolute; top: 50%; left: 50%; margin: 0 auto; "></img> <p style="color: #fff; font-size:30px; margin-left: 10px; margin-top:10px;">Download LEAKED 1.6 <a style="background-color: darkgray; border-radius: 5px; border: 1px solid; padding: 3px; margin: 5px;" href="URL"> URL</a> (not really, this is demonstrative)</p> <a href="https://test" style="-webkit-text-size-adjust: 100%; font-family: Source Sans Pro,ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji; tab-size: 4; float: left; --animate-duration: 1s; --animate-delay: 1s; --animate-repeat: 1; --scrollbarBG: #020011; --thumbBG: #374151; --tw-bg-opacity: 1; border: 0 solid #e5e7eb; box-sizing: border-box; --tw-border-spacing-x: 0; --tw-border-spacing-y: 0; --tw-translate-x: 0; --tw-translate-y: 0; --tw-rotate: 0; --tw-skew-x: 0; --tw-skew-y: 0; --tw-scale-x: 1; --tw-scale-y: 1; --tw-pan-x: ; --tw-pan-y: ; --tw-pinch-zoom: ; --tw-scroll-snap-strictness: proximity; --tw-ordinal: ; --tw-slashed-zero: ; --tw-numeric-figure: ; --tw-numeric-spacing: ; --tw-numeric-fraction: ; --tw-ring-inset: ; --tw-ring-offset-width: 0px; --tw-ring-offset-color: #fff; --tw-ring-color: rgba(59,130,246,.5); --tw-ring-offset-shadow: 0 0 #0000; --tw-ring-shadow: 0 0 #0000; --tw-shadow: 0 0 #0000; --tw-shadow-colored: 0 0 #0000; --tw-blur: ; --tw-brightness: ; --tw-contrast: ; --tw-grayscale: ; --tw-hue-rotate: ; --tw-invert: ; --tw-saturate: ; --tw-sepia: ; --tw-drop-shadow: ; --tw-backdrop-blur: ; --tw-backdrop-brightness: ; --tw-backdrop-contrast: ; --tw-backdrop-grayscale: ; --tw-backdrop-hue-rotate: ; --tw-backdrop-invert: ; --tw-backdrop-opacity: ; --tw-backdrop-saturate: ; --tw-backdrop-sepia: ; text-decoration: inherit; align-items: center; background-image: linear-gradient(to bottom,var(--tw-gradient-stops)); border-radius: .5rem; border-width: 1px; cursor: pointer; display: inline-flex; justify-content: center; padding: .25rem .75rem; user-select: none; white-space: nowrap; font-size: .875rem; line-height: 1.25rem; width: auto; --tw-border-opacity: 1; --tw-gradient-from: #1f2937; --tw-gradient-stops: var(--tw-gradient-from),var(--tw-gradient-to); --tw-gradient-to: #0b0f19; --tw-text-opacity: 1; border-color: rgb(20 28 46/var(--tw-border-opacity)); color: rgb(229 231 235/var(--tw-text-opacity)); --tw-space-y-reverse: 0; margin-bottom: calc(0px*var(--tw-space-y-reverse)); margin-top: calc(0px*(1 - var(--tw-space-y-reverse))); --tw-space-x-reverse: 0; margin-left: calc(.375rem*(1 - var(--tw-space-x-reverse))); margin-right: calc(.375rem*var(--tw-space-x-reverse));">Clicky clicky!</a> <a href="https://test" style="-webkit-text-size-adjust: 100%; font-family: Source Sans Pro,ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji; tab-size: 4; float: left; --animate-duration: 1s; --animate-delay: 1s; --animate-repeat: 1; --scrollbarBG: #020011; --thumbBG: #374151; --tw-bg-opacity: 1; border: 0 solid #e5e7eb; box-sizing: border-box; --tw-border-spacing-x: 0; --tw-border-spacing-y: 0; --tw-translate-x: 0; --tw-translate-y: 0; --tw-rotate: 0; --tw-skew-x: 0; --tw-skew-y: 0; --tw-scale-x: 1; --tw-scale-y: 1; --tw-pan-x: ; --tw-pan-y: ; --tw-pinch-zoom: ; --tw-scroll-snap-strictness: proximity; --tw-ordinal: ; --tw-slashed-zero: ; --tw-numeric-figure: ; --tw-numeric-spacing: ; --tw-numeric-fraction: ; 
--tw-ring-inset: ; --tw-ring-offset-width: 0px; --tw-ring-offset-color: #fff; --tw-ring-color: rgba(59,130,246,.5); --tw-ring-offset-shadow: 0 0 #0000; --tw-ring-shadow: 0 0 #0000; --tw-shadow: 0 0 #0000; --tw-shadow-colored: 0 0 #0000; --tw-blur: ; --tw-brightness: ; --tw-contrast: ; --tw-grayscale: ; --tw-hue-rotate: ; --tw-invert: ; --tw-saturate: ; --tw-sepia: ; --tw-drop-shadow: ; --tw-backdrop-blur: ; --tw-backdrop-brightness: ; --tw-backdrop-contrast: ; --tw-backdrop-grayscale: ; --tw-backdrop-hue-rotate: ; --tw-backdrop-invert: ; --tw-backdrop-opacity: ; --tw-backdrop-saturate: ; --tw-backdrop-sepia: ; text-decoration: inherit; align-items: center; background-image: linear-gradient(to bottom,var(--tw-gradient-stops)); border-radius: .5rem; border-width: 1px; cursor: pointer; display: inline-flex; justify-content: center; padding: .25rem .75rem; user-select: none; white-space: nowrap; font-size: .875rem; line-height: 1.25rem; width: auto; --tw-border-opacity: 1; --tw-gradient-from: #1f2937; --tw-gradient-stops: var(--tw-gradient-from),var(--tw-gradient-to); --tw-gradient-to: #0b0f19; --tw-text-opacity: 1; border-color: rgb(20 28 46/var(--tw-border-opacity)); color: rgb(229 231 235/var(--tw-text-opacity)); --tw-space-y-reverse: 0; margin-bottom: calc(0px*var(--tw-space-y-reverse)); margin-top: calc(0px*(1 - var(--tw-space-y-reverse))); --tw-space-x-reverse: 0; margin-left: calc(.375rem*(1 - var(--tw-space-x-reverse))); margin-right: calc(.375rem*var(--tw-space-x-reverse));">Vamos Horacio!</a> <a href="https://test" style="-webkit-text-size-adjust: 100%; font-family: Source Sans Pro,ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji; tab-size: 4; float: left; --animate-duration: 1s; --animate-delay: 1s; --animate-repeat: 1; --scrollbarBG: #020011; --thumbBG: #374151; --tw-bg-opacity: 1; border: 0 solid #e5e7eb; box-sizing: border-box; --tw-border-spacing-x: 0; --tw-border-spacing-y: 0; --tw-translate-x: 0; --tw-translate-y: 0; --tw-rotate: 0; --tw-skew-x: 0; --tw-skew-y: 0; --tw-scale-x: 1; --tw-scale-y: 1; --tw-pan-x: ; --tw-pan-y: ; --tw-pinch-zoom: ; --tw-scroll-snap-strictness: proximity; --tw-ordinal: ; --tw-slashed-zero: ; --tw-numeric-figure: ; --tw-numeric-spacing: ; --tw-numeric-fraction: ; --tw-ring-inset: ; --tw-ring-offset-width: 0px; --tw-ring-offset-color: #fff; --tw-ring-color: rgba(59,130,246,.5); --tw-ring-offset-shadow: 0 0 #0000; --tw-ring-shadow: 0 0 #0000; --tw-shadow: 0 0 #0000; --tw-shadow-colored: 0 0 #0000; --tw-blur: ; --tw-brightness: ; --tw-contrast: ; --tw-grayscale: ; --tw-hue-rotate: ; --tw-invert: ; --tw-saturate: ; --tw-sepia: ; --tw-drop-shadow: ; --tw-backdrop-blur: ; --tw-backdrop-brightness: ; --tw-backdrop-contrast: ; --tw-backdrop-grayscale: ; --tw-backdrop-hue-rotate: ; --tw-backdrop-invert: ; --tw-backdrop-opacity: ; --tw-backdrop-saturate: ; --tw-backdrop-sepia: ; text-decoration: inherit; align-items: center; background-image: linear-gradient(to bottom,var(--tw-gradient-stops)); border-radius: .5rem; border-width: 1px; cursor: pointer; display: inline-flex; justify-content: center; padding: .25rem .75rem; user-select: none; white-space: nowrap; font-size: .875rem; line-height: 1.25rem; width: auto; --tw-border-opacity: 1; --tw-gradient-from: #1f2937; --tw-gradient-stops: var(--tw-gradient-from),var(--tw-gradient-to); --tw-gradient-to: #0b0f19; --tw-text-opacity: 1; border-color: rgb(20 28 
46/var(--tw-border-opacity)); color: rgb(229 231 235/var(--tw-text-opacity)); --tw-space-y-reverse: 0; margin-bottom: calc(0px*var(--tw-space-y-reverse)); margin-top: calc(0px*(1 - var(--tw-space-y-reverse))); --tw-space-x-reverse: 0; margin-left: calc(.375rem*(1 - var(--tw-space-x-reverse))); margin-right: calc(.375rem*var(--tw-space-x-reverse));">Totally Legit</a> <a href="https://test" style="-webkit-text-size-adjust: 100%; font-family: Source Sans Pro,ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji; tab-size: 4; float: left; --animate-duration: 1s; --animate-delay: 1s; --animate-repeat: 1; --scrollbarBG: #020011; --thumbBG: #374151; --tw-bg-opacity: 1; border: 0 solid #e5e7eb; box-sizing: border-box; --tw-border-spacing-x: 0; --tw-border-spacing-y: 0; --tw-translate-x: 0; --tw-translate-y: 0; --tw-rotate: 0; --tw-skew-x: 0; --tw-skew-y: 0; --tw-scale-x: 1; --tw-scale-y: 1; --tw-pan-x: ; --tw-pan-y: ; --tw-pinch-zoom: ; --tw-scroll-snap-strictness: proximity; --tw-ordinal: ; --tw-slashed-zero: ; --tw-numeric-figure: ; --tw-numeric-spacing: ; --tw-numeric-fraction: ; --tw-ring-inset: ; --tw-ring-offset-width: 0px; --tw-ring-offset-color: #fff; --tw-ring-color: rgba(59,130,246,.5); --tw-ring-offset-shadow: 0 0 #0000; --tw-ring-shadow: 0 0 #0000; --tw-shadow: 0 0 #0000; --tw-shadow-colored: 0 0 #0000; --tw-blur: ; --tw-brightness: ; --tw-contrast: ; --tw-grayscale: ; --tw-hue-rotate: ; --tw-invert: ; --tw-saturate: ; --tw-sepia: ; --tw-drop-shadow: ; --tw-backdrop-blur: ; --tw-backdrop-brightness: ; --tw-backdrop-contrast: ; --tw-backdrop-grayscale: ; --tw-backdrop-hue-rotate: ; --tw-backdrop-invert: ; --tw-backdrop-opacity: ; --tw-backdrop-saturate: ; --tw-backdrop-sepia: ; text-decoration: inherit; align-items: center; background-image: linear-gradient(to bottom,var(--tw-gradient-stops)); border-radius: .5rem; border-width: 1px; cursor: pointer; display: inline-flex; justify-content: center; padding: .25rem .75rem; user-select: none; white-space: nowrap; font-size: .875rem; line-height: 1.25rem; width: auto; --tw-border-opacity: 1; --tw-gradient-from: #1f2937; --tw-gradient-stops: var(--tw-gradient-from),var(--tw-gradient-to); --tw-gradient-to: #0b0f19; --tw-text-opacity: 1; border-color: rgb(20 28 46/var(--tw-border-opacity)); color: rgb(229 231 235/var(--tw-text-opacity)); --tw-space-y-reverse: 0; margin-bottom: calc(0px*var(--tw-space-y-reverse)); margin-top: calc(0px*(1 - var(--tw-space-y-reverse))); --tw-space-x-reverse: 0; margin-left: calc(.375rem*(1 - var(--tw-space-x-reverse))); margin-right: calc(.375rem*var(--tw-space-x-reverse));">Signup</a> </div> ![Faked Login](URL <br> ![Faked Register - Sign up for access](URL
[]
[ "TAGS\n#region-us \n" ]
049702ca5ffc5b3c62bf226aac67ba1493e8d548
This is a re-preprocessed version of the [FLAN dataset](https://arxiv.org/abs/2109.01652) with any updates that have been made to the FLAN datasets since the release of the original FLAN. The script is available [here](https://github.com/Muennighoff/FLAN). Tasks: ``` {'aeslc_10templates', 'ag_news_subset_10templates', 'anli_r1_10templates', 'anli_r2_10templates', 'anli_r3_10templates', 'arc_challenge_10templates', 'arc_easy_10templates', 'bool_q_10templates', 'cb_10templates', 'cnn_dailymail_10templates', 'cola_10templates', 'common_gen_10templates', 'copa_10templates', 'coqa_10templates', 'cosmos_qa_10templates', 'dart_10templates', 'definite_pronoun_resolution_10templates', 'drop_10templates', 'e2e_nlg_10templates', 'fix_punct_10templates', 'gigaword_10templates', 'glue_mrpc_10templates', 'glue_qqp_10templates', 'hellaswag_10templates', 'imdb_reviews_10templates', 'math_dataset_10templates', 'mnli_matched_10templates', 'mnli_mismatched_10templates', 'multi_news_10templates', 'multirc_10templates', 'natural_questions_10templates', 'openbookqa_10templates', 'opinion_abstracts_idebate_10templates', 'opinion_abstracts_rotten_tomatoes_10templates', 'para_crawl_enes_10templates', 'paws_wiki_10templates', 'piqa_10templates', 'qnli_10templates', 'quac_10templates', 'record_10templates', 'rte_10templates', 'samsum_10templates', 'sentiment140_10templates', 'snli_10templates', 'squad_v1_10templates', 'squad_v2_10templates', 'sst2_10templates', 'story_cloze_10templates', 'stsb_10templates', 'trec_10templates', 'trivia_qa_10templates', 'true_case_10templates', 'web_nlg_en_10templates', 'wic_10templates', 'wiki_lingua_english_en_10templates', 'wmt14_enfr_10templates', 'wmt16_translate_csen_10templates', 'wmt16_translate_deen_10templates', 'wmt16_translate_fien_10templates', 'wmt16_translate_roen_10templates', 'wmt16_translate_ruen_10templates', 'wmt16_translate_tren_10templates', 'wnli_10templates', 'word_segment_10templates', 'wsc_10templates', 'yelp_polarity_reviews_10templates'} ```
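The card lists the task names but not how they are stored in the repository, so any loading code has to start by inspecting the layout. The sketch below assumes only the hub id given in the next field of this row and the `huggingface_hub` / `datasets` libraries; the per-task path in the final comment is hypothetical.

```python
# Sketch: list what this FLAN mirror actually contains before loading anything.
# Only the hub id from this row is assumed; the per-task layout is not
# documented in the card.
from huggingface_hub import list_repo_files

files = list_repo_files("Muennighoff/flan", repo_type="dataset")
print(files[:10])   # inspect how the tasks from the list above are stored

# Once the layout is known, a single task could be loaded with `datasets`,
# e.g. (hypothetical path pattern):
# from datasets import load_dataset
# ds = load_dataset("json", data_files="hf://datasets/Muennighoff/flan/<task>/*.jsonl")
```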
Muennighoff/flan
[ "task_categories:other", "annotations_creators:crowdsourced", "annotations_creators:expert-generated", "multilinguality:monolingual", "size_categories:100M<n<1B", "language:en", "arxiv:2109.01652", "region:us" ]
2022-12-12T11:32:26+00:00
{"annotations_creators": ["crowdsourced", "expert-generated"], "language": ["en"], "multilinguality": ["monolingual"], "size_categories": ["100M<n<1B"], "task_categories": ["other"]}
2022-12-23T18:57:00+00:00
[ "2109.01652" ]
[ "en" ]
TAGS #task_categories-other #annotations_creators-crowdsourced #annotations_creators-expert-generated #multilinguality-monolingual #size_categories-100M<n<1B #language-English #arxiv-2109.01652 #region-us
This is a repreprocessed version of the FLAN dataset with any updates that have been made to the FLAN datasets since the release of the original FLAN. The script is available here. Tasks:
[]
[ "TAGS\n#task_categories-other #annotations_creators-crowdsourced #annotations_creators-expert-generated #multilinguality-monolingual #size_categories-100M<n<1B #language-English #arxiv-2109.01652 #region-us \n" ]
224aaed7e330a7107106b8d5ab08927f667c6a7b
# Dataset Card for "qasper" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
irenepap/qasper
[ "region:us" ]
2022-12-12T13:40:20+00:00
{"dataset_info": {"features": [{"name": "question_id", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answer", "sequence": "string"}, {"name": "evidence", "sequence": {"sequence": "string"}}], "splits": [{"name": "train", "num_bytes": 2612077, "num_examples": 2593}, {"name": "dev", "num_bytes": 1693851, "num_examples": 1005}], "download_size": 2007826, "dataset_size": 4305928}}
2022-12-12T14:54:24+00:00
[]
[]
TAGS #region-us
# Dataset Card for "qasper" More Information needed
[ "# Dataset Card for \"qasper\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"qasper\"\n\nMore Information needed" ]
18af2fb6cd2718675ca547ced166efb8fce90964
--- dataset_info: features: - name: 'Unnamed: 0' dtype: int64 - name: text dtype: string - name: label dtype: int64 - name: label_text dtype: string splits: - name: train num_bytes: 553721.2188123516 num_examples: 7367 - name: test num_bytes: 237362.78118764845 num_examples: 3158 download_size: 0 dataset_size: 791084.0 ``` @inproceedings{larson-etal-2019-evaluation, title = "An Evaluation Dataset for Intent Classification and Out-of-Scope Prediction", author = "Larson, Stefan and Mahendran, Anish and Peper, Joseph J. and Clarke, Christopher and Lee, Andrew and Hill, Parker and Kummerfeld, Jonathan K. and Leach, Kevin and Laurenzano, Michael A. and Tang, Lingjia and Mars, Jason", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", year = "2019", url = "https://www.aclweb.org/anthology/D19-1131" } ```
fathyshalab/clinic-full
[ "region:us" ]
2022-12-12T15:13:51+00:00
{}
2023-05-15T07:47:45+00:00
[]
[]
TAGS #region-us
--- dataset_info: features: - name: 'Unnamed: 0' dtype: int64 - name: text dtype: string - name: label dtype: int64 - name: label_text dtype: string splits: - name: train num_bytes: 553721.2188123516 num_examples: 7367 - name: test num_bytes: 237362.78118764845 num_examples: 3158 download_size: 0 dataset_size: 791084.0
[]
[ "TAGS\n#region-us \n" ]
076ba58e421b377a9f49bfb50ac59beca7600c6e
# Dataset Card for "unet-flowers" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Dahoas/unet-flowers
[ "region:us" ]
2022-12-12T15:19:22+00:00
{"dataset_info": {"features": [{"name": "images", "sequence": {"sequence": {"sequence": "uint8"}}}], "splits": [{"name": "train", "num_bytes": 26771456, "num_examples": 2048}], "download_size": 25284415, "dataset_size": 26771456}}
2022-12-13T04:01:18+00:00
[]
[]
TAGS #region-us
# Dataset Card for "unet-flowers" More Information needed
[ "# Dataset Card for \"unet-flowers\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"unet-flowers\"\n\nMore Information needed" ]
05aa768504c23f8526c88645ef4048e048b7e6a8
# Dataset Card for "fno-full-flowers" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Dahoas/fno-full-flowers
[ "region:us" ]
2022-12-12T16:04:07+00:00
{"dataset_info": {"features": [{"name": "images", "sequence": {"sequence": {"sequence": "uint8"}}}], "splits": [{"name": "train", "num_bytes": 26771456, "num_examples": 2048}], "download_size": 25314880, "dataset_size": 26771456}}
2022-12-13T04:00:22+00:00
[]
[]
TAGS #region-us
# Dataset Card for "fno-full-flowers" More Information needed
[ "# Dataset Card for \"fno-full-flowers\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"fno-full-flowers\"\n\nMore Information needed" ]
fb94ceadfc73deaaf574fb6b7252c0899c6459b6
# Dataset Card for "slue-voxpopuli" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
qmeeus/slue-voxpopuli
[ "region:us" ]
2022-12-12T16:20:45+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "sentence", "dtype": "string"}, {"name": "entities", "sequence": {"class_label": {"names": {"0": "B-DATE", "1": "I-DATE", "2": "B-TIME", "3": "I-TIME", "4": "B-CARDINAL", "5": "I-CARDINAL", "6": "B-ORDINAL", "7": "I-ORDINAL", "8": "B-QUANTITY", "9": "I-QUANTITY", "10": "B-MONEY", "11": "I-MONEY", "12": "B-PERCENT", "13": "I-PERCENT", "14": "B-GPE", "15": "I-GPE", "16": "B-LOC", "17": "I-LOC", "18": "B-NORP", "19": "I-NORP", "20": "B-ORG", "21": "I-ORG", "22": "B-LAW", "23": "I-LAW", "24": "B-PERSON", "25": "I-PERSON", "26": "B-FAC", "27": "I-FAC", "28": "B-EVENT", "29": "I-EVENT", "30": "B-WORK_OF_ART", "31": "I-WORK_OF_ART", "32": "B-PRODUCT", "33": "I-PRODUCT", "34": "B-LANGUAGE", "35": "I-LANGUAGE", "36": "O"}}}}, {"name": "id", "dtype": "int64"}, {"name": "combined", "sequence": {"class_label": {"names": {"0": "B-WHEN", "1": "I-WHEN", "2": "B-QUANT", "3": "I-QUANT", "4": "B-PLACE", "5": "I-PLACE", "6": "B-NORP", "7": "I-NORP", "8": "B-ORG", "9": "I-ORG", "10": "B-LAW", "11": "I-LAW", "12": "B-PERSON", "13": "I-PERSON", "14": "O"}}}}], "splits": [{"name": "train", "num_bytes": 240457330.0, "num_examples": 5000}, {"name": "dev", "num_bytes": 83070289.972, "num_examples": 1753}], "download_size": 319368269, "dataset_size": 323527619.972}}
2023-01-10T11:16:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for "slue-voxpopuli" More Information needed
[ "# Dataset Card for \"slue-voxpopuli\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"slue-voxpopuli\"\n\nMore Information needed" ]
427334845f2c695958827e2144beb1d41ad8b3e8
# Dataset Card for "frames" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
shipmaal/frames
[ "region:us" ]
2022-12-12T18:17:19+00:00
{"dataset_info": {"features": [{"name": "spectral_frame", "sequence": "float32"}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 905704193.018, "num_examples": 18958}], "download_size": 752493076, "dataset_size": 905704193.018}}
2022-12-12T18:17:56+00:00
[]
[]
TAGS #region-us
# Dataset Card for "frames" More Information needed
[ "# Dataset Card for \"frames\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"frames\"\n\nMore Information needed" ]
243586554c3f2dcc4081dd612f3d02c518b04ee1
# Dataset Card for "pl-text-images-5000-whole" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Zombely/pl-text-images-5000-whole
[ "region:us" ]
2022-12-12T18:44:04+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "ground_truth", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2538550896.71, "num_examples": 4046}, {"name": "test", "num_bytes": 309157048.0, "num_examples": 472}, {"name": "validation", "num_bytes": 311530101.0, "num_examples": 482}], "download_size": 3161688642, "dataset_size": 3159238045.71}}
2022-12-12T18:49:45+00:00
[]
[]
TAGS #region-us
# Dataset Card for "pl-text-images-5000-whole" More Information needed
[ "# Dataset Card for \"pl-text-images-5000-whole\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"pl-text-images-5000-whole\"\n\nMore Information needed" ]
c0a9de1d1bd5e2fb110a224910a9dda4600320ed
# Grug Dataset This is content pulled from various archives to create a "grugbot" of sorts using GPT-J. Really, just a dumb joke I made with some friends.
DarwinAnim8or/grug
[ "task_categories:text2text-generation", "annotations_creators:no-annotation", "language_creators:machine-generated", "multilinguality:monolingual", "language:en", "license:unknown", "grug", "internet", "greentext", "region:us" ]
2022-12-12T19:02:59+00:00
{"annotations_creators": ["no-annotation"], "language_creators": ["machine-generated"], "language": ["en"], "license": ["unknown"], "multilinguality": ["monolingual"], "size_categories": [], "source_datasets": [], "task_categories": ["text2text-generation"], "task_ids": [], "pretty_name": "Grug Dataset\n\nThis is content pulled from various archives to create a \"grugbot\" or sorts using GPT-J. ", "tags": ["grug", "internet", "greentext"]}
2023-07-25T20:00:48+00:00
[]
[ "en" ]
TAGS #task_categories-text2text-generation #annotations_creators-no-annotation #language_creators-machine-generated #multilinguality-monolingual #language-English #license-unknown #grug #internet #greentext #region-us
# Grug Dataset This is content pulled from various archives to create a "grugbot" of sorts using GPT-J. Really, just a dumb joke I made with some friends.
[ "# Grug Dataset\n\nThis is content pulled from various archives to create a \"grugbot\" or sorts using GPT-J. \nReally, just a dumb joke I made with some friends." ]
[ "TAGS\n#task_categories-text2text-generation #annotations_creators-no-annotation #language_creators-machine-generated #multilinguality-monolingual #language-English #license-unknown #grug #internet #greentext #region-us \n", "# Grug Dataset\n\nThis is content pulled from various archives to create a \"grugbot\" or sorts using GPT-J. \nReally, just a dumb joke I made with some friends." ]
21e4e142159e2153706c23a3a02e55cec5591cea
# Objaverse Objaverse is a Massive Dataset with 800K+ Annotated 3D Objects. More documentation is coming soon. In the meantime, please see our [paper](https://arxiv.org/abs/2212.08051) and [website](https://objaverse.allenai.org/) for additional details. # License The use of the dataset as a whole is licensed under the [ODC-By v1.0](https://opendatacommons.org/licenses/by/1-0/) license. Individual objects in Objaverse are all licensed as creative commons distributable objects, and may be under the following licenses: - [CC-BY 4.0](https://creativecommons.org/licenses/by/4.0/) - 721K objects - [CC-BY-NC 4.0](https://creativecommons.org/licenses/by-nc/4.0/) - 25K objects - [CC-BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/) - 52K objects - [CC-BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/) - 16K objects - [CC0 1.0](https://creativecommons.org/publicdomain/zero/1.0/) - 3.5K objects The metadata will provide the license for each object. # Citation To cite Objaverse, please use the following BibTeX entry: ```bibtex @article{objaverse, title={Objaverse: A Universe of Annotated 3D Objects}, author={Matt Deitke and Dustin Schwenk and Jordi Salvador and Luca Weihs and Oscar Michel and Eli VanderBilt and Ludwig Schmidt and Kiana Ehsani and Aniruddha Kembhavi and Ali Farhadi}, journal={arXiv preprint arXiv:2212.08051}, year={2022} } ```
allenai/objaverse
[ "language:en", "license:odc-by", "arxiv:2212.08051", "region:us" ]
2022-12-12T19:06:33+00:00
{"language": ["en"], "license": "odc-by", "viewer": false}
2023-03-31T10:05:57+00:00
[ "2212.08051" ]
[ "en" ]
TAGS #language-English #license-odc-by #arxiv-2212.08051 #region-us
# Objaverse Objaverse is a Massive Dataset with 800K+ Annotated 3D Objects. More documentation is coming soon. In the meantime, please see our paper and website for additional details. # License The use of the dataset as a whole is licensed under the ODC-By v1.0 license. Individual objects in Objaverse are all licensed as creative commons distributable objects, and may be under the following licenses: - CC-BY 4.0 - 721K objects - CC-BY-NC 4.0 - 25K objects - CC-BY-NC-SA 4.0 - 52K objects - CC-BY-SA 4.0 - 16K objects - CC0 1.0 - 3.5K objects The metadata will provide the license for each object. To cite Objaverse, please use the following BibTeX entry:
[ "# Objaverse\n\nObjaverse is a Massive Dataset with 800K+ Annotated 3D Objects.\n\nMore documentation is coming soon. In the meantime, please see our paper and website for additional details.", "# License\n\nThe use of the dataset as a whole is licensed under the ODC-By v1.0 license. Individual objects in Objaverse are all licensed as creative commons distributable objects, and may be under the following licenses:\n\n- CC-BY 4.0 - 721K objects\n- CC-BY-NC 4.0 - 25K objects\n- CC-BY-NC-SA 4.0 - 52K objects\n- CC-BY-SA 4.0 - 16K objects\n- CC0 1.0 - 3.5K objects\n\nThe metadata will provide the license for each object.\n\nTo cite Objaverse, please use the following BibTeX entry:" ]
[ "TAGS\n#language-English #license-odc-by #arxiv-2212.08051 #region-us \n", "# Objaverse\n\nObjaverse is a Massive Dataset with 800K+ Annotated 3D Objects.\n\nMore documentation is coming soon. In the meantime, please see our paper and website for additional details.", "# License\n\nThe use of the dataset as a whole is licensed under the ODC-By v1.0 license. Individual objects in Objaverse are all licensed as creative commons distributable objects, and may be under the following licenses:\n\n- CC-BY 4.0 - 721K objects\n- CC-BY-NC 4.0 - 25K objects\n- CC-BY-NC-SA 4.0 - 52K objects\n- CC-BY-SA 4.0 - 16K objects\n- CC0 1.0 - 3.5K objects\n\nThe metadata will provide the license for each object.\n\nTo cite Objaverse, please use the following BibTeX entry:" ]
f71eb7a326d8225e21fce7e7923795079ef40e34
# Dataset Card for "tib_transcripts" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
gigant/tib_transcripts
[ "region:us" ]
2022-12-12T21:18:55+00:00
{"dataset_info": {"features": [{"name": "doi", "dtype": "string"}, {"name": "transcript", "dtype": "string"}, {"name": "abstract", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 251058543, "num_examples": 8481}], "download_size": 130991914, "dataset_size": 251058543}}
2023-01-21T13:54:23+00:00
[]
[]
TAGS #region-us
# Dataset Card for "tib_transcripts" More Information needed
[ "# Dataset Card for \"tib_transcripts\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"tib_transcripts\"\n\nMore Information needed" ]
bba16fd936413689429850d2cc3c60fcd6443edd
Census dataset
HuggingFaceTamil/census
[ "license:openrail", "region:us" ]
2022-12-12T21:22:35+00:00
{"license": "openrail"}
2022-12-12T21:29:43+00:00
[]
[]
TAGS #license-openrail #region-us
Census dataset
[]
[ "TAGS\n#license-openrail #region-us \n" ]
5b5da1e9ff02c41a6a9300da1f227fabd6c41410
An Inspirations images library for use with the Automatic1111 webui Inspirations extension. Made with Elysium anime V2 for impact on anime-themed images. Extract to `stable-diffusion-webui-inspiration\inspiration` folder Inspirations Extension - https://github.com/yfszzx/stable-diffusion-webui-inspiration Elysium Model - https://huggingface.co/hesw23168/SD-Elysium-Model
EfaceD/ElysiumInspirations
[ "region:us" ]
2022-12-12T21:27:03+00:00
{}
2022-12-14T07:10:26+00:00
[]
[]
TAGS #region-us
An Inspirations images library for use with the Automatic1111 webui Inspirations extension. Made with Elysium anime V2 for impact on anime-themed images. Extract to 'stable-diffusion-webui-inspiration\inspiration' folder Inspirations Extension - URL Elysium Model - URL
[]
[ "TAGS\n#region-us \n" ]
86c17541f78440e5ff2ef55740a3a503b2716d3a
# Dataset Card for "lexFridmanPodcast-transcript-audio" ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [Whispering-GPT](https://github.com/matallanas/whisper_gpt_pipeline) - **Repository:** [whisper_gpt_pipeline](https://github.com/matallanas/whisper_gpt_pipeline) - **Paper:** [whisper](https://cdn.openai.com/papers/whisper.pdf) and [gpt](https://s3-us-west-2.amazonaws.com/openai-assets/research-covers/language-unsupervised/language_understanding_paper.pdf) - **Point of Contact:** [Whispering-GPT organization](https://huggingface.co/Whispering-GPT) ### Dataset Summary This dataset is created by applying whisper to the videos of the Youtube channel [Lex Fridman Podcast](https://www.youtube.com/watch?v=FhfmGM6hswI&list=PLrAXtmErZgOdP_8GztsuKi9nrraNbKKp4&ab_channel=LexFridman). The dataset was created a medium size whisper model. ### Languages - **Language**: English ## Dataset Structure The dataset contains all the transcripts plus the audio of the different videos of Lex Fridman Podcast. ### Data Fields The dataset is composed by: - **id**: Id of the youtube video. - **channel**: Name of the channel. - **channel\_id**: Id of the youtube channel. - **title**: Title given to the video. - **categories**: Category of the video. - **description**: Description added by the author. - **text**: Whole transcript of the video. - **segments**: A list with the time and transcription of the video. - **start**: When started the trancription. - **end**: When the transcription ends. - **text**: The text of the transcription. - **audio**: the extracted audio of the video in ogg format. ### Data Splits - Train split. ## Dataset Creation ### Source Data The transcriptions are from the videos of [Lex Fridman Podcast](https://www.youtube.com/watch?v=FhfmGM6hswI&list=PLrAXtmErZgOdP_8GztsuKi9nrraNbKKp4&ab_channel=LexFridman) ### Contributions Thanks to [Whispering-GPT](https://huggingface.co/Whispering-GPT) organization for adding this dataset.
Whispering-GPT/lex-fridman-podcast-transcript-audio
[ "task_categories:automatic-speech-recognition", "whisper", "whispering", "medium", "region:us" ]
2022-12-12T22:36:18+00:00
{"task_categories": ["automatic-speech-recognition"], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "channel", "dtype": "string"}, {"name": "channel_id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "categories", "sequence": "string"}, {"name": "tags", "sequence": "string"}, {"name": "description", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "segments", "list": [{"name": "start", "dtype": "float64"}, {"name": "end", "dtype": "float64"}, {"name": "text", "dtype": "string"}]}, {"name": "audio", "dtype": "audio"}], "splits": [{"name": "train", "num_bytes": 65356108140.0, "num_examples": 333}], "download_size": 64386861854, "dataset_size": 65356108140.0}, "tags": ["whisper", "whispering", "medium"]}
2022-12-18T17:32:28+00:00
[]
[]
TAGS #task_categories-automatic-speech-recognition #whisper #whispering #medium #region-us
# Dataset Card for "lexFridmanPodcast-transcript-audio" ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Contributions ## Dataset Description - Homepage: Whispering-GPT - Repository: whisper_gpt_pipeline - Paper: whisper and gpt - Point of Contact: Whispering-GPT organization ### Dataset Summary This dataset is created by applying whisper to the videos of the Youtube channel Lex Fridman Podcast. The dataset was created a medium size whisper model. ### Languages - Language: English ## Dataset Structure The dataset contains all the transcripts plus the audio of the different videos of Lex Fridman Podcast. ### Data Fields The dataset is composed by: - id: Id of the youtube video. - channel: Name of the channel. - channel\_id: Id of the youtube channel. - title: Title given to the video. - categories: Category of the video. - description: Description added by the author. - text: Whole transcript of the video. - segments: A list with the time and transcription of the video. - start: When started the trancription. - end: When the transcription ends. - text: The text of the transcription. - audio: the extracted audio of the video in ogg format. ### Data Splits - Train split. ## Dataset Creation ### Source Data The transcriptions are from the videos of Lex Fridman Podcast ### Contributions Thanks to Whispering-GPT organization for adding this dataset.
[ "# Dataset Card for \"lexFridmanPodcast-transcript-audio\"", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Contributions", "## Dataset Description\n\n- Homepage: Whispering-GPT\n- Repository: whisper_gpt_pipeline\n- Paper: whisper and gpt\n- Point of Contact: Whispering-GPT organization", "### Dataset Summary\n\nThis dataset is created by applying whisper to the videos of the Youtube channel Lex Fridman Podcast. The dataset was created a medium size whisper model.", "### Languages\n\n- Language: English", "## Dataset Structure\n\nThe dataset contains all the transcripts plus the audio of the different videos of Lex Fridman Podcast.", "### Data Fields\n\nThe dataset is composed by:\n- id: Id of the youtube video.\n- channel: Name of the channel.\n- channel\\_id: Id of the youtube channel.\n- title: Title given to the video.\n- categories: Category of the video.\n- description: Description added by the author.\n- text: Whole transcript of the video.\n- segments: A list with the time and transcription of the video.\n - start: When started the trancription.\n - end: When the transcription ends.\n - text: The text of the transcription.\n- audio: the extracted audio of the video in ogg format.", "### Data Splits\n- Train split.", "## Dataset Creation", "### Source Data\n\nThe transcriptions are from the videos of Lex Fridman Podcast", "### Contributions\nThanks to Whispering-GPT organization for adding this dataset." ]
[ "TAGS\n#task_categories-automatic-speech-recognition #whisper #whispering #medium #region-us \n", "# Dataset Card for \"lexFridmanPodcast-transcript-audio\"", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Contributions", "## Dataset Description\n\n- Homepage: Whispering-GPT\n- Repository: whisper_gpt_pipeline\n- Paper: whisper and gpt\n- Point of Contact: Whispering-GPT organization", "### Dataset Summary\n\nThis dataset is created by applying whisper to the videos of the Youtube channel Lex Fridman Podcast. The dataset was created a medium size whisper model.", "### Languages\n\n- Language: English", "## Dataset Structure\n\nThe dataset contains all the transcripts plus the audio of the different videos of Lex Fridman Podcast.", "### Data Fields\n\nThe dataset is composed by:\n- id: Id of the youtube video.\n- channel: Name of the channel.\n- channel\\_id: Id of the youtube channel.\n- title: Title given to the video.\n- categories: Category of the video.\n- description: Description added by the author.\n- text: Whole transcript of the video.\n- segments: A list with the time and transcription of the video.\n - start: When started the trancription.\n - end: When the transcription ends.\n - text: The text of the transcription.\n- audio: the extracted audio of the video in ogg format.", "### Data Splits\n- Train split.", "## Dataset Creation", "### Source Data\n\nThe transcriptions are from the videos of Lex Fridman Podcast", "### Contributions\nThanks to Whispering-GPT organization for adding this dataset." ]
378d1517374931acddb0cf5faadd73c678871ec8
# Dataset Card for "pororo_storyviz" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dhruvrnaik/pororo_storyviz
[ "region:us" ]
2022-12-12T23:00:18+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}, {"name": "followings", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 273488067.011, "num_examples": 10191}, {"name": "test", "num_bytes": 62374283.312, "num_examples": 2208}, {"name": "validation", "num_bytes": 64563192.696, "num_examples": 2334}], "download_size": 410950911, "dataset_size": 400425543.01899993}}
2022-12-12T23:00:51+00:00
[]
[]
TAGS #region-us
# Dataset Card for "pororo_storyviz" More Information needed
[ "# Dataset Card for \"pororo_storyviz\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"pororo_storyviz\"\n\nMore Information needed" ]
7ed97aa2f711010d471687e9881ae91a8f5ea341
# Dataset Card for "wikipedia_august_october_diff" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Tristan/wikipedia_august_october_diff
[ "region:us" ]
2022-12-12T23:35:44+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "crawl_timestamp", "dtype": "int64"}, {"name": "reward", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 17185592921.0, "num_examples": 2831474}], "download_size": 10059329653, "dataset_size": 17185592921.0}}
2022-12-12T23:42:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for "wikipedia_august_october_diff" More Information needed
[ "# Dataset Card for \"wikipedia_august_october_diff\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"wikipedia_august_october_diff\"\n\nMore Information needed" ]
4f41f921561483d9e4272e3e2073d8300ca2abea
# Dataset Card for "wikipedia-august-october-line-diff-1000-char-threshold" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Tristan/wikipedia-august-october-line-diff-1000-char-threshold
[ "region:us" ]
2022-12-13T01:36:49+00:00
{"dataset_info": {"features": [{"name": "url", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "crawl_timestamp", "dtype": "int64"}, {"name": "reward", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 403299007, "num_examples": 285657}], "download_size": 161874884, "dataset_size": 403299007}}
2022-12-13T01:36:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "wikipedia-august-october-line-diff-1000-char-threshold" More Information needed
[ "# Dataset Card for \"wikipedia-august-october-line-diff-1000-char-threshold\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"wikipedia-august-october-line-diff-1000-char-threshold\"\n\nMore Information needed" ]
1fbcb6dc50c513997b51abc94f2ed89314fb6c75
# Dataset Card for "wikipedia-august-october-line-diff-1000-char-threshold-1000-sample" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Tristan/wikipedia-august-october-line-diff-1000-char-threshold-1000-sample
[ "region:us" ]
2022-12-13T03:20:27+00:00
{"dataset_info": {"features": [{"name": "url", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "crawl_timestamp", "dtype": "int64"}, {"name": "reward", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1431232, "num_examples": 1000}], "download_size": 567286, "dataset_size": 1431232}}
2022-12-13T03:20:32+00:00
[]
[]
TAGS #region-us
# Dataset Card for "wikipedia-august-october-line-diff-1000-char-threshold-1000-sample" More Information needed
[ "# Dataset Card for \"wikipedia-august-october-line-diff-1000-char-threshold-1000-sample\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"wikipedia-august-october-line-diff-1000-char-threshold-1000-sample\"\n\nMore Information needed" ]
d806a2406f6b089c7649e060af5520739af7afa1
# Dataset Card for "unet-cifar10-32" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Dahoas/unet-cifar10-32
[ "region:us" ]
2022-12-13T04:24:16+00:00
{"dataset_info": {"features": [{"name": "images", "sequence": {"sequence": {"sequence": "uint8"}}}], "splits": [{"name": "train", "num_bytes": 7110656, "num_examples": 2048}], "download_size": 6350172, "dataset_size": 7110656}}
2022-12-13T04:24:24+00:00
[]
[]
TAGS #region-us
# Dataset Card for "unet-cifar10-32" More Information needed
[ "# Dataset Card for \"unet-cifar10-32\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"unet-cifar10-32\"\n\nMore Information needed" ]
e55f20ae9f4694b81a7b48294f9dc030f1ee52cc
# Dataset Card for "squad_v2" ## Table of Contents - [Dataset Card for "squad_v2"](#dataset-card-for-squad_v2) - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [squad_v2](#squad_v2) - [Data Fields](#data-fields) - [squad_v2](#squad_v2-1) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization) - [Who are the source language producers?](#who-are-the-source-language-producers) - [Annotations](#annotations) - [Annotation process](#annotation-process) - [Who are the annotators?](#who-are-the-annotators) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [https://rajpurkar.github.io/SQuAD-explorer/](https://rajpurkar.github.io/SQuAD-explorer/) - **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Paper:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Size of downloaded dataset files:** 44.34 MB - **Size of the generated dataset:** 122.57 MB - **Total amount of disk used:** 166.91 MB ### Dataset Summary combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable questions written adversarially by crowdworkers to look similar to answerable ones. To do well on SQuAD2.0, systems must not only answer questions when possible, but also determine when no answer is supported by the paragraph and abstain from answering. ### Supported Tasks and Leaderboards [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Languages [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ## Dataset Structure ### Data Instances #### squad_v2 - **Size of downloaded dataset files:** 44.34 MB - **Size of the generated dataset:** 122.57 MB - **Total amount of disk used:** 166.91 MB An example of 'validation' looks as follows. 
``` This example was too long and was cropped: { "answers": { "answer_start": [94, 87, 94, 94], "text": ["10th and 11th centuries", "in the 10th and 11th centuries", "10th and 11th centuries", "10th and 11th centuries"] }, "context": "\"The Normans (Norman: Nourmands; French: Normands; Latin: Normanni) were the people who in the 10th and 11th centuries gave thei...", "id": "56ddde6b9a695914005b9629", "question": "When were the Normans in Normandy?", "title": "Normans" } ``` ### Data Fields The data fields are the same among all splits. #### squad_v2 - `id`: a `string` feature. - `title`: a `string` feature. - `context`: a `string` feature. - `question`: a `string` feature. - `answers`: a dictionary feature containing: - `text`: a `string` feature. - `answer_start`: a `int32` feature. ### Data Splits | name | train | validation | | -------- | -----: | ---------: | | squad_v2 | 130319 | 11873 |
weijiang2009/AlgmonQuestioningAnsweringDataset
[ "task_categories:question-answering", "task_ids:open-domain-qa", "task_ids:extractive-qa", "annotations_creators:crowdsourced", "language_creators:crowdsourced", "multilinguality:monolingual", "size_categories:100K<n<1M", "source_datasets:original", "language:en", "license:cc-by-sa-4.0", "region:us" ]
2022-12-13T05:49:38+00:00
{"annotations_creators": ["crowdsourced"], "language_creators": ["crowdsourced"], "language": ["en"], "license": ["cc-by-sa-4.0"], "multilinguality": ["monolingual"], "size_categories": ["100K<n<1M"], "source_datasets": ["original"], "task_categories": ["question-answering"], "task_ids": ["open-domain-qa", "extractive-qa"], "paperswithcode_id": "squad", "pretty_name": "SQuAD2.0", "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answers", "sequence": [{"name": "text", "dtype": "string"}, {"name": "answer_start", "dtype": "int32"}]}], "config_name": "squad_v2", "splits": [{"name": "train", "num_bytes": 116699950, "num_examples": 130319}, {"name": "validation", "num_bytes": 11660302, "num_examples": 11873}], "download_size": 46494161, "dataset_size": 128360252}, "train-eval-index": [{"config": "squad_v2", "task": "question-answering", "task_id": "extractive_question_answering", "splits": {"train_split": "train", "eval_split": "validation"}, "col_mapping": {"question": "question", "context": "context", "answers": {"text": "text", "answer_start": "answer_start"}}, "metrics": [{"type": "squad_v2", "name": "SQuAD v2"}]}]}
2022-12-13T05:54:11+00:00
[]
[ "en" ]
TAGS #task_categories-question-answering #task_ids-open-domain-qa #task_ids-extractive-qa #annotations_creators-crowdsourced #language_creators-crowdsourced #multilinguality-monolingual #size_categories-100K<n<1M #source_datasets-original #language-English #license-cc-by-sa-4.0 #region-us
Dataset Card for "squad\_v2" ============================ Table of Contents ----------------- * Dataset Card for "squad\_v2" + Table of Contents + Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages + Dataset Structure - Data Instances * squad\_v2 - Data Fields * squad\_v2 - Data Splits + Dataset Creation - Curation Rationale - Source Data * Initial Data Collection and Normalization * Who are the source language producers? - Annotations * Annotation process * Who are the annotators? - Personal and Sensitive Information + Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations + Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions Dataset Description ------------------- * Homepage: URL * Repository: * Paper: * Point of Contact: * Size of downloaded dataset files: 44.34 MB * Size of the generated dataset: 122.57 MB * Total amount of disk used: 166.91 MB ### Dataset Summary combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable questions written adversarially by crowdworkers to look similar to answerable ones. To do well on SQuAD2.0, systems must not only answer questions when possible, but also determine when no answer is supported by the paragraph and abstain from answering. ### Supported Tasks and Leaderboards ### Languages Dataset Structure ----------------- ### Data Instances #### squad\_v2 * Size of downloaded dataset files: 44.34 MB * Size of the generated dataset: 122.57 MB * Total amount of disk used: 166.91 MB An example of 'validation' looks as follows. ### Data Fields The data fields are the same among all splits. #### squad\_v2 * 'id': a 'string' feature. * 'title': a 'string' feature. * 'context': a 'string' feature. * 'question': a 'string' feature. * 'answers': a dictionary feature containing: + 'text': a 'string' feature. + 'answer\_start': a 'int32' feature. ### Data Splits
[ "### Dataset Summary\n\n\ncombines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable questions written adversarially by crowdworkers\nto look similar to answerable ones. To do well on SQuAD2.0, systems must not only answer questions when possible, but\nalso determine when no answer is supported by the paragraph and abstain from answering.", "### Supported Tasks and Leaderboards", "### Languages\n\n\nDataset Structure\n-----------------", "### Data Instances", "#### squad\\_v2\n\n\n* Size of downloaded dataset files: 44.34 MB\n* Size of the generated dataset: 122.57 MB\n* Total amount of disk used: 166.91 MB\n\n\nAn example of 'validation' looks as follows.", "### Data Fields\n\n\nThe data fields are the same among all splits.", "#### squad\\_v2\n\n\n* 'id': a 'string' feature.\n* 'title': a 'string' feature.\n* 'context': a 'string' feature.\n* 'question': a 'string' feature.\n* 'answers': a dictionary feature containing:\n\t+ 'text': a 'string' feature.\n\t+ 'answer\\_start': a 'int32' feature.", "### Data Splits" ]
[ "TAGS\n#task_categories-question-answering #task_ids-open-domain-qa #task_ids-extractive-qa #annotations_creators-crowdsourced #language_creators-crowdsourced #multilinguality-monolingual #size_categories-100K<n<1M #source_datasets-original #language-English #license-cc-by-sa-4.0 #region-us \n", "### Dataset Summary\n\n\ncombines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable questions written adversarially by crowdworkers\nto look similar to answerable ones. To do well on SQuAD2.0, systems must not only answer questions when possible, but\nalso determine when no answer is supported by the paragraph and abstain from answering.", "### Supported Tasks and Leaderboards", "### Languages\n\n\nDataset Structure\n-----------------", "### Data Instances", "#### squad\\_v2\n\n\n* Size of downloaded dataset files: 44.34 MB\n* Size of the generated dataset: 122.57 MB\n* Total amount of disk used: 166.91 MB\n\n\nAn example of 'validation' looks as follows.", "### Data Fields\n\n\nThe data fields are the same among all splits.", "#### squad\\_v2\n\n\n* 'id': a 'string' feature.\n* 'title': a 'string' feature.\n* 'context': a 'string' feature.\n* 'question': a 'string' feature.\n* 'answers': a dictionary feature containing:\n\t+ 'text': a 'string' feature.\n\t+ 'answer\\_start': a 'int32' feature.", "### Data Splits" ]
45c83f1c3f07f5051b82d6964c8360e6d219a5a3
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: henryu-lin/t5-large-samsum-deepspeed * Dataset: samsum * Config: samsum * Split: train To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@uunicee](https://huggingface.co/uunicee) for evaluating this model.
autoevaluate/autoeval-eval-samsum-samsum-0cab72-2447375877
[ "autotrain", "evaluation", "region:us" ]
2022-12-13T06:39:27+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["samsum"], "eval_info": {"task": "summarization", "model": "henryu-lin/t5-large-samsum-deepspeed", "metrics": ["squad_v2"], "dataset_name": "samsum", "dataset_config": "samsum", "dataset_split": "train", "col_mapping": {"text": "dialogue", "target": "summary"}}}
2022-12-13T08:21:03+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: henryu-lin/t5-large-samsum-deepspeed * Dataset: samsum * Config: samsum * Split: train To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @uunicee for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: henryu-lin/t5-large-samsum-deepspeed\n* Dataset: samsum\n* Config: samsum\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @uunicee for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: henryu-lin/t5-large-samsum-deepspeed\n* Dataset: samsum\n* Config: samsum\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @uunicee for evaluating this model." ]
69a32e19243b03c6c410206548b444011b82da59
# Dataset Card for "trdg_random_words_en_text_recognition" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
priyank-m/trdg_dict_random_words_en_text_recognition
[ "region:us" ]
2022-12-13T08:44:24+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3435853878.0, "num_examples": 115000}], "download_size": 3436541480, "dataset_size": 3435853878.0}}
2022-12-14T01:25:22+00:00
[]
[]
TAGS #region-us
# Dataset Card for "trdg_random_words_en_text_recognition" More Information needed
[ "# Dataset Card for \"trdg_random_words_en_text_recognition\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"trdg_random_words_en_text_recognition\"\n\nMore Information needed" ]
362e16c8c8a6d6079dfa1ce5b95f99278f38c36d
# Dataset Card for "trdg_random_single_words_en_text_recognition" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
priyank-m/trdg_random_single_words_en_text_recognition
[ "region:us" ]
2022-12-13T09:23:08+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2595486075.0, "num_examples": 155000}], "download_size": 2596520034, "dataset_size": 2595486075.0}}
2022-12-13T20:54:58+00:00
[]
[]
TAGS #region-us
# Dataset Card for "trdg_random_single_words_en_text_recognition" More Information needed
[ "# Dataset Card for \"trdg_random_single_words_en_text_recognition\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"trdg_random_single_words_en_text_recognition\"\n\nMore Information needed" ]
5a8ac754a624e6ce0ab7a0a3251b4cdd6b8326fc
# AutoTrain Dataset for project: massive-4-catalan ## Dataset Description This dataset has been automatically processed by AutoTrain for project massive-4-catalan. ### Languages The BCP-47 code for the dataset's language is unk. ## Dataset Structure ### Data Instances A sample from this dataset looks as follows: ```json [ { "feat_id": "1", "feat_locale": "ca-ES", "feat_partition": "train", "feat_scenario": 0, "target": 2, "text": "desperta'm a les nou a. m. del divendres", "feat_annot_utt": "desperta'm a les [time : nou a. m.] del [date : divendres]", "feat_worker_id": "42", "feat_slot_method.slot": [ "time", "date" ], "feat_slot_method.method": [ "translation", "translation" ], "feat_judgments.worker_id": [ "42", "30", "3" ], "feat_judgments.intent_score": [ 1, 1, 1 ], "feat_judgments.slots_score": [ 1, 1, 1 ], "feat_judgments.grammar_score": [ 4, 3, 4 ], "feat_judgments.spelling_score": [ 2, 2, 2 ], "feat_judgments.language_identification": [ "target", "target|english", "target" ] }, { "feat_id": "2", "feat_locale": "ca-ES", "feat_partition": "train", "feat_scenario": 0, "target": 2, "text": "posa una alarma per d\u2019aqu\u00ed a dues hores", "feat_annot_utt": "posa una alarma per [time : d\u2019aqu\u00ed a dues hores]", "feat_worker_id": "15", "feat_slot_method.slot": [ "time" ], "feat_slot_method.method": [ "translation" ], "feat_judgments.worker_id": [ "42", "30", "24" ], "feat_judgments.intent_score": [ 1, 1, 1 ], "feat_judgments.slots_score": [ 1, 1, 1 ], "feat_judgments.grammar_score": [ 4, 4, 4 ], "feat_judgments.spelling_score": [ 2, 2, 2 ], "feat_judgments.language_identification": [ "target", "target", "target" ] } ] ``` ### Dataset Fields The dataset has the following fields (also called "features"): ```json { "feat_id": "Value(dtype='string', id=None)", "feat_locale": "Value(dtype='string', id=None)", "feat_partition": "Value(dtype='string', id=None)", "feat_scenario": "ClassLabel(num_classes=18, names=['alarm', 'audio', 'calendar', 'cooking', 'datetime', 'email', 'general', 'iot', 'lists', 'music', 'news', 'play', 'qa', 'recommendation', 'social', 'takeaway', 'transport', 'weather'], id=None)", "target": "ClassLabel(num_classes=60, names=['alarm_query', 'alarm_remove', 'alarm_set', 'audio_volume_down', 'audio_volume_mute', 'audio_volume_other', 'audio_volume_up', 'calendar_query', 'calendar_remove', 'calendar_set', 'cooking_query', 'cooking_recipe', 'datetime_convert', 'datetime_query', 'email_addcontact', 'email_query', 'email_querycontact', 'email_sendemail', 'general_greet', 'general_joke', 'general_quirky', 'iot_cleaning', 'iot_coffee', 'iot_hue_lightchange', 'iot_hue_lightdim', 'iot_hue_lightoff', 'iot_hue_lighton', 'iot_hue_lightup', 'iot_wemo_off', 'iot_wemo_on', 'lists_createoradd', 'lists_query', 'lists_remove', 'music_dislikeness', 'music_likeness', 'music_query', 'music_settings', 'news_query', 'play_audiobook', 'play_game', 'play_music', 'play_podcasts', 'play_radio', 'qa_currency', 'qa_definition', 'qa_factoid', 'qa_maths', 'qa_stock', 'recommendation_events', 'recommendation_locations', 'recommendation_movies', 'social_post', 'social_query', 'takeaway_order', 'takeaway_query', 'transport_query', 'transport_taxi', 'transport_ticket', 'transport_traffic', 'weather_query'], id=None)", "text": "Value(dtype='string', id=None)", "feat_annot_utt": "Value(dtype='string', id=None)", "feat_worker_id": "Value(dtype='string', id=None)", "feat_slot_method.slot": "Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)", "feat_slot_method.method": 
"Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)", "feat_judgments.worker_id": "Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)", "feat_judgments.intent_score": "Sequence(feature=Value(dtype='int8', id=None), length=-1, id=None)", "feat_judgments.slots_score": "Sequence(feature=Value(dtype='int8', id=None), length=-1, id=None)", "feat_judgments.grammar_score": "Sequence(feature=Value(dtype='int8', id=None), length=-1, id=None)", "feat_judgments.spelling_score": "Sequence(feature=Value(dtype='int8', id=None), length=-1, id=None)", "feat_judgments.language_identification": "Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)" } ``` ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follow: | Split name | Num samples | | ------------ | ------------------- | | train | 11514 | | valid | 2033 |
crodri/autotrain-data-massive-4-catalan
[ "task_categories:text-classification", "region:us" ]
2022-12-13T11:44:59+00:00
{"task_categories": ["text-classification"]}
2022-12-13T11:51:02+00:00
[]
[]
TAGS #task_categories-text-classification #region-us
AutoTrain Dataset for project: massive-4-catalan ================================================ Dataset Description ------------------- This dataset has been automatically processed by AutoTrain for project massive-4-catalan. ### Languages The BCP-47 code for the dataset's language is unk. Dataset Structure ----------------- ### Data Instances A sample from this dataset looks as follows: ### Dataset Fields The dataset has the following fields (also called "features"): ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follow:
[ "### Languages\n\n\nThe BCP-47 code for the dataset's language is unk.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
[ "TAGS\n#task_categories-text-classification #region-us \n", "### Languages\n\n\nThe BCP-47 code for the dataset's language is unk.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
8b0fcd050ac5dc29cd30c0fc3d66a92b58b261cf
# Dataset Card for "common_voice_11_0_dummy" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sanchit-gandhi/common_voice_11_0_dummy
[ "region:us" ]
2022-12-13T12:00:28+00:00
{"dataset_info": {"features": [{"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1012, "num_examples": 10}, {"name": "validation", "num_bytes": 592, "num_examples": 5}], "download_size": 3199, "dataset_size": 1604}}
2022-12-13T12:00:34+00:00
[]
[]
TAGS #region-us
# Dataset Card for "common_voice_11_0_dummy" More Information needed
[ "# Dataset Card for \"common_voice_11_0_dummy\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"common_voice_11_0_dummy\"\n\nMore Information needed" ]
41ebfc4ccef59325b2a904f0788007267b05184a
## Dataset Description These are the Python, Java and JavaScript subsets of The Stack (v1.1) after cleaning* and aggressive deduplication from [stack-dedup-alt-decontaminate](https://huggingface.co/datasets/bigcode/stack-dedup-alt-decontaminate) with filtering on [comment to code ratio](https://github.com/bigcode-project/bigcode-dataset/tree/main/preprocessing) with a minimum of 0.01 and a maximum of 0.8. The additional comments filtering removes 26.5% of the dataset's volume, which goes from 215GB of text to 170GB. (*) cleaning: near deduplication + PII redaction + line length & percentage of alphanumeric characters filtering + data decontamination
bigcode/stack-dedup-alt-comments
[ "task_categories:text-generation", "task_ids:language-modeling", "language_creators:crowdsourced", "multilinguality:multilingual", "size_categories:unknown", "language:code", "region:us" ]
2022-12-13T12:32:10+00:00
{"annotations_creators": [], "language_creators": ["crowdsourced"], "language": ["code"], "multilinguality": ["multilingual"], "size_categories": ["unknown"], "source_datasets": [], "task_categories": ["text-generation"], "task_ids": ["language-modeling"], "extra_gated_prompt": "## Terms of Use for The Stack\n\nThe Stack dataset is a collection of source code in over 300 programming languages. We ask that you read and acknowledge the following points before using the dataset:\n1. The Stack is a collection of source code from repositories with various licenses. Any use of all or part of the code gathered in The Stack must abide by the terms of the original licenses, including attribution clauses when relevant. We facilitate this by providing provenance information for each data point.\n2. The Stack is regularly updated to enact validated data removal requests. By clicking on \"Access repository\", you agree to update your own version of The Stack to the most recent usable version specified by the maintainers in [the following thread](https://huggingface.co/datasets/bigcode/the-stack/discussions/7). If you have questions about dataset versions and allowed uses, please also ask them in the dataset\u2019s [community discussions](https://huggingface.co/datasets/bigcode/the-stack/discussions/new). We will also notify users via email when the latest usable version changes.\n3. To host, share, or otherwise provide access to The Stack dataset, you must include [these Terms of Use](https://huggingface.co/datasets/bigcode/the-stack#terms-of-use-for-the-stack) and require users to agree to it.\n\nBy clicking on \"Access repository\" below, you accept that your contact information (email address and username) can be shared with the dataset maintainers as well.", "extra_gated_fields": {"Email": "text", "I have read the License and agree with its terms": "checkbox"}}
2023-05-13T05:17:27+00:00
[]
[ "code" ]
TAGS #task_categories-text-generation #task_ids-language-modeling #language_creators-crowdsourced #multilinguality-multilingual #size_categories-unknown #language-code #region-us
## Dataset Description These are the Python, Java and JavaScript subsets of The Stack (v1.1) after cleaning* and aggressive deduplication from stack-dedup-alt-decontaminate with filtering on comment to code ratio with a minimum of 0.01 and a maximum of 0.8. The additional comments filtering removes 26.5% of the dataset's volume, which goes from 215GB of text to 170GB. (*) cleaning: near deduplication + PII redaction + line length & percentage of alphanumeric characters filtering + data decontamination
[ "## Dataset Description\nThis is the Python, Java and JavaScript subsets of The Stack (v1.1) after cleaning* and agressive deduplication from stack-dedup-alt-decontaminate with \nfiltering on comment to code ratio with minimum of 0.01 and maximum of 0.8.\n\nThe additional comments filtering removes 26.5% of the dataset's volume which goes from 215GB of text to 170GB.\n\n(*) cleaning: near deduplication + PII redaction + line length & percentage of alphanumeric characters filtering + data decontamination" ]
[ "TAGS\n#task_categories-text-generation #task_ids-language-modeling #language_creators-crowdsourced #multilinguality-multilingual #size_categories-unknown #language-code #region-us \n", "## Dataset Description\nThis is the Python, Java and JavaScript subsets of The Stack (v1.1) after cleaning* and agressive deduplication from stack-dedup-alt-decontaminate with \nfiltering on comment to code ratio with minimum of 0.01 and maximum of 0.8.\n\nThe additional comments filtering removes 26.5% of the dataset's volume which goes from 215GB of text to 170GB.\n\n(*) cleaning: near deduplication + PII redaction + line length & percentage of alphanumeric characters filtering + data decontamination" ]
5ad57e73741afe157262119ff9a785a52c82ad83
polinaeterna/audio_configs
[ "region:us" ]
2022-12-13T12:44:13+00:00
{"configs_kwargs": [{"config_name": "v1", "data_dir": "v1", "drop_labels": true}, {"config_name": "v2", "data_dir": "v2", "drop_labels": false}]}
2022-12-13T12:57:53+00:00
[]
[]
TAGS #region-us
[]
[ "TAGS\n#region-us \n" ]
9b03c49b48da54b4d91a644e6a9ec295065b4e9b
I collected a few antonym pairs from several sources for creating interpretable embeddings. This file also contains the variance computed on quantized (English) fasttext embeddings.
KnutJaegersberg/antonym_dataset_fasttext_variances
[ "license:mit", "region:us" ]
2022-12-13T12:51:29+00:00
{"license": "mit"}
2022-12-13T12:56:39+00:00
[]
[]
TAGS #license-mit #region-us
I collected a few antonym pairs from several sources for creating interpretable embeddings. This file also contains the variance computed on quantized (English) fasttext embeddings.
[]
[ "TAGS\n#license-mit #region-us \n" ]
dbc9fe670691e6c828c788afbc2a649e79811d0a
These direction vectors of antonyms can be used to calculate fasttext interpretable embeddings on the fly, solving the OOV problem of other interpretable embeddings. Simply calculate cosine similarity for each row.
KnutJaegersberg/direction_vectors_ftq_en
[ "license:mit", "region:us" ]
2022-12-13T12:59:41+00:00
{"license": "mit"}
2022-12-13T13:02:41+00:00
[]
[]
TAGS #license-mit #region-us
These direction vectors of antonyms can be used to calculate fasttext interpretable embeddings on the fly, solving the OOV problem of other interpretable embeddings. Simply calculate cosine similarity for each row.
[]
[ "TAGS\n#license-mit #region-us \n" ]
e7a07029cfd7378548c2b26028a6b38b233876d5
# Dataset Card for "sbbdata_snr_none" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
marccgrau/sbbdata_snr_none
[ "region:us" ]
2022-12-13T13:54:38+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 499888925.0, "num_examples": 1121}, {"name": "test", "num_bytes": 63201474.0, "num_examples": 142}, {"name": "val", "num_bytes": 62608885.0, "num_examples": 141}], "download_size": 508647230, "dataset_size": 625699284.0}}
2023-01-05T16:21:55+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sbbdata_snr_none" More Information needed
[ "# Dataset Card for \"sbbdata_snr_none\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sbbdata_snr_none\"\n\nMore Information needed" ]
c6256d9e3af82bf35156a2b760be423e139d63a3
# Dataset Card for "sbbdata_snr_neg10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
marccgrau/sbbdata_snr_neg10
[ "region:us" ]
2022-12-13T13:55:22+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 499781309.0, "num_examples": 1121}, {"name": "test", "num_bytes": 63201474.0, "num_examples": 142}, {"name": "val", "num_bytes": 62608885.0, "num_examples": 141}], "download_size": 623869547, "dataset_size": 625591668.0}}
2023-01-05T16:22:50+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sbbdata_snr_neg10" More Information needed
[ "# Dataset Card for \"sbbdata_snr_neg10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sbbdata_snr_neg10\"\n\nMore Information needed" ]
6c3d66e636ed9c415c2178872928e792be93e205
# Dataset Card for "sbbdata_snr_0" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
marccgrau/sbbdata_snr_0
[ "region:us" ]
2022-12-13T13:56:10+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 499781309.0, "num_examples": 1121}, {"name": "test", "num_bytes": 63201474.0, "num_examples": 142}, {"name": "val", "num_bytes": 62608885.0, "num_examples": 141}], "download_size": 620800482, "dataset_size": 625591668.0}}
2023-01-05T16:23:43+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sbbdata_snr_0" More Information needed
[ "# Dataset Card for \"sbbdata_snr_0\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sbbdata_snr_0\"\n\nMore Information needed" ]
27f92337a769f905244c77c0eb02e5599c578785
# Dataset Card for "sbbdata_snr_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
marccgrau/sbbdata_snr_10
[ "region:us" ]
2022-12-13T13:56:40+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 499781309.0, "num_examples": 1121}, {"name": "test", "num_bytes": 63201474.0, "num_examples": 142}, {"name": "val", "num_bytes": 62608885.0, "num_examples": 141}], "download_size": 605948787, "dataset_size": 625591668.0}}
2023-01-05T16:24:36+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sbbdata_snr_10" More Information needed
[ "# Dataset Card for \"sbbdata_snr_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sbbdata_snr_10\"\n\nMore Information needed" ]
bfe239d14be1d0da7ca5ffa53f48d0c1dca923ff
# Dataset Card for "sbbdata_snr_20" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
marccgrau/sbbdata_snr_20
[ "region:us" ]
2022-12-13T13:57:18+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 499781309.0, "num_examples": 1121}, {"name": "test", "num_bytes": 63201474.0, "num_examples": 142}, {"name": "val", "num_bytes": 62608885.0, "num_examples": 141}], "download_size": 569514379, "dataset_size": 625591668.0}}
2023-01-05T16:25:28+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sbbdata_snr_20" More Information needed
[ "# Dataset Card for \"sbbdata_snr_20\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sbbdata_snr_20\"\n\nMore Information needed" ]
cdde0d0a76a48224b786ca2aeda76462ca470131
# Dataset Card for "sbbdata_snr_30" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
marccgrau/sbbdata_snr_30
[ "region:us" ]
2022-12-13T13:57:51+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 499781309.0, "num_examples": 1121}, {"name": "test", "num_bytes": 63201474.0, "num_examples": 142}, {"name": "val", "num_bytes": 62608885.0, "num_examples": 141}], "download_size": 532753840, "dataset_size": 625591668.0}}
2023-01-05T16:26:20+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sbbdata_snr_30" More Information needed
[ "# Dataset Card for \"sbbdata_snr_30\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sbbdata_snr_30\"\n\nMore Information needed" ]
a09fe834115f722720b975b51df749fe9c118101
# Dataset Card for "sbbdata_snr_40" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
marccgrau/sbbdata_snr_40
[ "region:us" ]
2022-12-13T13:58:25+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 499781309.0, "num_examples": 1121}, {"name": "test", "num_bytes": 63201474.0, "num_examples": 142}, {"name": "val", "num_bytes": 62608885.0, "num_examples": 141}], "download_size": 515470146, "dataset_size": 625591668.0}}
2023-01-05T16:27:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sbbdata_snr_40" More Information needed
[ "# Dataset Card for \"sbbdata_snr_40\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sbbdata_snr_40\"\n\nMore Information needed" ]
490bd49c6b930452080f8287e8b8b47619cee8f2
# Dataset Card for "sbbdata_snr_random" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
marccgrau/sbbdata_snr_random
[ "region:us" ]
2022-12-13T13:59:00+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 499781309.0, "num_examples": 1121}, {"name": "test", "num_bytes": 63201474.0, "num_examples": 142}, {"name": "val", "num_bytes": 62608885.0, "num_examples": 141}], "download_size": 581004500, "dataset_size": 625591668.0}}
2023-01-05T16:28:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sbbdata_snr_random" More Information needed
[ "# Dataset Card for \"sbbdata_snr_random\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sbbdata_snr_random\"\n\nMore Information needed" ]
6a2e8b30d790d75a7d56707d34893c1d413e5027
# Dataset Card for "butterflies_names" Processed version of https://huggingface.co/datasets/huggan/inat_butterflies_top10k -- taking the description and extracting the scientific name in parentheses.
sasha/butterflies_names
[ "region:us" ]
2022-12-13T14:03:11+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "description", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "sim_score", "dtype": "float64"}, {"name": "name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 369545620.0, "num_examples": 10000}], "download_size": 367769662, "dataset_size": 369545620.0}}
2022-12-13T14:27:56+00:00
[]
[]
TAGS #region-us
# Dataset Card for "butterflies_names" Processed version of URL -- taking the description and extracting the scientific name in parentheses.
[ "# Dataset Card for \"butterflies_names\"\n\nProcessed version of URL -- taking the description and extracting the scientific name in parentheses." ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"butterflies_names\"\n\nProcessed version of URL -- taking the description and extracting the scientific name in parentheses." ]
8fa24feeba8bd2d09356f6bfeb9f080d2a179df3
# KPWr
clarin-knext/kpwr
[ "task_categories:token-classification", "task_ids:named-entity-recognition", "annotations_creators:expert-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:18K", "size_categories:10K<n<100K", "source_datasets:original", "language:pl", "license:cc-by-sa-4.0", "region:us" ]
2022-12-13T14:10:39+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["found"], "language": ["pl"], "license": ["cc-by-sa-4.0"], "multilinguality": ["monolingual"], "size_categories": ["18K", "10K<n<100K"], "source_datasets": ["original"], "task_categories": ["token-classification"], "task_ids": ["named-entity-recognition"], "pretty_name": "KPWr 1.27"}
2023-08-18T07:51:36+00:00
[]
[ "pl" ]
TAGS #task_categories-token-classification #task_ids-named-entity-recognition #annotations_creators-expert-generated #language_creators-found #multilinguality-monolingual #size_categories-18K #size_categories-10K<n<100K #source_datasets-original #language-Polish #license-cc-by-sa-4.0 #region-us
# KPWr
[ "# KPWr" ]
[ "TAGS\n#task_categories-token-classification #task_ids-named-entity-recognition #annotations_creators-expert-generated #language_creators-found #multilinguality-monolingual #size_categories-18K #size_categories-10K<n<100K #source_datasets-original #language-Polish #license-cc-by-sa-4.0 #region-us \n", "# KPWr" ]
7916a8abcaa48c17c3e0b56f554b3fe6ee717864
# CEN
clarin-knext/cen
[ "task_categories:token-classification", "task_ids:named-entity-recognition", "annotations_creators:expert-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:18K", "size_categories:10K<n<100K", "source_datasets:original", "language:pl", "license:cc-by-3.0", "region:us" ]
2022-12-13T14:10:55+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["found"], "language": ["pl"], "license": ["cc-by-3.0"], "multilinguality": ["monolingual"], "size_categories": ["18K", "10K<n<100K"], "source_datasets": ["original"], "task_categories": ["token-classification"], "task_ids": ["named-entity-recognition"], "pretty_name": "KPWr 1.27"}
2022-12-13T15:48:02+00:00
[]
[ "pl" ]
TAGS #task_categories-token-classification #task_ids-named-entity-recognition #annotations_creators-expert-generated #language_creators-found #multilinguality-monolingual #size_categories-18K #size_categories-10K<n<100K #source_datasets-original #language-Polish #license-cc-by-3.0 #region-us
# CEN
[ "# CEN" ]
[ "TAGS\n#task_categories-token-classification #task_ids-named-entity-recognition #annotations_creators-expert-generated #language_creators-found #multilinguality-monolingual #size_categories-18K #size_categories-10K<n<100K #source_datasets-original #language-Polish #license-cc-by-3.0 #region-us \n", "# CEN" ]
5c9996aa94c7be1858ef0a0585312a6d6903048e
# Dataset Card for "ptf-dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
hts98/ptf-dataset
[ "region:us" ]
2022-12-13T17:46:07+00:00
{"dataset_info": {"features": [{"name": "input_features", "sequence": {"sequence": "float32"}}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 19917891520, "num_examples": 20736}, {"name": "test", "num_bytes": 4980426456, "num_examples": 5185}], "download_size": 4001768209, "dataset_size": 24898317976}}
2022-12-14T01:10:44+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ptf-dataset" More Information needed
[ "# Dataset Card for \"ptf-dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ptf-dataset\"\n\nMore Information needed" ]
b8e586d619c8a594582fb13febb1e38bdbae7ff2
# Dataset Card for "lcbsi-wbc-ap" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
polejowska/lcbsi-wbc-ap
[ "region:us" ]
2022-12-13T20:51:11+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "basophil", "1": "eosinophil", "2": "lymphocyte", "3": "monocyte", "4": "neutrophil"}}}}], "splits": [{"name": "train", "num_bytes": 25369707.0, "num_examples": 3500}, {"name": "test", "num_bytes": 5540002.0, "num_examples": 750}, {"name": "valid", "num_bytes": 5488683.0, "num_examples": 750}], "download_size": 36231350, "dataset_size": 36398392.0}}
2022-12-13T20:52:22+00:00
[]
[]
TAGS #region-us
# Dataset Card for "lcbsi-wbc-ap" More Information needed
[ "# Dataset Card for \"lcbsi-wbc-ap\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"lcbsi-wbc-ap\"\n\nMore Information needed" ]
489805c28b95433743218bab33f59f0b52d75bde
![;)](https://media.giphy.com/media/xd9HUXswWPY1EEJ80a/giphy.gif) ### Dataset Curators The original data is maintained by [ArXiv](https://arxiv.org/) ### Licensing Information The data is under the [Creative Commons CC0 1.0 Universal Public Domain Dedication](https://creativecommons.org/publicdomain/zero/1.0/) ### Citation Information ``` @misc{clement2019arxiv, title={On the Use of ArXiv as a Dataset}, author={Colin B. Clement and Matthew Bierbaum and Kevin P. O'Keeffe and Alexander A. Alemi}, year={2019}, eprint={1905.00075}, archivePrefix={arXiv}, primaryClass={cs.IR} } ```
zeroshot/arxiv-biology
[ "annotations_creators:no-annotation", "language_creators:expert-generated", "multilinguality:monolingual", "language:en", "license:cc0-1.0", "arxiv:1905.00075", "region:us" ]
2022-12-13T20:55:38+00:00
{"annotations_creators": ["no-annotation"], "language_creators": ["expert-generated"], "language": ["en"], "license": ["cc0-1.0"], "multilinguality": ["monolingual"]}
2023-01-05T15:43:07+00:00
[ "1905.00075" ]
[ "en" ]
TAGS #annotations_creators-no-annotation #language_creators-expert-generated #multilinguality-monolingual #language-English #license-cc0-1.0 #arxiv-1905.00075 #region-us
!;) ### Dataset Curators The original data is maintained by ArXiv ### Licensing Information The data is under the Creative Commons CC0 1.0 Universal Public Domain Dedication
[ "### Dataset Curators\n\nThe original data is maintained by ArXiv", "### Licensing Information\n\nThe data is under the Creative Commons CC0 1.0 Universal Public Domain Dedication" ]
[ "TAGS\n#annotations_creators-no-annotation #language_creators-expert-generated #multilinguality-monolingual #language-English #license-cc0-1.0 #arxiv-1905.00075 #region-us \n", "### Dataset Curators\n\nThe original data is maintained by ArXiv", "### Licensing Information\n\nThe data is under the Creative Commons CC0 1.0 Universal Public Domain Dedication" ]
7baf3f8a5f3d38acc585d42d12193b27baf8cf79
<p align="center"><h1>🧠 Awesome ChatGPT Prompts [CSV dataset]</h1></p> This is a Dataset Repository of **Awesome ChatGPT Prompts** **[View All Prompts on GitHub](https://github.com/f/awesome-chatgpt-prompts)** # License CC-0
fka/awesome-chatgpt-prompts
[ "license:cc0-1.0", "ChatGPT", "region:us" ]
2022-12-13T23:47:45+00:00
{"license": "cc0-1.0", "tags": ["ChatGPT"]}
2023-03-07T10:04:18+00:00
[]
[]
TAGS #license-cc0-1.0 #ChatGPT #region-us
<p align="center"><h1> Awesome ChatGPT Prompts [CSV dataset]</h1></p> This is a Dataset Repository of Awesome ChatGPT Prompts View All Prompts on GitHub # License CC-0
[ "# License\n\nCC-0" ]
[ "TAGS\n#license-cc0-1.0 #ChatGPT #region-us \n", "# License\n\nCC-0" ]
673c69941d7cc168ff29437cad24f80840a6838e
https://github.com/entangledloops/slidingpuzzle/ Each JSON file contains a list of board and solution pairs. These are the optimal solutions as found through Breadth-First Search. For example, here is a random snapshot from inside `examples_3x3.json`: ``` ... [[[8, 4, 5], [3, 7, 1], [0, 2, 6]], [2, 7, 3, 8, 4, 3, 8, 2, 7, 8, 1, 5, 3, 1, 2, 4, 1, 2, 5, 6]], [[[8, 4, 5], [3, 7, 1], [2, 6, 0]], [6, 7, 3, 8, 4, 3, 8, 2, 7, 8, 1, 5, 3, 1, 2, 4, 1, 2, 5, 6]], [[[8, 4, 5], [2, 3, 1], [7, 6, 0]], [6, 7, 2, 8, 4, 3, 8, 2, 7, 8, 1, 5, 3, 1, 2, 4, 1, 2, 5, 6]], ... ``` The first example represents the following board, ``` 8 4 5 3 7 1 2 6 ``` where 0 indicates the blank. The solution is to first move the 2, then 7, 3, 8, etc. The final solved board state would be: ``` 1 2 3 4 5 6 7 8 ``` To use the data, it can be parsed: ```python import json with open("examples_3x3.json", "rt") as fp: db = json.load(fp) board, solution = db[0] # inspect first example print(board, solution) ```
entangledloops/slidingpuzzle
[ "license:apache-2.0", "region:us" ]
2022-12-14T00:06:46+00:00
{"license": "apache-2.0"}
2023-02-08T20:04:44+00:00
[]
[]
TAGS #license-apache-2.0 #region-us
URL Each JSON file contains a list of board and solution pairs. These are the optimal solutions as found through Breadth-First Search. For example, here is a random snapshot from inside 'examples_3x3.json': The first example represents the following board, where 0 indicates the blank. The solution is to first move the 2, then 7, 3, 8, etc. The final solved board state would be: To use the data, it can be parsed:
[]
[ "TAGS\n#license-apache-2.0 #region-us \n" ]
d1ac4d1e0d323add5ca4c935e6926c91b863acdf
### CORAA V1 - Dataset CORAA is a publicly available dataset for Automatic Speech Recognition (ASR) in the Brazilian Portuguese language containing 290.77 hours of audios and their respective transcriptions (400k+ segmented audios). The dataset is composed of audios of 5 original projects: * ALIP (Gonçalves, 2019) * C-ORAL Brazil (Raso and Mello, 2012) * NURC-Recife (Oliviera Jr., 2016) * SP-2010 (Mendes and Oushiro, 2012) * TEDx talks (talks in Portuguese) The audios were either validated by annotators or transcripted for the first time aiming at the ASR task. <br> ### References * Gonçalves SCL (2019) Projeto ALIP (amostra linguística do interior paulista) e banco de dados iboruna: 10 anos de contribuição com a descrição do Português Brasileiro. Revista Estudos Linguísticos 48(1):276–297. * Raso T, Mello H, Mittmann MM (2012) The C-ORAL-BRASIL I: Reference corpus for spoken Brazilian Portuguese. In: Proceedings of the Eighth International Conference on Language Resources and Evaluation (LREC’12), European Language Resources Association (ELRA), Istanbul, Turkey, pp 106–113, URL http://www.lrec-conf.org/proceedings/lrec2012/pdf/624_Paper.pdf * Oliviera Jr M (2016) Nurc digital um protocolo para a digitalização, anotação, arquivamento e disseminação do material do projeto da norma urbana linguística culta (NURC). CHIMERA: Revista de Corpus de Lenguas Romances y Estudios Linguísticos 3(2):149–174, URL https://revistas.uam.es/chimera/article/view/6519 * Mendes RB, Oushiro L (2012) Mapping Paulistano Portuguese: the SP2010 Project. In: Proceedings of the VIIth GSCP International Conference: Speech and Corpora, Fizenze University Press, Firenze, Italy, pp 459–463.
Felipe474/nilc-coraa-v1
[ "license:other", "region:us" ]
2022-12-14T01:34:10+00:00
{"license": "other"}
2022-12-18T20:47:33+00:00
[]
[]
TAGS #license-other #region-us
### CORAA V1 - Dataset CORAA is a publicly available dataset for Automatic Speech Recognition (ASR) in the Brazilian Portuguese language containing 290.77 hours of audios and their respective transcriptions (400k+ segmented audios). The dataset is composed of audios of 5 original projects: * ALIP (Gonçalves, 2019) * C-ORAL Brazil (Raso and Mello, 2012) * NURC-Recife (Oliviera Jr., 2016) * SP-2010 (Mendes and Oushiro, 2012) * TEDx talks (talks in Portuguese) The audios were either validated by annotators or transcripted for the first time aiming at the ASR task. <br> ### References * Gonçalves SCL (2019) Projeto ALIP (amostra linguística do interior paulista) e banco de dados iboruna: 10 anos de contribuição com a descrição do Português Brasileiro. Revista Estudos Linguísticos 48(1):276–297. * Raso T, Mello H, Mittmann MM (2012) The C-ORAL-BRASIL I: Reference corpus for spoken Brazilian Portuguese. In: Proceedings of the Eighth International Conference on Language Resources and Evaluation (LREC’12), European Language Resources Association (ELRA), Istanbul, Turkey, pp 106–113, URL URL * Oliviera Jr M (2016) Nurc digital um protocolo para a digitalização, anotação, arquivamento e disseminação do material do projeto da norma urbana linguística culta (NURC). CHIMERA: Revista de Corpus de Lenguas Romances y Estudios Linguísticos 3(2):149–174, URL URL * Mendes RB, Oushiro L (2012) Mapping Paulistano Portuguese: the SP2010 Project. In: Proceedings of the VIIth GSCP International Conference: Speech and Corpora, Fizenze University Press, Firenze, Italy, pp 459–463.
[ "### CORAA V1 - Dataset \n\nCORAA is a publicly available dataset for Automatic Speech Recognition (ASR) in the Brazilian Portuguese language containing 290.77 hours of audios and their respective transcriptions (400k+ segmented audios). The dataset is composed of audios of 5 original projects:\n\n* ALIP (Gonçalves, 2019)\n* C-ORAL Brazil (Raso and Mello, 2012)\n* NURC-Recife (Oliviera Jr., 2016)\n* SP-2010 (Mendes and Oushiro, 2012)\n* TEDx talks (talks in Portuguese)\nThe audios were either validated by annotators or transcripted for the first time aiming at the ASR task.\n\n<br>", "### References \n\n* Gonçalves SCL (2019) Projeto ALIP (amostra linguística do interior paulista) e banco de dados iboruna: 10 anos de contribuição com a descrição do Português Brasileiro. Revista Estudos Linguísticos 48(1):276–297.\n* Raso T, Mello H, Mittmann MM (2012) The C-ORAL-BRASIL I: Reference corpus for spoken Brazilian Portuguese. In: Proceedings of the Eighth International Conference on Language Resources and Evaluation (LREC’12), European Language Resources Association (ELRA), Istanbul, Turkey, pp 106–113, URL URL\n* Oliviera Jr M (2016) Nurc digital um protocolo para a digitalização, anotação, arquivamento e disseminação do material do projeto da norma urbana linguística culta (NURC). CHIMERA: Revista de Corpus de Lenguas Romances y Estudios Linguísticos 3(2):149–174, URL URL\n* Mendes RB, Oushiro L (2012) Mapping Paulistano Portuguese: the SP2010 Project. In: Proceedings of the VIIth GSCP International Conference: Speech and Corpora, Fizenze University Press, Firenze, Italy, pp 459–463." ]
[ "TAGS\n#license-other #region-us \n", "### CORAA V1 - Dataset \n\nCORAA is a publicly available dataset for Automatic Speech Recognition (ASR) in the Brazilian Portuguese language containing 290.77 hours of audios and their respective transcriptions (400k+ segmented audios). The dataset is composed of audios of 5 original projects:\n\n* ALIP (Gonçalves, 2019)\n* C-ORAL Brazil (Raso and Mello, 2012)\n* NURC-Recife (Oliviera Jr., 2016)\n* SP-2010 (Mendes and Oushiro, 2012)\n* TEDx talks (talks in Portuguese)\nThe audios were either validated by annotators or transcripted for the first time aiming at the ASR task.\n\n<br>", "### References \n\n* Gonçalves SCL (2019) Projeto ALIP (amostra linguística do interior paulista) e banco de dados iboruna: 10 anos de contribuição com a descrição do Português Brasileiro. Revista Estudos Linguísticos 48(1):276–297.\n* Raso T, Mello H, Mittmann MM (2012) The C-ORAL-BRASIL I: Reference corpus for spoken Brazilian Portuguese. In: Proceedings of the Eighth International Conference on Language Resources and Evaluation (LREC’12), European Language Resources Association (ELRA), Istanbul, Turkey, pp 106–113, URL URL\n* Oliviera Jr M (2016) Nurc digital um protocolo para a digitalização, anotação, arquivamento e disseminação do material do projeto da norma urbana linguística culta (NURC). CHIMERA: Revista de Corpus de Lenguas Romances y Estudios Linguísticos 3(2):149–174, URL URL\n* Mendes RB, Oushiro L (2012) Mapping Paulistano Portuguese: the SP2010 Project. In: Proceedings of the VIIth GSCP International Conference: Speech and Corpora, Fizenze University Press, Firenze, Italy, pp 459–463." ]
52689e1c86fdbf8a43c3df0a0f9422d78552dd30
# Dataset Card for "local-unet-cifar10-32" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Dahoas/local-unet-cifar10-32
[ "region:us" ]
2022-12-14T02:30:22+00:00
{"dataset_info": {"features": [{"name": "images", "sequence": {"sequence": {"sequence": "float32"}}}], "splits": [{"name": "train", "num_bytes": 635009024, "num_examples": 50048}], "download_size": 647505353, "dataset_size": 635009024}}
2022-12-14T22:42:18+00:00
[]
[]
TAGS #region-us
# Dataset Card for "local-unet-cifar10-32" More Information needed
[ "# Dataset Card for \"local-unet-cifar10-32\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"local-unet-cifar10-32\"\n\nMore Information needed" ]
9d43e2a79b70cde5b95c9c61b268bb20442c6421
# Dataset Card for Capstone ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** [Needs More Information] - **Repository:** [Needs More Information] - **Paper:** [Needs More Information] - **Leaderboard:** [Needs More Information] - **Point of Contact:** [Needs More Information] ### Dataset Summary ### Supported Tasks and Leaderboards [Needs More Information] ### Languages English ## Dataset Structure ### Data Instances [Needs More Information] ### Data Fields Id MSSubClass MSZoning LotFrontage LotArea Street Alley LotShape LandContour Utilities LotConfig LandSlope Neighborhood Condition1 Condition2 BldgType HouseStyle OverallQual OverallCond YearBuilt YearRemodAdd RoofStyle RoofMatl Exterior1st Exterior2nd MasVnrType MasVnrArea ExterQual ExterCond Foundation BsmtQual BsmtCond BsmtExposure BsmtFinType1 BsmtFinSF1 BsmtFinType2 BsmtFinSF2 BsmtUnfSF TotalBsmtSF Heating HeatingQC CentralAir Electrical 1stFlrSF 2ndFlrSF LowQualFinSF GrLivArea BsmtFullBath BsmtHalfBath FullBath HalfBath BedroomAbvGr KitchenAbvGr KitchenQual TotRmsAbvGrd Functional Fireplaces FireplaceQu GarageType GarageYrBlt GarageFinish GarageCars GarageArea GarageQual GarageCond PavedDrive WoodDeckSF OpenPorchSF EnclosedPorch 3SsnPorch ScreenPorch PoolArea PoolQC Fence MiscFeature MiscVal MoSold YrSold SaleType SaleCondition ### Data Splits train test ## Dataset Creation ### Curation Rationale [Needs More Information] ### Source Data #### Initial Data Collection and Normalization [Needs More Information] #### Who are the source language producers? [Needs More Information] ### Annotations #### Annotation process [Needs More Information] #### Who are the annotators? [Needs More Information] ### Personal and Sensitive Information [Needs More Information] ## Considerations for Using the Data ### Social Impact of Dataset [Needs More Information] ### Discussion of Biases [Needs More Information] ### Other Known Limitations [Needs More Information] ## Additional Information ### Dataset Curators [Needs More Information] ### Licensing Information [Needs More Information] ### Citation Information [Needs More Information]
deancgarcia/capstone4650
[ "region:us" ]
2022-12-14T03:59:17+00:00
{}
2022-12-14T04:08:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for Capstone ## Table of Contents - Dataset Description - Dataset Summary - Supported Tasks - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information ## Dataset Description - Homepage: - Repository: - Paper: - Leaderboard: - Point of Contact: ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages English ## Dataset Structure ### Data Instances ### Data Fields Id MSSubClass MSZoning LotFrontage LotArea Street Alley LotShape LandContour Utilities LotConfig LandSlope Neighborhood Condition1 Condition2 BldgType HouseStyle OverallQual OverallCond YearBuilt YearRemodAdd RoofStyle RoofMatl Exterior1st Exterior2nd MasVnrType MasVnrArea ExterQual ExterCond Foundation BsmtQual BsmtCond BsmtExposure BsmtFinType1 BsmtFinSF1 BsmtFinType2 BsmtFinSF2 BsmtUnfSF TotalBsmtSF Heating HeatingQC CentralAir Electrical 1stFlrSF 2ndFlrSF LowQualFinSF GrLivArea BsmtFullBath BsmtHalfBath FullBath HalfBath BedroomAbvGr KitchenAbvGr KitchenQual TotRmsAbvGrd Functional Fireplaces FireplaceQu GarageType GarageYrBlt GarageFinish GarageCars GarageArea GarageQual GarageCond PavedDrive WoodDeckSF OpenPorchSF EnclosedPorch 3SsnPorch ScreenPorch PoolArea PoolQC Fence MiscFeature MiscVal MoSold YrSold SaleType SaleCondition ### Data Splits train test ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information
[ "# Dataset Card for Capstone", "## Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information", "## Dataset Description\n\n- Homepage: \n- Repository: \n- Paper: \n- Leaderboard: \n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages\n\nEnglish", "## Dataset Structure", "### Data Instances", "### Data Fields\nId\nMSSubClass\nMSZoning\nLotFrontage\nLotArea\nStreet\nAlley\nLotShape\nLandContour\nUtilities\nLotConfig\nLandSlope\nNeighborhood\nCondition1\nCondition2\nBldgType\nHouseStyle\nOverallQual\nOverallCond\nYearBuilt\nYearRemodAdd\nRoofStyle\nRoofMatl\nExterior1st\nExterior2nd\nMasVnrType\nMasVnrArea\nExterQual\nExterCond\nFoundation\nBsmtQual\nBsmtCond\nBsmtExposure\nBsmtFinType1\nBsmtFinSF1\nBsmtFinType2\nBsmtFinSF2\nBsmtUnfSF\nTotalBsmtSF\nHeating\nHeatingQC\nCentralAir\nElectrical\n1stFlrSF\n2ndFlrSF\nLowQualFinSF\nGrLivArea\nBsmtFullBath\nBsmtHalfBath\nFullBath\nHalfBath\nBedroomAbvGr\nKitchenAbvGr\nKitchenQual\nTotRmsAbvGrd\nFunctional\nFireplaces\nFireplaceQu\nGarageType\nGarageYrBlt\nGarageFinish\nGarageCars\nGarageArea\nGarageQual\nGarageCond\nPavedDrive\nWoodDeckSF\nOpenPorchSF\nEnclosedPorch\n3SsnPorch\nScreenPorch\nPoolArea\nPoolQC\nFence\nMiscFeature\nMiscVal\t\nMoSold\nYrSold\nSaleType\t\nSaleCondition", "### Data Splits\n\ntrain\ntest", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Capstone", "## Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information", "## Dataset Description\n\n- Homepage: \n- Repository: \n- Paper: \n- Leaderboard: \n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages\n\nEnglish", "## Dataset Structure", "### Data Instances", "### Data Fields\nId\nMSSubClass\nMSZoning\nLotFrontage\nLotArea\nStreet\nAlley\nLotShape\nLandContour\nUtilities\nLotConfig\nLandSlope\nNeighborhood\nCondition1\nCondition2\nBldgType\nHouseStyle\nOverallQual\nOverallCond\nYearBuilt\nYearRemodAdd\nRoofStyle\nRoofMatl\nExterior1st\nExterior2nd\nMasVnrType\nMasVnrArea\nExterQual\nExterCond\nFoundation\nBsmtQual\nBsmtCond\nBsmtExposure\nBsmtFinType1\nBsmtFinSF1\nBsmtFinType2\nBsmtFinSF2\nBsmtUnfSF\nTotalBsmtSF\nHeating\nHeatingQC\nCentralAir\nElectrical\n1stFlrSF\n2ndFlrSF\nLowQualFinSF\nGrLivArea\nBsmtFullBath\nBsmtHalfBath\nFullBath\nHalfBath\nBedroomAbvGr\nKitchenAbvGr\nKitchenQual\nTotRmsAbvGrd\nFunctional\nFireplaces\nFireplaceQu\nGarageType\nGarageYrBlt\nGarageFinish\nGarageCars\nGarageArea\nGarageQual\nGarageCond\nPavedDrive\nWoodDeckSF\nOpenPorchSF\nEnclosedPorch\n3SsnPorch\nScreenPorch\nPoolArea\nPoolQC\nFence\nMiscFeature\nMiscVal\t\nMoSold\nYrSold\nSaleType\t\nSaleCondition", "### Data Splits\n\ntrain\ntest", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information" ]
7d35ef5c1d493102b67adf21120a1483927c738f
# Dataset Card for "yannic-kilcher-transcript" ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [Whispering-GPT](https://github.com/matallanas/whisper_gpt_pipeline) - **Repository:** [whisper_gpt_pipeline](https://github.com/matallanas/whisper_gpt_pipeline) - **Paper:** [whisper](https://cdn.openai.com/papers/whisper.pdf) and [gpt](https://s3-us-west-2.amazonaws.com/openai-assets/research-covers/language-unsupervised/language_understanding_paper.pdf) - **Point of Contact:** [Whispering-GPT organization](https://huggingface.co/Whispering-GPT) ### Dataset Summary This dataset is created by applying whisper to the videos of the Youtube channel [Yannic Kilcher](https://www.youtube.com/yannickilcher). The dataset was created a medium size whisper model. ### Languages - **Language**: English ## Dataset Structure The dataset contains all the transcripts plus the audio of the different videos of Yannic Kilcher. ### Data Fields The dataset is composed by: - **id**: Id of the youtube video. - **channel**: Name of the channel. - **channel\_id**: Id of the youtube channel. - **title**: Title given to the video. - **categories**: Category of the video. - **description**: Description added by the author. - **text**: Whole transcript of the video. - **segments**: A list with the time and transcription of the video. - **start**: When started the trancription. - **end**: When the transcription ends. - **text**: The text of the transcription. ### Data Splits - Train split. ## Dataset Creation ### Source Data The transcriptions are from the videos of [Yannic Kilcher](https://www.youtube.com/yannickilcher) ### Contributions Thanks to [Whispering-GPT](https://huggingface.co/Whispering-GPT) organization for adding this dataset.
Whispering-GPT/yannic-kilcher-transcript
[ "task_categories:automatic-speech-recognition", "whisper", "whispering", "medium", "region:us" ]
2022-12-14T05:49:51+00:00
{"task_categories": ["automatic-speech-recognition"], "dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "channel", "dtype": "string"}, {"name": "channel_id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "categories", "sequence": "string"}, {"name": "tags", "sequence": "string"}, {"name": "description", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "segments", "list": [{"name": "start", "dtype": "float64"}, {"name": "end", "dtype": "float64"}, {"name": "text", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 24560830, "num_examples": 370}], "download_size": 12784371, "dataset_size": 24560830}, "tags": ["whisper", "whispering", "medium"]}
2022-12-20T14:28:30+00:00
[]
[]
TAGS #task_categories-automatic-speech-recognition #whisper #whispering #medium #region-us
# Dataset Card for "yannic-kilcher-transcript" ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Contributions ## Dataset Description - Homepage: Whispering-GPT - Repository: whisper_gpt_pipeline - Paper: whisper and gpt - Point of Contact: Whispering-GPT organization ### Dataset Summary This dataset is created by applying whisper to the videos of the Youtube channel Yannic Kilcher. The dataset was created a medium size whisper model. ### Languages - Language: English ## Dataset Structure The dataset contains all the transcripts plus the audio of the different videos of Yannic Kilcher. ### Data Fields The dataset is composed by: - id: Id of the youtube video. - channel: Name of the channel. - channel\_id: Id of the youtube channel. - title: Title given to the video. - categories: Category of the video. - description: Description added by the author. - text: Whole transcript of the video. - segments: A list with the time and transcription of the video. - start: When started the trancription. - end: When the transcription ends. - text: The text of the transcription. ### Data Splits - Train split. ## Dataset Creation ### Source Data The transcriptions are from the videos of Yannic Kilcher ### Contributions Thanks to Whispering-GPT organization for adding this dataset.
[ "# Dataset Card for \"yannic-kilcher-transcript\"", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Contributions", "## Dataset Description\n\n- Homepage: Whispering-GPT\n- Repository: whisper_gpt_pipeline\n- Paper: whisper and gpt\n- Point of Contact: Whispering-GPT organization", "### Dataset Summary\n\nThis dataset is created by applying whisper to the videos of the Youtube channel Yannic Kilcher. The dataset was created a medium size whisper model.", "### Languages\n\n- Language: English", "## Dataset Structure\n\nThe dataset contains all the transcripts plus the audio of the different videos of Yannic Kilcher.", "### Data Fields\n\nThe dataset is composed by:\n- id: Id of the youtube video.\n- channel: Name of the channel.\n- channel\\_id: Id of the youtube channel.\n- title: Title given to the video.\n- categories: Category of the video.\n- description: Description added by the author.\n- text: Whole transcript of the video.\n- segments: A list with the time and transcription of the video.\n - start: When started the trancription.\n - end: When the transcription ends.\n - text: The text of the transcription.", "### Data Splits\n\n- Train split.", "## Dataset Creation", "### Source Data\n\nThe transcriptions are from the videos of Yannic Kilcher", "### Contributions\n\nThanks to Whispering-GPT organization for adding this dataset." ]
[ "TAGS\n#task_categories-automatic-speech-recognition #whisper #whispering #medium #region-us \n", "# Dataset Card for \"yannic-kilcher-transcript\"", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Contributions", "## Dataset Description\n\n- Homepage: Whispering-GPT\n- Repository: whisper_gpt_pipeline\n- Paper: whisper and gpt\n- Point of Contact: Whispering-GPT organization", "### Dataset Summary\n\nThis dataset is created by applying whisper to the videos of the Youtube channel Yannic Kilcher. The dataset was created a medium size whisper model.", "### Languages\n\n- Language: English", "## Dataset Structure\n\nThe dataset contains all the transcripts plus the audio of the different videos of Yannic Kilcher.", "### Data Fields\n\nThe dataset is composed by:\n- id: Id of the youtube video.\n- channel: Name of the channel.\n- channel\\_id: Id of the youtube channel.\n- title: Title given to the video.\n- categories: Category of the video.\n- description: Description added by the author.\n- text: Whole transcript of the video.\n- segments: A list with the time and transcription of the video.\n - start: When started the trancription.\n - end: When the transcription ends.\n - text: The text of the transcription.", "### Data Splits\n\n- Train split.", "## Dataset Creation", "### Source Data\n\nThe transcriptions are from the videos of Yannic Kilcher", "### Contributions\n\nThanks to Whispering-GPT organization for adding this dataset." ]
2a0cd8f71530f40f72fd3b20816644659733a5ae
# Dataset Card for "BDMS02_TH_AR_unchanged_with_quotation" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Shularp/BDMS02_TH_AR_unchanged_with_quotation
[ "region:us" ]
2022-12-14T07:12:54+00:00
{"dataset_info": {"features": [{"name": "th", "dtype": "string"}, {"name": "ar", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 19731, "num_examples": 85}], "download_size": 9903, "dataset_size": 19731}}
2022-12-14T07:12:59+00:00
[]
[]
TAGS #region-us
# Dataset Card for "BDMS02_TH_AR_unchanged_with_quotation" More Information needed
[ "# Dataset Card for \"BDMS02_TH_AR_unchanged_with_quotation\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"BDMS02_TH_AR_unchanged_with_quotation\"\n\nMore Information needed" ]
71062f39b0fc263ce07f556b7a0162b6571a4ca1
# Dataset Card for "BDMS03_TH_AR_unchanged_without_quotation" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Shularp/BDMS03_TH_AR_unchanged_without_quotation
[ "region:us" ]
2022-12-14T07:17:34+00:00
{"dataset_info": {"features": [{"name": "th", "dtype": "string"}, {"name": "ar", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 19731, "num_examples": 85}], "download_size": 9903, "dataset_size": 19731}}
2022-12-14T07:17:48+00:00
[]
[]
TAGS #region-us
# Dataset Card for "BDMS03_TH_AR_unchanged_without_quotation" More Information needed
[ "# Dataset Card for \"BDMS03_TH_AR_unchanged_without_quotation\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"BDMS03_TH_AR_unchanged_without_quotation\"\n\nMore Information needed" ]
00405940bef76d939edbb1f593ccfcedccc1d17c
# Dataset Card for "BDMS04_TH_AR_rearranged_with_quotation" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Shularp/BDMS04_TH_AR_rearranged_with_quotation
[ "region:us" ]
2022-12-14T07:18:43+00:00
{"dataset_info": {"features": [{"name": "th", "dtype": "string"}, {"name": "ar", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 19731, "num_examples": 85}], "download_size": 9903, "dataset_size": 19731}}
2022-12-14T07:18:55+00:00
[]
[]
TAGS #region-us
# Dataset Card for "BDMS04_TH_AR_rearranged_with_quotation" More Information needed
[ "# Dataset Card for \"BDMS04_TH_AR_rearranged_with_quotation\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"BDMS04_TH_AR_rearranged_with_quotation\"\n\nMore Information needed" ]
791a41003a90c57cb91a040167e260561e435e9b
# Dataset Card for "pretrained-unet-cifar10-32" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Dahoas/pretrained-unet-cifar10-32
[ "region:us" ]
2022-12-14T07:39:06+00:00
{"dataset_info": {"features": [{"name": "images", "sequence": {"sequence": {"sequence": "float32"}}}], "splits": [{"name": "train", "num_bytes": 634400000, "num_examples": 50000}], "download_size": 646502859, "dataset_size": 634400000}}
2022-12-14T07:39:41+00:00
[]
[]
TAGS #region-us
# Dataset Card for "pretrained-unet-cifar10-32" More Information needed
[ "# Dataset Card for \"pretrained-unet-cifar10-32\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"pretrained-unet-cifar10-32\"\n\nMore Information needed" ]
2fa50d957da3f515464030d58083e3938eb64b1d
# Dataset Card for "vnese-dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
truongpdd/vnese-dataset
[ "region:us" ]
2022-12-14T07:48:10+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2183248914.938227, "num_examples": 1478292}, {"name": "test", "num_bytes": 242583705.06177297, "num_examples": 164255}, {"name": "validation", "num_bytes": 242583705.06177297, "num_examples": 164255}], "download_size": 1386554788, "dataset_size": 2668416325.061773}}
2022-12-14T08:07:48+00:00
[]
[]
TAGS #region-us
# Dataset Card for "vnese-dataset" More Information needed
[ "# Dataset Card for \"vnese-dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"vnese-dataset\"\n\nMore Information needed" ]
9d7764ae4a76a84887d65f5bf7efc05a683c7be4
# laion-high-resolution-chinese ## 简介 Brief Introduction 取自Laion5B-high-resolution多语言多模态数据集中的中文部分,一共2.66M个图文对。 A subset from Laion5B-high-resolution (a multimodal dataset), around 2.66M image-text pairs (only Chinese). ## 数据集信息 Dataset Information 大约一共2.66M个中文图文对。大约占用381MB空间(仅仅是url等文本信息,不包含图片)。 - Homepage: [laion-5b](https://laion.ai/blog/laion-5b/) - Huggingface: [laion/laion-high-resolution](https://huggingface.co/datasets/laion/laion-high-resolution) ## 下载 Download ```bash mkdir release && cd release for i in {00000..00015}; do wget https://huggingface.co/datasets/wanng/laion-high-resolution-chinese/resolve/main/data/train-$i-of-00016.parquet; done cd .. ``` ## License CC-BY-4.0
wanng/laion-high-resolution-chinese
[ "task_categories:feature-extraction", "annotations_creators:crowdsourced", "language_creators:crowdsourced", "multilinguality:monolingual", "language:zh", "license:cc-by-4.0", "region:us" ]
2022-12-14T08:42:43+00:00
{"annotations_creators": ["crowdsourced"], "language_creators": ["crowdsourced"], "language": ["zh"], "license": ["cc-by-4.0"], "multilinguality": ["monolingual"], "task_categories": ["feature-extraction"], "pretty_name": "laion-high-resolution-chinese"}
2022-12-14T15:11:23+00:00
[]
[ "zh" ]
TAGS #task_categories-feature-extraction #annotations_creators-crowdsourced #language_creators-crowdsourced #multilinguality-monolingual #language-Chinese #license-cc-by-4.0 #region-us
# laion-high-resolution-chinese ## 简介 Brief Introduction 取自Laion5B-high-resolution多语言多模态数据集中的中文部分,一共2.66M个图文对。 A subset from Laion5B-high-resolution (a multimodal dataset), around 2.66M image-text pairs (only Chinese). ## 数据集信息 Dataset Information 大约一共2.66M个中文图文对。大约占用381MB空间(仅仅是url等文本信息,不包含图片)。 - Homepage: laion-5b - Huggingface: laion/laion-high-resolution ## 下载 Download ## License CC-BY-4.0
[ "# laion-high-resolution-chinese", "## 简介 Brief Introduction\n\n取自Laion5B-high-resolution多语言多模态数据集中的中文部分,一共2.66M个图文对。\n\nA subset from Laion5B-high-resolution (a multimodal dataset), around 2.66M image-text pairs (only Chinese).", "## 数据集信息 Dataset Information\n\n大约一共2.66M个中文图文对。大约占用381MB空间(仅仅是url等文本信息,不包含图片)。\n\n- Homepage: laion-5b\n- Huggingface: laion/laion-high-resolution", "## 下载 Download", "## License\n\nCC-BY-4.0" ]
[ "TAGS\n#task_categories-feature-extraction #annotations_creators-crowdsourced #language_creators-crowdsourced #multilinguality-monolingual #language-Chinese #license-cc-by-4.0 #region-us \n", "# laion-high-resolution-chinese", "## 简介 Brief Introduction\n\n取自Laion5B-high-resolution多语言多模态数据集中的中文部分,一共2.66M个图文对。\n\nA subset from Laion5B-high-resolution (a multimodal dataset), around 2.66M image-text pairs (only Chinese).", "## 数据集信息 Dataset Information\n\n大约一共2.66M个中文图文对。大约占用381MB空间(仅仅是url等文本信息,不包含图片)。\n\n- Homepage: laion-5b\n- Huggingface: laion/laion-high-resolution", "## 下载 Download", "## License\n\nCC-BY-4.0" ]
ec1a727cc40b775bffd1ae9d3ace8ce671e9d5cd
``` BUILDING VOCABULARY Processed 1754541204 tokens. Counted 5329509 unique words. Truncating vocabulary at min count 5. Using vocabulary of size 1539115. ``` --- # Build the Arabic Corpus #### Download Resources The arabic corpus {1.9B word} consists of the following resources: - ShamelaLibrary348.7z [link](https://www.quran.tv/ketab/ShamelaLibrary348.7z) {1.15B} - UN arabic corpus [mirror1](http://lotus.kuee.kyoto-u.ac.jp/~raj/rajwindroot/corpora_downloads/UN_CORPUS/UNv1.0.6way.ar.txt) [mirror2](http://corpus.leeds.ac.uk/bogdan/resources/UN-corpus/6way/UNv1.0.6way.ar.txt) {0.37B} - AraCorpus.tar.gz [link](http://aracorpus.e3rab.com/argistestsrv.nmsu.edu/AraCorpus.tar.gz) {0.14B} - Arabic Wikipedia Latest Articles Dump [link](https://dumps.wikimedia.org/arwiki/latest/arwiki-latest-pages-articles.xml.bz2) {0.11B} - Tashkeela-arabic-diacritized-text-utf8-0.3.zip [link](https://netix.dl.sourceforge.net/project/tashkeela/) {0.07B} - Arabic Tweets [link](https://github.com/bakrianoo/Datasets) {0.03B} - watan-2004.7z [link](https://netix.dl.sourceforge.net/project/arabiccorpus/watan-2004corpus/watan-2004.7z) {0.01B} #### Build Script: https://github.com/tarekeldeeb/GloVe-Arabic/tree/master/arabic_corpus --- # Download the dataset Mirror : https://archive.org/details/arabic_corpus --- license: Waqf v2 (https://github.com/ojuba-org/waqf/tree/master/2.0)
tarekeldeeb/ArabicCorpus2B
[ "license:other", "region:us" ]
2022-12-14T10:03:09+00:00
{"license": "other"}
2022-12-14T11:17:34+00:00
[]
[]
TAGS #license-other #region-us
--- # Build the Arabic Corpus #### Download Resources The arabic corpus {1.9B word} consists of the following resources: - ShamelaLibrary348.7z link {1.15B} - UN arabic corpus mirror1 mirror2 {0.37B} - URL link {0.14B} - Arabic Wikipedia Latest Articles Dump link {0.11B} - Tashkeela-arabic-diacritized-text-utf8-0.3.zip link {0.07B} - Arabic Tweets link {0.03B} - watan-2004.7z link {0.01B} #### Build Script: URL --- # Download the dataset Mirror : URL --- license: Waqf v2 (URL
[ "# Build the Arabic Corpus", "#### Download Resources\nThe arabic corpus {1.9B word} consists of the following resources:\n - ShamelaLibrary348.7z link {1.15B}\n - UN arabic corpus mirror1 mirror2 {0.37B}\n - URL link {0.14B}\n - Arabic Wikipedia Latest Articles Dump link {0.11B}\n - Tashkeela-arabic-diacritized-text-utf8-0.3.zip link {0.07B}\n - Arabic Tweets link {0.03B}\n - watan-2004.7z link {0.01B}", "#### Build Script: URL\n\n---", "# Download the dataset\nMirror : URL\n\n---\nlicense: Waqf v2 (URL" ]
[ "TAGS\n#license-other #region-us \n", "# Build the Arabic Corpus", "#### Download Resources\nThe arabic corpus {1.9B word} consists of the following resources:\n - ShamelaLibrary348.7z link {1.15B}\n - UN arabic corpus mirror1 mirror2 {0.37B}\n - URL link {0.14B}\n - Arabic Wikipedia Latest Articles Dump link {0.11B}\n - Tashkeela-arabic-diacritized-text-utf8-0.3.zip link {0.07B}\n - Arabic Tweets link {0.03B}\n - watan-2004.7z link {0.01B}", "#### Build Script: URL\n\n---", "# Download the dataset\nMirror : URL\n\n---\nlicense: Waqf v2 (URL" ]
cbe3e224f1ae99617e6188679175ff4a9751a1e3
# Dataset Card for Visual Spatial Reasoning ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** https://ltl.mmll.cam.ac.uk/ - **Repository:** https://github.com/cambridgeltl/visual-spatial-reasoning - **Paper:** https://arxiv.org/abs/2205.00363 - **Leaderboard:** https://paperswithcode.com/sota/visual-reasoning-on-vsr - **Point of Contact:** https://ltl.mmll.cam.ac.uk/ ### Dataset Summary The Visual Spatial Reasoning (VSR) corpus is a collection of caption-image pairs with true/false labels. Each caption describes the spatial relation of two individual objects in the image, and a vision-language model (VLM) needs to judge whether the caption is correctly describing the image (True) or not (False). ### Supported Tasks and Leaderboards We test three baselines, all supported in huggingface. They are VisualBERT [(Li et al. 2019)](https://arxiv.org/abs/1908.03557), LXMERT [(Tan and Bansal, 2019)](https://arxiv.org/abs/1908.07490) and ViLT [(Kim et al. 2021)](https://arxiv.org/abs/2102.03334). The leaderboard can be checked at [Papers With Code](https://paperswithcode.com/sota/visual-reasoning-on-vsr). model | random split | zero-shot :-------------|:-------------:|:-------------: *human* | *95.4* | *95.4* VisualBERT | 57.4 | 54.0 LXMERT | **72.5** | **63.2** ViLT | 71.0 | 62.4 ### Languages The language in the dataset is English as spoken by the annotators. The BCP-47 code for English is en. [`meta_data.csv`](https://github.com/cambridgeltl/visual-spatial-reasoning/tree/master/data/data_files/meta_data.jsonl) contains meta data of annotators. ## Dataset Structure ### Data Instances Each line is an individual data point. Each `jsonl` file is of the following format: ```json {"image": "000000050403.jpg", "image_link": "http://images.cocodataset.org/train2017/000000050403.jpg", "caption": "The teddy bear is in front of the person.", "label": 1, "relation": "in front of", "annotator_id": 31, "vote_true_validator_id": [2, 6], "vote_false_validator_id": []} {"image": "000000401552.jpg", "image_link": "http://images.cocodataset.org/train2017/000000401552.jpg", "caption": "The umbrella is far away from the motorcycle.", "label": 0, "relation": "far away from", "annotator_id": 2, "vote_true_validator_id": [], "vote_false_validator_id": [2, 9, 1]} ``` ### Data Fields `image` denotes name of the image in COCO and `image_link` points to the image on the COCO server (so you can also access directly). `caption` is self-explanatory. 
`label` is `1` for True and `0` for False. `relation` records the spatial relation used. `annotator_id` points to the annotator who originally wrote the caption. `vote_true_validator_id` and `vote_false_validator_id` list the annotators who voted True or False in the second-phase validation. (A minimal Python sketch for loading these files locally is included at the end of this card.)

### Data Splits

The VSR corpus, after validation, contains 10,119 data points with high agreement. On top of these, we create two splits: (1) a random split and (2) a zero-shot split. For the random split, we randomly divide all data points into train, development, and test sets. The zero-shot split ensures that the train, development and test sets have no overlap of concepts (i.e., if *dog* is in the test set, it is not used for training and development). Below are some basic statistics of the two splits.

split | train | dev | test | total
:------|:--------:|:--------:|:--------:|:--------:
random | 7,083 | 1,012 | 2,024 | 10,119
zero-shot | 5,440 | 259 | 731 | 6,430

Check out [`data/`](https://github.com/cambridgeltl/visual-spatial-reasoning/tree/master/data) for more details.

## Dataset Creation

### Curation Rationale

Understanding spatial relations is fundamental to achieving intelligence. Existing vision-language reasoning datasets are valuable, but they combine multiple types of challenges and can thus conflate different sources of error. The VSR corpus focuses specifically on spatial relations, allowing accurate diagnosis and maximum interpretability.

### Source Data

#### Initial Data Collection and Normalization

**Image pair sampling.** MS COCO 2017 contains 123,287 images and has labelled the segmentation and classes of 886,284 instances (individual objects). Leveraging the segmentation, we first randomly select two concepts, then retrieve all images containing the two concepts in COCO 2017 (train and validation sets). Images that contain multiple instances of either concept are then filtered out to avoid referencing ambiguity. For the single-instance images, we also filter out images with an instance area size < 30,000, to exclude extremely small instances. After these filtering steps, we randomly sample a pair from the remaining images. We repeat this process to obtain a large number of individual image pairs for caption generation.

#### Who are the source language producers?

[More Information Needed]

### Annotations

#### Annotation process

**Fill in the blank: template-based caption generation.** Given a pair of images, the annotator needs to come up with a valid caption that correctly describes one image but not the other. In this way, the annotator can focus on the key difference between the two images (which should be the spatial relation of the two objects of interest) and come up with a challenging relation that differentiates them. Similar paradigms are also used in the annotation of previous vision-language reasoning datasets such as NLVR2 (Suhr et al., 2017, 2019) and MaRVL (Liu et al., 2021). To keep annotators from writing modifiers and from differentiating the image pair by anything other than accurate spatial relations, we opt for a template-based classification task instead of free-form caption writing. Besides, the template-generated dataset can be easily categorised based on relations and their meta-categories.

The caption template has the format of “The `OBJ1` (is) __ the `OBJ2`.”, and the annotators are instructed to select a relation from a fixed set to fill in the slot. The copula “is” can be omitted for grammaticality.
For example, for “contains”, “consists of”, and “has as a part”, “is” should be dropped from the template when extracting the final caption.

The fixed set of spatial relations enables us to retain full control of the generation process. The full list of relations used is given in the table below. It contains 71 spatial relations and is adapted from the summarised relation table of Fagundes et al. (2021). We made minor changes to filter out clearly unusable relations, made relation names grammatical under our template, and reduced repeated relations. In our final dataset, 65 out of the 71 available relations are actually included (the other 6 were either never selected by annotators or were selected but their captions did not pass the validation phase).

| Category    | Spatial Relations |
|-------------|-------------------|
| Adjacency   | Adjacent to, alongside, at the side of, at the right side of, at the left side of, attached to, at the back of, ahead of, against, at the edge of |
| Directional | Off, past, toward, down, deep down*, up*, away from, along, around, from*, into, to*, across, across from, through*, down from |
| Orientation | Facing, facing away from, parallel to, perpendicular to |
| Projective  | On top of, beneath, beside, behind, left of, right of, under, in front of, below, above, over, in the middle of |
| Proximity   | By, close to, near, far from, far away from |
| Topological | Connected to, detached from, has as a part, part of, contains, within, at, on, in, with, surrounding, among, consists of, out of, between, inside, outside, touching |
| Unallocated | Beyond, next to, opposite to, after*, among, enclosed by |

**Second-round Human Validation.** Every annotated data point is reviewed by at least two additional human annotators (validators). In validation, given a data point (consisting of an image and a caption), the validator gives either a True or a False label. We exclude data points for which fewer than 2/3 of the validators agree with the original label.

In the guideline, we communicated to the validators that, for relations such as “left”/“right” and “in front of”/“behind”, they should tolerate different reference frames: i.e., if the caption is true from either the object’s or the viewer’s reference frame, it should be given a True label. Only when the caption is incorrect under all reference frames is a False label assigned. This makes the task harder for models, since they cannot naively rely on the relative locations of the objects in the images but also need to correctly identify object orientations to make the right judgement.

#### Who are the annotators?

Annotators are hired from [prolific.co](https://prolific.co). We require that they (1) have at least a bachelor’s degree, (2) are fluent or native speakers of English, and (3) have a >99% historical approval rate on the platform. All annotators are paid an hourly rate of 12 GBP. Prolific takes an extra 33% service charge and 20% VAT on the service charge.

For caption generation, we release the task in batches of 200 instances and the annotator is required to finish a batch in 80 minutes. An annotator cannot take more than one batch per day. In this way we obtain a diverse set of annotators and also prevent annotators from becoming fatigued. For second-round validation, we group 500 data points into one batch and an annotator is asked to label each batch in 90 minutes.
In total, 24 annotators participated in caption generation and 26 participated in validation. The annotators have diverse demographic backgrounds: they were born in 13 different countries, live in 13 different countries, and have 14 different nationalities. 57.4% of the annotators identify as female and 42.6% as male.

### Personal and Sensitive Information

[More Information Needed]

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

This project is licensed under the [Apache-2.0 License](https://github.com/cambridgeltl/visual-spatial-reasoning/blob/master/LICENSE).

### Citation Information

```bibtex
@article{Liu2022VisualSR,
  title={Visual Spatial Reasoning},
  author={Fangyu Liu and Guy Edward Toh Emerson and Nigel Collier},
  journal={ArXiv},
  year={2022},
  volume={abs/2205.00363}
}
```

### Contributions

Thanks to [@juletx](https://github.com/juletx) for adding this dataset.
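For quick local inspection of the annotation files described in the Data Fields section, here is a minimal sketch (not part of the official tooling). The file name `train.jsonl` is illustrative and stands in for whichever split file has been downloaded from the repository's `data/` directory.

```python
import json
from collections import Counter

def load_vsr_split(path):
    """Read one VSR jsonl split (one JSON object per line) into a list of dicts."""
    with open(path, encoding="utf-8") as f:
        return [json.loads(line) for line in f if line.strip()]

# Hypothetical local path to a downloaded split file.
examples = load_vsr_split("train.jsonl")

# Quick sanity checks on the fields described above.
print(Counter(ex["label"] for ex in examples))                    # True/False balance
print(Counter(ex["relation"] for ex in examples).most_common(5))  # most frequent relations
```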
albertvillanova/visual-spatial-reasoning
[ "task_categories:image-classification", "annotations_creators:crowdsourced", "language_creators:machine-generated", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:original", "language:en", "license:apache-2.0", "arxiv:2205.00363", "arxiv:1908.03557", "arxiv:1908.07490", "arxiv:2102.03334", "region:us" ]
2022-12-14T11:31:30+00:00
{"annotations_creators": ["crowdsourced"], "language_creators": ["machine-generated"], "language": ["en"], "license": ["apache-2.0"], "multilinguality": ["monolingual"], "size_categories": ["10K<n<100K"], "source_datasets": ["original"], "task_categories": ["image-classification"], "task_ids": [], "pretty_name": "Visual Spatial Reasoning", "tags": []}
2022-12-14T11:55:48+00:00
[ "2205.00363", "1908.03557", "1908.07490", "2102.03334" ]
[ "en" ]
TAGS #task_categories-image-classification #annotations_creators-crowdsourced #language_creators-machine-generated #multilinguality-monolingual #size_categories-10K<n<100K #source_datasets-original #language-English #license-apache-2.0 #arxiv-2205.00363 #arxiv-1908.03557 #arxiv-1908.07490 #arxiv-2102.03334 #region-us
Dataset Card for Visual Spatial Reasoning ========================================= Table of Contents ----------------- * Table of Contents * Dataset Description + Dataset Summary + Supported Tasks and Leaderboards + Languages * Dataset Structure + Data Instances + Data Fields + Data Splits * Dataset Creation + Curation Rationale + Source Data + Annotations + Personal and Sensitive Information * Considerations for Using the Data + Social Impact of Dataset + Discussion of Biases + Other Known Limitations * Additional Information + Dataset Curators + Licensing Information + Citation Information + Contributions Dataset Description ------------------- * Homepage: URL * Repository: URL * Paper: URL * Leaderboard: URL * Point of Contact: URL ### Dataset Summary The Visual Spatial Reasoning (VSR) corpus is a collection of caption-image pairs with true/false labels. Each caption describes the spatial relation of two individual objects in the image, and a vision-language model (VLM) needs to judge whether the caption is correctly describing the image (True) or not (False). ### Supported Tasks and Leaderboards We test three baselines, all supported in huggingface. They are VisualBERT (Li et al. 2019), LXMERT (Tan and Bansal, 2019) and ViLT (Kim et al. 2021). The leaderboard can be checked at Papers With Code. ### Languages The language in the dataset is English as spoken by the annotators. The BCP-47 code for English is en. 'meta\_data.csv' contains meta data of annotators. Dataset Structure ----------------- ### Data Instances Each line is an individual data point. Each 'jsonl' file is of the following format: ### Data Fields 'image' denotes name of the image in COCO and 'image\_link' points to the image on the COCO server (so you can also access directly). 'caption' is self-explanatory. 'label' being '0' and '1' corresponds to False and True respectively. 'relation' records the spatial relation used. 'annotator\_id' points to the annotator who originally wrote the caption. 'vote\_true\_validator\_id' and 'vote\_false\_validator\_id' are annotators who voted True or False in the second phase validation. ### Data Splits The VSR corpus, after validation, contains 10,119 data points with high agreement. On top of these, we create two splits (1) random split and (2) zero-shot split. For random split, we randomly split all data points into train, development, and test sets. Zero-shot split makes sure that train, development and test sets have no overlap of concepts (i.e., if *dog* is in test set, it is not used for training and development). Below are some basic statistics of the two splits. Check out 'data/' for more details. Dataset Creation ---------------- ### Curation Rationale Understanding spatial relations is fundamental to achieve intelligence. Existing vision-language reasoning datasets are great but they compose multiple types of challenges and can thus conflate different sources of error. The VSR corpus focuses specifically on spatial relations so we can have accurate diagnosis and maximum interpretability. ### Source Data #### Initial Data Collection and Normalization Image pair sampling. MS COCO 2017 contains 123,287 images and has labelled the segmentation and classes of 886,284 instances (individual objects). Leveraging the segmentation, we first randomly select two concepts, then retrieve all images containing the two concepts in COCO 2017 (train and validation sets). Then images that contain multiple instances of any of the concept are filtered out to avoid referencing ambiguity. 
For the single-instance images, we also filter out any of the images with instance area size < 30, 000, to prevent extremely small instances. After these filtering steps, we randomly sample a pair in the remaining images. We repeat such process to obtain a large number of individual image pairs for caption generation. #### Who are the source language producers? ### Annotations #### Annotation process Fill in the blank: template-based caption generation. Given a pair of images, the annotator needs to come up with a valid caption that makes it correctly describing one image but incorrect for the other. In this way, the annotator could focus on the key difference of the two images (which should be spatial relation of the two objects of interest) and come up with challenging relation that differentiates the two. Similar paradigms are also used in the annotation of previous vision-language reasoning datasets such as NLVR2 (Suhr et al., 2017, 2019) and MaRVL (Liu et al., 2021). To regularise annotators from writing modifiers and differentiating the image pair with things beyond accurate spatial relations, we opt for a template-based classification task instead of free-form caption writing. Besides, the template-generated dataset can be easily categorised based on relations and their meta-categories. The caption template has the format of “The 'OBJ1' (is) \_\_ the 'OBJ2'.”, and the annotators are instructed to select a relation from a fixed set to fill in the slot. The copula “is” can be omitted for grammaticality. For example, for “contains”, “consists of”, and “has as a part”, “is” should be discarded in the template when extracting the final caption. The fixed set of spatial relations enable us to obtain the full control of the generation process. The full list of used relations are listed in the table below. It contains 71 spatial relations and is adapted from the summarised relation table of Fagundes et al. (2021). We made minor changes to filter out clearly unusable relations, made relation names grammatical under our template, and reduced repeated relations. In our final dataset, 65 out of the 71 available relations are actually included (the other 6 are either not selected by annotators or are selected but the captions did not pass the validation phase). Second-round Human Validation. Every annotated data point is reviewed by at least two additional human annotators (validators). In validation, given a data point (consists of an image and a caption), the validator gives either a True or False label. We exclude data points that have < 2/3 validators agreeing with the original label. In the guideline, we communicated to the validators that, for relations such as “left”/“right”, “in front of”/“behind”, they should tolerate different reference frame: i.e., if the caption is true from either the object’s or the viewer’s reference, it should be given a True label. Only when the caption is incorrect under all reference frames, a False label is assigned. This adds difficulty to the models since they could not naively rely on relative locations of the objects in the images but also need to correctly identify orientations of objects to make the best judgement. #### Who are the annotators? Annotators are hired from URL. We require them (1) have at least a bachelor’s degree, (2) are fluent in English or native speaker, and (3) have a >99% historical approval rate on the platform. All annotators are paid with an hourly salary of 12 GBP. 
Prolific takes an extra 33% of service charge and 20% VAT on the service charge. For caption generation, we release the task with batches of 200 instances and the annotator is required to finish a batch in 80 minutes. An annotator cannot take more than one batch per day. In this way we have a diverse set of annotators and can also prevent annotators from being fatigued. For second round validation, we group 500 data points in one batch and an annotator is asked to label each batch in 90 minutes. In total, 24 annotators participated in caption generation and 26 participated in validation. The annotators have diverse demographic background: they were born in 13 different countries; live in 13 different couturiers; and have 14 different nationalities. 57.4% of the annotators identify themselves as females and 42.6% as males. ### Personal and Sensitive Information Considerations for Using the Data --------------------------------- ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations Additional Information ---------------------- ### Dataset Curators ### Licensing Information This project is licensed under the Apache-2.0 License. ### Contributions Thanks to @juletx for adding this dataset.
[ "### Dataset Summary\n\n\nThe Visual Spatial Reasoning (VSR) corpus is a collection of caption-image pairs with true/false labels. Each caption describes the spatial relation of two individual objects in the image, and a vision-language model (VLM) needs to judge whether the caption is correctly describing the image (True) or not (False).", "### Supported Tasks and Leaderboards\n\n\nWe test three baselines, all supported in huggingface. They are VisualBERT (Li et al. 2019), LXMERT (Tan and Bansal, 2019) and ViLT (Kim et al. 2021). The leaderboard can be checked at Papers With Code.", "### Languages\n\n\nThe language in the dataset is English as spoken by the annotators. The BCP-47 code for English is en. 'meta\\_data.csv' contains meta data of annotators.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nEach line is an individual data point. Each 'jsonl' file is of the following format:", "### Data Fields\n\n\n'image' denotes name of the image in COCO and 'image\\_link' points to the image on the COCO server (so you can also access directly). 'caption' is self-explanatory. 'label' being '0' and '1' corresponds to False and True respectively. 'relation' records the spatial relation used. 'annotator\\_id' points to the annotator who originally wrote the caption. 'vote\\_true\\_validator\\_id' and 'vote\\_false\\_validator\\_id' are annotators who voted True or False in the second phase validation.", "### Data Splits\n\n\nThe VSR corpus, after validation, contains 10,119 data points with high agreement. On top of these, we create two splits (1) random split and (2) zero-shot split. For random split, we randomly split all data points into train, development, and test sets. Zero-shot split makes sure that train, development and test sets have no overlap of concepts (i.e., if *dog* is in test set, it is not used for training and development). Below are some basic statistics of the two splits.\n\n\n\nCheck out 'data/' for more details.\n\n\nDataset Creation\n----------------", "### Curation Rationale\n\n\nUnderstanding spatial relations is fundamental to achieve intelligence. Existing vision-language reasoning datasets are great but they compose multiple types of challenges and can thus conflate different sources of error.\nThe VSR corpus focuses specifically on spatial relations so we can have accurate diagnosis and maximum interpretability.", "### Source Data", "#### Initial Data Collection and Normalization\n\n\nImage pair sampling. MS COCO 2017 contains\n123,287 images and has labelled the segmentation and classes of 886,284 instances (individual\nobjects). Leveraging the segmentation, we first\nrandomly select two concepts, then retrieve all images containing the two concepts in COCO 2017 (train and\nvalidation sets). Then images that contain multiple instances of any of the concept are filtered\nout to avoid referencing ambiguity. For the single-instance images, we also filter out any of the images with instance area size < 30, 000, to prevent extremely small instances. After these filtering steps,\nwe randomly sample a pair in the remaining images.\nWe repeat such process to obtain a large number of\nindividual image pairs for caption generation.", "#### Who are the source language producers?", "### Annotations", "#### Annotation process\n\n\nFill in the blank: template-based caption generation. Given a pair of images, the annotator needs to come up with a valid caption that makes it correctly describing one image but incorrect for the other. 
In this way, the annotator could focus on the key difference of the two images (which should be spatial relation of the two objects of interest) and come up with challenging relation that differentiates the two. Similar paradigms are also used in the annotation of previous vision-language reasoning datasets such as NLVR2 (Suhr et al., 2017,\n2019) and MaRVL (Liu et al., 2021). To regularise annotators from writing modifiers and differentiating the image pair with things beyond accurate spatial relations, we opt for a template-based classification task instead of free-form caption writing. Besides, the template-generated dataset can be easily categorised based on relations and their meta-categories.\n\n\nThe caption template has the format of “The\n'OBJ1' (is) \\_\\_ the 'OBJ2'.”, and the annotators\nare instructed to select a relation from a fixed set\nto fill in the slot. The copula “is” can be omitted\nfor grammaticality. For example, for “contains”,\n“consists of”, and “has as a part”, “is” should be\ndiscarded in the template when extracting the final\ncaption.\n\n\nThe fixed set of spatial relations enable us to obtain the full control of the generation process. The\nfull list of used relations are listed in the table below. It\ncontains 71 spatial relations and is adapted from\nthe summarised relation table of Fagundes et al.\n(2021). We made minor changes to filter out clearly\nunusable relations, made relation names grammatical under our template, and reduced repeated relations.\nIn our final dataset, 65 out of the 71 available relations are actually included (the other 6 are\neither not selected by annotators or are selected but\nthe captions did not pass the validation phase).\n\n\n\nSecond-round Human Validation. Every annotated data point is reviewed by at least\ntwo additional human annotators (validators). In\nvalidation, given a data point (consists of an image\nand a caption), the validator gives either a True or\nFalse label. We exclude data points that have <\n2/3 validators agreeing with the original label.\n\n\nIn the guideline, we communicated to the validators that, for relations such as “left”/“right”, “in\nfront of”/“behind”, they should tolerate different\nreference frame: i.e., if the caption is true from either the object’s or the viewer’s reference, it should\nbe given a True label. Only\nwhen the caption is incorrect under all reference\nframes, a False label is assigned. This adds\ndifficulty to the models since they could not naively\nrely on relative locations of the objects in the images but also need to correctly identify orientations of objects to make the best judgement.", "#### Who are the annotators?\n\n\nAnnotators are hired from URL. We\nrequire them (1) have at least a bachelor’s degree,\n(2) are fluent in English or native speaker, and (3)\nhave a >99% historical approval rate on the platform. All annotators are paid with an hourly salary\nof 12 GBP. Prolific takes an extra 33% of service\ncharge and 20% VAT on the service charge.\n\n\nFor caption generation, we release the task with\nbatches of 200 instances and the annotator is required to finish a batch in 80 minutes. An annotator\ncannot take more than one batch per day. In this\nway we have a diverse set of annotators and can\nalso prevent annotators from being fatigued. 
For\nsecond round validation, we group 500 data points\nin one batch and an annotator is asked to label each\nbatch in 90 minutes.\n\n\nIn total, 24 annotators participated in caption\ngeneration and 26 participated in validation. The\nannotators have diverse demographic background:\nthey were born in 13 different countries; live in 13\ndifferent couturiers; and have 14 different nationalities. 57.4% of the annotators identify themselves\nas females and 42.6% as males.", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information\n\n\nThis project is licensed under the Apache-2.0 License.", "### Contributions\n\n\nThanks to @juletx for adding this dataset." ]
[ "TAGS\n#task_categories-image-classification #annotations_creators-crowdsourced #language_creators-machine-generated #multilinguality-monolingual #size_categories-10K<n<100K #source_datasets-original #language-English #license-apache-2.0 #arxiv-2205.00363 #arxiv-1908.03557 #arxiv-1908.07490 #arxiv-2102.03334 #region-us \n", "### Dataset Summary\n\n\nThe Visual Spatial Reasoning (VSR) corpus is a collection of caption-image pairs with true/false labels. Each caption describes the spatial relation of two individual objects in the image, and a vision-language model (VLM) needs to judge whether the caption is correctly describing the image (True) or not (False).", "### Supported Tasks and Leaderboards\n\n\nWe test three baselines, all supported in huggingface. They are VisualBERT (Li et al. 2019), LXMERT (Tan and Bansal, 2019) and ViLT (Kim et al. 2021). The leaderboard can be checked at Papers With Code.", "### Languages\n\n\nThe language in the dataset is English as spoken by the annotators. The BCP-47 code for English is en. 'meta\\_data.csv' contains meta data of annotators.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nEach line is an individual data point. Each 'jsonl' file is of the following format:", "### Data Fields\n\n\n'image' denotes name of the image in COCO and 'image\\_link' points to the image on the COCO server (so you can also access directly). 'caption' is self-explanatory. 'label' being '0' and '1' corresponds to False and True respectively. 'relation' records the spatial relation used. 'annotator\\_id' points to the annotator who originally wrote the caption. 'vote\\_true\\_validator\\_id' and 'vote\\_false\\_validator\\_id' are annotators who voted True or False in the second phase validation.", "### Data Splits\n\n\nThe VSR corpus, after validation, contains 10,119 data points with high agreement. On top of these, we create two splits (1) random split and (2) zero-shot split. For random split, we randomly split all data points into train, development, and test sets. Zero-shot split makes sure that train, development and test sets have no overlap of concepts (i.e., if *dog* is in test set, it is not used for training and development). Below are some basic statistics of the two splits.\n\n\n\nCheck out 'data/' for more details.\n\n\nDataset Creation\n----------------", "### Curation Rationale\n\n\nUnderstanding spatial relations is fundamental to achieve intelligence. Existing vision-language reasoning datasets are great but they compose multiple types of challenges and can thus conflate different sources of error.\nThe VSR corpus focuses specifically on spatial relations so we can have accurate diagnosis and maximum interpretability.", "### Source Data", "#### Initial Data Collection and Normalization\n\n\nImage pair sampling. MS COCO 2017 contains\n123,287 images and has labelled the segmentation and classes of 886,284 instances (individual\nobjects). Leveraging the segmentation, we first\nrandomly select two concepts, then retrieve all images containing the two concepts in COCO 2017 (train and\nvalidation sets). Then images that contain multiple instances of any of the concept are filtered\nout to avoid referencing ambiguity. For the single-instance images, we also filter out any of the images with instance area size < 30, 000, to prevent extremely small instances. 
After these filtering steps,\nwe randomly sample a pair in the remaining images.\nWe repeat such process to obtain a large number of\nindividual image pairs for caption generation.", "#### Who are the source language producers?", "### Annotations", "#### Annotation process\n\n\nFill in the blank: template-based caption generation. Given a pair of images, the annotator needs to come up with a valid caption that makes it correctly describing one image but incorrect for the other. In this way, the annotator could focus on the key difference of the two images (which should be spatial relation of the two objects of interest) and come up with challenging relation that differentiates the two. Similar paradigms are also used in the annotation of previous vision-language reasoning datasets such as NLVR2 (Suhr et al., 2017,\n2019) and MaRVL (Liu et al., 2021). To regularise annotators from writing modifiers and differentiating the image pair with things beyond accurate spatial relations, we opt for a template-based classification task instead of free-form caption writing. Besides, the template-generated dataset can be easily categorised based on relations and their meta-categories.\n\n\nThe caption template has the format of “The\n'OBJ1' (is) \\_\\_ the 'OBJ2'.”, and the annotators\nare instructed to select a relation from a fixed set\nto fill in the slot. The copula “is” can be omitted\nfor grammaticality. For example, for “contains”,\n“consists of”, and “has as a part”, “is” should be\ndiscarded in the template when extracting the final\ncaption.\n\n\nThe fixed set of spatial relations enable us to obtain the full control of the generation process. The\nfull list of used relations are listed in the table below. It\ncontains 71 spatial relations and is adapted from\nthe summarised relation table of Fagundes et al.\n(2021). We made minor changes to filter out clearly\nunusable relations, made relation names grammatical under our template, and reduced repeated relations.\nIn our final dataset, 65 out of the 71 available relations are actually included (the other 6 are\neither not selected by annotators or are selected but\nthe captions did not pass the validation phase).\n\n\n\nSecond-round Human Validation. Every annotated data point is reviewed by at least\ntwo additional human annotators (validators). In\nvalidation, given a data point (consists of an image\nand a caption), the validator gives either a True or\nFalse label. We exclude data points that have <\n2/3 validators agreeing with the original label.\n\n\nIn the guideline, we communicated to the validators that, for relations such as “left”/“right”, “in\nfront of”/“behind”, they should tolerate different\nreference frame: i.e., if the caption is true from either the object’s or the viewer’s reference, it should\nbe given a True label. Only\nwhen the caption is incorrect under all reference\nframes, a False label is assigned. This adds\ndifficulty to the models since they could not naively\nrely on relative locations of the objects in the images but also need to correctly identify orientations of objects to make the best judgement.", "#### Who are the annotators?\n\n\nAnnotators are hired from URL. We\nrequire them (1) have at least a bachelor’s degree,\n(2) are fluent in English or native speaker, and (3)\nhave a >99% historical approval rate on the platform. All annotators are paid with an hourly salary\nof 12 GBP. 
Prolific takes an extra 33% of service\ncharge and 20% VAT on the service charge.\n\n\nFor caption generation, we release the task with\nbatches of 200 instances and the annotator is required to finish a batch in 80 minutes. An annotator\ncannot take more than one batch per day. In this\nway we have a diverse set of annotators and can\nalso prevent annotators from being fatigued. For\nsecond round validation, we group 500 data points\nin one batch and an annotator is asked to label each\nbatch in 90 minutes.\n\n\nIn total, 24 annotators participated in caption\ngeneration and 26 participated in validation. The\nannotators have diverse demographic background:\nthey were born in 13 different countries; live in 13\ndifferent couturiers; and have 14 different nationalities. 57.4% of the annotators identify themselves\nas females and 42.6% as males.", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information\n\n\nThis project is licensed under the Apache-2.0 License.", "### Contributions\n\n\nThanks to @juletx for adding this dataset." ]
0dbeda6f6c892deaf39027f8088c9b26294db98d
# FLEURS

## Dataset Description

- **Fine-Tuning script:** [pytorch/speech-recognition](https://github.com/huggingface/transformers/tree/main/examples/pytorch/speech-recognition)
- **Paper:** [FLEURS: Few-shot Learning Evaluation of Universal Representations of Speech](https://arxiv.org/abs/2205.12446)
- **Total amount of disk used:** ca. 350 GB

FLEURS is the speech version of the [FLoRes machine translation benchmark](https://arxiv.org/abs/2106.03193). We use 2,009 n-way parallel sentences from the publicly available FLoRes dev and devtest sets, in 102 languages.

Training sets have around 10 hours of supervision. Speakers of the train sets are different from the speakers of the dev/test sets. Multilingual fine-tuning is used, and the “unit error rate” (characters, signs) is averaged over all languages. Languages and results are also grouped into seven geographical areas:

- **Western Europe**: *Asturian, Bosnian, Catalan, Croatian, Danish, Dutch, English, Finnish, French, Galician, German, Greek, Hungarian, Icelandic, Irish, Italian, Kabuverdianu, Luxembourgish, Maltese, Norwegian, Occitan, Portuguese, Spanish, Swedish, Welsh*
- **Eastern Europe**: *Armenian, Belarusian, Bulgarian, Czech, Estonian, Georgian, Latvian, Lithuanian, Macedonian, Polish, Romanian, Russian, Serbian, Slovak, Slovenian, Ukrainian*
- **Central-Asia/Middle-East/North-Africa**: *Arabic, Azerbaijani, Hebrew, Kazakh, Kyrgyz, Mongolian, Pashto, Persian, Sorani-Kurdish, Tajik, Turkish, Uzbek*
- **Sub-Saharan Africa**: *Afrikaans, Amharic, Fula, Ganda, Hausa, Igbo, Kamba, Lingala, Luo, Northern-Sotho, Nyanja, Oromo, Shona, Somali, Swahili, Umbundu, Wolof, Xhosa, Yoruba, Zulu*
- **South-Asia**: *Assamese, Bengali, Gujarati, Hindi, Kannada, Malayalam, Marathi, Nepali, Oriya, Punjabi, Sindhi, Tamil, Telugu, Urdu*
- **South-East Asia**: *Burmese, Cebuano, Filipino, Indonesian, Javanese, Khmer, Lao, Malay, Maori, Thai, Vietnamese*
- **CJK languages**: *Cantonese and Mandarin Chinese, Japanese, Korean*

## Supported Tasks

### 1. Speech Recognition (ASR)

```py
from datasets import load_dataset

fleurs_asr = load_dataset("google/fleurs", "af_za")  # for Afrikaans
# to download all data for multi-lingual fine-tuning uncomment the following line
# fleurs_asr = load_dataset("google/fleurs", "all")

# see structure
print(fleurs_asr)

# load audio sample on the fly
audio_input = fleurs_asr["train"][0]["audio"]  # first decoded audio sample
transcription = fleurs_asr["train"][0]["transcription"]  # first transcription
# use `audio_input` and `transcription` to fine-tune your model for ASR

# for analyses see language groups
all_language_groups = fleurs_asr["train"].features["lang_group_id"].names
lang_group_id = fleurs_asr["train"][0]["lang_group_id"]
all_language_groups[lang_group_id]
```

### 2. Language Identification

Language identification can often amount to domain classification, but in the case of FLEURS-LangID, recordings are made in a similar setting across languages and the utterances correspond to n-way parallel sentences in the exact same domain, making this task particularly relevant for evaluating LangID. The setting is simple: FLEURS-LangID is split into train/valid/test for each language, and we create a single train/valid/test split for LangID by merging them all.
```py
from datasets import load_dataset

fleurs_langID = load_dataset("google/fleurs", "all")  # to download all data

# see structure
print(fleurs_langID)

# load audio sample on the fly
audio_input = fleurs_langID["train"][0]["audio"]  # first decoded audio sample
language_class = fleurs_langID["train"][0]["lang_id"]  # first id class
language = fleurs_langID["train"].features["lang_id"].names[language_class]

# use audio_input and language_class to fine-tune your model for audio classification
```

### 3. Retrieval

Retrieval provides n-way parallel speech and text data. Similar to how XTREME for text leverages Tatoeba to evaluate bitext mining, a.k.a. sentence translation retrieval, we use Retrieval to evaluate the quality of fixed-size representations of speech utterances. Our goal is to incentivize the creation of fixed-size speech encoders for speech retrieval. The system has to retrieve the English "key" utterance corresponding to the speech translation of "queries" in 15 languages. Results have to be reported on the test sets of Retrieval, whose utterances are used as queries (and keys for English). We augment the English keys with a large number of utterances to make the task more difficult.

```py
from datasets import load_dataset

fleurs_retrieval = load_dataset("google/fleurs", "af_za")  # for Afrikaans
# to download all data for multi-lingual fine-tuning uncomment the following line
# fleurs_retrieval = load_dataset("google/fleurs", "all")

# see structure
print(fleurs_retrieval)

# load audio sample on the fly
audio_input = fleurs_retrieval["train"][0]["audio"]  # decoded audio sample
text_sample_pos = fleurs_retrieval["train"][0]["transcription"]  # positive text sample
text_sample_neg = fleurs_retrieval["train"][1:20]["transcription"]  # negative text samples
# use `audio_input`, `text_sample_pos`, and `text_sample_neg` to fine-tune your model for retrieval
```

Users can leverage the training (and dev) sets of FLEURS-Retrieval with a ranking loss to build better cross-lingual fixed-size representations of speech.

## Dataset Structure

We show detailed information for the example configuration `af_za`. All other configurations have the same structure.
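Before the per-field details, here is a short illustrative sketch (not part of the original card) of how the `af_za` schema can be inspected and the audio column re-cast with the `datasets` library; the 16 kHz value simply matches the corpus's native sampling rate.

```py
from datasets import load_dataset, Audio

# Load the Afrikaans configuration, as in the task examples above.
fleurs = load_dataset("google/fleurs", "af_za")

# Inspect the schema and split sizes described below.
print(fleurs["train"].features)
print({split: fleurs[split].num_rows for split in fleurs})

# Re-cast the audio column if a model expects a specific sampling rate;
# decoding/resampling then happens lazily when samples are accessed.
fleurs = fleurs.cast_column("audio", Audio(sampling_rate=16_000))
sample = fleurs["train"][0]["audio"]
print(sample["sampling_rate"], len(sample["array"]))
```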
### Data Instances

**af_za**
- Size of downloaded dataset files: 1.47 GB
- Size of the generated dataset: 1 MB
- Total amount of disk used: 1.47 GB

An example of a data instance of the config `af_za` looks as follows:

```
{'id': 91, 'num_samples': 385920, 'path': '/home/patrick/.cache/huggingface/datasets/downloads/extracted/310a663d52322700b3d3473cbc5af429bd92a23f9bc683594e70bc31232db39e/home/vaxelrod/FLEURS/oss2_obfuscated/af_za/audio/train/17797742076841560615.wav', 'audio': {'path': '/home/patrick/.cache/huggingface/datasets/downloads/extracted/310a663d52322700b3d3473cbc5af429bd92a23f9bc683594e70bc31232db39e/home/vaxelrod/FLEURS/oss2_obfuscated/af_za/audio/train/17797742076841560615.wav', 'array': array([ 0.0000000e+00,  0.0000000e+00,  0.0000000e+00, ..., -1.1205673e-04, -8.4638596e-05, -1.2731552e-04], dtype=float32), 'sampling_rate': 16000}, 'raw_transcription': 'Dit is nog nie huidiglik bekend watter aantygings gemaak sal word of wat owerhede na die seun gelei het nie maar jeugmisdaad-verrigtinge het in die federale hof begin', 'transcription': 'dit is nog nie huidiglik bekend watter aantygings gemaak sal word of wat owerhede na die seun gelei het nie maar jeugmisdaad-verrigtinge het in die federale hof begin', 'gender': 0, 'lang_id': 0, 'language': 'Afrikaans', 'lang_group_id': 3}
```

### Data Fields

The data fields are the same among all splits.
- **id** (int): ID of the audio sample
- **num_samples** (int): Number of float values
- **path** (str): Path to the audio file
- **audio** (dict): Audio object including the loaded audio array, the sampling rate and the path to the audio file
- **raw_transcription** (str): The non-normalized transcription of the audio file
- **transcription** (str): Transcription of the audio file
- **gender** (int): Class id of gender
- **lang_id** (int): Class id of language
- **lang_group_id** (int): Class id of language group

### Data Splits

Every config has a single `"train"` split containing *ca.* 1,000 examples, and `"validation"` and `"test"` splits each containing *ca.* 400 examples.

## Dataset Creation

We collect between one and three recordings for each sentence (2.3 on average), and build new train-dev-test splits with 1,509, 150 and 350 sentences for train, dev and test respectively.

## Considerations for Using the Data

### Social Impact of Dataset

This dataset is meant to encourage the development of speech technology in many more of the world's languages. One of the goals is to give everyone equal access to technologies such as speech recognition and speech translation, which means better dubbing and better access to content from the internet (like podcasts, streaming or videos).

### Discussion of Biases

Most datasets have a fair distribution of gender utterances (e.g. the newly introduced FLEURS dataset). While many languages are covered from various regions of the world, the benchmark misses many languages that are all equally important. We believe technology built through FLEURS should generalize to all languages.

### Other Known Limitations

The dataset has a particular focus on read speech because common evaluation benchmarks like CoVoST-2 or LibriSpeech evaluate on this type of speech. There is sometimes a known mismatch between performance obtained in a read-speech setting and a noisier setting (in production, for instance). Given the big progress that remains to be made on many languages, we believe better performance on FLEURS should still correlate well with actual progress made for speech understanding.
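Since FLEURS results are typically reported as averaged error rates, the following is a purely illustrative sketch (not from the original card) of scoring ASR hypotheses against the `transcription` field with the `evaluate` library; the prediction string is made up, and the reference is a shortened version of the example transcription above.

```py
import evaluate

wer_metric = evaluate.load("wer")

# Illustrative strings only; in practice `predictions` would come from an ASR
# model decoded on the test split and `references` from the `transcription` field.
references = ["dit is nog nie huidiglik bekend watter aantygings gemaak sal word"]
predictions = ["dit is nog nie bekend watter aantygings gemaak sal word"]

print(wer_metric.compute(predictions=predictions, references=references))
```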
## Additional Information

All datasets are licensed under the [Creative Commons license (CC-BY)](https://creativecommons.org/licenses/).

### Citation Information

You can access the FLEURS paper at https://arxiv.org/abs/2205.12446.
Please cite the paper when referencing the FLEURS corpus as:

```
@article{fleurs2022arxiv,
  title = {FLEURS: Few-shot Learning Evaluation of Universal Representations of Speech},
  author = {Conneau, Alexis and Ma, Min and Khanuja, Simran and Zhang, Yu and Axelrod, Vera and Dalmia, Siddharth and Riesa, Jason and Rivera, Clara and Bapna, Ankur},
  journal={arXiv preprint arXiv:2205.12446},
  url = {https://arxiv.org/abs/2205.12446},
  year = {2022},
}
```

### Contributions

Thanks to [@patrickvonplaten](https://github.com/patrickvonplaten) and [@aconneau](https://github.com/aconneau) for adding this dataset.
cahya/fleurs
[ "task_categories:automatic-speech-recognition", "annotations_creators:expert-generated", "annotations_creators:crowdsourced", "annotations_creators:machine-generated", "language_creators:crowdsourced", "language_creators:expert-generated", "multilinguality:multilingual", "size_categories:10K<n<100K", "language:afr", "language:amh", "language:ara", "language:asm", "language:ast", "language:azj", "language:bel", "language:ben", "language:bos", "language:cat", "language:ceb", "language:cmn", "language:ces", "language:cym", "language:dan", "language:deu", "language:ell", "language:eng", "language:spa", "language:est", "language:fas", "language:ful", "language:fin", "language:tgl", "language:fra", "language:gle", "language:glg", "language:guj", "language:hau", "language:heb", "language:hin", "language:hrv", "language:hun", "language:hye", "language:ind", "language:ibo", "language:isl", "language:ita", "language:jpn", "language:jav", "language:kat", "language:kam", "language:kea", "language:kaz", "language:khm", "language:kan", "language:kor", "language:ckb", "language:kir", "language:ltz", "language:lug", "language:lin", "language:lao", "language:lit", "language:luo", "language:lav", "language:mri", "language:mkd", "language:mal", "language:mon", "language:mar", "language:msa", "language:mlt", "language:mya", "language:nob", "language:npi", "language:nld", "language:nso", "language:nya", "language:oci", "language:orm", "language:ory", "language:pan", "language:pol", "language:pus", "language:por", "language:ron", "language:rus", "language:bul", "language:snd", "language:slk", "language:slv", "language:sna", "language:som", "language:srp", "language:swe", "language:swh", "language:tam", "language:tel", "language:tgk", "language:tha", "language:tur", "language:ukr", "language:umb", "language:urd", "language:uzb", "language:vie", "language:wol", "language:xho", "language:yor", "language:yue", "language:zul", "license:cc-by-4.0", "speech-recognition", "arxiv:2205.12446", "arxiv:2106.03193", "region:us" ]
2022-12-14T12:00:52+00:00
{"annotations_creators": ["expert-generated", "crowdsourced", "machine-generated"], "language_creators": ["crowdsourced", "expert-generated"], "language": ["afr", "amh", "ara", "asm", "ast", "azj", "bel", "ben", "bos", "cat", "ceb", "cmn", "ces", "cym", "dan", "deu", "ell", "eng", "spa", "est", "fas", "ful", "fin", "tgl", "fra", "gle", "glg", "guj", "hau", "heb", "hin", "hrv", "hun", "hye", "ind", "ibo", "isl", "ita", "jpn", "jav", "kat", "kam", "kea", "kaz", "khm", "kan", "kor", "ckb", "kir", "ltz", "lug", "lin", "lao", "lit", "luo", "lav", "mri", "mkd", "mal", "mon", "mar", "msa", "mlt", "mya", "nob", "npi", "nld", "nso", "nya", "oci", "orm", "ory", "pan", "pol", "pus", "por", "ron", "rus", "bul", "snd", "slk", "slv", "sna", "som", "srp", "swe", "swh", "tam", "tel", "tgk", "tha", "tur", "ukr", "umb", "urd", "uzb", "vie", "wol", "xho", "yor", "yue", "zul"], "license": ["cc-by-4.0"], "multilinguality": ["multilingual"], "size_categories": ["10K<n<100K"], "task_categories": ["automatic-speech-recognition"], "task_ids": [], "pretty_name": "The Cross-lingual TRansfer Evaluation of Multilingual Encoders for Speech (XTREME-S) benchmark is a benchmark designed to evaluate speech representations across languages, tasks, domains and data regimes. It covers 102 languages from 10+ language families, 3 different domains and 4 task families: speech recognition, translation, classification and retrieval.", "tags": ["speech-recognition"]}
2022-12-18T11:58:34+00:00
[ "2205.12446", "2106.03193" ]
[ "afr", "amh", "ara", "asm", "ast", "azj", "bel", "ben", "bos", "cat", "ceb", "cmn", "ces", "cym", "dan", "deu", "ell", "eng", "spa", "est", "fas", "ful", "fin", "tgl", "fra", "gle", "glg", "guj", "hau", "heb", "hin", "hrv", "hun", "hye", "ind", "ibo", "isl", "ita", "jpn", "jav", "kat", "kam", "kea", "kaz", "khm", "kan", "kor", "ckb", "kir", "ltz", "lug", "lin", "lao", "lit", "luo", "lav", "mri", "mkd", "mal", "mon", "mar", "msa", "mlt", "mya", "nob", "npi", "nld", "nso", "nya", "oci", "orm", "ory", "pan", "pol", "pus", "por", "ron", "rus", "bul", "snd", "slk", "slv", "sna", "som", "srp", "swe", "swh", "tam", "tel", "tgk", "tha", "tur", "ukr", "umb", "urd", "uzb", "vie", "wol", "xho", "yor", "yue", "zul" ]
TAGS #task_categories-automatic-speech-recognition #annotations_creators-expert-generated #annotations_creators-crowdsourced #annotations_creators-machine-generated #language_creators-crowdsourced #language_creators-expert-generated #multilinguality-multilingual #size_categories-10K<n<100K #language-Afrikaans #language-Amharic #language-Arabic #language-Assamese #language-Asturian #language-North Azerbaijani #language-Belarusian #language-Bengali #language-Bosnian #language-Catalan #language-Cebuano #language-Mandarin Chinese #language-Czech #language-Welsh #language-Danish #language-German #language-Modern Greek (1453-) #language-English #language-Spanish #language-Estonian #language-Persian #language-Fulah #language-Finnish #language-Tagalog #language-French #language-Irish #language-Galician #language-Gujarati #language-Hausa #language-Hebrew #language-Hindi #language-Croatian #language-Hungarian #language-Armenian #language-Indonesian #language-Igbo #language-Icelandic #language-Italian #language-Japanese #language-Javanese #language-Georgian #language-Kamba (Kenya) #language-Kabuverdianu #language-Kazakh #language-Khmer #language-Kannada #language-Korean #language-Central Kurdish #language-Kirghiz #language-Luxembourgish #language-Ganda #language-Lingala #language-Lao #language-Lithuanian #language-Luo (Kenya and Tanzania) #language-Latvian #language-Maori #language-Macedonian #language-Malayalam #language-Mongolian #language-Marathi #language-Malay (macrolanguage) #language-Maltese #language-Burmese #language-Norwegian Bokmål #language-Nepali (individual language) #language-Dutch #language-Pedi #language-Nyanja #language-Occitan (post 1500) #language-Oromo #language-Odia #language-Panjabi #language-Polish #language-Pushto #language-Portuguese #language-Romanian #language-Russian #language-Bulgarian #language-Sindhi #language-Slovak #language-Slovenian #language-Shona #language-Somali #language-Serbian #language-Swedish #language-Swahili (individual language) #language-Tamil #language-Telugu #language-Tajik #language-Thai #language-Turkish #language-Ukrainian #language-Umbundu #language-Urdu #language-Uzbek #language-Vietnamese #language-Wolof #language-Xhosa #language-Yoruba #language-Yue Chinese #language-Zulu #license-cc-by-4.0 #speech-recognition #arxiv-2205.12446 #arxiv-2106.03193 #region-us
# FLEURS ## Dataset Description - Fine-Tuning script: pytorch/speech-recognition - Paper: FLEURS: Few-shot Learning Evaluation of Universal Representations of Speech - Total amount of disk used: ca. 350 GB Fleurs is the speech version of the FLoRes machine translation benchmark. We use 2009 n-way parallel sentences from the FLoRes dev and devtest publicly available sets, in 102 languages. Training sets have around 10 hours of supervision. Speakers of the train sets are different than speakers from the dev/test sets. Multilingual fine-tuning is used and ”unit error rate” (characters, signs) of all languages is averaged. Languages and results are also grouped into seven geographical areas: - Western Europe: *Asturian, Bosnian, Catalan, Croatian, Danish, Dutch, English, Finnish, French, Galician, German, Greek, Hungarian, Icelandic, Irish, Italian, Kabuverdianu, Luxembourgish, Maltese, Norwegian, Occitan, Portuguese, Spanish, Swedish, Welsh* - Eastern Europe: *Armenian, Belarusian, Bulgarian, Czech, Estonian, Georgian, Latvian, Lithuanian, Macedonian, Polish, Romanian, Russian, Serbian, Slovak, Slovenian, Ukrainian* - Central-Asia/Middle-East/North-Africa: *Arabic, Azerbaijani, Hebrew, Kazakh, Kyrgyz, Mongolian, Pashto, Persian, Sorani-Kurdish, Tajik, Turkish, Uzbek* - Sub-Saharan Africa: *Afrikaans, Amharic, Fula, Ganda, Hausa, Igbo, Kamba, Lingala, Luo, Northern-Sotho, Nyanja, Oromo, Shona, Somali, Swahili, Umbundu, Wolof, Xhosa, Yoruba, Zulu* - South-Asia: *Assamese, Bengali, Gujarati, Hindi, Kannada, Malayalam, Marathi, Nepali, Oriya, Punjabi, Sindhi, Tamil, Telugu, Urdu* - South-East Asia: *Burmese, Cebuano, Filipino, Indonesian, Javanese, Khmer, Lao, Malay, Maori, Thai, Vietnamese* - CJK languages: *Cantonese and Mandarin Chinese, Japanese, Korean* ## Supported Tasks ### 1. Speech Recognition (ASR) ### 2. Language Identification LangID can often be a domain classification, but in the case of FLEURS-LangID, recordings are done in a similar setting across languages and the utterances correspond to n-way parallel sentences, in the exact same domain, making this task particularly relevant for evaluating LangID. The setting is simple, FLEURS-LangID is splitted in train/valid/test for each language. We simply create a single train/valid/test for LangID by merging all. ### 3. Retrieval Retrieval provides n-way parallel speech and text data. Similar to how XTREME for text leverages Tatoeba to evaluate bitext mining a.k.a sentence translation retrieval, we use Retrieval to evaluate the quality of fixed-size representations of speech utterances. Our goal is to incentivize the creation of fixed-size speech encoder for speech retrieval. The system has to retrieve the English "key" utterance corresponding to the speech translation of "queries" in 15 languages. Results have to be reported on the test sets of Retrieval whose utterances are used as queries (and keys for English). We augment the English keys with a large number of utterances to make the task more difficult. Users can leverage the training (and dev) sets of FLEURS-Retrieval with a ranking loss to build better cross-lingual fixed-size representations of speech. ## Dataset Structure We show detailed information the example configurations 'af_za' of the dataset. All other configurations have the same structure. 
### Data Instances af_za - Size of downloaded dataset files: 1.47 GB - Size of the generated dataset: 1 MB - Total amount of disk used: 1.47 GB An example of a data instance of the config 'af_za' looks as follows: ### Data Fields The data fields are the same among all splits. - id (int): ID of audio sample - num_samples (int): Number of float values - path (str): Path to the audio file - audio (dict): Audio object including loaded audio array, sampling rate and path ot audio - raw_transcription (str): The non-normalized transcription of the audio file - transcription (str): Transcription of the audio file - gender (int): Class id of gender - lang_id (int): Class id of language - lang_group_id (int): Class id of language group ### Data Splits Every config only has the '"train"' split containing of *ca.* 1000 examples, and a '"validation"' and '"test"' split each containing of *ca.* 400 examples. ## Dataset Creation We collect between one and three recordings for each sentence (2.3 on average), and buildnew train-dev-test splits with 1509, 150 and 350 sentences for train, dev and test respectively. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is meant to encourage the development of speech technology in a lot more languages of the world. One of the goal is to give equal access to technologies like speech recognition or speech translation to everyone, meaning better dubbing or better access to content from the internet (like podcasts, streaming or videos). ### Discussion of Biases Most datasets have a fair distribution of gender utterances (e.g. the newly introduced FLEURS dataset). While many languages are covered from various regions of the world, the benchmark misses many languages that are all equally important. We believe technology built through FLEURS should generalize to all languages. ### Other Known Limitations The dataset has a particular focus on read-speech because common evaluation benchmarks like CoVoST-2 or LibriSpeech evaluate on this type of speech. There is sometimes a known mismatch between performance obtained in a read-speech setting and a more noisy setting (in production for instance). Given the big progress that remains to be made on many languages, we believe better performance on FLEURS should still correlate well with actual progress made for speech understanding. ## Additional Information All datasets are licensed under the Creative Commons license (CC-BY). You can access the FLEURS paper at URL Please cite the paper when referencing the FLEURS corpus as: ### Contributions Thanks to @patrickvonplaten and @aconneau for adding this dataset.
[ "# FLEURS", "## Dataset Description\n\n- Fine-Tuning script: pytorch/speech-recognition\n- Paper: FLEURS: Few-shot Learning Evaluation of\nUniversal Representations of Speech\n- Total amount of disk used: ca. 350 GB\n\nFleurs is the speech version of the FLoRes machine translation benchmark. \nWe use 2009 n-way parallel sentences from the FLoRes dev and devtest publicly available sets, in 102 languages. \n\nTraining sets have around 10 hours of supervision. Speakers of the train sets are different than speakers from the dev/test sets. Multilingual fine-tuning is\nused and ”unit error rate” (characters, signs) of all languages is averaged. Languages and results are also grouped into seven geographical areas: \n\n- Western Europe: *Asturian, Bosnian, Catalan, Croatian, Danish, Dutch, English, Finnish, French, Galician, German, Greek, Hungarian, Icelandic, Irish, Italian, Kabuverdianu, Luxembourgish, Maltese, Norwegian, Occitan, Portuguese, Spanish, Swedish, Welsh* \n- Eastern Europe: *Armenian, Belarusian, Bulgarian, Czech, Estonian, Georgian, Latvian, Lithuanian, Macedonian, Polish, Romanian, Russian, Serbian, Slovak, Slovenian, Ukrainian*\n- Central-Asia/Middle-East/North-Africa: *Arabic, Azerbaijani, Hebrew, Kazakh, Kyrgyz, Mongolian, Pashto, Persian, Sorani-Kurdish, Tajik, Turkish, Uzbek*\n- Sub-Saharan Africa: *Afrikaans, Amharic, Fula, Ganda, Hausa, Igbo, Kamba, Lingala, Luo, Northern-Sotho, Nyanja, Oromo, Shona, Somali, Swahili, Umbundu, Wolof, Xhosa, Yoruba, Zulu*\n- South-Asia: *Assamese, Bengali, Gujarati, Hindi, Kannada, Malayalam, Marathi, Nepali, Oriya, Punjabi, Sindhi, Tamil, Telugu, Urdu*\n- South-East Asia: *Burmese, Cebuano, Filipino, Indonesian, Javanese, Khmer, Lao, Malay, Maori, Thai, Vietnamese*\n- CJK languages: *Cantonese and Mandarin Chinese, Japanese, Korean*", "## Supported Tasks", "### 1. Speech Recognition (ASR)", "### 2. Language Identification\n\nLangID can often be a domain classification, but in the case of FLEURS-LangID, recordings are done in a similar setting across languages and the utterances correspond to n-way parallel sentences, in the exact same domain, making this task particularly relevant for evaluating LangID. The setting is simple, FLEURS-LangID is splitted in train/valid/test for each language. We simply create a single train/valid/test for LangID by merging all.", "### 3. Retrieval\n\nRetrieval provides n-way parallel speech and text data. Similar to how XTREME for text leverages Tatoeba to evaluate bitext mining a.k.a sentence translation retrieval, we use Retrieval to evaluate the quality of fixed-size representations of speech utterances. Our goal is to incentivize the creation of fixed-size speech encoder for speech retrieval. The system has to retrieve the English \"key\" utterance corresponding to the speech translation of \"queries\" in 15 languages. Results have to be reported on the test sets of Retrieval whose utterances are used as queries (and keys for English). 
We augment the English keys with a large number of utterances to make the task more difficult.\n\n\n\nUsers can leverage the training (and dev) sets of FLEURS-Retrieval with a ranking loss to build better cross-lingual fixed-size representations of speech.", "## Dataset Structure\n\nWe show detailed information the example configurations 'af_za' of the dataset.\nAll other configurations have the same structure.", "### Data Instances\n\naf_za\n- Size of downloaded dataset files: 1.47 GB\n- Size of the generated dataset: 1 MB\n- Total amount of disk used: 1.47 GB\n\nAn example of a data instance of the config 'af_za' looks as follows:", "### Data Fields\n\nThe data fields are the same among all splits.\n- id (int): ID of audio sample\n- num_samples (int): Number of float values\n- path (str): Path to the audio file\n- audio (dict): Audio object including loaded audio array, sampling rate and path ot audio\n- raw_transcription (str): The non-normalized transcription of the audio file\n- transcription (str): Transcription of the audio file\n- gender (int): Class id of gender\n- lang_id (int): Class id of language\n- lang_group_id (int): Class id of language group", "### Data Splits\n\nEvery config only has the '\"train\"' split containing of *ca.* 1000 examples, and a '\"validation\"' and '\"test\"' split each containing of *ca.* 400 examples.", "## Dataset Creation\n\nWe collect between one and three recordings for each sentence (2.3 on average), and buildnew train-dev-test splits with 1509, 150 and 350 sentences for\ntrain, dev and test respectively.", "## Considerations for Using the Data", "### Social Impact of Dataset\n\nThis dataset is meant to encourage the development of speech technology in a lot more languages of the world. One of the goal is to give equal access to technologies like speech recognition or speech translation to everyone, meaning better dubbing or better access to content from the internet (like podcasts, streaming or videos).", "### Discussion of Biases\n\nMost datasets have a fair distribution of gender utterances (e.g. the newly introduced FLEURS dataset). While many languages are covered from various regions of the world, the benchmark misses many languages that are all equally important. We believe technology built through FLEURS should generalize to all languages.", "### Other Known Limitations\n\nThe dataset has a particular focus on read-speech because common evaluation benchmarks like CoVoST-2 or LibriSpeech evaluate on this type of speech. There is sometimes a known mismatch between performance obtained in a read-speech setting and a more noisy setting (in production for instance). Given the big progress that remains to be made on many languages, we believe better performance on FLEURS should still correlate well with actual progress made for speech understanding.", "## Additional Information\n\nAll datasets are licensed under the Creative Commons license (CC-BY).\n\n\n\nYou can access the FLEURS paper at URL\nPlease cite the paper when referencing the FLEURS corpus as:", "### Contributions\n\nThanks to @patrickvonplaten and @aconneau for adding this dataset." ]
[ "TAGS\n#task_categories-automatic-speech-recognition #annotations_creators-expert-generated #annotations_creators-crowdsourced #annotations_creators-machine-generated #language_creators-crowdsourced #language_creators-expert-generated #multilinguality-multilingual #size_categories-10K<n<100K #language-Afrikaans #language-Amharic #language-Arabic #language-Assamese #language-Asturian #language-North Azerbaijani #language-Belarusian #language-Bengali #language-Bosnian #language-Catalan #language-Cebuano #language-Mandarin Chinese #language-Czech #language-Welsh #language-Danish #language-German #language-Modern Greek (1453-) #language-English #language-Spanish #language-Estonian #language-Persian #language-Fulah #language-Finnish #language-Tagalog #language-French #language-Irish #language-Galician #language-Gujarati #language-Hausa #language-Hebrew #language-Hindi #language-Croatian #language-Hungarian #language-Armenian #language-Indonesian #language-Igbo #language-Icelandic #language-Italian #language-Japanese #language-Javanese #language-Georgian #language-Kamba (Kenya) #language-Kabuverdianu #language-Kazakh #language-Khmer #language-Kannada #language-Korean #language-Central Kurdish #language-Kirghiz #language-Luxembourgish #language-Ganda #language-Lingala #language-Lao #language-Lithuanian #language-Luo (Kenya and Tanzania) #language-Latvian #language-Maori #language-Macedonian #language-Malayalam #language-Mongolian #language-Marathi #language-Malay (macrolanguage) #language-Maltese #language-Burmese #language-Norwegian Bokmål #language-Nepali (individual language) #language-Dutch #language-Pedi #language-Nyanja #language-Occitan (post 1500) #language-Oromo #language-Odia #language-Panjabi #language-Polish #language-Pushto #language-Portuguese #language-Romanian #language-Russian #language-Bulgarian #language-Sindhi #language-Slovak #language-Slovenian #language-Shona #language-Somali #language-Serbian #language-Swedish #language-Swahili (individual language) #language-Tamil #language-Telugu #language-Tajik #language-Thai #language-Turkish #language-Ukrainian #language-Umbundu #language-Urdu #language-Uzbek #language-Vietnamese #language-Wolof #language-Xhosa #language-Yoruba #language-Yue Chinese #language-Zulu #license-cc-by-4.0 #speech-recognition #arxiv-2205.12446 #arxiv-2106.03193 #region-us \n", "# FLEURS", "## Dataset Description\n\n- Fine-Tuning script: pytorch/speech-recognition\n- Paper: FLEURS: Few-shot Learning Evaluation of\nUniversal Representations of Speech\n- Total amount of disk used: ca. 350 GB\n\nFleurs is the speech version of the FLoRes machine translation benchmark. \nWe use 2009 n-way parallel sentences from the FLoRes dev and devtest publicly available sets, in 102 languages. \n\nTraining sets have around 10 hours of supervision. Speakers of the train sets are different than speakers from the dev/test sets. Multilingual fine-tuning is\nused and ”unit error rate” (characters, signs) of all languages is averaged. 
Languages and results are also grouped into seven geographical areas: \n\n- Western Europe: *Asturian, Bosnian, Catalan, Croatian, Danish, Dutch, English, Finnish, French, Galician, German, Greek, Hungarian, Icelandic, Irish, Italian, Kabuverdianu, Luxembourgish, Maltese, Norwegian, Occitan, Portuguese, Spanish, Swedish, Welsh* \n- Eastern Europe: *Armenian, Belarusian, Bulgarian, Czech, Estonian, Georgian, Latvian, Lithuanian, Macedonian, Polish, Romanian, Russian, Serbian, Slovak, Slovenian, Ukrainian*\n- Central-Asia/Middle-East/North-Africa: *Arabic, Azerbaijani, Hebrew, Kazakh, Kyrgyz, Mongolian, Pashto, Persian, Sorani-Kurdish, Tajik, Turkish, Uzbek*\n- Sub-Saharan Africa: *Afrikaans, Amharic, Fula, Ganda, Hausa, Igbo, Kamba, Lingala, Luo, Northern-Sotho, Nyanja, Oromo, Shona, Somali, Swahili, Umbundu, Wolof, Xhosa, Yoruba, Zulu*\n- South-Asia: *Assamese, Bengali, Gujarati, Hindi, Kannada, Malayalam, Marathi, Nepali, Oriya, Punjabi, Sindhi, Tamil, Telugu, Urdu*\n- South-East Asia: *Burmese, Cebuano, Filipino, Indonesian, Javanese, Khmer, Lao, Malay, Maori, Thai, Vietnamese*\n- CJK languages: *Cantonese and Mandarin Chinese, Japanese, Korean*", "## Supported Tasks", "### 1. Speech Recognition (ASR)", "### 2. Language Identification\n\nLangID can often be a domain classification, but in the case of FLEURS-LangID, recordings are done in a similar setting across languages and the utterances correspond to n-way parallel sentences, in the exact same domain, making this task particularly relevant for evaluating LangID. The setting is simple, FLEURS-LangID is splitted in train/valid/test for each language. We simply create a single train/valid/test for LangID by merging all.", "### 3. Retrieval\n\nRetrieval provides n-way parallel speech and text data. Similar to how XTREME for text leverages Tatoeba to evaluate bitext mining a.k.a sentence translation retrieval, we use Retrieval to evaluate the quality of fixed-size representations of speech utterances. Our goal is to incentivize the creation of fixed-size speech encoder for speech retrieval. The system has to retrieve the English \"key\" utterance corresponding to the speech translation of \"queries\" in 15 languages. Results have to be reported on the test sets of Retrieval whose utterances are used as queries (and keys for English). 
We augment the English keys with a large number of utterances to make the task more difficult.\n\n\n\nUsers can leverage the training (and dev) sets of FLEURS-Retrieval with a ranking loss to build better cross-lingual fixed-size representations of speech.", "## Dataset Structure\n\nWe show detailed information the example configurations 'af_za' of the dataset.\nAll other configurations have the same structure.", "### Data Instances\n\naf_za\n- Size of downloaded dataset files: 1.47 GB\n- Size of the generated dataset: 1 MB\n- Total amount of disk used: 1.47 GB\n\nAn example of a data instance of the config 'af_za' looks as follows:", "### Data Fields\n\nThe data fields are the same among all splits.\n- id (int): ID of audio sample\n- num_samples (int): Number of float values\n- path (str): Path to the audio file\n- audio (dict): Audio object including loaded audio array, sampling rate and path ot audio\n- raw_transcription (str): The non-normalized transcription of the audio file\n- transcription (str): Transcription of the audio file\n- gender (int): Class id of gender\n- lang_id (int): Class id of language\n- lang_group_id (int): Class id of language group", "### Data Splits\n\nEvery config only has the '\"train\"' split containing of *ca.* 1000 examples, and a '\"validation\"' and '\"test\"' split each containing of *ca.* 400 examples.", "## Dataset Creation\n\nWe collect between one and three recordings for each sentence (2.3 on average), and buildnew train-dev-test splits with 1509, 150 and 350 sentences for\ntrain, dev and test respectively.", "## Considerations for Using the Data", "### Social Impact of Dataset\n\nThis dataset is meant to encourage the development of speech technology in a lot more languages of the world. One of the goal is to give equal access to technologies like speech recognition or speech translation to everyone, meaning better dubbing or better access to content from the internet (like podcasts, streaming or videos).", "### Discussion of Biases\n\nMost datasets have a fair distribution of gender utterances (e.g. the newly introduced FLEURS dataset). While many languages are covered from various regions of the world, the benchmark misses many languages that are all equally important. We believe technology built through FLEURS should generalize to all languages.", "### Other Known Limitations\n\nThe dataset has a particular focus on read-speech because common evaluation benchmarks like CoVoST-2 or LibriSpeech evaluate on this type of speech. There is sometimes a known mismatch between performance obtained in a read-speech setting and a more noisy setting (in production for instance). Given the big progress that remains to be made on many languages, we believe better performance on FLEURS should still correlate well with actual progress made for speech understanding.", "## Additional Information\n\nAll datasets are licensed under the Creative Commons license (CC-BY).\n\n\n\nYou can access the FLEURS paper at URL\nPlease cite the paper when referencing the FLEURS corpus as:", "### Contributions\n\nThanks to @patrickvonplaten and @aconneau for adding this dataset." ]
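The "Data Fields" and "Data Splits" sections of the FLEURS card above describe the per-example schema and approximate split sizes but give no loading example. The sketch below is a minimal illustration of how those fields could be inspected; it assumes the card corresponds to the `google/fleurs` repository on the Hugging Face Hub with the `af_za` configuration named in the card, and that the `datasets` library is installed — adjust the repository id if the corpus is hosted elsewhere.

```python
from datasets import load_dataset

# Assumption: the FLEURS card above is published as "google/fleurs" on the Hub;
# change the repository id if the corpus lives elsewhere.
fleurs_af = load_dataset("google/fleurs", "af_za")

# The card lists a train split of ~1000 examples and validation/test splits of ~400 each.
for split_name, split in fleurs_af.items():
    print(split_name, len(split))

# Fields described under "Data Fields".
example = fleurs_af["train"][0]
print(example["id"], example["num_samples"], example["path"])
print(example["raw_transcription"])   # non-normalized transcription
print(example["transcription"])       # normalized transcription
print(example["gender"], example["lang_id"], example["lang_group_id"])

# "audio" is a dict holding the decoded waveform, its sampling rate and the file path.
audio = example["audio"]
print(audio["sampling_rate"], audio["array"].shape)
```

This reads only one example per split; since the af_za download is about 1.5 GB, passing `streaming=True` to `load_dataset` may be preferable when the goal is just to check the schema.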
309d09e16b72f4c7056aa8ada7e2feddd6a7c180
GitHub metadata for https://huggingface.co/datasets/bigcode/commitpack
bigcode/commitpackmeta
[ "region:us" ]
2022-12-14T12:53:43+00:00
{}
2023-07-22T15:05:21+00:00
[]
[]
TAGS #region-us
GitHub metadata for URL
[]
[ "TAGS\n#region-us \n" ]
735ddbadd8720256865e0a77cdb874080d829bc7
# Dataset Card for "generic_review_detection" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Brian-M-Collins/generic_review_detection
[ "region:us" ]
2022-12-14T13:38:48+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 2659807, "num_examples": 3464}, {"name": "test", "num_bytes": 2168768, "num_examples": 1583}], "download_size": 0, "dataset_size": 4828575}}
2022-12-14T15:35:11+00:00
[]
[]
TAGS #region-us
# Dataset Card for "generic_review_detection" More Information needed
[ "# Dataset Card for \"generic_review_detection\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"generic_review_detection\"\n\nMore Information needed" ]
b36b9b7af14b341eba5380b0f5dccb31fce3a922
# Dataset Card for "Trial2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
amosr2002/Trial2
[ "region:us" ]
2022-12-14T15:13:09+00:00
{"dataset_info": {"features": [{"name": "name", "dtype": "string"}, {"name": "uuid", "dtype": "string"}, {"name": "status", "dtype": "string"}, {"name": "image", "dtype": "image"}, {"name": "label.annotations", "list": [{"name": "id", "dtype": "int32"}, {"name": "category_id", "dtype": "int32"}]}, {"name": "label.segmentation_bitmap", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 1505608.0, "num_examples": 3}], "download_size": 1515722, "dataset_size": 1505608.0}}
2022-12-14T15:13:21+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Trial2" More Information needed
[ "# Dataset Card for \"Trial2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Trial2\"\n\nMore Information needed" ]
fee82d7584c6aa279d9e0b0d3c3198e0040f05f6
# Dataset Card for "text_recognition_en_zh" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
priyank-m/text_recognition_en_zh
[ "region:us" ]
2022-12-14T15:17:56+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 24066869565.924767, "num_examples": 1447547}, {"name": "val", "num_bytes": 49945762.89211632, "num_examples": 3000}, {"name": "test", "num_bytes": 49961758.89211632, "num_examples": 3000}], "download_size": 5513763538, "dataset_size": 24166777087.709}}
2022-12-14T17:54:49+00:00
[]
[]
TAGS #region-us
# Dataset Card for "text_recognition_en_zh" More Information needed
[ "# Dataset Card for \"text_recognition_en_zh\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"text_recognition_en_zh\"\n\nMore Information needed" ]
d15fa37451ed266aa76c3aaea2ecff0913dabf68
# Dataset Card for "Trial4" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
amosr2002/Trial4
[ "region:us" ]
2022-12-14T15:18:25+00:00
{"dataset_info": {"features": [{"name": "name", "dtype": "string"}, {"name": "uuid", "dtype": "string"}, {"name": "status", "dtype": "string"}, {"name": "image", "dtype": "image"}, {"name": "label.annotations", "list": [{"name": "id", "dtype": "int32"}, {"name": "category_id", "dtype": "int32"}]}, {"name": "label.segmentation_bitmap", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 1505608.0, "num_examples": 3}], "download_size": 1515722, "dataset_size": 1505608.0}}
2022-12-14T15:18:52+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Trial4" More Information needed
[ "# Dataset Card for \"Trial4\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Trial4\"\n\nMore Information needed" ]
424732d636dfa1ced68d30ca0b46b2216d016699
# Dataset Card for "NYC_Data" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
amosr2002/NYC_Data
[ "region:us" ]
2022-12-14T16:29:27+00:00
{"dataset_info": {"features": [{"name": "name", "dtype": "string"}, {"name": "uuid", "dtype": "string"}, {"name": "status", "dtype": "string"}, {"name": "image", "dtype": "image"}, {"name": "label.annotations", "list": [{"name": "id", "dtype": "int32"}, {"name": "category_id", "dtype": "int32"}]}, {"name": "label.segmentation_bitmap", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 5365744.0, "num_examples": 10}], "download_size": 5375305, "dataset_size": 5365744.0}}
2022-12-14T16:29:39+00:00
[]
[]
TAGS #region-us
# Dataset Card for "NYC_Data" More Information needed
[ "# Dataset Card for \"NYC_Data\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"NYC_Data\"\n\nMore Information needed" ]
c401292d7330c49414c58a6ebefbaee6fb6e9436
# Dataset Card for "NYCDATA" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
amosr2002/NYCDATA
[ "region:us" ]
2022-12-14T16:30:31+00:00
{"dataset_info": {"features": [{"name": "name", "dtype": "string"}, {"name": "uuid", "dtype": "string"}, {"name": "status", "dtype": "string"}, {"name": "image", "dtype": "image"}, {"name": "label.annotations", "list": [{"name": "id", "dtype": "int32"}, {"name": "category_id", "dtype": "int32"}]}, {"name": "label.segmentation_bitmap", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 5365744.0, "num_examples": 10}], "download_size": 0, "dataset_size": 5365744.0}}
2022-12-14T16:36:21+00:00
[]
[]
TAGS #region-us
# Dataset Card for "NYCDATA" More Information needed
[ "# Dataset Card for \"NYCDATA\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"NYCDATA\"\n\nMore Information needed" ]
a1d1197bd51c734339d8eedc1aeb6ed062a4fed3
# Dataset Card for "diachronia-ocr" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Zombely/diachronia-ocr-test-A
[ "region:us" ]
2022-12-14T17:21:21+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 62457501.0, "num_examples": 81}], "download_size": 62461147, "dataset_size": 62457501.0}}
2022-12-14T17:21:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for "diachronia-ocr" More Information needed
[ "# Dataset Card for \"diachronia-ocr\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"diachronia-ocr\"\n\nMore Information needed" ]
86176eaf110c34c7f362265da2bbaa11af01125d
# Dataset Card for Universal Dependencies Treebank ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [Universal Dependencies](https://universaldependencies.org/) - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary [More Information Needed] ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@patrickvonplaten](https://github.com/patrickvonplaten), [@jplu](https://github.com/jplu) for adding this dataset.
albertvillanova/universal_dependencies
[ "task_categories:token-classification", "task_ids:parsing", "annotations_creators:expert-generated", "language_creators:crowdsourced", "multilinguality:multilingual", "size_categories:1K<n<10K", "source_datasets:original", "language:af", "language:aii", "language:ajp", "language:akk", "language:am", "language:apu", "language:aqz", "language:ar", "language:be", "language:bg", "language:bho", "language:bm", "language:br", "language:bxr", "language:ca", "language:ckt", "language:cop", "language:cs", "language:cu", "language:cy", "language:da", "language:de", "language:el", "language:en", "language:es", "language:et", "language:eu", "language:fa", "language:fi", "language:fo", "language:fr", "language:fro", "language:ga", "language:gd", "language:gl", "language:got", "language:grc", "language:gsw", "language:gun", "language:gv", "language:he", "language:hi", "language:hr", "language:hsb", "language:hu", "language:hy", "language:id", "language:is", "language:it", "language:ja", "language:kfm", "language:kk", "language:kmr", "language:ko", "language:koi", "language:kpv", "language:krl", "language:la", "language:lt", "language:lv", "language:lzh", "language:mdf", "language:mr", "language:mt", "language:myu", "language:myv", "language:nl", "language:no", "language:nyq", "language:olo", "language:orv", "language:otk", "language:pcm", "language:pl", "language:pt", "language:ro", "language:ru", "language:sa", "language:sk", "language:sl", "language:sme", "language:sms", "language:soj", "language:sq", "language:sr", "language:sv", "language:swl", "language:ta", "language:te", "language:th", "language:tl", "language:tpn", "language:tr", "language:ug", "language:uk", "language:ur", "language:vi", "language:wbp", "language:wo", "language:yo", "language:yue", "language:zh", "license:unknown", "constituency-parsing", "dependency-parsing", "region:us" ]
2022-12-14T17:34:02+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["crowdsourced"], "language": ["af", "aii", "ajp", "akk", "am", "apu", "aqz", "ar", "be", "bg", "bho", "bm", "br", "bxr", "ca", "ckt", "cop", "cs", "cu", "cy", "da", "de", "el", "en", "es", "et", "eu", "fa", "fi", "fo", "fr", "fro", "ga", "gd", "gl", "got", "grc", "gsw", "gun", "gv", "he", "hi", "hr", "hsb", "hu", "hy", "id", "is", "it", "ja", "kfm", "kk", "kmr", "ko", "koi", "kpv", "krl", "la", "lt", "lv", "lzh", "mdf", "mr", "mt", "myu", "myv", "nl", "no", "nyq", "olo", "orv", "otk", "pcm", "pl", "pt", "ro", "ru", "sa", "sk", "sl", "sme", "sms", "soj", "sq", "sr", "sv", "swl", "ta", "te", "th", "tl", "tpn", "tr", "ug", "uk", "ur", "vi", "wbp", "wo", "yo", "yue", "zh"], "license": ["unknown"], "multilinguality": ["multilingual"], "size_categories": ["1K<n<10K"], "source_datasets": ["original"], "task_categories": ["token-classification"], "task_ids": ["parsing"], "paperswithcode_id": "universal-dependencies", "pretty_name": "Universal Dependencies Treebank", "config_names": ["af_afribooms", "aii_as", "ajp_madar", "akk_pisandub", "akk_riao", "am_att", "apu_ufpa", "aqz_tudet", "ar_nyuad", "ar_padt", "ar_pud", "be_hse", "bg_btb", "bho_bhtb", "bm_crb", "br_keb", "bxr_bdt", "ca_ancora", "ckt_hse", "cop_scriptorium", "cs_cac", "cs_cltt", "cs_fictree", "cs_pdt", "cs_pud", "cu_proiel", "cy_ccg", "da_ddt", "de_gsd", "de_hdt", "de_lit", "de_pud", "el_gdt", "en_esl", "en_ewt", "en_gum", "en_gumreddit", "en_lines", "en_partut", "en_pronouns", "en_pud", "es_ancora", "es_gsd", "es_pud", "et_edt", "et_ewt", "eu_bdt", "fa_perdt", "fa_seraji", "fi_ftb", "fi_ood", "fi_pud", "fi_tdt", "fo_farpahc", "fo_oft", "fr_fqb", "fr_ftb", "fr_gsd", "fr_partut", "fr_pud", "fr_sequoia", "fr_spoken", "fro_srcmf", "ga_idt", "gd_arcosg", "gl_ctg", "gl_treegal", "got_proiel", "grc_perseus", "grc_proiel", "gsw_uzh", "gun_dooley", "gun_thomas", "gv_cadhan", "he_htb", "hi_hdtb", "hi_pud", "hr_set", "hsb_ufal", "hu_szeged", "hy_armtdp", "id_csui", "id_gsd", "id_pud", "is_icepahc", "is_pud", "it_isdt", "it_partut", "it_postwita", "it_pud", "it_twittiro", "it_vit", "ja_bccwj", "ja_gsd", "ja_modern", "ja_pud", "kfm_aha", "kk_ktb", "kmr_mg", "ko_gsd", "ko_kaist", "ko_pud", "koi_uh", "kpv_ikdp", "kpv_lattice", "krl_kkpp", "la_ittb", "la_llct", "la_perseus", "la_proiel", "lt_alksnis", "lt_hse", "lv_lvtb", "lzh_kyoto", "mdf_jr", "mr_ufal", "mt_mudt", "myu_tudet", "myv_jr", "nl_alpino", "nl_lassysmall", "no_bokmaal", "no_nynorsk", "no_nynorsklia", "nyq_aha", "olo_kkpp", "orv_rnc", "orv_torot", "otk_tonqq", "pcm_nsc", "pl_lfg", "pl_pdb", "pl_pud", "pt_bosque", "pt_gsd", "pt_pud", "qhe_hiencs", "qtd_sagt", "ro_nonstandard", "ro_rrt", "ro_simonero", "ru_gsd", "ru_pud", "ru_syntagrus", "ru_taiga", "sa_ufal", "sa_vedic", "sk_snk", "sl_ssj", "sl_sst", "sme_giella", "sms_giellagas", "soj_aha", "sq_tsa", "sr_set", "sv_lines", "sv_pud", "sv_talbanken", "swl_sslc", "ta_mwtt", "ta_ttb", "te_mtg", "th_pud", "tl_trg", "tl_ugnayan", "tpn_tudet", "tr_boun", "tr_gb", "tr_imst", "tr_pud", "ug_udt", "uk_iu", "ur_udtb", "vi_vtb", "wbp_ufal", "wo_wtb", "yo_ytb", "yue_hk", "zh_cfl", "zh_gsd", "zh_gsdsimp", "zh_hk", "zh_pud"], "tags": ["constituency-parsing", "dependency-parsing"], "dataset_info": [{"config_name": "af_afribooms", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", 
"3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 3523113, "num_examples": 1315}, {"name": "validation", "num_bytes": 547285, "num_examples": 194}, {"name": "test", "num_bytes": 1050299, "num_examples": 425}], "download_size": 3088237, "dataset_size": 5120697}, {"config_name": "akk_pisandub", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 153470, "num_examples": 101}], "download_size": 101789, "dataset_size": 153470}, {"config_name": "akk_riao", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 3374577, "num_examples": 1804}], "download_size": 2022357, "dataset_size": 3374577}, {"config_name": "aqz_tudet", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 8286, "num_examples": 24}], "download_size": 5683, "dataset_size": 8286}, {"config_name": "sq_tsa", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": 
"ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 116034, "num_examples": 60}], "download_size": 68875, "dataset_size": 116034}, {"config_name": "am_att", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 1554859, "num_examples": 1074}], "download_size": 1019607, "dataset_size": 1554859}, {"config_name": "grc_perseus", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 22611612, "num_examples": 11476}, {"name": "validation", "num_bytes": 3152233, "num_examples": 1137}, {"name": "test", "num_bytes": 3004502, "num_examples": 1306}], "download_size": 18898313, "dataset_size": 28768347}, {"config_name": "grc_proiel", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 30938089, "num_examples": 15014}, {"name": "validation", "num_bytes": 2264551, "num_examples": 1019}, {"name": "test", "num_bytes": 2192289, "num_examples": 1047}], "download_size": 23715831, "dataset_size": 35394929}, {"config_name": "apu_ufpa", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": 
"tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 75578, "num_examples": 76}], "download_size": 69565, "dataset_size": 75578}, {"config_name": "ar_nyuad", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 79064476, "num_examples": 15789}, {"name": "validation", "num_bytes": 9859912, "num_examples": 1986}, {"name": "test", "num_bytes": 9880240, "num_examples": 1963}], "download_size": 58583673, "dataset_size": 98804628}, {"config_name": "ar_padt", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 58537298, "num_examples": 6075}, {"name": "validation", "num_bytes": 7787253, "num_examples": 909}, {"name": "test", "num_bytes": 7428063, "num_examples": 680}], "download_size": 51208169, "dataset_size": 73752614}, {"config_name": "ar_pud", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2816625, "num_examples": 1000}], "download_size": 2084082, 
"dataset_size": 2816625}, {"config_name": "hy_armtdp", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 7697891, "num_examples": 1975}, {"name": "validation", "num_bytes": 988849, "num_examples": 249}, {"name": "test", "num_bytes": 947287, "num_examples": 278}], "download_size": 6886567, "dataset_size": 9634027}, {"config_name": "aii_as", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 52540, "num_examples": 57}], "download_size": 32639, "dataset_size": 52540}, {"config_name": "bm_crb", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 1502886, "num_examples": 1026}], "download_size": 892924, "dataset_size": 1502886}, {"config_name": "eu_bdt", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 8199861, "num_examples": 5396}, {"name": "validation", 
"num_bytes": 2701073, "num_examples": 1798}, {"name": "test", "num_bytes": 2734601, "num_examples": 1799}], "download_size": 8213576, "dataset_size": 13635535}, {"config_name": "be_hse", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 34880663, "num_examples": 21555}, {"name": "validation", "num_bytes": 1745668, "num_examples": 1090}, {"name": "test", "num_bytes": 1818113, "num_examples": 889}], "download_size": 26433402, "dataset_size": 38444444}, {"config_name": "bho_bhtb", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 947740, "num_examples": 357}], "download_size": 614159, "dataset_size": 947740}, {"config_name": "br_keb", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 1026257, "num_examples": 888}], "download_size": 679680, "dataset_size": 1026257}, {"config_name": "bg_btb", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": 
"string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 18545312, "num_examples": 8907}, {"name": "validation", "num_bytes": 2393174, "num_examples": 1115}, {"name": "test", "num_bytes": 2344136, "num_examples": 1116}], "download_size": 14910603, "dataset_size": 23282622}, {"config_name": "bxr_bdt", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 17364, "num_examples": 19}, {"name": "test", "num_bytes": 1116630, "num_examples": 908}], "download_size": 726053, "dataset_size": 1133994}, {"config_name": "yue_hk", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 1242850, "num_examples": 1004}], "download_size": 710060, "dataset_size": 1242850}, {"config_name": "ca_ancora", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 46502842, "num_examples": 13123}, {"name": "validation", "num_bytes": 6282364, "num_examples": 1709}, {"name": "test", "num_bytes": 6441038, "num_examples": 1846}], "download_size": 35924146, "dataset_size": 59226244}, {"config_name": "zh_cfl", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": 
"VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 660584, "num_examples": 451}], "download_size": 384725, "dataset_size": 660584}, {"config_name": "zh_gsd", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 9268661, "num_examples": 3997}, {"name": "validation", "num_bytes": 1188371, "num_examples": 500}, {"name": "test", "num_bytes": 1130467, "num_examples": 500}], "download_size": 6828367, "dataset_size": 11587499}, {"config_name": "zh_gsdsimp", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 9268663, "num_examples": 3997}, {"name": "validation", "num_bytes": 1188383, "num_examples": 500}, {"name": "test", "num_bytes": 1130459, "num_examples": 500}], "download_size": 6828419, "dataset_size": 11587505}, {"config_name": "zh_hk", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 880193, "num_examples": 1004}], "download_size": 494447, "dataset_size": 880193}, {"config_name": "zh_pud", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": 
"SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2425817, "num_examples": 1000}], "download_size": 1606982, "dataset_size": 2425817}, {"config_name": "ckt_hse", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 808669, "num_examples": 1004}], "download_size": 771943, "dataset_size": 808669}, {"config_name": "lzh_kyoto", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 26615708, "num_examples": 38669}, {"name": "validation", "num_bytes": 3770507, "num_examples": 5296}, {"name": "test", "num_bytes": 3155207, "num_examples": 4469}], "download_size": 22658287, "dataset_size": 33541422}, {"config_name": "cop_scriptorium", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 3944468, "num_examples": 1089}, {"name": "validation", "num_bytes": 1566786, "num_examples": 381}, {"name": "test", "num_bytes": 1487709, "num_examples": 403}], "download_size": 4502996, "dataset_size": 6998963}, {"config_name": "hr_set", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": 
"lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 19104315, "num_examples": 6914}, {"name": "validation", "num_bytes": 2787184, "num_examples": 960}, {"name": "test", "num_bytes": 3035797, "num_examples": 1136}], "download_size": 15103034, "dataset_size": 24927296}, {"config_name": "cs_cac", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 81527862, "num_examples": 23478}, {"name": "validation", "num_bytes": 1898678, "num_examples": 603}, {"name": "test", "num_bytes": 1878841, "num_examples": 628}], "download_size": 55990235, "dataset_size": 85305381}, {"config_name": "cs_cltt", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 4277239, "num_examples": 860}, {"name": "validation", "num_bytes": 752253, "num_examples": 129}, {"name": "test", "num_bytes": 646103, "num_examples": 136}], "download_size": 3745656, "dataset_size": 5675595}, {"config_name": "cs_fictree", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": 
[{"name": "train", "num_bytes": 21490020, "num_examples": 10160}, {"name": "validation", "num_bytes": 2677727, "num_examples": 1309}, {"name": "test", "num_bytes": 2679930, "num_examples": 1291}], "download_size": 17464342, "dataset_size": 26847677}, {"config_name": "cs_pdt", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 201356662, "num_examples": 68495}, {"name": "validation", "num_bytes": 27366981, "num_examples": 9270}, {"name": "test", "num_bytes": 29817339, "num_examples": 10148}], "download_size": 171506068, "dataset_size": 258540982}, {"config_name": "cs_pud", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 3195818, "num_examples": 1000}], "download_size": 2231853, "dataset_size": 3195818}, {"config_name": "da_ddt", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 8689809, "num_examples": 4383}, {"name": "validation", "num_bytes": 1117939, "num_examples": 564}, {"name": "test", "num_bytes": 1082651, "num_examples": 565}], "download_size": 6425281, "dataset_size": 10890399}, {"config_name": "nl_alpino", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": 
"INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 22503950, "num_examples": 12264}, {"name": "validation", "num_bytes": 1411253, "num_examples": 718}, {"name": "test", "num_bytes": 1354908, "num_examples": 596}], "download_size": 16858557, "dataset_size": 25270111}, {"config_name": "nl_lassysmall", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 9001614, "num_examples": 5787}, {"name": "validation", "num_bytes": 1361552, "num_examples": 676}, {"name": "test", "num_bytes": 1391136, "num_examples": 875}], "download_size": 8034396, "dataset_size": 11754302}, {"config_name": "en_esl", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 5335977, "num_examples": 4124}, {"name": "validation", "num_bytes": 648562, "num_examples": 500}, {"name": "test", "num_bytes": 651829, "num_examples": 500}], "download_size": 3351548, "dataset_size": 6636368}, {"config_name": "en_ewt", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 22755753, "num_examples": 12543}, {"name": "validation", "num_bytes": 2829889, "num_examples": 2002}, {"name": "test", "num_bytes": 2820398, "num_examples": 2077}], "download_size": 16893922, "dataset_size": 28406040}, {"config_name": "en_gum", 
"features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 8999554, "num_examples": 4287}, {"name": "validation", "num_bytes": 1704949, "num_examples": 784}, {"name": "test", "num_bytes": 1743317, "num_examples": 890}], "download_size": 7702761, "dataset_size": 12447820}, {"config_name": "en_gumreddit", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 1365930, "num_examples": 587}, {"name": "validation", "num_bytes": 317546, "num_examples": 150}, {"name": "test", "num_bytes": 374707, "num_examples": 158}], "download_size": 1195979, "dataset_size": 2058183}, {"config_name": "en_lines", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 5728898, "num_examples": 3176}, {"name": "validation", "num_bytes": 1911762, "num_examples": 1032}, {"name": "test", "num_bytes": 1766797, "num_examples": 1035}], "download_size": 5522254, "dataset_size": 9407457}, {"config_name": "en_partut", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": 
"string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 4133445, "num_examples": 1781}, {"name": "validation", "num_bytes": 265039, "num_examples": 156}, {"name": "test", "num_bytes": 326834, "num_examples": 153}], "download_size": 2720286, "dataset_size": 4725318}, {"config_name": "en_pronouns", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 207364, "num_examples": 285}], "download_size": 147181, "dataset_size": 207364}, {"config_name": "en_pud", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2282027, "num_examples": 1000}], "download_size": 1340563, "dataset_size": 2282027}, {"config_name": "myv_jr", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2763297, "num_examples": 1690}], "download_size": 1945981, "dataset_size": 2763297}, {"config_name": "et_edt", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", 
"sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 42901059, "num_examples": 24633}, {"name": "validation", "num_bytes": 5551620, "num_examples": 3125}, {"name": "test", "num_bytes": 5994421, "num_examples": 3214}], "download_size": 32393618, "dataset_size": 54447100}, {"config_name": "et_ewt", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 4199896, "num_examples": 2837}, {"name": "validation", "num_bytes": 1089459, "num_examples": 743}, {"name": "test", "num_bytes": 1600116, "num_examples": 913}], "download_size": 4044147, "dataset_size": 6889471}, {"config_name": "fo_farpahc", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 2114958, "num_examples": 1020}, {"name": "validation", "num_bytes": 809707, "num_examples": 300}, {"name": "test", "num_bytes": 798245, "num_examples": 301}], "download_size": 2186706, "dataset_size": 3722910}, {"config_name": "fo_oft", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 1220792, "num_examples": 1208}], "download_size": 802681, "dataset_size": 1220792}, {"config_name": "fi_ftb", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": 
"SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 16800109, "num_examples": 14981}, {"name": "validation", "num_bytes": 2074201, "num_examples": 1875}, {"name": "test", "num_bytes": 2144908, "num_examples": 1867}], "download_size": 13132466, "dataset_size": 21019218}, {"config_name": "fi_ood", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2366923, "num_examples": 2122}], "download_size": 1480506, "dataset_size": 2366923}, {"config_name": "fi_pud", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2086421, "num_examples": 1000}], "download_size": 1411514, "dataset_size": 2086421}, {"config_name": "fi_tdt", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 22065448, "num_examples": 12217}, {"name": "validation", "num_bytes": 2483303, "num_examples": 1364}, {"name": "test", "num_bytes": 2855263, "num_examples": 1555}], "download_size": 16692242, "dataset_size": 27404014}, {"config_name": "fr_fqb", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, 
{"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2674644, "num_examples": 2289}], "download_size": 1556235, "dataset_size": 2674644}, {"config_name": "fr_ftb", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 44714315, "num_examples": 14759}, {"name": "validation", "num_bytes": 3929428, "num_examples": 1235}, {"name": "test", "num_bytes": 7583038, "num_examples": 2541}], "download_size": 30926802, "dataset_size": 56226781}, {"config_name": "fr_gsd", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 38329902, "num_examples": 14449}, {"name": "validation", "num_bytes": 3861548, "num_examples": 1476}, {"name": "test", "num_bytes": 1086926, "num_examples": 416}], "download_size": 25492044, "dataset_size": 43278376}, {"config_name": "fr_partut", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 2620477, "num_examples": 803}, {"name": "validation", "num_bytes": 205839, 
"num_examples": 107}, {"name": "test", "num_bytes": 288829, "num_examples": 110}], "download_size": 1817897, "dataset_size": 3115145}, {"config_name": "fr_pud", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2660405, "num_examples": 1000}], "download_size": 1685033, "dataset_size": 2660405}, {"config_name": "fr_sequoia", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 5370647, "num_examples": 2231}, {"name": "validation", "num_bytes": 1065411, "num_examples": 412}, {"name": "test", "num_bytes": 1067676, "num_examples": 456}], "download_size": 4415282, "dataset_size": 7503734}, {"config_name": "fr_spoken", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 1625626, "num_examples": 1167}, {"name": "validation", "num_bytes": 1091750, "num_examples": 909}, {"name": "test", "num_bytes": 1078438, "num_examples": 730}], "download_size": 2483341, "dataset_size": 3795814}, {"config_name": "gl_ctg", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, 
{"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 8157432, "num_examples": 2272}, {"name": "validation", "num_bytes": 3057483, "num_examples": 860}, {"name": "test", "num_bytes": 3053764, "num_examples": 861}], "download_size": 8230649, "dataset_size": 14268679}, {"config_name": "gl_treegal", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 1804389, "num_examples": 600}, {"name": "test", "num_bytes": 1174023, "num_examples": 400}], "download_size": 1741471, "dataset_size": 2978412}, {"config_name": "de_gsd", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 32297384, "num_examples": 13814}, {"name": "validation", "num_bytes": 1504189, "num_examples": 799}, {"name": "test", "num_bytes": 2000117, "num_examples": 977}], "download_size": 21507364, "dataset_size": 35801690}, {"config_name": "de_hdt", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 334214761, "num_examples": 153035}, {"name": "validation", "num_bytes": 39099013, "num_examples": 18434}, {"name": "test", "num_bytes": 39519143, "num_examples": 18459}], "download_size": 249243037, "dataset_size": 412832917}, {"config_name": "de_lit", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", 
"sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 3327891, "num_examples": 1922}], "download_size": 2060988, "dataset_size": 3327891}, {"config_name": "de_pud", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2684407, "num_examples": 1000}], "download_size": 1731875, "dataset_size": 2684407}, {"config_name": "got_proiel", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 5175361, "num_examples": 3387}, {"name": "validation", "num_bytes": 1498101, "num_examples": 985}, {"name": "test", "num_bytes": 1518642, "num_examples": 1029}], "download_size": 5225655, "dataset_size": 8192104}, {"config_name": "el_gdt", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 6028077, "num_examples": 1662}, {"name": "validation", "num_bytes": 1492610, "num_examples": 403}, {"name": "test", "num_bytes": 1521094, "num_examples": 456}], "download_size": 5788161, "dataset_size": 9041781}, {"config_name": "he_htb", "features": [{"name": "idx", "dtype": 
"string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 17324640, "num_examples": 5241}, {"name": "validation", "num_bytes": 1440985, "num_examples": 484}, {"name": "test", "num_bytes": 1550465, "num_examples": 491}], "download_size": 12054025, "dataset_size": 20316090}, {"config_name": "qhe_hiencs", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 1510145, "num_examples": 1448}, {"name": "validation", "num_bytes": 244129, "num_examples": 225}, {"name": "test", "num_bytes": 236291, "num_examples": 225}], "download_size": 914584, "dataset_size": 1990565}, {"config_name": "hi_hdtb", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 61893814, "num_examples": 13304}, {"name": "validation", "num_bytes": 7748544, "num_examples": 1659}, {"name": "test", "num_bytes": 7786343, "num_examples": 1684}], "download_size": 51589681, "dataset_size": 77428701}, {"config_name": "hi_pud", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": 
"string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 3384789, "num_examples": 1000}], "download_size": 2303495, "dataset_size": 3384789}, {"config_name": "hu_szeged", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 2822934, "num_examples": 910}, {"name": "validation", "num_bytes": 1584932, "num_examples": 441}, {"name": "test", "num_bytes": 1419130, "num_examples": 449}], "download_size": 3687905, "dataset_size": 5826996}, {"config_name": "is_icepahc", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 97197159, "num_examples": 34007}, {"name": "validation", "num_bytes": 18931295, "num_examples": 4865}, {"name": "test", "num_bytes": 19039838, "num_examples": 5157}], "download_size": 85106126, "dataset_size": 135168292}, {"config_name": "is_pud", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2304432, "num_examples": 1000}], "download_size": 1525635, "dataset_size": 2304432}, {"config_name": "id_csui", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": 
"AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 1611334, "num_examples": 656}, {"name": "test", "num_bytes": 888832, "num_examples": 374}], "download_size": 1448601, "dataset_size": 2500166}, {"config_name": "id_gsd", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 11728948, "num_examples": 4477}, {"name": "validation", "num_bytes": 1513894, "num_examples": 559}, {"name": "test", "num_bytes": 1417208, "num_examples": 557}], "download_size": 9487349, "dataset_size": 14660050}, {"config_name": "id_pud", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 1768596, "num_examples": 1000}], "download_size": 1149692, "dataset_size": 1768596}, {"config_name": "ga_idt", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 10327215, "num_examples": 4005}, {"name": "validation", "num_bytes": 1057313, "num_examples": 451}, {"name": "test", "num_bytes": 1109028, "num_examples": 454}], "download_size": 7417728, "dataset_size": 12493556}, {"config_name": "it_isdt", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": 
"PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 33510781, "num_examples": 13121}, {"name": "validation", "num_bytes": 1439348, "num_examples": 564}, {"name": "test", "num_bytes": 1267932, "num_examples": 482}], "download_size": 20998527, "dataset_size": 36218061}, {"config_name": "it_partut", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 5428686, "num_examples": 1781}, {"name": "validation", "num_bytes": 335085, "num_examples": 156}, {"name": "test", "num_bytes": 413752, "num_examples": 153}], "download_size": 3582155, "dataset_size": 6177523}, {"config_name": "it_postwita", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 10523322, "num_examples": 5368}, {"name": "validation", "num_bytes": 1299818, "num_examples": 671}, {"name": "test", "num_bytes": 1344079, "num_examples": 674}], "download_size": 7611319, "dataset_size": 13167219}, {"config_name": "it_pud", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2612838, "num_examples": 1000}], "download_size": 1641073, "dataset_size": 
2612838}, {"config_name": "it_twittiro", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 2536429, "num_examples": 1138}, {"name": "validation", "num_bytes": 323504, "num_examples": 144}, {"name": "test", "num_bytes": 316211, "num_examples": 142}], "download_size": 1894686, "dataset_size": 3176144}, {"config_name": "it_vit", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 24536095, "num_examples": 8277}, {"name": "validation", "num_bytes": 3144507, "num_examples": 743}, {"name": "test", "num_bytes": 2870355, "num_examples": 1067}], "download_size": 17605311, "dataset_size": 30550957}, {"config_name": "ja_bccwj", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 119164443, "num_examples": 40740}, {"name": "validation", "num_bytes": 23390188, "num_examples": 8417}, {"name": "test", "num_bytes": 21904413, "num_examples": 7871}], "download_size": 87340125, "dataset_size": 164459044}, {"config_name": "ja_gsd", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": 
"string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 36905139, "num_examples": 7027}, {"name": "validation", "num_bytes": 2662999, "num_examples": 501}, {"name": "test", "num_bytes": 2858141, "num_examples": 543}], "download_size": 30397358, "dataset_size": 42426279}, {"config_name": "ja_modern", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 3062149, "num_examples": 822}], "download_size": 2163988, "dataset_size": 3062149}, {"config_name": "ja_pud", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 6322307, "num_examples": 1000}], "download_size": 4661525, "dataset_size": 6322307}, {"config_name": "krl_kkpp", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 370378, "num_examples": 228}], "download_size": 226103, "dataset_size": 370378}, {"config_name": "kk_ktb", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": 
"feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 64737, "num_examples": 31}, {"name": "test", "num_bytes": 1263246, "num_examples": 1047}], "download_size": 849300, "dataset_size": 1327983}, {"config_name": "kfm_aha", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 8464, "num_examples": 10}], "download_size": 6290, "dataset_size": 8464}, {"config_name": "koi_uh", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 117629, "num_examples": 81}], "download_size": 91509, "dataset_size": 117629}, {"config_name": "kpv_ikdp", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 182189, "num_examples": 132}], "download_size": 121684, "dataset_size": 182189}, {"config_name": "kpv_lattice", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, 
{"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 685683, "num_examples": 435}], "download_size": 467085, "dataset_size": 685683}, {"config_name": "ko_gsd", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 5480313, "num_examples": 4400}, {"name": "validation", "num_bytes": 1156603, "num_examples": 950}, {"name": "test", "num_bytes": 1129555, "num_examples": 989}], "download_size": 4882238, "dataset_size": 7766471}, {"config_name": "ko_kaist", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 29037654, "num_examples": 23010}, {"name": "validation", "num_bytes": 2511880, "num_examples": 2066}, {"name": "test", "num_bytes": 2792215, "num_examples": 2287}], "download_size": 21855177, "dataset_size": 34341749}, {"config_name": "ko_pud", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2511856, "num_examples": 1000}], "download_size": 2024810, "dataset_size": 2511856}, {"config_name": "kmr_mg", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": 
"VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 30374, "num_examples": 20}, {"name": "test", "num_bytes": 1248564, "num_examples": 734}], "download_size": 765158, "dataset_size": 1278938}, {"config_name": "la_ittb", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 54306304, "num_examples": 22775}, {"name": "validation", "num_bytes": 4236222, "num_examples": 2101}, {"name": "test", "num_bytes": 4221459, "num_examples": 2101}], "download_size": 40247546, "dataset_size": 62763985}, {"config_name": "la_llct", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 26885433, "num_examples": 7289}, {"name": "validation", "num_bytes": 3363915, "num_examples": 850}, {"name": "test", "num_bytes": 3352500, "num_examples": 884}], "download_size": 21975884, "dataset_size": 33601848}, {"config_name": "la_perseus", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 2542043, "num_examples": 1334}, {"name": "test", "num_bytes": 1575350, "num_examples": 939}], "download_size": 2573703, "dataset_size": 4117393}, {"config_name": "la_proiel", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": 
"string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 24956038, "num_examples": 15917}, {"name": "validation", "num_bytes": 2020476, "num_examples": 1234}, {"name": "test", "num_bytes": 2029828, "num_examples": 1260}], "download_size": 18434442, "dataset_size": 29006342}, {"config_name": "lv_lvtb", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 29167529, "num_examples": 10156}, {"name": "validation", "num_bytes": 4501172, "num_examples": 1664}, {"name": "test", "num_bytes": 4565919, "num_examples": 1823}], "download_size": 25227301, "dataset_size": 38234620}, {"config_name": "lt_alksnis", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 7272501, "num_examples": 2341}, {"name": "validation", "num_bytes": 1763901, "num_examples": 617}, {"name": "test", "num_bytes": 1648521, "num_examples": 684}], "download_size": 7008248, "dataset_size": 10684923}, {"config_name": "lt_hse", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": 
"train", "num_bytes": 433214, "num_examples": 153}, {"name": "validation", "num_bytes": 433214, "num_examples": 153}, {"name": "test", "num_bytes": 433214, "num_examples": 153}], "download_size": 265619, "dataset_size": 1299642}, {"config_name": "olo_kkpp", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 18096, "num_examples": 19}, {"name": "test", "num_bytes": 175355, "num_examples": 106}], "download_size": 121837, "dataset_size": 193451}, {"config_name": "mt_mudt", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 1858001, "num_examples": 1123}, {"name": "validation", "num_bytes": 826004, "num_examples": 433}, {"name": "test", "num_bytes": 892629, "num_examples": 518}], "download_size": 2011753, "dataset_size": 3576634}, {"config_name": "gv_cadhan", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 483042, "num_examples": 291}], "download_size": 287206, "dataset_size": 483042}, {"config_name": "mr_ufal", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": 
"string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 420345, "num_examples": 373}, {"name": "validation", "num_bytes": 60791, "num_examples": 46}, {"name": "test", "num_bytes": 56582, "num_examples": 47}], "download_size": 339354, "dataset_size": 537718}, {"config_name": "gun_dooley", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 1037858, "num_examples": 1046}], "download_size": 571571, "dataset_size": 1037858}, {"config_name": "gun_thomas", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 143111, "num_examples": 98}], "download_size": 92963, "dataset_size": 143111}, {"config_name": "mdf_jr", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 234147, "num_examples": 167}], "download_size": 162330, "dataset_size": 234147}, {"config_name": "myu_tudet", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": 
"string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 26202, "num_examples": 62}], "download_size": 20315, "dataset_size": 26202}, {"config_name": "pcm_nsc", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 16079391, "num_examples": 7279}, {"name": "validation", "num_bytes": 2099571, "num_examples": 991}, {"name": "test", "num_bytes": 2063685, "num_examples": 972}], "download_size": 14907410, "dataset_size": 20242647}, {"config_name": "nyq_aha", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 8723, "num_examples": 10}], "download_size": 6387, "dataset_size": 8723}, {"config_name": "sme_giella", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 1987666, "num_examples": 2257}, {"name": "test", "num_bytes": 1142396, "num_examples": 865}], "download_size": 1862302, "dataset_size": 3130062}, {"config_name": "no_bokmaal", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, 
{"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 25647647, "num_examples": 15696}, {"name": "validation", "num_bytes": 3828310, "num_examples": 2409}, {"name": "test", "num_bytes": 3151638, "num_examples": 1939}], "download_size": 19177350, "dataset_size": 32627595}, {"config_name": "no_nynorsk", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 25630539, "num_examples": 14174}, {"name": "validation", "num_bytes": 3277649, "num_examples": 1890}, {"name": "test", "num_bytes": 2601676, "num_examples": 1511}], "download_size": 18532495, "dataset_size": 31509864}, {"config_name": "no_nynorsklia", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 3500907, "num_examples": 3412}, {"name": "validation", "num_bytes": 1003845, "num_examples": 881}, {"name": "test", "num_bytes": 999943, "num_examples": 957}], "download_size": 3349676, "dataset_size": 5504695}, {"config_name": "cu_proiel", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 6106144, "num_examples": 4124}, {"name": "validation", "num_bytes": 1639912, "num_examples": 1073}, {"name": "test", "num_bytes": 1648459, "num_examples": 1141}], "download_size": 6239839, "dataset_size": 9394515}, {"config_name": "fro_srcmf", "features": [{"name": "idx", 
"dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 11959859, "num_examples": 13909}, {"name": "validation", "num_bytes": 1526574, "num_examples": 1842}, {"name": "test", "num_bytes": 1535923, "num_examples": 1927}], "download_size": 9043098, "dataset_size": 15022356}, {"config_name": "orv_rnc", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 1527306, "num_examples": 320}, {"name": "test", "num_bytes": 2552216, "num_examples": 637}], "download_size": 2627398, "dataset_size": 4079522}, {"config_name": "orv_torot", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 18077991, "num_examples": 13336}, {"name": "validation", "num_bytes": 2408313, "num_examples": 1852}, {"name": "test", "num_bytes": 2347934, "num_examples": 1756}], "download_size": 15296362, "dataset_size": 22834238}, {"config_name": "otk_tonqq", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, 
{"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 22829, "num_examples": 18}], "download_size": 14389, "dataset_size": 22829}, {"config_name": "fa_perdt", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 48654947, "num_examples": 26196}, {"name": "validation", "num_bytes": 2687750, "num_examples": 1456}, {"name": "test", "num_bytes": 2600303, "num_examples": 1455}], "download_size": 33606395, "dataset_size": 53943000}, {"config_name": "fa_seraji", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 12627691, "num_examples": 4798}, {"name": "validation", "num_bytes": 1634327, "num_examples": 599}, {"name": "test", "num_bytes": 1675134, "num_examples": 600}], "download_size": 9890107, "dataset_size": 15937152}, {"config_name": "pl_lfg", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 16810910, "num_examples": 13774}, {"name": "validation", "num_bytes": 2093712, "num_examples": 1745}, {"name": "test", "num_bytes": 2100915, "num_examples": 1727}], "download_size": 14865541, "dataset_size": 21005537}, {"config_name": "pl_pdb", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", 
"12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 44652289, "num_examples": 17722}, {"name": "validation", "num_bytes": 5494883, "num_examples": 2215}, {"name": "test", "num_bytes": 5322608, "num_examples": 2215}], "download_size": 36340919, "dataset_size": 55469780}, {"config_name": "pl_pud", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2943603, "num_examples": 1000}], "download_size": 1943983, "dataset_size": 2943603}, {"config_name": "pt_bosque", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 22808617, "num_examples": 8328}, {"name": "validation", "num_bytes": 1201577, "num_examples": 560}, {"name": "test", "num_bytes": 1131511, "num_examples": 476}], "download_size": 15201503, "dataset_size": 25141705}, {"config_name": "pt_gsd", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 22208385, "num_examples": 9664}, {"name": "validation", "num_bytes": 2805628, "num_examples": 1210}, {"name": "test", "num_bytes": 2732063, "num_examples": 1204}], "download_size": 15300844, "dataset_size": 27746076}, {"config_name": "pt_pud", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": 
"tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2431942, "num_examples": 1000}], "download_size": 1516883, "dataset_size": 2431942}, {"config_name": "ro_nonstandard", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 74489083, "num_examples": 24121}, {"name": "validation", "num_bytes": 2663152, "num_examples": 1052}, {"name": "test", "num_bytes": 3017162, "num_examples": 1052}], "download_size": 50345748, "dataset_size": 80169397}, {"config_name": "ro_rrt", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 23695399, "num_examples": 8043}, {"name": "validation", "num_bytes": 2190973, "num_examples": 752}, {"name": "test", "num_bytes": 2092520, "num_examples": 729}], "download_size": 17187956, "dataset_size": 27978892}, {"config_name": "ro_simonero", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 15390734, "num_examples": 3747}, {"name": 
"validation", "num_bytes": 1926639, "num_examples": 443}, {"name": "test", "num_bytes": 1940787, "num_examples": 491}], "download_size": 11409378, "dataset_size": 19258160}, {"config_name": "ru_gsd", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 10504099, "num_examples": 3850}, {"name": "validation", "num_bytes": 1635884, "num_examples": 579}, {"name": "test", "num_bytes": 1597603, "num_examples": 601}], "download_size": 8830986, "dataset_size": 13737586}, {"config_name": "ru_pud", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2695958, "num_examples": 1000}], "download_size": 1869304, "dataset_size": 2695958}, {"config_name": "ru_syntagrus", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 126305584, "num_examples": 48814}, {"name": "validation", "num_bytes": 17043673, "num_examples": 6584}, {"name": "test", "num_bytes": 16880203, "num_examples": 6491}], "download_size": 102745164, "dataset_size": 160229460}, {"config_name": "ru_taiga", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": 
"string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 5802733, "num_examples": 3138}, {"name": "validation", "num_bytes": 1382140, "num_examples": 945}, {"name": "test", "num_bytes": 1314084, "num_examples": 881}], "download_size": 5491427, "dataset_size": 8498957}, {"config_name": "sa_ufal", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 431697, "num_examples": 230}], "download_size": 424675, "dataset_size": 431697}, {"config_name": "sa_vedic", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 2179608, "num_examples": 2524}, {"name": "test", "num_bytes": 1209605, "num_examples": 1473}], "download_size": 2041583, "dataset_size": 3389213}, {"config_name": "gd_arcosg", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 3952356, "num_examples": 1990}, {"name": "validation", "num_bytes": 1038211, "num_examples": 645}, {"name": "test", "num_bytes": 1034788, "num_examples": 538}], "download_size": 3474087, "dataset_size": 6025355}, {"config_name": "sr_set", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": 
"SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 9309552, "num_examples": 3328}, {"name": "validation", "num_bytes": 1503953, "num_examples": 536}, {"name": "test", "num_bytes": 1432672, "num_examples": 520}], "download_size": 7414381, "dataset_size": 12246177}, {"config_name": "sms_giellagas", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 174744, "num_examples": 104}], "download_size": 116491, "dataset_size": 174744}, {"config_name": "sk_snk", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 12017312, "num_examples": 8483}, {"name": "validation", "num_bytes": 1863926, "num_examples": 1060}, {"name": "test", "num_bytes": 1943012, "num_examples": 1061}], "download_size": 10013420, "dataset_size": 15824250}, {"config_name": "sl_ssj", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 16713639, "num_examples": 6478}, {"name": "validation", "num_bytes": 2070847, "num_examples": 734}, {"name": "test", "num_bytes": 2083062, "num_examples": 788}], "download_size": 12455962, "dataset_size": 20867548}, {"config_name": "sl_sst", 
"features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 2903675, "num_examples": 2078}, {"name": "test", "num_bytes": 1493885, "num_examples": 1110}], "download_size": 2655777, "dataset_size": 4397560}, {"config_name": "soj_aha", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 6218, "num_examples": 8}], "download_size": 4577, "dataset_size": 6218}, {"config_name": "ajp_madar", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 71956, "num_examples": 100}], "download_size": 43174, "dataset_size": 71956}, {"config_name": "es_ancora", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 50101327, "num_examples": 14305}, {"name": "validation", "num_bytes": 5883940, "num_examples": 1654}, {"name": "test", "num_bytes": 5928986, "num_examples": 1721}], 
"download_size": 37668083, "dataset_size": 61914253}, {"config_name": "es_gsd", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 39582074, "num_examples": 14187}, {"name": "validation", "num_bytes": 3834443, "num_examples": 1400}, {"name": "test", "num_bytes": 1253720, "num_examples": 426}], "download_size": 26073760, "dataset_size": 44670237}, {"config_name": "es_pud", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2595946, "num_examples": 1000}], "download_size": 1628475, "dataset_size": 2595946}, {"config_name": "swl_sslc", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 57443, "num_examples": 87}, {"name": "validation", "num_bytes": 59002, "num_examples": 82}, {"name": "test", "num_bytes": 24542, "num_examples": 34}], "download_size": 81699, "dataset_size": 140987}, {"config_name": "sv_lines", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", 
"sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 6731662, "num_examples": 3176}, {"name": "validation", "num_bytes": 2239951, "num_examples": 1032}, {"name": "test", "num_bytes": 2070626, "num_examples": 1035}], "download_size": 7245283, "dataset_size": 11042239}, {"config_name": "sv_pud", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2554725, "num_examples": 1000}], "download_size": 1722516, "dataset_size": 2554725}, {"config_name": "sv_talbanken", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 9287256, "num_examples": 4303}, {"name": "validation", "num_bytes": 1361535, "num_examples": 504}, {"name": "test", "num_bytes": 2835742, "num_examples": 1219}], "download_size": 8476012, "dataset_size": 13484533}, {"config_name": "gsw_uzh", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 111357, "num_examples": 100}], "download_size": 59675, "dataset_size": 111357}, {"config_name": "tl_trg", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": 
"string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 86696, "num_examples": 128}], "download_size": 61344, "dataset_size": 86696}, {"config_name": "tl_ugnayan", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 90863, "num_examples": 94}], "download_size": 55207, "dataset_size": 90863}, {"config_name": "ta_mwtt", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 522349, "num_examples": 534}], "download_size": 414263, "dataset_size": 522349}, {"config_name": "ta_ttb", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 1538780, "num_examples": 400}, {"name": "validation", "num_bytes": 305206, "num_examples": 80}, {"name": "test", "num_bytes": 478941, "num_examples": 120}], "download_size": 1753448, "dataset_size": 2322927}, {"config_name": "te_mtg", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": 
"string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 703512, "num_examples": 1051}, {"name": "validation", "num_bytes": 91547, "num_examples": 131}, {"name": "test", "num_bytes": 99757, "num_examples": 146}], "download_size": 643764, "dataset_size": 894816}, {"config_name": "th_pud", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2341697, "num_examples": 1000}], "download_size": 1606517, "dataset_size": 2341697}, {"config_name": "tpn_tudet", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 8089, "num_examples": 8}], "download_size": 5447, "dataset_size": 8089}, {"config_name": "qtd_sagt", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 583697, "num_examples": 285}, {"name": "validation", "num_bytes": 1564765, "num_examples": 801}, {"name": "test", "num_bytes": 1710777, "num_examples": 805}], "download_size": 2299611, "dataset_size": 3859239}, {"config_name": "tr_boun", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": 
"PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 12827173, "num_examples": 7803}, {"name": "validation", "num_bytes": 1577760, "num_examples": 979}, {"name": "test", "num_bytes": 1580727, "num_examples": 979}], "download_size": 9742035, "dataset_size": 15985660}, {"config_name": "tr_gb", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2146729, "num_examples": 2880}], "download_size": 1474083, "dataset_size": 2146729}, {"config_name": "tr_imst", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 5063905, "num_examples": 3664}, {"name": "validation", "num_bytes": 1342351, "num_examples": 988}, {"name": "test", "num_bytes": 1347524, "num_examples": 983}], "download_size": 4711018, "dataset_size": 7753780}, {"config_name": "tr_pud", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 2021772, "num_examples": 1000}], "download_size": 1359487, "dataset_size": 2021772}, {"config_name": "uk_iu", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": 
{"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 18886802, "num_examples": 5496}, {"name": "validation", "num_bytes": 2592721, "num_examples": 672}, {"name": "test", "num_bytes": 3561164, "num_examples": 892}], "download_size": 17344586, "dataset_size": 25040687}, {"config_name": "hsb_ufal", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 54257, "num_examples": 23}, {"name": "test", "num_bytes": 1246592, "num_examples": 623}], "download_size": 781067, "dataset_size": 1300849}, {"config_name": "ur_udtb", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 19808745, "num_examples": 4043}, {"name": "validation", "num_bytes": 2652349, "num_examples": 552}, {"name": "test", "num_bytes": 2702596, "num_examples": 535}], "download_size": 15901007, "dataset_size": 25163690}, {"config_name": "ug_udt", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 2570856, "num_examples": 1656}, {"name": "validation", "num_bytes": 1406032, "num_examples": 900}, {"name": "test", 
"num_bytes": 1371993, "num_examples": 900}], "download_size": 3455092, "dataset_size": 5348881}, {"config_name": "vi_vtb", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 1689772, "num_examples": 1400}, {"name": "validation", "num_bytes": 948019, "num_examples": 800}, {"name": "test", "num_bytes": 987207, "num_examples": 800}], "download_size": 2055529, "dataset_size": 3624998}, {"config_name": "wbp_ufal", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 48533, "num_examples": 55}], "download_size": 38326, "dataset_size": 48533}, {"config_name": "cy_ccg", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 1629465, "num_examples": 704}, {"name": "test", "num_bytes": 1779002, "num_examples": 953}], "download_size": 1984759, "dataset_size": 3408467}, {"config_name": "wo_wtb", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, 
{"name": "misc", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 2781883, "num_examples": 1188}, {"name": "validation", "num_bytes": 1204839, "num_examples": 449}, {"name": "test", "num_bytes": 1227124, "num_examples": 470}], "download_size": 3042699, "dataset_size": 5213846}, {"config_name": "yo_ytb", "features": [{"name": "idx", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "upos", "sequence": {"class_label": {"names": {"0": "NOUN", "1": "PUNCT", "2": "ADP", "3": "NUM", "4": "SYM", "5": "SCONJ", "6": "ADJ", "7": "PART", "8": "DET", "9": "CCONJ", "10": "PROPN", "11": "PRON", "12": "X", "13": "_", "14": "ADV", "15": "INTJ", "16": "VERB", "17": "AUX"}}}}, {"name": "xpos", "sequence": "string"}, {"name": "feats", "sequence": "string"}, {"name": "head", "sequence": "string"}, {"name": "deprel", "sequence": "string"}, {"name": "deps", "sequence": "string"}, {"name": "misc", "sequence": "string"}], "splits": [{"name": "test", "num_bytes": 905766, "num_examples": 318}], "download_size": 567955, "dataset_size": 905766}]}
2023-11-24T13:31:54+00:00
[]
[ "af", "aii", "ajp", "akk", "am", "apu", "aqz", "ar", "be", "bg", "bho", "bm", "br", "bxr", "ca", "ckt", "cop", "cs", "cu", "cy", "da", "de", "el", "en", "es", "et", "eu", "fa", "fi", "fo", "fr", "fro", "ga", "gd", "gl", "got", "grc", "gsw", "gun", "gv", "he", "hi", "hr", "hsb", "hu", "hy", "id", "is", "it", "ja", "kfm", "kk", "kmr", "ko", "koi", "kpv", "krl", "la", "lt", "lv", "lzh", "mdf", "mr", "mt", "myu", "myv", "nl", "no", "nyq", "olo", "orv", "otk", "pcm", "pl", "pt", "ro", "ru", "sa", "sk", "sl", "sme", "sms", "soj", "sq", "sr", "sv", "swl", "ta", "te", "th", "tl", "tpn", "tr", "ug", "uk", "ur", "vi", "wbp", "wo", "yo", "yue", "zh" ]
TAGS #task_categories-token-classification #task_ids-parsing #annotations_creators-expert-generated #language_creators-crowdsourced #multilinguality-multilingual #size_categories-1K<n<10K #source_datasets-original #language-Afrikaans #language-Assyrian Neo-Aramaic #language-South Levantine Arabic #language-Akkadian #language-Amharic #language-Apurinã #language-Akuntsu #language-Arabic #language-Belarusian #language-Bulgarian #language-Bhojpuri #language-Bambara #language-Breton #language-Russia Buriat #language-Catalan #language-Chukot #language-Coptic #language-Czech #language-Church Slavic #language-Welsh #language-Danish #language-German #language-Modern Greek (1453-) #language-English #language-Spanish #language-Estonian #language-Basque #language-Persian #language-Finnish #language-Faroese #language-French #language-Old French (842-ca. 1400) #language-Irish #language-Scottish Gaelic #language-Galician #language-Gothic #language-Ancient Greek (to 1453) #language-Swiss German #language-Mbyá Guaraní #language-Manx #language-Hebrew #language-Hindi #language-Croatian #language-Upper Sorbian #language-Hungarian #language-Armenian #language-Indonesian #language-Icelandic #language-Italian #language-Japanese #language-Khunsari #language-Kazakh #language-Northern Kurdish #language-Korean #language-Komi-Permyak #language-Komi-Zyrian #language-Karelian #language-Latin #language-Lithuanian #language-Latvian #language-Literary Chinese #language-Moksha #language-Marathi #language-Maltese #language-Mundurukú #language-Erzya #language-Dutch #language-Norwegian #language-Nayini #language-Livvi #language-Old Russian #language-Old Turkish #language-Nigerian Pidgin #language-Polish #language-Portuguese #language-Romanian #language-Russian #language-Sanskrit #language-Slovak #language-Slovenian #language-Northern Sami #language-Skolt Sami #language-Soi #language-Albanian #language-Serbian #language-Swedish #language-Swedish Sign Language #language-Tamil #language-Telugu #language-Thai #language-Tagalog #language-Tupinambá #language-Turkish #language-Uighur #language-Ukrainian #language-Urdu #language-Vietnamese #language-Warlpiri #language-Wolof #language-Yoruba #language-Yue Chinese #language-Chinese #license-unknown #constituency-parsing #dependency-parsing #region-us
# Dataset Card for Universal Dependencies Treebank ## Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: Universal Dependencies - Repository: - Paper: - Leaderboard: - Point of Contact: ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions Thanks to @patrickvonplaten, @jplu for adding this dataset.
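Given the treebank configurations and the `upos` ClassLabel mapping listed in this record's metadata (e.g. `es_gsd`, `sv_lines`, `tr_boun`), a minimal loading sketch follows; the dataset id `universal_dependencies`, the config name, and the exact feature layout are assumptions taken from that metadata, so adjust them if your copy differs:

```python
# Sketch only: assumes the canonical "universal_dependencies" dataset with a config name taken
# from the metadata above (e.g. "es_gsd") and a Sequence(ClassLabel) "upos" feature.
# Newer `datasets` releases may additionally require trust_remote_code=True for script datasets.
from datasets import load_dataset

ud = load_dataset("universal_dependencies", "es_gsd", split="train")
upos_names = ud.features["upos"].feature.names  # ["NOUN", "PUNCT", "ADP", ...]

first = ud[0]
for token, tag_id in zip(first["tokens"], first["upos"]):
    print(token, upos_names[tag_id])
```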
[ "# Dataset Card for Universal Dependencies Treebank", "## Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: Universal Dependencies\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @patrickvonplaten, @jplu for adding this dataset." ]
[ "TAGS\n#task_categories-token-classification #task_ids-parsing #annotations_creators-expert-generated #language_creators-crowdsourced #multilinguality-multilingual #size_categories-1K<n<10K #source_datasets-original #language-Afrikaans #language-Assyrian Neo-Aramaic #language-South Levantine Arabic #language-Akkadian #language-Amharic #language-Apurinã #language-Akuntsu #language-Arabic #language-Belarusian #language-Bulgarian #language-Bhojpuri #language-Bambara #language-Breton #language-Russia Buriat #language-Catalan #language-Chukot #language-Coptic #language-Czech #language-Church Slavic #language-Welsh #language-Danish #language-German #language-Modern Greek (1453-) #language-English #language-Spanish #language-Estonian #language-Basque #language-Persian #language-Finnish #language-Faroese #language-French #language-Old French (842-ca. 1400) #language-Irish #language-Scottish Gaelic #language-Galician #language-Gothic #language-Ancient Greek (to 1453) #language-Swiss German #language-Mbyá Guaraní #language-Manx #language-Hebrew #language-Hindi #language-Croatian #language-Upper Sorbian #language-Hungarian #language-Armenian #language-Indonesian #language-Icelandic #language-Italian #language-Japanese #language-Khunsari #language-Kazakh #language-Northern Kurdish #language-Korean #language-Komi-Permyak #language-Komi-Zyrian #language-Karelian #language-Latin #language-Lithuanian #language-Latvian #language-Literary Chinese #language-Moksha #language-Marathi #language-Maltese #language-Mundurukú #language-Erzya #language-Dutch #language-Norwegian #language-Nayini #language-Livvi #language-Old Russian #language-Old Turkish #language-Nigerian Pidgin #language-Polish #language-Portuguese #language-Romanian #language-Russian #language-Sanskrit #language-Slovak #language-Slovenian #language-Northern Sami #language-Skolt Sami #language-Soi #language-Albanian #language-Serbian #language-Swedish #language-Swedish Sign Language #language-Tamil #language-Telugu #language-Thai #language-Tagalog #language-Tupinambá #language-Turkish #language-Uighur #language-Ukrainian #language-Urdu #language-Vietnamese #language-Warlpiri #language-Wolof #language-Yoruba #language-Yue Chinese #language-Chinese #license-unknown #constituency-parsing #dependency-parsing #region-us \n", "# Dataset Card for Universal Dependencies Treebank", "## Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: Universal Dependencies\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### 
Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @patrickvonplaten, @jplu for adding this dataset." ]
1694abf4b171946c969c81e372ea993d95305ef8
### This is the v3 of my Sideview embedding; here you can download all saved steps. Personally I recommend going up in 1000-step increments from 2000, depending on whether you want more or less style. *REMEMBER:* to use the embedding it will need to be in your Auto1111 embeddings folder and you will need to use the name in your prompt, see the civitai page for more info. Some example prompts to use: a man with a mohawk and a yellow scarf on his head and a yellow background with a black and yellow design, art by flonixsdviewv3 a man with a mask on his face and a city in the background with blue lines and a orange background with a circle, art by flonixsdviewv3 a man with dreadlocks and a gas mask on his face, with a red and black background, art by flonixsdviewv3 ### More Images on the Civit.ai page https://civitai.com/models/1373/flonixs-side-view https://civitai.com/models/1373/flonixs-side-view <img src="https://s3.amazonaws.com/moonup/production/uploads/1671040720337-63383cdec6295341204b2ade.png" width="100%"/> <img src="https://s3.amazonaws.com/moonup/production/uploads/1671040772203-63383cdec6295341204b2ade.png" width="100%"/> <img src="https://s3.amazonaws.com/moonup/production/uploads/1671040828365-63383cdec6295341204b2ade.png" width="100%"/> <img src="https://s3.amazonaws.com/moonup/production/uploads/1671040891116-63383cdec6295341204b2ade.png" width="100%"/> <img src="https://s3.amazonaws.com/moonup/production/uploads/1671040930692-63383cdec6295341204b2ade.png" width="100%"/>
Flonixcorn/SVEmbed
[ "license:cc0-1.0", "region:us" ]
2022-12-14T17:43:58+00:00
{"license": "cc0-1.0"}
2022-12-28T21:08:09+00:00
[]
[]
TAGS #license-cc0-1.0 #region-us
### This is the v3 of my Sideview embedding, here you can download all steps saved. Personlly I recommend going in 1000 steps up from 2000, depending on if you want more style or less. *REMEMBER:* to use the embedding it will need to be in you Auto1111 embeddings folder and you will need to use the name in your prompt, see civitai page for more info. some example prompts to use: a man with a mohawk and a yellow scarf on his head and a yellow background with a black and yellow design, art by flonixsdviewv3 a man with a mask on his face and a city in the background with blue lines and a orange background with a circle, art by flonixsdviewv3 a man with dreadlocks and a gas mask on his face, with a red and black background, art by flonixsdviewv3 ### More Images on the URL page URL URL <img src="URL width="100%"/> <img src="URL width="100%"/> <img src="URL width="100%"/> <img src="URL width="100%"/> <img src="URL width="100%"/>
[ "### This is the v3 of my Sideview embedding,\nhere you can download all steps saved.\nPersonlly I recommend going in 1000 steps up from 2000, depending on if you want more style or less.\n*REMEMBER:*\nto use the embedding it will need to be in you Auto1111 embeddings folder and you will need to use the name in your prompt, see civitai page for more info.\nsome example prompts to use:\na man with a mohawk and a yellow scarf on his head and a yellow background with a black and yellow design, art by flonixsdviewv3\na man with a mask on his face and a city in the background with blue lines and a orange background with a circle, art by flonixsdviewv3\na man with dreadlocks and a gas mask on his face, with a red and black background, art by flonixsdviewv3", "### More Images on the URL page URL\nURL\n\n<img src=\"URL width=\"100%\"/>\n<img src=\"URL width=\"100%\"/>\n<img src=\"URL width=\"100%\"/>\n<img src=\"URL width=\"100%\"/>\n<img src=\"URL width=\"100%\"/>" ]
[ "TAGS\n#license-cc0-1.0 #region-us \n", "### This is the v3 of my Sideview embedding,\nhere you can download all steps saved.\nPersonlly I recommend going in 1000 steps up from 2000, depending on if you want more style or less.\n*REMEMBER:*\nto use the embedding it will need to be in you Auto1111 embeddings folder and you will need to use the name in your prompt, see civitai page for more info.\nsome example prompts to use:\na man with a mohawk and a yellow scarf on his head and a yellow background with a black and yellow design, art by flonixsdviewv3\na man with a mask on his face and a city in the background with blue lines and a orange background with a circle, art by flonixsdviewv3\na man with dreadlocks and a gas mask on his face, with a red and black background, art by flonixsdviewv3", "### More Images on the URL page URL\nURL\n\n<img src=\"URL width=\"100%\"/>\n<img src=\"URL width=\"100%\"/>\n<img src=\"URL width=\"100%\"/>\n<img src=\"URL width=\"100%\"/>\n<img src=\"URL width=\"100%\"/>" ]
44b2d131cccda18ce31a97ddb0a46aec4546ad96
# Dataset Card for "nsc_batches" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dlproject/nsc_batches
[ "region:us" ]
2022-12-14T17:48:01+00:00
{"dataset_info": {"features": [{"name": "input_values", "sequence": {"sequence": {"sequence": "float32"}}}, {"name": "labels", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2000882211, "num_examples": 10000}], "download_size": 1765862751, "dataset_size": 2000882211}}
2022-12-14T17:51:02+00:00
[]
[]
TAGS #region-us
# Dataset Card for "nsc_batches" More Information needed
[ "# Dataset Card for \"nsc_batches\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"nsc_batches\"\n\nMore Information needed" ]
e17e52f87ec517cf2820afae90a91f1f50da5ff6
# Dataset Card for "dataset1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
my-projects/dataset1
[ "region:us" ]
2022-12-14T19:15:35+00:00
{"dataset_info": {"features": [{"name": "solution", "sequence": "int64"}, {"name": "coefficients", "sequence": "int64"}, {"name": "discriminant", "dtype": "int64"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 4814334, "num_examples": 80000}, {"name": "test", "num_bytes": 1204417, "num_examples": 20000}], "download_size": 1554124, "dataset_size": 6018751}}
2022-12-14T19:15:42+00:00
[]
[]
TAGS #region-us
# Dataset Card for "dataset1" More Information needed
[ "# Dataset Card for \"dataset1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"dataset1\"\n\nMore Information needed" ]
b2283b65ba312361f6d46cdb63db19dd4808161a
# ROSE 🌹 This repo contains the RoSE benchmark of our paper "Revisiting the Gold Standard: Grounding Summarization Evaluation with Robust Human Evaluation". Please visit [here](https://yale-lily.github.io/ROSE/) for a demo page of this project. ### ACU Annotations The RoSE benchmark contains system outputs annotated with our ACU protocol. It contains four parts: - CNNDM, test set annotations - CNNDM, validation set annotations - XSum, test set annotations - SamSum, test set annotations We summarize the statistics below. | Dataset | Split | #Doc. | #Sys. | #Total Summ. | HF Name | --- | --- | --- | --- | --- | --- | | CNNDM | Test | 500 | 12 | 6000 | `cnndm_test` | | CNNDM | Validation | 1000 | 8 | 8000 | `cnndm_validation` | | XSum | Test | 500 | 8 | 4000 | `xsum` | | SamSum | Test | 500 | 8 | 4000 | `samsum` | ### Human Annotations with Different Evaluation Protocols We have system outputs annotated with four different human evaluation protocols in total. We summarize them below. | Protocol | w/ Input Document | w/ Reference Summary | Fine-grained | | --- | --- | --- | --- | | Prior | ✗ | ✗ | ✗ | | Ref-free | ✓ | ✗ | ✗ | | Ref-based | ✗ | ✓ | ✗ | | ACU | ✗ | ✓ | ✓ | We annotated two sets of system summaries. 1. Summaries of 12 fine-tuned systems. The huggingface data split name is `cnndm_protocol`. 2. Zero-shot summaries from large language models (GPT3, T0), together with summaries from BRIO and BART. The huggingface data split name is `cnndm_protocol_gpt3`.
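A minimal loading sketch for the ACU annotations, assuming the HF names in the table above (e.g. `cnndm_test`) are split names of this repository; if they turn out to be configuration names instead, pass them as the second argument to `load_dataset`:

```python
# Sketch only: the split name is taken from the table above and may need adjusting.
from datasets import load_dataset

acu = load_dataset("Salesforce/rose", split="cnndm_test")
print(acu)      # features and number of annotated system summaries
print(acu[0])   # one system summary together with its ACU-level annotations
```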
Salesforce/rose
[ "language:en", "region:us" ]
2022-12-14T20:13:26+00:00
{"language": ["en"]}
2023-06-07T20:00:52+00:00
[]
[ "en" ]
TAGS #language-English #region-us
ROSE ==== This repo contiains the RoSE benchmark of our paper "Revisiting the Gold Standard: Grounding Summarization Evaluation with Robust Human Evaluation". Please visit here for a demo page of this project. ### ACU Annotations RoSE benchmark contains system outputs annotated with our ACU protocol. It contains four parts: * CNNDM, test set annotations * CNNDM, validation set annotations * XSum, test set annotations * SamSum, test set annotations We summarize the statistics below. ### Human Annotations with Different Evaluation Protocols We have system outputs annotated with four different human evaluation protocols in total. We summarize them below. We annotated two sets of system summaries. 1. Summaries of 12 fine-tuned systems. The huggingface data split name is 'cnndm\_protocol'. 2. Zero-shot summaries from large langauge models (GPT3, T0), together with summaries from BRIO and BART. The huggingface data split name is 'cnndm\_protocol\_gpt3'.
[ "### ACU Annotations\n\n\nRoSE benchmark contains system outputs annotated with our ACU protocol.\nIt contains four parts:\n\n\n* CNNDM, test set annotations\n* CNNDM, validation set annotations\n* XSum, test set annotations\n* SamSum, test set annotations\n\n\nWe summarize the statistics below.", "### Human Annotations with Different Evaluation Protocols\n\n\nWe have system outputs annotated with four different human evaluation protocols in total.\nWe summarize them below.\n\n\n\nWe annotated two sets of system summaries.\n\n\n1. Summaries of 12 fine-tuned systems. The huggingface data split name is 'cnndm\\_protocol'.\n2. Zero-shot summaries from large langauge models (GPT3, T0), together with summaries from BRIO and BART. The huggingface data split name is 'cnndm\\_protocol\\_gpt3'." ]
[ "TAGS\n#language-English #region-us \n", "### ACU Annotations\n\n\nRoSE benchmark contains system outputs annotated with our ACU protocol.\nIt contains four parts:\n\n\n* CNNDM, test set annotations\n* CNNDM, validation set annotations\n* XSum, test set annotations\n* SamSum, test set annotations\n\n\nWe summarize the statistics below.", "### Human Annotations with Different Evaluation Protocols\n\n\nWe have system outputs annotated with four different human evaluation protocols in total.\nWe summarize them below.\n\n\n\nWe annotated two sets of system summaries.\n\n\n1. Summaries of 12 fine-tuned systems. The huggingface data split name is 'cnndm\\_protocol'.\n2. Zero-shot summaries from large langauge models (GPT3, T0), together with summaries from BRIO and BART. The huggingface data split name is 'cnndm\\_protocol\\_gpt3'." ]
b8abaa3360b28cfb3e9b7105cab76004ea433a0e
# Dataset Card for "butterflies_names_multiple" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sasha/butterflies_names_multiple
[ "region:us" ]
2022-12-14T20:36:09+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "description", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "name", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3477011364.593742, "num_examples": 43459}], "download_size": 3486703583, "dataset_size": 3477011364.593742}}
2022-12-14T20:41:21+00:00
[]
[]
TAGS #region-us
# Dataset Card for "butterflies_names_multiple" More Information needed
[ "# Dataset Card for \"butterflies_names_multiple\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"butterflies_names_multiple\"\n\nMore Information needed" ]
3b0d44f06c8d64e36d074c6fd1322f34124d138b
# Dataset Card for "ev-skins-1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
jubba/ev-skins-1
[ "region:us" ]
2022-12-14T21:01:07+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 9106912.0, "num_examples": 117}], "download_size": 9098056, "dataset_size": 9106912.0}}
2022-12-14T21:01:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ev-skins-1" More Information needed
[ "# Dataset Card for \"ev-skins-1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ev-skins-1\"\n\nMore Information needed" ]
4d0dfd4a3712a80e7b8498ed76e778f7cea9d21c
# Dataset Card for [Dataset Name] ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [https://cocodataset.org/](https://cocodataset.org/) - **Repository:** - **Paper:** [Microsoft COCO: Common Objects in Context](https://arxiv.org/abs/1405.0312) - **Leaderboard:** - **Point of Contact:** ### Dataset Summary MS COCO is a large-scale object detection, segmentation, and captioning dataset. COCO has several features: Object segmentation, Recognition in context, Superpixel stuff segmentation, 330K images (>200K labeled), 1.5 million object instances, 80 object categories, 91 stuff categories, 5 captions per image, 250,000 people with keypoints. As of now, there is only the 2014 subset (with Karpathy annotations and splits), but feel free to contribute the 2017 subset of COCO! ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances Each instance has the following structure: ``` { 'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=640x480 at 0x7F69C1BA8550>, 'filepath': 'COCO_val2014_000000522418.jpg', 'sentids': [681330, 686718, 688839, 693159, 693204], 'filename': 'COCO_val2014_000000522418.jpg', 'imgid': 1, 'split': 'restval', 'sentences': { 'tokens': ['a', 'woman', 'wearing', 'a', 'net', 'on', 'her', 'head', 'cutting', 'a', 'cake'], 'raw': 'A woman wearing a net on her head cutting a cake. ', 'imgid': 1, 'sentid': 681330 }, 'cocoid': 522418 } ``` ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@VictorSanh](https://github.com/VictorSanh) for adding this dataset.
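A minimal loading sketch matching the instance layout shown above; the split name `validation` is an assumption, and streaming is used only to avoid downloading the full image archive up front:

```python
# Sketch only: the split name is assumed; check the dataset viewer if it differs.
from datasets import load_dataset

coco = load_dataset("HuggingFaceM4/COCO", split="validation", streaming=True)
example = next(iter(coco))
print(example["filepath"], example["cocoid"])
print(example["sentences"]["raw"])  # raw caption text for this annotation
```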
HuggingFaceM4/COCO
[ "license:cc-by-4.0", "arxiv:1405.0312", "region:us" ]
2022-12-14T21:13:57+00:00
{"license": "cc-by-4.0"}
2022-12-15T15:51:03+00:00
[ "1405.0312" ]
[]
TAGS #license-cc-by-4.0 #arxiv-1405.0312 #region-us
# Dataset Card for [Dataset Name] ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: URL - Repository: - Paper: Microsoft COCO: Common Objects in Context - Leaderboard: - Point of Contact: ### Dataset Summary MS COCO is a large-scale object detection, segmentation, and captioning dataset. COCO has several features: Object segmentation, Recognition in context, Superpixel stuff segmentation, 330K images (>200K labeled), 1.5 million object instances, 80 object categories, 91 stuff categories, 5 captions per image, 250,000 people with keypoints. As of now, there is only the 2014 subset (with Karpathy annotations and splits), but feel free to contribute the 2017 subset of COCO! ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances Each instance has the following structure: ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions Thanks to @VictorSanh for adding this dataset.
[ "# Dataset Card for [Dataset Name]", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: URL\n- Repository:\n- Paper: Microsoft COCO: Common Objects in Context\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary\n\nMS COCO is a large-scale object detection, segmentation, and captioning dataset.\nCOCO has several features: Object segmentation, Recognition in context, Superpixel stuff segmentation, 330K images (>200K labeled), 1.5 million object instances, 80 object categories, 91 stuff categories, 5 captions per image, 250,000 people with keypoints.\n\nAs of now, there is only the 2014 subset (with Karpathy annotations and splits), but feel free to contribute the 2017 subset of COCO!", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances\n\nEach instance has the following structure:", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @VictorSanh for adding this dataset." ]
[ "TAGS\n#license-cc-by-4.0 #arxiv-1405.0312 #region-us \n", "# Dataset Card for [Dataset Name]", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: URL\n- Repository:\n- Paper: Microsoft COCO: Common Objects in Context\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary\n\nMS COCO is a large-scale object detection, segmentation, and captioning dataset.\nCOCO has several features: Object segmentation, Recognition in context, Superpixel stuff segmentation, 330K images (>200K labeled), 1.5 million object instances, 80 object categories, 91 stuff categories, 5 captions per image, 250,000 people with keypoints.\n\nAs of now, there is only the 2014 subset (with Karpathy annotations and splits), but feel free to contribute the 2017 subset of COCO!", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances\n\nEach instance has the following structure:", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @VictorSanh for adding this dataset." ]
0f7c6fd10117019eba99feb9cad5422aa315fb51
# Dataset Card for [Dataset Name] ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [https://google.github.io/localized-narratives/](https://google.github.io/localized-narratives/) - **Repository:** [https://github.com/google/localized-narratives](https://github.com/google/localized-narratives) - **Paper:** [Connecting Vision and Language with Localized Narratives](https://arxiv.org/pdf/1912.03098.pdf) - **Leaderboard:** - **Point of Contact:** ### Dataset Summary Localized Narratives is a new form of multimodal image annotations connecting vision and language. We ask annotators to describe an image with their voice while simultaneously hovering their mouse over the region they are describing. Since the voice and the mouse pointer are synchronized, we can localize every single word in the description. This dense visual grounding takes the form of a mouse trace segment per word and is unique to our data. We annotated 849k images with Localized Narratives: the whole COCO, Flickr30k, and ADE20K datasets, and 671k images of Open Images, all of which we make publicly available. As of now, there is only the `OpenImages` subset, but feel free to contribute the other subset of Localized Narratives! `OpenImages_captions` is similar to the `OpenImages` subset. The differences are that captions are grouped per image (images can have multiple captions). For this subset, `timed_caption`, `traces` and `voice_recording` are not available. ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances Each instance has the following structure: ``` { dataset_id: 'mscoco_val2017', image_id: '137576', annotator_id: 93, caption: 'In this image there are group of cows standing and eating th...', timed_caption: [{'utterance': 'In this', 'start_time': 0.0, 'end_time': 0.4}, ...], traces: [[{'x': 0.2086, 'y': -0.0533, 't': 0.022}, ...], ...], voice_recording: 'coco_val/coco_val_137576_93.ogg' } ``` ### Data Fields Each line represents one Localized Narrative annotation on one image by one annotator and has the following fields: - `dataset_id`: String identifying the dataset and split where the image belongs, e.g. mscoco_val2017. - `image_id` String identifier of the image, as specified on each dataset. - `annotator_id` Integer number uniquely identifying each annotator. - `caption` Image caption as a string of characters. - `timed_caption` List of timed utterances, i.e. 
{utterance, start_time, end_time} where utterance is a word (or group of words) and (start_time, end_time) is the time during which it was spoken, with respect to the start of the recording. - `traces` List of trace segments, one between each time the mouse pointer enters the image and goes away from it. Each trace segment is represented as a list of timed points, i.e. {x, y, t}, where x and y are the normalized image coordinates (with origin at the top-left corner of the image) and t is the time in seconds since the start of the recording. Please note that the coordinates can go a bit beyond the image, i.e. <0 or >1, as we recorded the mouse traces including a small band around the image. - `voice_recording` Relative URL path with respect to https://storage.googleapis.com/localized-narratives/voice-recordings where to find the voice recording (in OGG format) for that particular image. ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@VictorSanh](https://github.com/VictorSanh) for adding this dataset.
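A minimal reading sketch for the fields described above; the configuration name `OpenImages` and the split name `train` are assumptions, and streaming keeps the example lightweight:

```python
# Sketch only: config and split names are assumed; check the dataset viewer if they differ.
from datasets import load_dataset

ln = load_dataset("HuggingFaceM4/LocalizedNarratives", "OpenImages", split="train", streaming=True)
ann = next(iter(ln))

print(ann["dataset_id"], ann["image_id"], ann["annotator_id"])
print(ann["caption"])

# Depending on how the features are declared, `timed_caption` may come back as a list of
# {utterance, start_time, end_time} dicts or as one dict of parallel lists; handle both.
tc = ann["timed_caption"]
if isinstance(tc, dict):
    words = list(zip(tc["utterance"], tc["start_time"], tc["end_time"]))
else:
    words = [(u["utterance"], u["start_time"], u["end_time"]) for u in tc]
print(words[:5])
```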
HuggingFaceM4/LocalizedNarratives
[ "license:cc-by-4.0", "arxiv:1912.03098", "region:us" ]
2022-12-14T21:53:31+00:00
{"license": "cc-by-4.0"}
2022-12-15T23:12:48+00:00
[ "1912.03098" ]
[]
TAGS #license-cc-by-4.0 #arxiv-1912.03098 #region-us
# Dataset Card for [Dataset Name] ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: URL/URL - Repository:: [URL - Paper: Connecting Vision and Language with Localized Narratives - Leaderboard: - Point of Contact: ### Dataset Summary Localized Narratives, a new form of multimodal image annotations connecting vision and language. We ask annotators to describe an image with their voice while simultaneously hovering their mouse over the region they are describing. Since the voice and the mouse pointer are synchronized, we can localize every single word in the description. This dense visual grounding takes the form of a mouse trace segment per word and is unique to our data. We annotated 849k images with Localized Narratives: the whole COCO, Flickr30k, and ADE20K datasets, and 671k images of Open Images, all of which we make publicly available. As of now, there is only the 'OpenImages' subset, but feel free to contribute the other subset of Localized Narratives! 'OpenImages_captions' is similar to the 'OpenImages' subset. The differences are that captions are groupped per image (images can have multiple captions). For this subset, 'timed_caption', 'traces' and 'voice_recording' are not available. ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances Each instance has the following structure: ### Data Fields Each line represents one Localized Narrative annotation on one image by one annotator and has the following fields: - 'dataset_id': String identifying the dataset and split where the image belongs, e.g. mscoco_val2017. - 'image_id' String identifier of the image, as specified on each dataset. - 'annotator_id' Integer number uniquely identifying each annotator. - 'caption' Image caption as a string of characters. - 'timed_caption' List of timed utterances, i.e. {utterance, start_time, end_time} where utterance is a word (or group of words) and (start_time, end_time) is the time during which it was spoken, with respect to the start of the recording. - 'traces' List of trace segments, one between each time the mouse pointer enters the image and goes away from it. Each trace segment is represented as a list of timed points, i.e. {x, y, t}, where x and y are the normalized image coordinates (with origin at the top-left corner of the image) and t is the time in seconds since the start of the recording. Please note that the coordinates can go a bit beyond the image, i.e. <0 or >1, as we recorded the mouse traces including a small band around the image. - 'voice_recording' Relative URL path with respect to URL where to find the voice recording (in OGG format) for that particular image. ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? 
### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions Thanks to @VictorSanh for adding this dataset.
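The field description above (one annotation per line, with 'timed_caption' entries of the form {utterance, start_time, end_time} and 'traces' as lists of {x, y, t} points) can be made concrete with a small, hedged sketch. Only the field names are taken from the card; the JSONL file name below is an assumed placeholder, not a path documented by this dataset.

```python
# Hedged sketch: summarizing one Localized Narratives annotation line.
# Only the field names come from the card above; the JSONL file name is
# an assumed placeholder, not a path documented by this dataset.
import json

def summarize_annotation(line: str) -> dict:
    ann = json.loads(line)
    utterances = ann["timed_caption"]                      # timed words / word groups
    speech_seconds = sum(u["end_time"] - u["start_time"] for u in utterances)
    trace_points = sum(len(segment) for segment in ann["traces"])
    return {
        "image_id": ann["image_id"],
        "annotator_id": ann["annotator_id"],
        "utterances": len(utterances),
        "speech_seconds": round(speech_seconds, 2),
        "trace_points": trace_points,
    }

with open("open_images_localized_narratives.jsonl") as f:  # assumed file name
    print(summarize_annotation(next(f)))
```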
[ "# Dataset Card for [Dataset Name]", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: URL/URL\n- Repository:: [URL\n- Paper: Connecting Vision and Language with Localized Narratives\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary\n\nLocalized Narratives, a new form of multimodal image annotations connecting vision and language.\nWe ask annotators to describe an image with their voice while simultaneously hovering their mouse over the region they are describing.\nSince the voice and the mouse pointer are synchronized, we can localize every single word in the description.\nThis dense visual grounding takes the form of a mouse trace segment per word and is unique to our data.\nWe annotated 849k images with Localized Narratives: the whole COCO, Flickr30k, and ADE20K datasets, and 671k images of Open Images, all of which we make publicly available.\n\nAs of now, there is only the 'OpenImages' subset, but feel free to contribute the other subset of Localized Narratives!\n\n'OpenImages_captions' is similar to the 'OpenImages' subset. The differences are that captions are groupped per image (images can have multiple captions). For this subset, 'timed_caption', 'traces' and 'voice_recording' are not available.", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances\n\nEach instance has the following structure:", "### Data Fields\n\nEach line represents one Localized Narrative annotation on one image by one annotator and has the following fields:\n\n- 'dataset_id': String identifying the dataset and split where the image belongs, e.g. mscoco_val2017.\n- 'image_id' String identifier of the image, as specified on each dataset.\n- 'annotator_id' Integer number uniquely identifying each annotator.\n- 'caption' Image caption as a string of characters.\n- 'timed_caption' List of timed utterances, i.e. {utterance, start_time, end_time} where utterance is a word (or group of words) and (start_time, end_time) is the time during which it was spoken, with respect to the start of the recording.\n- 'traces' List of trace segments, one between each time the mouse pointer enters the image and goes away from it. Each trace segment is represented as a list of timed points, i.e. {x, y, t}, where x and y are the normalized image coordinates (with origin at the top-left corner of the image) and t is the time in seconds since the start of the recording. Please note that the coordinates can go a bit beyond the image, i.e. 
<0 or >1, as we recorded the mouse traces including a small band around the image.\n- 'voice_recording' Relative URL path with respect to URL where to find the voice recording (in OGG format) for that particular image.", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @VictorSanh for adding this dataset." ]
[ "TAGS\n#license-cc-by-4.0 #arxiv-1912.03098 #region-us \n", "# Dataset Card for [Dataset Name]", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: URL/URL\n- Repository:: [URL\n- Paper: Connecting Vision and Language with Localized Narratives\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary\n\nLocalized Narratives, a new form of multimodal image annotations connecting vision and language.\nWe ask annotators to describe an image with their voice while simultaneously hovering their mouse over the region they are describing.\nSince the voice and the mouse pointer are synchronized, we can localize every single word in the description.\nThis dense visual grounding takes the form of a mouse trace segment per word and is unique to our data.\nWe annotated 849k images with Localized Narratives: the whole COCO, Flickr30k, and ADE20K datasets, and 671k images of Open Images, all of which we make publicly available.\n\nAs of now, there is only the 'OpenImages' subset, but feel free to contribute the other subset of Localized Narratives!\n\n'OpenImages_captions' is similar to the 'OpenImages' subset. The differences are that captions are groupped per image (images can have multiple captions). For this subset, 'timed_caption', 'traces' and 'voice_recording' are not available.", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances\n\nEach instance has the following structure:", "### Data Fields\n\nEach line represents one Localized Narrative annotation on one image by one annotator and has the following fields:\n\n- 'dataset_id': String identifying the dataset and split where the image belongs, e.g. mscoco_val2017.\n- 'image_id' String identifier of the image, as specified on each dataset.\n- 'annotator_id' Integer number uniquely identifying each annotator.\n- 'caption' Image caption as a string of characters.\n- 'timed_caption' List of timed utterances, i.e. {utterance, start_time, end_time} where utterance is a word (or group of words) and (start_time, end_time) is the time during which it was spoken, with respect to the start of the recording.\n- 'traces' List of trace segments, one between each time the mouse pointer enters the image and goes away from it. Each trace segment is represented as a list of timed points, i.e. {x, y, t}, where x and y are the normalized image coordinates (with origin at the top-left corner of the image) and t is the time in seconds since the start of the recording. Please note that the coordinates can go a bit beyond the image, i.e. 
<0 or >1, as we recorded the mouse traces including a small band around the image.\n- 'voice_recording' Relative URL path with respect to URL where to find the voice recording (in OGG format) for that particular image.", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @VictorSanh for adding this dataset." ]
ec857e7cff86f1c3069a682ac4be42072cd6c135
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Token Classification * Model: lewtun/autotrain-acronym-identification-7324788 * Dataset: acronym_identification * Config: default * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@wjenkins](https://huggingface.co/wjenkins) for evaluating this model.
autoevaluate/autoeval-eval-acronym_identification-default-01d2b7-2476976473
[ "autotrain", "evaluation", "region:us" ]
2022-12-14T22:06:49+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["acronym_identification"], "eval_info": {"task": "entity_extraction", "model": "lewtun/autotrain-acronym-identification-7324788", "metrics": ["bertscore", "angelina-wang/directional_bias_amplification"], "dataset_name": "acronym_identification", "dataset_config": "default", "dataset_split": "validation", "col_mapping": {"tokens": "tokens", "tags": "labels"}}}
2022-12-14T22:07:37+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Token Classification * Model: lewtun/autotrain-acronym-identification-7324788 * Dataset: acronym_identification * Config: default * Split: validation To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @wjenkins for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Token Classification\n* Model: lewtun/autotrain-acronym-identification-7324788\n* Dataset: acronym_identification\n* Config: default\n* Split: validation\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @wjenkins for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Token Classification\n* Model: lewtun/autotrain-acronym-identification-7324788\n* Dataset: acronym_identification\n* Config: default\n* Split: validation\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @wjenkins for evaluating this model." ]
11bb592cc69d61a07715a9dccbc2b9271607c6cc
# Dataset Card for "fno-cifar10-32" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Dahoas/fno-cifar10-32
[ "region:us" ]
2022-12-15T01:38:57+00:00
{"dataset_info": {"features": [{"name": "images", "sequence": {"sequence": {"sequence": "float32"}}}], "splits": [{"name": "train", "num_bytes": 635009024, "num_examples": 50048}], "download_size": 647482139, "dataset_size": 635009024}}
2022-12-15T01:39:28+00:00
[]
[]
TAGS #region-us
# Dataset Card for "fno-cifar10-32" More Information needed
[ "# Dataset Card for \"fno-cifar10-32\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"fno-cifar10-32\"\n\nMore Information needed" ]
023ab9693161136e7689ce1cbd03ccd346dcb7c5
The data is originally sourced from (Sun et al., 2021). (Liu et al., 2023) processed the data to make it a dataset via the Hugging Face API with training/validation/testing splits. **Please cite:** ``` @misc{liu2023enhancing, title={Enhancing Long-form Text Generation in Mental Health with Task-adaptive Tokenization}, author={Siyang Liu and Naihao Deng and Sahand Sabour and Yilin Jia and Minlie Huang and Rada Mihalcea}, year={2023}, eprint={2310.05317}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` ``` @inproceedings{sun2021psyqa, title={PsyQA: A Chinese Dataset for Generating Long Counseling Text for Mental Health Support}, author={Sun, Hao and Lin, Zhenru and Zheng, Chujie and Liu, Siyang and Huang, Minlie}, booktitle={Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021}, pages={1489--1503}, year={2021} } ```
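Since the card states the data was repackaged as a Hugging Face dataset with training/validation/testing splits, a minimal loading sketch could look like the following. The repo id is taken from this record; column names and any extra loading arguments are not documented on the card and are therefore not relied on here.

```python
# Minimal sketch, assuming the standard `datasets` API; the repo id is taken
# from this record, but column names and any required config name or
# `trust_remote_code` flag are not documented here and may differ.
from datasets import load_dataset

psyqa = load_dataset("lsy641/PsyQA")
for split_name, split in psyqa.items():
    print(split_name, split.num_rows, split.column_names)
```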
lsy641/PsyQA
[ "license:mit", "arxiv:2310.05317", "region:us" ]
2022-12-15T01:55:23+00:00
{"license": "mit"}
2023-10-23T09:00:46+00:00
[ "2310.05317" ]
[]
TAGS #license-mit #arxiv-2310.05317 #region-us
The data is originally sourced from (Sun et al., 2021). (Liu et al., 2023) processed the data to make it a dataset via the Hugging Face API with training/validation/testing splits. Please cite:
[]
[ "TAGS\n#license-mit #arxiv-2310.05317 #region-us \n" ]
a0034cf69869dcd317ccfa67e8ec47ea1a2a54f8
# Dataset Card for "dual-fno-cifar10-32" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Dahoas/dual-fno-cifar10-32
[ "region:us" ]
2022-12-15T02:02:00+00:00
{"dataset_info": {"features": [{"name": "images", "sequence": {"sequence": {"sequence": "float32"}}}], "splits": [{"name": "train", "num_bytes": 635009024, "num_examples": 50048}], "download_size": 647528662, "dataset_size": 635009024}}
2022-12-15T02:02:31+00:00
[]
[]
TAGS #region-us
# Dataset Card for "dual-fno-cifar10-32" More Information needed
[ "# Dataset Card for \"dual-fno-cifar10-32\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"dual-fno-cifar10-32\"\n\nMore Information needed" ]
8eb6dce316a8cfbbd80a162685e77a9f19fbb07d
# Dataset Card for "results_valid_10rows_2022-12-15" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
joddy/results_valid_10rows_2022-12-15
[ "region:us" ]
2022-12-15T03:51:04+00:00
{"dataset_info": {"features": [{"name": "pixel_values", "dtype": "image"}, {"name": "resolution", "dtype": "int64"}, {"name": "attributes_loc", "dtype": {"class_label": {"names": {"0": "upper left", "1": "upper right", "2": "lower left", "3": "lower right"}}}}, {"name": "NL_text", "dtype": "string"}, {"name": "bbox_text", "dtype": "string"}, {"name": "center_text", "dtype": "string"}, {"name": "normed_object_bbox", "sequence": "int64"}, {"name": "NL_image", "dtype": "image"}, {"name": "bbox_image", "dtype": "image"}, {"name": "center_image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 16024442.0, "num_examples": 10}], "download_size": 16030250, "dataset_size": 16024442.0}}
2022-12-15T03:53:50+00:00
[]
[]
TAGS #region-us
# Dataset Card for "results_valid_10rows_2022-12-15" More Information needed
[ "# Dataset Card for \"results_valid_10rows_2022-12-15\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"results_valid_10rows_2022-12-15\"\n\nMore Information needed" ]
f846acd02f8f906b6d88ee6cb63a927294a20b7b
# Dataset Card for "AToMiC-Texts-Dedup" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
justram/AToMiC-Texts-Dedup
[ "region:us" ]
2022-12-15T04:56:17+00:00
{"dataset_info": {"features": [{"name": "language", "dtype": "string"}, {"name": "text_id", "dtype": "string"}, {"name": "page_url", "dtype": "string"}, {"name": "page_title", "dtype": "string"}, {"name": "section_title", "dtype": "string"}, {"name": "hierarchical_section_title", "dtype": "string"}, {"name": "context_page_description", "dtype": "string"}, {"name": "context_section_description", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4768023667.871489, "num_examples": 3220639}, {"name": "validation", "num_bytes": 35066965.650891684, "num_examples": 21466}, {"name": "test", "num_bytes": 26076287.261490725, "num_examples": 16362}], "download_size": 2976408849, "dataset_size": 4829166920.783871}}
2022-12-15T05:01:31+00:00
[]
[]
TAGS #region-us
# Dataset Card for "AToMiC-Texts-Dedup" More Information needed
[ "# Dataset Card for \"AToMiC-Texts-Dedup\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"AToMiC-Texts-Dedup\"\n\nMore Information needed" ]
10e9a9106c3bce5a23a4b9ac1816948bc2a04bce
# Dataset Card for "AToMiC-Qrels-Dedupe" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
justram/AToMiC-Qrels-Dedupe
[ "region:us" ]
2022-12-15T05:39:00+00:00
{"dataset_info": {"features": [{"name": "text_id", "dtype": "string"}, {"name": "Q0", "dtype": "string"}, {"name": "image_id", "dtype": "string"}, {"name": "rel", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 417552084, "num_examples": 5030748}, {"name": "validation", "num_bytes": 3336587, "num_examples": 38859}, {"name": "test", "num_bytes": 2551669, "num_examples": 30938}], "download_size": 226715065, "dataset_size": 423440340}}
2022-12-15T12:26:48+00:00
[]
[]
TAGS #region-us
# Dataset Card for "AToMiC-Qrels-Dedupe" More Information needed
[ "# Dataset Card for \"AToMiC-Qrels-Dedupe\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"AToMiC-Qrels-Dedupe\"\n\nMore Information needed" ]
035191e85dc3f96b321779dd8518954d49a37db2
# Dataset Card for "clinic-small_talk" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
fathyshalab/clinic-small_talk
[ "region:us" ]
2022-12-15T06:06:59+00:00
{"dataset_info": {"features": [{"name": "Unnamed: 0", "dtype": "int64"}, {"name": "text", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "label_text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 54000.1, "num_examples": 805}, {"name": "test", "num_bytes": 23142.9, "num_examples": 345}], "download_size": 0, "dataset_size": 77143.0}}
2022-12-24T04:41:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for "clinic-small_talk" More Information needed
[ "# Dataset Card for \"clinic-small_talk\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"clinic-small_talk\"\n\nMore Information needed" ]
5dcc41662640dbd74f9fc89fb328fd0cd666ce03
# Dataset Card for "kaggle-mbti-cleaned" This dataset originated from the Kaggle [(MBTI) Myers-Briggs Personality Type Dataset](https://www.kaggle.com/datasets/datasnaek/mbti-type). Some cleaning operations were applied to put it in a usable format for the text classification process. See more details in [GitHub](https://github.com/nogibjj/MBTI-Personality-Test)
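This record's metadata lists a string `text` column and an integer `label` column with train/test splits, which is enough for a hedged preprocessing sketch for text classification. The tokenizer checkpoint below is an arbitrary placeholder, not something the card prescribes.

```python
# Hedged sketch based on the columns in this record's metadata
# ("text": string, "label": int64) and its train/test splits.
# "distilbert-base-uncased" is a placeholder checkpoint, not one the card names.
from datasets import load_dataset
from transformers import AutoTokenizer

mbti = load_dataset("Shunian/kaggle-mbti-cleaned")
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")

def tokenize(batch):
    return tokenizer(batch["text"], truncation=True)

encoded = mbti.map(tokenize, batched=True)
print(encoded["train"][0]["label"], encoded["train"][0]["input_ids"][:10])
```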
Shunian/kaggle-mbti-cleaned
[ "region:us" ]
2022-12-15T06:30:41+00:00
{"dataset_info": {"features": [{"name": "label", "dtype": "int64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 51657719, "num_examples": 327828}, {"name": "test", "num_bytes": 12922409, "num_examples": 81957}], "download_size": 42682844, "dataset_size": 64580128}}
2022-12-16T09:46:54+00:00
[]
[]
TAGS #region-us
# Dataset Card for "kaggle-mbti-cleaned" This dataset originated from the Kaggle (MBTI) Myers-Briggs Personality Type Dataset. Some cleaning operations were applied to put it in a usable format for the text classification process. See more details in GitHub
[ "# Dataset Card for \"kaggle-mbti-cleaned\"\nThis dataset originated from the Kaggle (MBTI) Myers-Briggs Personality Type Dataset.\n\nSome cleaning operations were applied to put it in a usable format for the text classification process.\n\nSee more details in GitHub" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"kaggle-mbti-cleaned\"\nThis dataset originated from the Kaggle (MBTI) Myers-Briggs Personality Type Dataset.\n\nSome cleaning operations were applied to put it in a usable format for the text classification process.\n\nSee more details in GitHub" ]
20c1e98f15d263cdef247667794b403a4119d268
# Dataset Card for "MULTI_VALUE_mnli_present_modals" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
liuyanchen1015/MULTI_VALUE_mnli_present_modals
[ "region:us" ]
2022-12-15T06:36:59+00:00
{"dataset_info": {"features": [{"name": "premise", "dtype": "string"}, {"name": "hypothesis", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "idx", "dtype": "int64"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "dev_matched", "num_bytes": 211389, "num_examples": 881}, {"name": "dev_mismatched", "num_bytes": 229061, "num_examples": 936}, {"name": "test_matched", "num_bytes": 227994, "num_examples": 954}, {"name": "test_mismatched", "num_bytes": 240389, "num_examples": 1002}, {"name": "train", "num_bytes": 9136328, "num_examples": 38651}], "download_size": 6147567, "dataset_size": 10045161}}
2022-12-15T06:37:29+00:00
[]
[]
TAGS #region-us
# Dataset Card for "MULTI_VALUE_mnli_present_modals" More Information needed
[ "# Dataset Card for \"MULTI_VALUE_mnli_present_modals\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"MULTI_VALUE_mnli_present_modals\"\n\nMore Information needed" ]
7962619a7ba6dfcc7d16e4e2d948d846f9624b15
# Dataset Card for "smallnorb" ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description **NOTE:** This dataset is an unofficial port of small NORB based on a [repo from Andrea Palazzi](https://github.com/ndrplz/small_norb) using this [script](https://colab.research.google.com/drive/1Tx20uP1PrnyarsNCWf1dN9EQyr38BDIE?usp=sharing). For complete and accurate information, we highly recommend visiting the dataset's original homepage. - **Homepage:** https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/ - **Paper:** https://ieeexplore.ieee.org/document/1315150 ### Dataset Summary From the dataset's [homepage](https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/): > This database is intended for experiments in 3D object reocgnition from shape. It contains images of 50 toys belonging to 5 generic categories: four-legged animals, human figures, airplanes, trucks, and cars. The objects were imaged by two cameras under 6 lighting conditions, 9 elevations (30 to 70 degrees every 5 degrees), and 18 azimuths (0 to 340 every 20 degrees). > > The training set is composed of 5 instances of each category (instances 4, 6, 7, 8 and 9), and the test set of the remaining 5 instances (instances 0, 1, 2, 3, and 5). ## Dataset Structure ### Data Instances An example of an instance in this dataset: ``` { 'image_lt': <PIL.PngImagePlugin.PngImageFile image mode=L size=96x96 at 0x...>, 'image_rt': <PIL.PngImagePlugin.PngImageFile image mode=L size=96x96 at 0x...>, 'category': 0, 'instance': 8, 'elevation': 6, 'azimuth': 4, 'lighting': 4 } ``` ### Data Fields Explanation of this dataset's fields: - `image_lt`: a PIL image of an object from the dataset taken with one of two cameras - `image_rt`: a PIL image of an object from the dataset taken with one of two cameras - `category`: the category of the object shown in the images - `instance`: the instance of the category of the object shown in the images - `elevation`: the label of the elevation of the cameras used in capturing a picture of the object - `azimuth`: the label of the azimuth of the cameras used in capturing a picture of the object - `lighting`: the label of the lighting condition used in capturing a picture of the object For more information on what these categories and labels pertain to, please see [Dataset Summary](#dataset-summary) or the [repo](https://github.com/ndrplz/small_norb) used in processing the dataset. ### Data Splits Information on this dataset's splits: | | train | test | |------|------:|------:| | size | 24300 | 24300 | ## Additional Information ### Dataset Curators Credits from the dataset's [homepage](https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/): > [Fu Jie Huang](http://www.cs.nyu.edu/jhuangfu/), [Yann LeCun](http://yann.lecun.com/) > > Courant Institute, New York University > > October, 2005 ### Licensing Information From the dataset's [homepage](https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/): > This database is provided for research purposes. It cannot be sold. Publications that include results obtained with this database should reference the following paper: > > Y. 
LeCun, F.J. Huang, L. Bottou, Learning Methods for Generic Object Recognition with Invariance to Pose and Lighting. IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR) 2004 ### Citation Information From the dataset's [homepage](https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/): > Publications that include results obtained with this database should reference the following paper: > > Y. LeCun, F.J. Huang, L. Bottou, Learning Methods for Generic Object Recognition with Invariance to Pose and Lighting. IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR) 2004 ``` @inproceedings{lecun2004learning, title={Learning methods for generic object recognition with invariance to pose and lighting}, author={LeCun, Yann and Huang, Fu Jie and Bottou, Leon}, booktitle={Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.}, volume={2}, pages={II--104}, year={2004}, organization={IEEE} } ``` DOI: [10.1109/CVPR.2004.1315150](https://doi.org/10.1109/CVPR.2004.1315150) ### Contributions Code to process small NORB adapted from [Andrea Palazzi's repo](https://github.com/ndrplz/small_norb) with this [script](https://colab.research.google.com/drive/1Tx20uP1PrnyarsNCWf1dN9EQyr38BDIE?usp=sharing).
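The example instance and field list above translate into a small, hedged inspection sketch. The repo id comes from this record; how the integer labels map back to physical degrees (elevations 30 to 70 every 5 degrees, azimuths 0 to 340 every 20 degrees) is intentionally left unconverted, since the exact label encoding of this port is not documented on the card.

```python
# Hedged sketch for inspecting this smallNORB port; field names come from
# the card above and the repo id from this record. Label-to-degree mappings
# are intentionally not applied, since the port's encoding is not documented.
from datasets import load_dataset

norb = load_dataset("Ramos-Ramos/smallnorb", split="train")
sample = norb[0]

print(sample["image_lt"].size, sample["image_rt"].size)  # the two 96x96 camera views
print({k: sample[k] for k in ("category", "instance", "elevation", "azimuth", "lighting")})
```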
Ramos-Ramos/smallnorb
[ "region:us" ]
2022-12-15T07:29:28+00:00
{"dataset_info": {"features": [{"name": "image_lt", "dtype": "image"}, {"name": "image_rt", "dtype": "image"}, {"name": "category", "dtype": "int32"}, {"name": "instance", "dtype": "int32"}, {"name": "elevation", "dtype": "int32"}, {"name": "azimuth", "dtype": "int32"}, {"name": "lighting", "dtype": "int32"}], "splits": [{"name": "train", "num_bytes": 117947794.0, "num_examples": 24300}, {"name": "test", "num_bytes": 118130266.0, "num_examples": 24300}], "download_size": 236815224, "dataset_size": 236078060.0}}
2022-12-15T08:30:22+00:00
[]
[]
TAGS #region-us
Dataset Card for "smallnorb" ============================ Table of Contents ----------------- * Table of Contents * Dataset Description + Dataset Summary * Dataset Structure + Data Instances + Data Fields + Data Splits * Additional Information + Dataset Curators + Licensing Information + Citation Information + Contributions Dataset Description ------------------- NOTE: This dataset is an unofficial port of small NORB based on a repo from Andrea Palazzi using this script. For complete and accurate information, we highly recommend visiting the dataset's original homepage. * Homepage: URL * Paper: URL ### Dataset Summary From the dataset's homepage: > > This database is intended for experiments in 3D object reocgnition from shape. It contains images of 50 toys belonging to 5 generic categories: four-legged animals, human figures, airplanes, trucks, and cars. The objects were imaged by two cameras under 6 lighting conditions, 9 elevations (30 to 70 degrees every 5 degrees), and 18 azimuths (0 to 340 every 20 degrees). > > > The training set is composed of 5 instances of each category (instances 4, 6, 7, 8 and 9), and the test set of the remaining 5 instances (instances 0, 1, 2, 3, and 5). > > > Dataset Structure ----------------- ### Data Instances An example of an instance in this dataset: ### Data Fields Explanation of this dataset's fields: * 'image\_lt': a PIL image of an object from the dataset taken with one of two cameras * 'image\_rt': a PIL image of an object from the dataset taken with one of two cameras * 'category': the category of the object shown in the images * 'instance': the instance of the category of the object shown in the images * 'elevation': the label of the elevation of the cameras used in capturing a picture of the object * 'azimuth': the label of the azimuth of the cameras used in capturing a picture of the object * 'lighting': the label of the lighting condition used in capturing a picture of the object For more information on what these categories and labels pertain to, please see Dataset Summary or the repo used in processing the dataset. ### Data Splits Information on this dataset's splits: Additional Information ---------------------- ### Dataset Curators Credits from the dataset's homepage: > > Fu Jie Huang, Yann LeCun > > > Courant Institute, New York University > > > October, 2005 > > > ### Licensing Information From the dataset's homepage: > > This database is provided for research purposes. It cannot be sold. Publications that include results obtained with this database should reference the following paper: > > > Y. LeCun, F.J. Huang, L. Bottou, Learning Methods for Generic Object Recognition with Invariance to Pose and Lighting. IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR) 2004 > > > From the dataset's homepage: > > Publications that include results obtained with this database should reference the following paper: > > > Y. LeCun, F.J. Huang, L. Bottou, Learning Methods for Generic Object Recognition with Invariance to Pose and Lighting. IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR) 2004 > > > DOI: 10.1109/CVPR.2004.1315150 ### Contributions Code to process small NORB adapted from Andrea Palazzi's repo with this script.
[ "### Dataset Summary\n\n\nFrom the dataset's homepage:\n\n\n\n> \n> This database is intended for experiments in 3D object reocgnition from shape. It contains images of 50 toys belonging to 5 generic categories: four-legged animals, human figures, airplanes, trucks, and cars. The objects were imaged by two cameras under 6 lighting conditions, 9 elevations (30 to 70 degrees every 5 degrees), and 18 azimuths (0 to 340 every 20 degrees).\n> \n> \n> The training set is composed of 5 instances of each category (instances 4, 6, 7, 8 and 9), and the test set of the remaining 5 instances (instances 0, 1, 2, 3, and 5).\n> \n> \n> \n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nAn example of an instance in this dataset:", "### Data Fields\n\n\nExplanation of this dataset's fields:\n\n\n* 'image\\_lt': a PIL image of an object from the dataset taken with one of two cameras\n* 'image\\_rt': a PIL image of an object from the dataset taken with one of two cameras\n* 'category': the category of the object shown in the images\n* 'instance': the instance of the category of the object shown in the images\n* 'elevation': the label of the elevation of the cameras used in capturing a picture of the object\n* 'azimuth': the label of the azimuth of the cameras used in capturing a picture of the object\n* 'lighting': the label of the lighting condition used in capturing a picture of the object\n\n\nFor more information on what these categories and labels pertain to, please see Dataset Summary or the repo used in processing the dataset.", "### Data Splits\n\n\nInformation on this dataset's splits:\n\n\n\nAdditional Information\n----------------------", "### Dataset Curators\n\n\nCredits from the dataset's homepage:\n\n\n\n> \n> Fu Jie Huang, Yann LeCun\n> \n> \n> Courant Institute, New York University\n> \n> \n> October, 2005\n> \n> \n>", "### Licensing Information\n\n\nFrom the dataset's homepage:\n\n\n\n> \n> This database is provided for research purposes. It cannot be sold. Publications that include results obtained with this database should reference the following paper:\n> \n> \n> Y. LeCun, F.J. Huang, L. Bottou, Learning Methods for Generic Object Recognition with Invariance to Pose and Lighting. IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR) 2004\n> \n> \n> \n\n\nFrom the dataset's homepage:\n\n\n\n> \n> Publications that include results obtained with this database should reference the following paper:\n> \n> \n> Y. LeCun, F.J. Huang, L. Bottou, Learning Methods for Generic Object Recognition with Invariance to Pose and Lighting. IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR) 2004\n> \n> \n> \n\n\nDOI: 10.1109/CVPR.2004.1315150", "### Contributions\n\n\nCode to process small NORB adapted from Andrea Palazzi's repo with this script." ]
[ "TAGS\n#region-us \n", "### Dataset Summary\n\n\nFrom the dataset's homepage:\n\n\n\n> \n> This database is intended for experiments in 3D object reocgnition from shape. It contains images of 50 toys belonging to 5 generic categories: four-legged animals, human figures, airplanes, trucks, and cars. The objects were imaged by two cameras under 6 lighting conditions, 9 elevations (30 to 70 degrees every 5 degrees), and 18 azimuths (0 to 340 every 20 degrees).\n> \n> \n> The training set is composed of 5 instances of each category (instances 4, 6, 7, 8 and 9), and the test set of the remaining 5 instances (instances 0, 1, 2, 3, and 5).\n> \n> \n> \n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nAn example of an instance in this dataset:", "### Data Fields\n\n\nExplanation of this dataset's fields:\n\n\n* 'image\\_lt': a PIL image of an object from the dataset taken with one of two cameras\n* 'image\\_rt': a PIL image of an object from the dataset taken with one of two cameras\n* 'category': the category of the object shown in the images\n* 'instance': the instance of the category of the object shown in the images\n* 'elevation': the label of the elevation of the cameras used in capturing a picture of the object\n* 'azimuth': the label of the azimuth of the cameras used in capturing a picture of the object\n* 'lighting': the label of the lighting condition used in capturing a picture of the object\n\n\nFor more information on what these categories and labels pertain to, please see Dataset Summary or the repo used in processing the dataset.", "### Data Splits\n\n\nInformation on this dataset's splits:\n\n\n\nAdditional Information\n----------------------", "### Dataset Curators\n\n\nCredits from the dataset's homepage:\n\n\n\n> \n> Fu Jie Huang, Yann LeCun\n> \n> \n> Courant Institute, New York University\n> \n> \n> October, 2005\n> \n> \n>", "### Licensing Information\n\n\nFrom the dataset's homepage:\n\n\n\n> \n> This database is provided for research purposes. It cannot be sold. Publications that include results obtained with this database should reference the following paper:\n> \n> \n> Y. LeCun, F.J. Huang, L. Bottou, Learning Methods for Generic Object Recognition with Invariance to Pose and Lighting. IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR) 2004\n> \n> \n> \n\n\nFrom the dataset's homepage:\n\n\n\n> \n> Publications that include results obtained with this database should reference the following paper:\n> \n> \n> Y. LeCun, F.J. Huang, L. Bottou, Learning Methods for Generic Object Recognition with Invariance to Pose and Lighting. IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR) 2004\n> \n> \n> \n\n\nDOI: 10.1109/CVPR.2004.1315150", "### Contributions\n\n\nCode to process small NORB adapted from Andrea Palazzi's repo with this script." ]
1b7eb2cd766295b15100b32949fd1703a7cdf0dc
# Dataset Card for "preprocessed_common_voice_11" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
vumichien/preprocessed_common_voice_11
[ "region:us" ]
2022-12-15T08:26:33+00:00
{"dataset_info": {"features": [{"name": "audio", "struct": [{"name": "array", "sequence": "float32"}, {"name": "path", "dtype": "string"}, {"name": "sampling_rate", "dtype": "int64"}]}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3429313257, "num_examples": 10990}, {"name": "test", "num_bytes": 1562198132, "num_examples": 4604}], "download_size": 4988499841, "dataset_size": 4991511389}}
2022-12-15T08:29:56+00:00
[]
[]
TAGS #region-us
# Dataset Card for "preprocessed_common_voice_11" More Information needed
[ "# Dataset Card for \"preprocessed_common_voice_11\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"preprocessed_common_voice_11\"\n\nMore Information needed" ]
804dcca92615e463eed5f4d887aefdda9b9035d8
# Dataset Card for "results_original_model__valid_10rows_2022-12-15" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
joddy/results_original_model__valid_10rows_2022-12-15
[ "region:us" ]
2022-12-15T08:28:19+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}, {"name": "index", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4287818.0, "num_examples": 10}], "download_size": 4289522, "dataset_size": 4287818.0}}
2022-12-15T08:29:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for "results_original_model__valid_10rows_2022-12-15" More Information needed
[ "# Dataset Card for \"results_original_model__valid_10rows_2022-12-15\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"results_original_model__valid_10rows_2022-12-15\"\n\nMore Information needed" ]
29af0533133cd32568b97f625fd93405f825c729
# Dataset Card for "MULTI_VALUE_mnli_regularized_reflexives_aave" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
liuyanchen1015/MULTI_VALUE_mnli_regularized_reflexives_aave
[ "region:us" ]
2022-12-15T09:22:09+00:00
{"dataset_info": {"features": [{"name": "premise", "dtype": "string"}, {"name": "hypothesis", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "idx", "dtype": "int64"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "dev_matched", "num_bytes": 19251, "num_examples": 94}, {"name": "dev_mismatched", "num_bytes": 23121, "num_examples": 87}, {"name": "test_matched", "num_bytes": 21604, "num_examples": 90}, {"name": "test_mismatched", "num_bytes": 20670, "num_examples": 82}, {"name": "train", "num_bytes": 936051, "num_examples": 3883}], "download_size": 578604, "dataset_size": 1020697}}
2022-12-15T09:22:24+00:00
[]
[]
TAGS #region-us
# Dataset Card for "MULTI_VALUE_mnli_regularized_reflexives_aave" More Information needed
[ "# Dataset Card for \"MULTI_VALUE_mnli_regularized_reflexives_aave\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"MULTI_VALUE_mnli_regularized_reflexives_aave\"\n\nMore Information needed" ]
da4b164f2e86974facb19440aaf12453ac2d82a2
# Dataset Card for "MULTI_VALUE_mnli_present_perfect_ever" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
liuyanchen1015/MULTI_VALUE_mnli_present_perfect_ever
[ "region:us" ]
2022-12-15T09:29:14+00:00
{"dataset_info": {"features": [{"name": "premise", "dtype": "string"}, {"name": "hypothesis", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "idx", "dtype": "int64"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "dev_matched", "num_bytes": 186342, "num_examples": 793}, {"name": "dev_mismatched", "num_bytes": 200160, "num_examples": 788}, {"name": "test_matched", "num_bytes": 220041, "num_examples": 875}, {"name": "test_mismatched", "num_bytes": 197234, "num_examples": 826}, {"name": "train", "num_bytes": 8005522, "num_examples": 32860}], "download_size": 5376415, "dataset_size": 8809299}}
2022-12-15T09:29:39+00:00
[]
[]
TAGS #region-us
# Dataset Card for "MULTI_VALUE_mnli_present_perfect_ever" More Information needed
[ "# Dataset Card for \"MULTI_VALUE_mnli_present_perfect_ever\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"MULTI_VALUE_mnli_present_perfect_ever\"\n\nMore Information needed" ]
3d796049d5b1e6b83d4dabbb59fd4de73264b6a5
# Dataset Card for "MULTI_VALUE_mnli_regularized_reflexives_object_pronouns" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
liuyanchen1015/MULTI_VALUE_mnli_regularized_reflexives_object_pronouns
[ "region:us" ]
2022-12-15T09:29:50+00:00
{"dataset_info": {"features": [{"name": "premise", "dtype": "string"}, {"name": "hypothesis", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "idx", "dtype": "int64"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "dev_matched", "num_bytes": 10417, "num_examples": 51}, {"name": "dev_mismatched", "num_bytes": 8139, "num_examples": 41}, {"name": "test_matched", "num_bytes": 11876, "num_examples": 46}, {"name": "test_mismatched", "num_bytes": 8199, "num_examples": 43}, {"name": "train", "num_bytes": 512248, "num_examples": 2249}], "download_size": 285694, "dataset_size": 550879}}
2022-12-15T09:30:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "MULTI_VALUE_mnli_regularized_reflexives_object_pronouns" More Information needed
[ "# Dataset Card for \"MULTI_VALUE_mnli_regularized_reflexives_object_pronouns\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"MULTI_VALUE_mnli_regularized_reflexives_object_pronouns\"\n\nMore Information needed" ]
38c338a6978d301ca067347ef3d864efe2636dc0
# Dataset Card for "MULTI_VALUE_mnli_not_preverbal_negator" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
liuyanchen1015/MULTI_VALUE_mnli_not_preverbal_negator
[ "region:us" ]
2022-12-15T09:31:56+00:00
{"dataset_info": {"features": [{"name": "premise", "dtype": "string"}, {"name": "hypothesis", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "idx", "dtype": "int64"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "dev_matched", "num_bytes": 235099, "num_examples": 1073}, {"name": "dev_mismatched", "num_bytes": 210036, "num_examples": 984}, {"name": "test_matched", "num_bytes": 226280, "num_examples": 1010}, {"name": "test_mismatched", "num_bytes": 211827, "num_examples": 1020}, {"name": "train", "num_bytes": 9729916, "num_examples": 43965}], "download_size": 6472771, "dataset_size": 10613158}}
2022-12-15T09:32:23+00:00
[]
[]
TAGS #region-us
# Dataset Card for "MULTI_VALUE_mnli_not_preverbal_negator" More Information needed
[ "# Dataset Card for \"MULTI_VALUE_mnli_not_preverbal_negator\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"MULTI_VALUE_mnli_not_preverbal_negator\"\n\nMore Information needed" ]
14280d1d8043ebf5ab156f80135429fb8575a64d
# Dataset Card for "MULTI_VALUE_MNLI_bare_past_tense" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
liuyanchen1015/MULTI_VALUE_mnli_bare_past_tense
[ "region:us" ]
2022-12-15T12:11:12+00:00
{"dataset_info": {"features": [{"name": "premise", "dtype": "string"}, {"name": "hypothesis", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "idx", "dtype": "int64"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 28088801, "num_examples": 131498}, {"name": "dev_matched", "num_bytes": 690136, "num_examples": 3328}, {"name": "dev_mismatched", "num_bytes": 781000, "num_examples": 3435}, {"name": "test_matched", "num_bytes": 725437, "num_examples": 3403}, {"name": "test_mismatched", "num_bytes": 777555, "num_examples": 3437}], "download_size": 20237766, "dataset_size": 31062929}}
2022-12-15T12:11:48+00:00
[]
[]
TAGS #region-us
# Dataset Card for "MULTI_VALUE_MNLI_bare_past_tense" More Information needed
[ "# Dataset Card for \"MULTI_VALUE_MNLI_bare_past_tense\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"MULTI_VALUE_MNLI_bare_past_tense\"\n\nMore Information needed" ]
e48218e53518b036fd912e83cf90bc7a925d6b8f
# Dataset Card for "MULTI_VALUE_MNLI_nomo_existential" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
liuyanchen1015/MULTI_VALUE_mnli_nomo_existential
[ "region:us" ]
2022-12-15T12:11:50+00:00
{"dataset_info": {"features": [{"name": "premise", "dtype": "string"}, {"name": "hypothesis", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "idx", "dtype": "int64"}, {"name": "score", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 218957, "num_examples": 1020}, {"name": "dev_matched", "num_bytes": 5157, "num_examples": 26}, {"name": "dev_mismatched", "num_bytes": 4733, "num_examples": 22}, {"name": "test_matched", "num_bytes": 5635, "num_examples": 26}, {"name": "test_mismatched", "num_bytes": 3802, "num_examples": 19}], "download_size": 148198, "dataset_size": 238284}}
2022-12-15T12:12:29+00:00
[]
[]
TAGS #region-us
# Dataset Card for "MULTI_VALUE_MNLI_nomo_existential" More Information needed
[ "# Dataset Card for \"MULTI_VALUE_MNLI_nomo_existential\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"MULTI_VALUE_MNLI_nomo_existential\"\n\nMore Information needed" ]