| column | type | min length | max length |
|---|---|---|---|
| sha | string | 40 | 40 |
| text | string | 1 | 13.4M |
| id | string | 2 | 117 |
| tags | sequence | 1 | 7.91k |
| created_at | string | 25 | 25 |
| metadata | string | 2 | 875k |
| last_modified | string | 25 | 25 |
| arxiv | sequence | 0 | 25 |
| languages | sequence | 0 | 7.91k |
| tags_str | string | 17 | 159k |
| text_str | string | 1 | 447k |
| text_lists | sequence | 0 | 352 |
| processed_texts | sequence | 1 | 353 |
4249ba622beb5a668ee5eedc1e0f44c029254b13
# Overview This dataset is a subset of the Hugging Face wikipedia dataset with ~70'000 rows, each about a person on Wikipedia. Each row contains the original Wikipedia text split into sentences, as well as a paraphrased version of each sentence. For both versions, the full text is provided with the entity the Wikipedia page is about masked. # features - id: the id in the original dataset - url: the link to the wikipedia page - title: the title of the wikipedia page - text: the original wikipedia text - sentences: text split into sentences - paraphrased_sentences: text split into sentences, with each sentence paraphrased (i.e. slightly mutated) - masked_text_original: original text with the entity masked at every occurrence (<mask> as the token) - masked_entities_original: array of entities masked in masked_text_original - masked_text_paraphrased: paraphrased text with the entity masked at every occurrence - masked_entities_paraphrased: array of entities masked in masked_text_paraphrased --- annotations_creators: - no-annotation - machine-generated language: - en language_creators: - found license: - afl-3.0 multilinguality: - monolingual pretty_name: wikipedia persons paraphrased and masked size_categories: - 10K<n<100K source_datasets: - extended|wikipedia tags: [] task_categories: - fill-mask task_ids: - slot-filling
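The feature list above maps directly onto the usual `datasets` access pattern. A minimal sketch (the repository id is taken from this record; the `train` split name is an assumption and may differ):

```python
from datasets import load_dataset

# Minimal sketch of loading this dataset and inspecting one row's masked fields.
# The split name "train" is an assumption.
ds = load_dataset("Skatinger/wikipedia-persons-masked", split="train")

row = ds[0]
print(row["title"])                         # person the page is about
print(row["masked_text_original"][:200])    # original text with <mask> tokens
print(row["masked_entities_original"][:5])  # entities hidden behind the masks

# Compare each original sentence with its paraphrased counterpart.
for original, paraphrased in list(zip(row["sentences"], row["paraphrased_sentences"]))[:3]:
    print(original, "->", paraphrased)
```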
Skatinger/wikipedia-persons-masked
[ "region:us" ]
2022-12-05T18:23:48+00:00
{}
2023-01-19T15:08:52+00:00
[]
[]
TAGS #region-us
# Overview This dataset is a subset of the Hugging Face wikipedia dataset with ~70'000 rows, each about a person on Wikipedia. Each row contains the original Wikipedia text split into sentences, as well as a paraphrased version of each sentence. For both versions, the full text is provided with the entity the Wikipedia page is about masked. # features - id: the id in the original dataset - url: the link to the wikipedia page - title: the title of the wikipedia page - text: the original wikipedia text - sentences: text split into sentences - paraphrased_sentences: text split into sentences, with each sentence paraphrased (i.e. slightly mutated) - masked_text_original: original text with the entity masked at every occurrence (<mask> as the token) - masked_entities_original: array of entities masked in masked_text_original - masked_text_paraphrased: paraphrased text with the entity masked at every occurrence - masked_entities_paraphrased: array of entities masked in masked_text_paraphrased --- annotations_creators: - no-annotation - machine-generated language: - en language_creators: - found license: - afl-3.0 multilinguality: - monolingual pretty_name: wikipedia persons paraphrased and masked size_categories: - 10K<n<100K source_datasets: - extended|wikipedia tags: [] task_categories: - fill-mask task_ids: - slot-filling
[ "# Overview\nThis dataset is a subset of the huggingface wikipedia dataset with ~70'000 rows, each about a person on wikipedia.\nEach row contains the original wikipedia texts as sentences,\nas well as a paraphrased version of each sentence. For both versions full texts with the entity the wikipedia page is about being masked.", "# features\n- id: the id in the original dataset\n- url: the link to the wikipedia page\n- title: the title of the wikipedia page\n- text: the original wikipedia text\n- sentences: text split to sentences\n- paraphrased_sentences: text split to sentences, with each sentence paraphrased (e.g. mutated a bit)\n- masked_text_original: original text with entity masked in every occurence (<mask> as token)\n- masked_entities_original: array of entities masked in masked_text_original\n- masked_text_paraphrased: paraphrased text with entity masked in every occurence\n- masked_entities_paraphrased: array of entities msked in masked_text_paraphrased\n\n---\nannotations_creators:\n- no-annotation\n- machine-generated\nlanguage:\n- en\nlanguage_creators:\n- found\nlicense:\n- afl-3.0\nmultilinguality:\n- monolingual\npretty_name: wikipedia persons paraphrased and masked\nsize_categories:\n- 10K<n<100K\nsource_datasets:\n- extended|wikipedia\ntags: []\ntask_categories:\n- fill-mask\ntask_ids:\n- slot-filling" ]
[ "TAGS\n#region-us \n", "# Overview\nThis dataset is a subset of the huggingface wikipedia dataset with ~70'000 rows, each about a person on wikipedia.\nEach row contains the original wikipedia texts as sentences,\nas well as a paraphrased version of each sentence. For both versions full texts with the entity the wikipedia page is about being masked.", "# features\n- id: the id in the original dataset\n- url: the link to the wikipedia page\n- title: the title of the wikipedia page\n- text: the original wikipedia text\n- sentences: text split to sentences\n- paraphrased_sentences: text split to sentences, with each sentence paraphrased (e.g. mutated a bit)\n- masked_text_original: original text with entity masked in every occurence (<mask> as token)\n- masked_entities_original: array of entities masked in masked_text_original\n- masked_text_paraphrased: paraphrased text with entity masked in every occurence\n- masked_entities_paraphrased: array of entities msked in masked_text_paraphrased\n\n---\nannotations_creators:\n- no-annotation\n- machine-generated\nlanguage:\n- en\nlanguage_creators:\n- found\nlicense:\n- afl-3.0\nmultilinguality:\n- monolingual\npretty_name: wikipedia persons paraphrased and masked\nsize_categories:\n- 10K<n<100K\nsource_datasets:\n- extended|wikipedia\ntags: []\ntask_categories:\n- fill-mask\ntask_ids:\n- slot-filling" ]
5cff6ebd7229a9f33a4ba18bd66795cb9dc2b2b6
# Dataset Card for "sarcastic-news-headlines-1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
liamvbetts/sarcastic-news-headlines-1
[ "region:us" ]
2022-12-05T18:42:23+00:00
{"dataset_info": {"features": [{"name": "headline", "dtype": "string"}, {"name": "is_sarcastic", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1947706, "num_examples": 26709}], "download_size": 1328814, "dataset_size": 1947706}}
2022-12-05T18:42:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sarcastic-news-headlines-1" More Information needed
[ "# Dataset Card for \"sarcastic-news-headlines-1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sarcastic-news-headlines-1\"\n\nMore Information needed" ]
006684c68df888b6fe2789e83c12731e5cfee854
# Dataset Card for "sarcastic-news-headlines-v2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
liamvbetts/sarcastic-news-headlines-v2
[ "region:us" ]
2022-12-05T19:02:06+00:00
{"dataset_info": {"features": [{"name": "label", "dtype": "int64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1947706, "num_examples": 26709}], "download_size": 1328187, "dataset_size": 1947706}}
2022-12-05T19:03:15+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sarcastic-news-headlines-v2" More Information needed
[ "# Dataset Card for \"sarcastic-news-headlines-v2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sarcastic-news-headlines-v2\"\n\nMore Information needed" ]
284f57e8bb5ed25b5e309cc0136b2cefcf2ef166
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Multi-class Text Classification * Model: lewiswatson/distilbert-base-uncased-finetuned-emotion * Dataset: emotion * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate/autoeval-staging-eval-emotion-default-34e541-17396354
[ "autotrain", "evaluation", "region:us" ]
2022-12-05T19:24:17+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["emotion"], "eval_info": {"task": "multi_class_classification", "model": "lewiswatson/distilbert-base-uncased-finetuned-emotion", "metrics": [], "dataset_name": "emotion", "dataset_config": "default", "dataset_split": "test", "col_mapping": {"text": "text", "target": "label"}}}
2022-12-05T19:24:44+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Multi-class Text Classification * Model: lewiswatson/distilbert-base-uncased-finetuned-emotion * Dataset: emotion * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @lewtun for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Multi-class Text Classification\n* Model: lewiswatson/distilbert-base-uncased-finetuned-emotion\n* Dataset: emotion\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Multi-class Text Classification\n* Model: lewiswatson/distilbert-base-uncased-finetuned-emotion\n* Dataset: emotion\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
3e0bb192ffa2691ac4a497c245a893b1189d82e4
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Multi-class Text Classification * Model: lewtun/minilm-finetuned-emotion * Dataset: emotion * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate/autoeval-staging-eval-emotion-default-34e541-17396352
[ "autotrain", "evaluation", "region:us" ]
2022-12-05T19:24:17+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["emotion"], "eval_info": {"task": "multi_class_classification", "model": "lewtun/minilm-finetuned-emotion", "metrics": [], "dataset_name": "emotion", "dataset_config": "default", "dataset_split": "test", "col_mapping": {"text": "text", "target": "label"}}}
2022-12-05T19:24:42+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Multi-class Text Classification * Model: lewtun/minilm-finetuned-emotion * Dataset: emotion * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @lewtun for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Multi-class Text Classification\n* Model: lewtun/minilm-finetuned-emotion\n* Dataset: emotion\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Multi-class Text Classification\n* Model: lewtun/minilm-finetuned-emotion\n* Dataset: emotion\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
3d24acf758fe45c0496c9eef0de8e343ee6da88f
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Multi-class Text Classification * Model: lewtun/sagemaker-distilbert-emotion * Dataset: emotion * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate/autoeval-staging-eval-emotion-default-34e541-17396353
[ "autotrain", "evaluation", "region:us" ]
2022-12-05T19:24:17+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["emotion"], "eval_info": {"task": "multi_class_classification", "model": "lewtun/sagemaker-distilbert-emotion", "metrics": [], "dataset_name": "emotion", "dataset_config": "default", "dataset_split": "test", "col_mapping": {"text": "text", "target": "label"}}}
2022-12-05T19:24:45+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Multi-class Text Classification * Model: lewtun/sagemaker-distilbert-emotion * Dataset: emotion * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @lewtun for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Multi-class Text Classification\n* Model: lewtun/sagemaker-distilbert-emotion\n* Dataset: emotion\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Multi-class Text Classification\n* Model: lewtun/sagemaker-distilbert-emotion\n* Dataset: emotion\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
40fef67a9c92949796bd9e1e396b278b28fc3cc2
boats
Telecom-BGDAI/boats
[ "region:us" ]
2022-12-05T19:45:34+00:00
{}
2022-12-05T19:47:13+00:00
[]
[]
TAGS #region-us
boats
[]
[ "TAGS\n#region-us \n" ]
d3ccf9d510142f299b65d9d1be0189be6ebe198a
# Dataset Card for "librispeech5k_augm_train" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
CristianaLazar/librispeech5k_augm_train
[ "region:us" ]
2022-12-05T19:57:05+00:00
{"dataset_info": {"features": [{"name": "file", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "text", "dtype": "string"}, {"name": "speaker_id", "dtype": "int64"}, {"name": "chapter_id", "dtype": "int64"}, {"name": "id", "dtype": "string"}, {"name": "input_features", "sequence": {"sequence": "float32"}}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train.360", "num_bytes": 6796635145.0, "num_examples": 5000}], "download_size": 3988908181, "dataset_size": 6796635145.0}}
2022-12-05T20:11:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "librispeech5k_augm_train" More Information needed
[ "# Dataset Card for \"librispeech5k_augm_train\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"librispeech5k_augm_train\"\n\nMore Information needed" ]
dc67ba949f055429bca9598c7761fee26594055b
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: autoevaluate/summarization * Dataset: autoevaluate/xsum-sample * Config: autoevaluate--xsum-sample * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate/autoeval-staging-eval-autoevaluate__xsum-sample-autoevaluate__xsum-sample-437a8a-17406355
[ "autotrain", "evaluation", "region:us" ]
2022-12-05T20:08:52+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["autoevaluate/xsum-sample"], "eval_info": {"task": "summarization", "model": "autoevaluate/summarization", "metrics": [], "dataset_name": "autoevaluate/xsum-sample", "dataset_config": "autoevaluate--xsum-sample", "dataset_split": "test", "col_mapping": {"text": "document", "target": "summary"}}}
2022-12-05T20:09:15+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: autoevaluate/summarization * Dataset: autoevaluate/xsum-sample * Config: autoevaluate--xsum-sample * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @lewtun for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: autoevaluate/summarization\n* Dataset: autoevaluate/xsum-sample\n* Config: autoevaluate--xsum-sample\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: autoevaluate/summarization\n* Dataset: autoevaluate/xsum-sample\n* Config: autoevaluate--xsum-sample\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
05901479683557d2843ab4ab0b3105193403c690
# ObjectNet (Test set only) Original paper: [ObjectNet: A large-scale bias-controlled dataset for pushing the limits of object recognition models](https://objectnet.dev/objectnet-a-large-scale-bias-controlled-dataset-for-pushing-the-limits-of-object-recognition-models.pdf) Homepage: https://objectnet.dev/ Bibtex: ``` @inproceedings{NEURIPS2019_97af07a1, author = {Barbu, Andrei and Mayo, David and Alverio, Julian and Luo, William and Wang, Christopher and Gutfreund, Dan and Tenenbaum, Josh and Katz, Boris}, booktitle = {Advances in Neural Information Processing Systems}, editor = {H. Wallach and H. Larochelle and A. Beygelzimer and F. d\textquotesingle Alch\'{e}-Buc and E. Fox and R. Garnett}, pages = {}, publisher = {Curran Associates, Inc.}, title = {ObjectNet: A large-scale bias-controlled dataset for pushing the limits of object recognition models}, url = {https://proceedings.neurips.cc/paper/2019/file/97af07a14cacba681feacf3012730892-Paper.pdf}, volume = {32}, year = {2019} } ```
djghosh/wds_objectnet_test
[ "region:us" ]
2022-12-05T20:28:14+00:00
{}
2022-12-12T21:18:15+00:00
[]
[]
TAGS #region-us
# ObjectNet (Test set only) Original paper: ObjectNet: A large-scale bias-controlled dataset for pushing the limits of object recognition models Homepage: URL Bibtex:
[ "# ObjectNet (Test set only)\n\nOriginal paper: ObjectNet: A large-scale bias-controlled dataset for pushing the limits of object recognition models\n\nHomepage: URL\n\nBibtex:" ]
[ "TAGS\n#region-us \n", "# ObjectNet (Test set only)\n\nOriginal paper: ObjectNet: A large-scale bias-controlled dataset for pushing the limits of object recognition models\n\nHomepage: URL\n\nBibtex:" ]
e8aad2d4865abb219eda627f20161a9f5e2934ad
# RESISC45 (Test set only) Original paper: [Remote Sensing Image Scene Classification: Benchmark and State of the Art](https://arxiv.org/abs/1703.00121v1) Homepage (broken link): http://www.escience.cn/people/JunweiHan/NWPU-RESISC45.html Bibtex: ``` @article{DBLP:journals/corr/ChengHL17, author = {Gong Cheng and Junwei Han and Xiaoqiang Lu}, title = {Remote Sensing Image Scene Classification: Benchmark and State of the Art}, journal = {CoRR}, volume = {abs/1703.00121}, year = {2017}, url = {http://arxiv.org/abs/1703.00121}, eprinttype = {arXiv}, eprint = {1703.00121}, timestamp = {Mon, 02 Dec 2019 09:32:19 +0100}, biburl = {https://dblp.org/rec/journals/corr/ChengHL17.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ```
djghosh/wds_vtab-resisc45_test
[ "arxiv:1703.00121", "region:us" ]
2022-12-05T20:55:55+00:00
{}
2022-12-12T22:10:10+00:00
[ "1703.00121" ]
[]
TAGS #arxiv-1703.00121 #region-us
# RESISC45 (Test set only) Original paper: Remote Sensing Image Scene Classification: Benchmark and State of the Art Homepage (broken link): URL Bibtex:
[ "# RESISC45 (Test set only)\n\nOriginal paper: Remote Sensing Image Scene Classification: Benchmark and State of the Art\n\nHomepage (broken link): URL\n\nBibtex:" ]
[ "TAGS\n#arxiv-1703.00121 #region-us \n", "# RESISC45 (Test set only)\n\nOriginal paper: Remote Sensing Image Scene Classification: Benchmark and State of the Art\n\nHomepage (broken link): URL\n\nBibtex:" ]
0840d3af4684ffee0372cb54a2f9b76fd40bcccc
# Dataset Card for Czech Court Decisions NER ## Dataset Description Czech Court Decisions NER is a dataset of 300 court decisions published by The Supreme Court of the Czech Republic and the Constitutional Court of the Czech Republic. In the documents, 4 types of named entities are selected. ## Dataset Features Each sample contains: - `filename`: file name in the original dataset - `text`: court decision document in plain text - `entities`: list of selected entities. Each entity contains: - `category_id`: integer identifier of the entity category - `category_str`: human-friendly category name in Czech (verbalizer) - `start`: index on which the entity starts in the source text - `end`: index on which the entity ends in the source text - `content`: entity content, created as `text[start:end]` - `entity_id`: unique entity string identifier - `refers_to`: some entities (mostly of category 'Reference na rozhodnutí soudu') refer to a specific other entity; the `refers_to` attribute contains the `entity_id` of the referred entity The `entity_id` field was checked to be globally unique (across data samples and dataset splits). ## Entity categories The list of the recognized entities (`category_id`, `category_str` pairs): ```python3 { 0: 'Soudní instituce', 1: 'Reference na rozhodnutí soudu', 2: 'Účinnost', 3: 'Reference zákonu' } ``` ## Dataset Source The dataset is a preprocessed adaptation of the existing Czech Court Decisions Dataset [project info](https://ufal.mff.cuni.cz/ccdd), [link to data](https://lindat.mff.cuni.cz/repository/xmlui/handle/11234/1-2853). This adaptation contains (almost) the same data, converted to a more convenient format and with leaked XML-like tags stripped from the texts. The category names (verbalizers) were added by a Czech native speaker. ## Citation Cite authors of the [original dataset](https://ufal.mff.cuni.cz/ccdd): ```bibtex @misc{11234/1-2853, title = {Czech Court Decisions Dataset}, author = {Kr{\'{\i}}{\v z}, Vincent and Hladk{\'a}, Barbora}, url = {http://hdl.handle.net/11234/1-2853}, note = {{LINDAT}/{CLARIAH}-{CZ} digital library at the Institute of Formal and Applied Linguistics ({{\'U}FAL}), Faculty of Mathematics and Physics, Charles University}, copyright = {Creative Commons - Attribution-{NonCommercial}-{ShareAlike} 4.0 International ({CC} {BY}-{NC}-{SA} 4.0)}, year = {2014} } ```
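The feature description above translates into a short inspection script. A minimal sketch, assuming the standard `datasets` API, that `entities` comes back as a list of dicts, and that a `train` split exists:

```python
from datasets import load_dataset

# Minimal sketch: check that 'content' equals text[start:end] and resolve
# 'refers_to' links. Assumes 'entities' is returned as a list of dicts and
# that a "train" split exists.
ds = load_dataset("fewshot-goes-multilingual/cs_czech-court-decisions-ner", split="train")

sample = ds[0]
by_id = {e["entity_id"]: e for e in sample["entities"]}

for entity in sample["entities"][:5]:
    span = sample["text"][entity["start"]:entity["end"]]
    assert span == entity["content"]
    referred = by_id.get(entity["refers_to"]) if entity["refers_to"] else None
    print(entity["category_str"], "->", entity["content"],
          "| refers to:", referred["content"] if referred else None)
```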
fewshot-goes-multilingual/cs_czech-court-decisions-ner
[ "task_categories:token-classification", "task_ids:named-entity-recognition", "annotations_creators:expert-generated", "language_creators:other", "multilinguality:monolingual", "size_categories:n<1K", "source_datasets:original", "language:cs", "license:cc-by-nc-sa-4.0", "czech NER", "court decisions", "region:us" ]
2022-12-05T22:03:19+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["other"], "language": ["cs"], "license": ["cc-by-nc-sa-4.0"], "multilinguality": ["monolingual"], "size_categories": ["n<1K"], "source_datasets": ["original"], "task_categories": ["token-classification"], "task_ids": ["named-entity-recognition"], "pretty_name": "Czech Court Decisions NER", "tags": ["czech NER", "court decisions"]}
2022-12-05T23:01:04+00:00
[]
[ "cs" ]
TAGS #task_categories-token-classification #task_ids-named-entity-recognition #annotations_creators-expert-generated #language_creators-other #multilinguality-monolingual #size_categories-n<1K #source_datasets-original #language-Czech #license-cc-by-nc-sa-4.0 #czech NER #court decisions #region-us
# Dataset Card for Czech Court Decisions NER ## Dataset Description Czech Court Decisions NER is a dataset of 300 court decisions published by The Supreme Court of the Czech Republic and the Constitutional Court of the Czech Republic. In the documents, 4 types of named entities are selected. ## Dataset Features Each sample contains: - 'filename': file name in the original dataset - 'text': court decision document in plain text - 'entities': list of selected entities. Each entity contains: - 'category_id': integer identifier of the entity category - 'category_str': human-friendly category name in Czech (verbalizer) - 'start': index on which the entity starts in the source text - 'end': index on which the entity ends in the source text - 'content': entity content, created as 'text[start:end]' - 'entity_id': unique entity string identifier - 'refers_to': some entities (mostly of category 'Reference na rozhodnutí soudu') refer to a specific other entity; the 'refers_to' attribute contains the 'entity_id' of the referred entity The 'entity_id' field was checked to be globally unique (across data samples and dataset splits). ## Entity categories The list of the recognized entities ('category_id', 'category_str' pairs): ## Dataset Source The dataset is a preprocessed adaptation of the existing Czech Court Decisions Dataset project info, link to data. This adaptation contains (almost) the same data, converted to a more convenient format and with leaked XML-like tags stripped from the texts. The category names (verbalizers) were added by a Czech native speaker. Cite authors of the original dataset:
[ "# Dataset Card for Czech Court Decisions NER", "## Dataset Description\nCzech Court Decisions NER is a dataset of 300 court decisions published by The Supreme Court of the Czech Republic and the Constitutional Court of the Czech Republic.\nIn the documents, 4 types of named entities are selected.", "## Dataset Features\n\nEach sample contains:\n- 'filename': file name in the original dataset\n- 'text': court decision document in plain text\n- 'entities': list of selected entities. Each entity contains:\n - 'category_id': integer identifier of the entity category\n - 'category_str': human-friendly category name in Czech (verbalizer)\n - 'start': index on which the entity starts in the source text\n - 'end': index on which the entity ends in the source text\n - 'content': entity content, it was created as 'text[start:end]'\n - 'entity_id': unique entity string identifier\n - 'refers_to': some entities (mostly of category 'Reference na rozhodnutí soudu') refer to a specific other entity. 'refers_to' attribute contains the 'entity_id' of the referred entity\n\nThe 'entity_id' field was checked to be globally unique (across data samples and dataset splits.)", "## Entity categories\n\nThe list of the recognized entities ('category_id', 'category_str' pairs):", "## Dataset Source\n\nThe dataset is a preprocessed adaptation of existing Czech Court Decisions Dataset project info, link to data. This adaptation contains (almost) same data, but converted to a convenient format and with stripped leaked xml-like tags in the texts.\nThe category names (verbalizers) were added by a Czech native speaker.\n\n\nCite authors of the original dataset:" ]
[ "TAGS\n#task_categories-token-classification #task_ids-named-entity-recognition #annotations_creators-expert-generated #language_creators-other #multilinguality-monolingual #size_categories-n<1K #source_datasets-original #language-Czech #license-cc-by-nc-sa-4.0 #czech NER #court decisions #region-us \n", "# Dataset Card for Czech Court Decisions NER", "## Dataset Description\nCzech Court Decisions NER is a dataset of 300 court decisions published by The Supreme Court of the Czech Republic and the Constitutional Court of the Czech Republic.\nIn the documents, 4 types of named entities are selected.", "## Dataset Features\n\nEach sample contains:\n- 'filename': file name in the original dataset\n- 'text': court decision document in plain text\n- 'entities': list of selected entities. Each entity contains:\n - 'category_id': integer identifier of the entity category\n - 'category_str': human-friendly category name in Czech (verbalizer)\n - 'start': index on which the entity starts in the source text\n - 'end': index on which the entity ends in the source text\n - 'content': entity content, it was created as 'text[start:end]'\n - 'entity_id': unique entity string identifier\n - 'refers_to': some entities (mostly of category 'Reference na rozhodnutí soudu') refer to a specific other entity. 'refers_to' attribute contains the 'entity_id' of the referred entity\n\nThe 'entity_id' field was checked to be globally unique (across data samples and dataset splits.)", "## Entity categories\n\nThe list of the recognized entities ('category_id', 'category_str' pairs):", "## Dataset Source\n\nThe dataset is a preprocessed adaptation of existing Czech Court Decisions Dataset project info, link to data. This adaptation contains (almost) same data, but converted to a convenient format and with stripped leaked xml-like tags in the texts.\nThe category names (verbalizers) were added by a Czech native speaker.\n\n\nCite authors of the original dataset:" ]
2de016a42923bd78c9ddac2e570af07745b3c936
# Dataset Card for "xsum_tiny_ood" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
SpeedOfMagic/xsum_tiny_ood
[ "region:us" ]
2022-12-05T22:21:21+00:00
{"dataset_info": {"features": [{"name": "document", "dtype": "string"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 2343786.0, "num_examples": 1100}, {"name": "dev", "num_bytes": 398593.0, "num_examples": 200}, {"name": "test", "num_bytes": 468841.0, "num_examples": 200}], "download_size": 2101221, "dataset_size": 3211220.0}}
2022-12-05T22:22:47+00:00
[]
[]
TAGS #region-us
# Dataset Card for "xsum_tiny_ood" More Information needed
[ "# Dataset Card for \"xsum_tiny_ood\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"xsum_tiny_ood\"\n\nMore Information needed" ]
0d3d437768d386e97e513943daf34adb44e7c3b5
# SUN397 (Test set only) Original paper: [SUN Database: Exploring a Large Collection of Scene Categories](https://vision.princeton.edu/projects/2010/SUN/paperIJCV.pdf) Homepage: https://vision.princeton.edu/projects/2010/SUN/ Bibtex: ``` @ARTICLE{Xiao2016-ix, title = "{SUN} database: Exploring a large collection of scene categories", author = "Xiao, Jianxiong and Ehinger, Krista A and Hays, James and Torralba, Antonio and Oliva, Aude", journal = "Int. J. Comput. Vis.", publisher = "Springer Science and Business Media LLC", volume = 119, number = 1, pages = "3--22", month = aug, year = 2016, language = "en" } ```
djghosh/wds_sun397_test
[ "region:us" ]
2022-12-05T22:21:47+00:00
{}
2022-12-12T22:20:12+00:00
[]
[]
TAGS #region-us
# SUN397 (Test set only) Original paper: SUN Database: Exploring a Large Collection of Scene Categories Homepage: URL Bibtex:
[ "# SUN397 (Test set only)\n\nOriginal paper: SUN Database: Exploring a Large Collection of Scene Categories\n\nHomepage: URL\n\nBibtex:" ]
[ "TAGS\n#region-us \n", "# SUN397 (Test set only)\n\nOriginal paper: SUN Database: Exploring a Large Collection of Scene Categories\n\nHomepage: URL\n\nBibtex:" ]
69a8ae244cfd6d825135ae01591a2582ed020c56
# AutoTrain Dataset for project: acc_keys ## Dataset Description This dataset has been automatically processed by AutoTrain for project acc_keys. ### Languages The BCP-47 code for the dataset's language is unk. ## Dataset Structure ### Data Instances A sample from this dataset looks as follows: ```json [ { "target": 2, "text": " workon" }, { "target": 5, "text": " contact" } ] ``` ### Dataset Fields The dataset has the following fields (also called "features"): ```json { "target": "ClassLabel(num_classes=11, names=['A101', 'A102', 'A103', 'A104', 'A105', 'A106', 'A107', 'A108', 'A109', 'A110', 'A112'], id=None)", "text": "Value(dtype='string', id=None)" } ``` ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follows: | Split name | Num samples | | ------------ | ------------------- | | train | 457 | | valid | 120 |
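As a usage note, the `ClassLabel` feature above can map the integer `target` back to its label name. A minimal sketch, assuming the standard `datasets` API and the split names listed in the table:

```python
from datasets import load_dataset

# Minimal sketch: decode the integer target into its label name.
# Assumes the feature definitions and split names shown above.
ds = load_dataset("alanila/autotrain-data-acc_keys")

target_feature = ds["train"].features["target"]  # ClassLabel with 11 names
sample = ds["train"][0]
# e.g. target 2 maps to "A103" given the names list above.
print(sample["text"], "->", target_feature.int2str(sample["target"]))
```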
alanila/autotrain-data-acc_keys
[ "task_categories:text-classification", "region:us" ]
2022-12-05T22:25:46+00:00
{"task_categories": ["text-classification"]}
2022-12-05T22:26:22+00:00
[]
[]
TAGS #task_categories-text-classification #region-us
AutoTrain Dataset for project: acc\_keys ======================================== Dataset Description ------------------- This dataset has been automatically processed by AutoTrain for project acc\_keys. ### Languages The BCP-47 code for the dataset's language is unk. Dataset Structure ----------------- ### Data Instances A sample from this dataset looks as follows: ### Dataset Fields The dataset has the following fields (also called "features"): ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follows:
[ "### Languages\n\n\nThe BCP-47 code for the dataset's language is unk.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
[ "TAGS\n#task_categories-text-classification #region-us \n", "### Languages\n\n\nThe BCP-47 code for the dataset's language is unk.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
8c4b45bd4e8683f0d8bdf2b64696a6420e989c4d
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: Palak/albert-base-v2_squad * Dataset: squad * Config: plain_text * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@18st13](https://huggingface.co/18st13) for evaluating this model.
autoevaluate/autoeval-eval-squad-plain_text-9c2592-2347273870
[ "autotrain", "evaluation", "region:us" ]
2022-12-05T23:51:00+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["squad"], "eval_info": {"task": "extractive_question_answering", "model": "Palak/albert-base-v2_squad", "metrics": [], "dataset_name": "squad", "dataset_config": "plain_text", "dataset_split": "validation", "col_mapping": {"context": "context", "question": "question", "answers-text": "answers.text", "answers-answer_start": "answers.answer_start"}}}
2022-12-05T23:54:03+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Question Answering * Model: Palak/albert-base-v2_squad * Dataset: squad * Config: plain_text * Split: validation To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @18st13 for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Question Answering\n* Model: Palak/albert-base-v2_squad\n* Dataset: squad\n* Config: plain_text\n* Split: validation\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @18st13 for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Question Answering\n* Model: Palak/albert-base-v2_squad\n* Dataset: squad\n* Config: plain_text\n* Split: validation\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @18st13 for evaluating this model." ]
ee280a672a3428cd539f6a4df09dcd26488137a5
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: Palak/albert-base-v2_squad * Dataset: squad * Config: plain_text * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@18st13](https://huggingface.co/18st13) for evaluating this model.
autoevaluate/autoeval-eval-squad-plain_text-26d159-2347473871
[ "autotrain", "evaluation", "region:us" ]
2022-12-05T23:51:06+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["squad"], "eval_info": {"task": "extractive_question_answering", "model": "Palak/albert-base-v2_squad", "metrics": [], "dataset_name": "squad", "dataset_config": "plain_text", "dataset_split": "validation", "col_mapping": {"context": "context", "question": "question", "answers-text": "answers.text", "answers-answer_start": "answers.answer_start"}}}
2022-12-05T23:54:05+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Question Answering * Model: Palak/albert-base-v2_squad * Dataset: squad * Config: plain_text * Split: validation To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @18st13 for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Question Answering\n* Model: Palak/albert-base-v2_squad\n* Dataset: squad\n* Config: plain_text\n* Split: validation\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @18st13 for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Question Answering\n* Model: Palak/albert-base-v2_squad\n* Dataset: squad\n* Config: plain_text\n* Split: validation\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @18st13 for evaluating this model." ]
ac9f3639760fe11e707d945d179d82afbb447af0
# Dataset Card for "stack-filtered-pii-1M-java" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
loubnabnl/stack-filtered-pii-1M-java
[ "region:us" ]
2022-12-06T01:26:31+00:00
{"dataset_info": {"features": [{"name": "hexsha", "dtype": "string"}, {"name": "size", "dtype": "int64"}, {"name": "ext", "dtype": "string"}, {"name": "lang", "dtype": "string"}, {"name": "max_stars_repo_path", "dtype": "string"}, {"name": "max_stars_repo_name", "dtype": "string"}, {"name": "max_stars_repo_head_hexsha", "dtype": "string"}, {"name": "max_stars_repo_licenses", "sequence": "string"}, {"name": "max_stars_count", "dtype": "float64"}, {"name": "max_stars_repo_stars_event_min_datetime", "dtype": "string"}, {"name": "max_stars_repo_stars_event_max_datetime", "dtype": "string"}, {"name": "max_issues_repo_path", "dtype": "string"}, {"name": "max_issues_repo_name", "dtype": "string"}, {"name": "max_issues_repo_head_hexsha", "dtype": "string"}, {"name": "max_issues_repo_licenses", "sequence": "string"}, {"name": "max_issues_count", "dtype": "float64"}, {"name": "max_issues_repo_issues_event_min_datetime", "dtype": "string"}, {"name": "max_issues_repo_issues_event_max_datetime", "dtype": "string"}, {"name": "max_forks_repo_path", "dtype": "string"}, {"name": "max_forks_repo_name", "dtype": "string"}, {"name": "max_forks_repo_head_hexsha", "dtype": "string"}, {"name": "max_forks_repo_licenses", "sequence": "string"}, {"name": "max_forks_count", "dtype": "float64"}, {"name": "max_forks_repo_forks_event_min_datetime", "dtype": "string"}, {"name": "max_forks_repo_forks_event_max_datetime", "dtype": "string"}, {"name": "avg_line_length", "dtype": "float64"}, {"name": "max_line_length", "dtype": "int64"}, {"name": "alphanum_fraction", "dtype": "float64"}, {"name": "index", "dtype": "int64"}, {"name": "content", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 5117781075, "num_examples": 1000000}], "download_size": 1880524833, "dataset_size": 5117781075}}
2022-12-06T01:28:17+00:00
[]
[]
TAGS #region-us
# Dataset Card for "stack-filtered-pii-1M-java" More Information needed
[ "# Dataset Card for \"stack-filtered-pii-1M-java\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"stack-filtered-pii-1M-java\"\n\nMore Information needed" ]
d16cd5478bd7423b8dfd8f206e2bc59f921d6ba6
# python datasets
kidd2012/kidd-github-issues
[ "region:us" ]
2022-12-06T03:37:20+00:00
{}
2022-12-06T05:28:52+00:00
[]
[]
TAGS #region-us
# python datasets
[ "# python datasets" ]
[ "TAGS\n#region-us \n", "# python datasets" ]
a51db0a390d45703ce29b1422e116b93a94a3f1c
# Dataset Card for "nsc_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dlproject/nsc_test
[ "region:us" ]
2022-12-06T04:26:53+00:00
{"dataset_info": {"features": [{"name": "input_values", "sequence": {"sequence": {"sequence": "float32"}}}, {"name": "attention_mask", "sequence": {"sequence": "int32"}}, {"name": "labels", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 233563168, "num_examples": 1769}], "download_size": 222492491, "dataset_size": 233563168}}
2022-12-06T04:29:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for "nsc_test" More Information needed
[ "# Dataset Card for \"nsc_test\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"nsc_test\"\n\nMore Information needed" ]
8ff0f0dc8eb0711a340f4715b7104b9e8603999c
This repository contains the intermediate checkpoints for the model https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K. Each "epoch" corresponds to an additional 32B/256 samples seen. The purpose of releasing these checkpoints and optimizer states is to enable analysis. For the first 121 "epochs", training was done with float16 mixed precision before switching to bfloat16 after a loss blow-up.
laion/CLIP-ViT-H-14-laion2B-s32B-b79K-all-checkpoints
[ "license:mit", "region:us" ]
2022-12-06T04:43:52+00:00
{"license": "mit"}
2022-12-09T03:23:17+00:00
[]
[]
TAGS #license-mit #region-us
This repository contains the intermediate checkpoints for the model URL. Each "epoch" corresponds to an additional 32B/256 samples seen. The purpose of releasing these checkpoints and optimizer states is to enable analysis. For the first 121 "epochs", training was done with float16 mixed precision before switching to bfloat16 after a loss blow-up.
[]
[ "TAGS\n#license-mit #region-us \n" ]
60d053949f6f112d60373b33c603523b8d473afb
# Dataset Card for "tokenized-recipe-nlg-gpt2-ingredients-to-recipe-end" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
pratultandon/tokenized-recipe-nlg-gpt2-ingredients-to-recipe-end
[ "region:us" ]
2022-12-06T04:59:44+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 2217334238, "num_examples": 2022671}, {"name": "test", "num_bytes": 116785866, "num_examples": 106202}], "download_size": 749380879, "dataset_size": 2334120104}}
2022-12-06T05:35:29+00:00
[]
[]
TAGS #region-us
# Dataset Card for "tokenized-recipe-nlg-gpt2-ingredients-to-recipe-end" More Information needed
[ "# Dataset Card for \"tokenized-recipe-nlg-gpt2-ingredients-to-recipe-end\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"tokenized-recipe-nlg-gpt2-ingredients-to-recipe-end\"\n\nMore Information needed" ]
11a2d28556692050723773cd8e02b02498b646a4
See [DistilGPT2 Stable Diffusion](https://huggingface.co/FredZhang7/distilgpt2-stable-diffusion)
FredZhang7/krea-ai-prompts
[ "license:mit", "region:us" ]
2022-12-06T05:23:09+00:00
{"license": "mit"}
2022-12-06T05:37:07+00:00
[]
[]
TAGS #license-mit #region-us
See DistilGPT2 Stable Diffusion
[]
[ "TAGS\n#license-mit #region-us \n" ]
c8bfafd883966295d3a443d40af81681814e0958
# Dataset Card for "olm-CC-MAIN-2022-40-sampling-ratio-0.15894621295-exact-dedup-only" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Tristan/olm-CC-MAIN-2022-40-sampling-ratio-0.15894621295-exact-dedup-only
[ "region:us" ]
2022-12-06T06:03:22+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "crawl_timestamp", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 33889845422, "num_examples": 5782492}], "download_size": 20360314176, "dataset_size": 33889845422}}
2022-12-06T16:58:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "olm-CC-MAIN-2022-40-sampling-ratio-0.15894621295-exact-dedup-only" More Information needed
[ "# Dataset Card for \"olm-CC-MAIN-2022-40-sampling-ratio-0.15894621295-exact-dedup-only\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"olm-CC-MAIN-2022-40-sampling-ratio-0.15894621295-exact-dedup-only\"\n\nMore Information needed" ]
e2f37e95cc5eb38359b6aefc2cbf98a50fd1b7e4
## Dataset Description A small subset of each component dataset of the [pile-v2]() dataset; each subset contains 1,000 random samples from the original dataset. Altogether the dataset has 255MB of text (code and English). ## Languages The dataset contains technical text on programming languages and natural language with the following subsets: - Bible - TED2020 - PileOfLaw - StackExchange - GithubIssues - Opensubtitles - USPTO - S2ORC - DevDocs - CodePileReddit2022 - USENET - GNOME - ASFPublicMail - PileV2Reddit2020 - CodePilePosts - Discourse - Tanzil - arXiv - UbuntuIRC - PubMed - CodePileReddit2020 - CodePileReddit2021 - GlobalVoices - FreeLaw_Options - PileV2Posts ## Dataset Structure ```python from datasets import load_dataset load_dataset("CarperAI/pile-v2-small") ``` ### How to use it You can either load the whole dataset like above, or load a specific subset such as arxiv by specifying the folder directory: ```python load_dataset("CarperAI/pile-v2-small", data_dir="data/arxiv") ```
CarperAI/pile-v2-small-filtered
[ "task_categories:text-generation", "task_ids:language-modeling", "language_creators:crowdsourced", "multilinguality:multilingual", "size_categories:unknown", "language:en", "language:code", "region:us" ]
2022-12-06T06:08:44+00:00
{"annotations_creators": [], "language_creators": ["crowdsourced"], "language": ["en", "code"], "multilinguality": ["multilingual"], "size_categories": ["unknown"], "source_datasets": [], "task_categories": ["text-generation"], "task_ids": ["language-modeling"]}
2022-12-06T14:16:11+00:00
[]
[ "en", "code" ]
TAGS #task_categories-text-generation #task_ids-language-modeling #language_creators-crowdsourced #multilinguality-multilingual #size_categories-unknown #language-English #language-code #region-us
## Dataset Description A small subset of each component dataset of the [pile-v2]() dataset; each subset contains 1,000 random samples from the original dataset. Altogether the dataset has 255MB of text (code and English). ## Languages The dataset contains technical text on programming languages and natural language with the following subsets: - Bible - TED2020 - PileOfLaw - StackExchange - GithubIssues - Opensubtitles - USPTO - S2ORC - DevDocs - CodePileReddit2022 - USENET - GNOME - ASFPublicMail - PileV2Reddit2020 - CodePilePosts - Discourse - Tanzil - arXiv - UbuntuIRC - PubMed - CodePileReddit2020 - CodePileReddit2021 - GlobalVoices - FreeLaw_Options - PileV2Posts ## Dataset Structure ### How to use it You can either load the whole dataset like above, or load a specific subset such as arxiv by specifying the folder directory:
[ "## Dataset Description\n\nA small subset in each dataset of 'pile-v2'(~1000 samples) of [pile-v2]() dataset, each has 1,000 random samples from the original dataset. The dataset has 255MB of text (code and english).", "## Languages\nThe dataset contains technical text on programming languages and natural language with the following subsets,\n- Bible \n- TED2020\n- PileOfLaw\n- StackExchange\n- GithubIssues\n- Opensubtitles\n- USPTO\n- S2ORC\n- DevDocs\n- CodePileReddit2022\n- USENET\n- GNOME\n- ASFPublicMail\n- PileV2Reddit2020\n- CodePilePosts\n- Discourse\n- Tanzil\n- arXiv\n- UbuntuIRC\n- PubMed\n- CodePileReddit2020\n- CodePileReddit2021\n- GlobalVoices\n- FreeLaw_Options\n- PileV2Posts", "## Dataset Structure", "### How to use it\nYou can either load the whole dataset like above, or load a specific subset such as arxiv by specifying the folder directory:" ]
[ "TAGS\n#task_categories-text-generation #task_ids-language-modeling #language_creators-crowdsourced #multilinguality-multilingual #size_categories-unknown #language-English #language-code #region-us \n", "## Dataset Description\n\nA small subset in each dataset of 'pile-v2'(~1000 samples) of [pile-v2]() dataset, each has 1,000 random samples from the original dataset. The dataset has 255MB of text (code and english).", "## Languages\nThe dataset contains technical text on programming languages and natural language with the following subsets,\n- Bible \n- TED2020\n- PileOfLaw\n- StackExchange\n- GithubIssues\n- Opensubtitles\n- USPTO\n- S2ORC\n- DevDocs\n- CodePileReddit2022\n- USENET\n- GNOME\n- ASFPublicMail\n- PileV2Reddit2020\n- CodePilePosts\n- Discourse\n- Tanzil\n- arXiv\n- UbuntuIRC\n- PubMed\n- CodePileReddit2020\n- CodePileReddit2021\n- GlobalVoices\n- FreeLaw_Options\n- PileV2Posts", "## Dataset Structure", "### How to use it\nYou can either load the whole dataset like above, or load a specific subset such as arxiv by specifying the folder directory:" ]
025b58466d24eaa7462689bad4fd7c0aa2fdd631
# Dataset Card for librispeech_asr ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [LibriSpeech ASR corpus](http://www.openslr.org/12) - **Repository:** [Needs More Information] - **Paper:** [LibriSpeech: An ASR Corpus Based On Public Domain Audio Books](https://www.danielpovey.com/files/2015_icassp_librispeech.pdf) - **Leaderboard:** [The 🤗 Speech Bench](https://huggingface.co/spaces/huggingface/hf-speech-bench) - **Point of Contact:** [Daniel Povey](mailto:[email protected]) ### Dataset Summary LibriSpeech is a corpus of approximately 1000 hours of 16kHz read English speech, prepared by Vassil Panayotov with the assistance of Daniel Povey. The data is derived from read audiobooks from the LibriVox project, and has been carefully segmented and aligned. ### Supported Tasks and Leaderboards - `automatic-speech-recognition`, `audio-speaker-identification`: The dataset can be used to train a model for Automatic Speech Recognition (ASR). The model is presented with an audio file and asked to transcribe the audio file to written text. The most common evaluation metric is the word error rate (WER). The task has an active Hugging Face leaderboard which can be found at https://huggingface.co/spaces/huggingface/hf-speech-bench. The leaderboard ranks models uploaded to the Hub based on their WER. An external leaderboard at https://paperswithcode.com/sota/speech-recognition-on-librispeech-test-clean ranks the latest models from research and academia. ### Languages The audio is in English. There are two configurations: `clean` and `other`. The speakers in the corpus were ranked according to the WER of the transcripts of a model trained on a different dataset, and were divided roughly in the middle, with the lower-WER speakers designated as "clean" and the higher WER speakers designated as "other". ## Dataset Structure ### Data Instances A typical data point comprises the path to the audio file, usually called `file` and its transcription, called `text`. Some additional information about the speaker and the passage which contains the transcription is provided. 
``` {'chapter_id': 141231, 'file': '/home/patrick/.cache/huggingface/datasets/downloads/extracted/b7ded9969e09942ab65313e691e6fc2e12066192ee8527e21d634aca128afbe2/dev_clean/1272/141231/1272-141231-0000.flac', 'audio': {'path': '/home/patrick/.cache/huggingface/datasets/downloads/extracted/b7ded9969e09942ab65313e691e6fc2e12066192ee8527e21d634aca128afbe2/dev_clean/1272/141231/1272-141231-0000.flac', 'array': array([-0.00048828, -0.00018311, -0.00137329, ..., 0.00079346, 0.00091553, 0.00085449], dtype=float32), 'sampling_rate': 16000}, 'id': '1272-141231-0000', 'speaker_id': 1272, 'text': 'A MAN SAID TO THE UNIVERSE SIR I EXIST'} ``` ### Data Fields - file: A path to the downloaded audio file in .flac format. - audio: A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate. Note that when accessing the audio column: `dataset[0]["audio"]` the audio file is automatically decoded and resampled to `dataset.features["audio"].sampling_rate`. Decoding and resampling of a large number of audio files might take a significant amount of time. Thus it is important to first query the sample index before the `"audio"` column, *i.e.* `dataset[0]["audio"]` should **always** be preferred over `dataset["audio"][0]`. - text: the transcription of the audio file. - id: unique id of the data sample. - speaker_id: unique id of the speaker. The same speaker id can be found for multiple data samples. - chapter_id: id of the audiobook chapter which includes the transcription. ### Data Splits The size of the corpus makes it impractical, or at least inconvenient for some users, to distribute it as a single large archive. Thus the training portion of the corpus is split into three subsets, with approximate size 100, 360 and 500 hours respectively. A simple automatic procedure was used to select the audio in the first two sets to be, on average, of higher recording quality and with accents closer to US English. An acoustic model was trained on WSJ’s si-84 data subset and was used to recognize the audio in the corpus, using a bigram LM estimated on the text of the respective books. We computed the Word Error Rate (WER) of this automatic transcript relative to our reference transcripts obtained from the book texts. The speakers in the corpus were ranked according to the WER of the WSJ model’s transcripts, and were divided roughly in the middle, with the lower-WER speakers designated as "clean" and the higher-WER speakers designated as "other". For "clean", the data is split into train, validation, and test set. The train set is further split into train.100 and train.360 respectively accounting for 100h and 360h of the training data. For "other", the data is split into train, validation, and test set. The train set contains approximately 500h of recorded speech. | | Train.500 | Train.360 | Train.100 | Valid | Test | | ----- | ------ | ----- | ---- | ---- | ---- | | clean | - | 104014 | 28539 | 2703 | 2620| | other | 148688 | - | - | 2864 | 2939 | ## Dataset Creation ### Curation Rationale [Needs More Information] ### Source Data #### Initial Data Collection and Normalization [Needs More Information] #### Who are the source language producers? [Needs More Information] ### Annotations #### Annotation process [Needs More Information] #### Who are the annotators? [Needs More Information] ### Personal and Sensitive Information The dataset consists of people who have donated their voice online. You agree to not attempt to determine the identity of speakers in this dataset. 
## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [Needs More Information] ## Additional Information ### Dataset Curators The dataset was initially created by Vassil Panayotov, Guoguo Chen, Daniel Povey, and Sanjeev Khudanpur. ### Licensing Information [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/) ### Citation Information ``` @inproceedings{panayotov2015librispeech, title={Librispeech: an ASR corpus based on public domain audio books}, author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev}, booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on}, pages={5206--5210}, year={2015}, organization={IEEE} } ``` ### Contributions Thanks to [@patrickvonplaten](https://github.com/patrickvonplaten) for adding this dataset.
nguyenvulebinh/libris_clean_100
[ "task_categories:automatic-speech-recognition", "task_categories:audio-classification", "task_ids:speaker-identification", "annotations_creators:expert-generated", "language_creators:crowdsourced", "language_creators:expert-generated", "multilinguality:monolingual", "size_categories:100K<n<1M", "source_datasets:original", "language:en", "license:cc-by-4.0", "region:us" ]
2022-12-06T07:19:09+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["crowdsourced", "expert-generated"], "language": ["en"], "license": ["cc-by-4.0"], "multilinguality": ["monolingual"], "size_categories": ["100K<n<1M"], "source_datasets": ["original"], "task_categories": ["automatic-speech-recognition", "audio-classification"], "task_ids": ["speaker-identification"], "paperswithcode_id": "librispeech-1", "pretty_name": "LibriSpeech", "dataset_info": [{"config_name": "clean", "features": [{"name": "file", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "text", "dtype": "string"}, {"name": "speaker_id", "dtype": "int64"}, {"name": "chapter_id", "dtype": "int64"}, {"name": "id", "dtype": "string"}], "splits": [{"name": "train.100", "num_bytes": 6619683041, "num_examples": 28539}, {"name": "train.360", "num_bytes": 23898214592, "num_examples": 104014}, {"name": "validation", "num_bytes": 359572231, "num_examples": 2703}, {"name": "test", "num_bytes": 367705423, "num_examples": 2620}], "download_size": 30121377654, "dataset_size": 31245175287}, {"config_name": "other", "features": [{"name": "file", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "text", "dtype": "string"}, {"name": "speaker_id", "dtype": "int64"}, {"name": "chapter_id", "dtype": "int64"}, {"name": "id", "dtype": "string"}], "splits": [{"name": "train.500", "num_bytes": 31810256902, "num_examples": 148688}, {"name": "validation", "num_bytes": 337283304, "num_examples": 2864}, {"name": "test", "num_bytes": 352396474, "num_examples": 2939}], "download_size": 31236565377, "dataset_size": 32499936680}, {"config_name": "all", "features": [{"name": "file", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "text", "dtype": "string"}, {"name": "speaker_id", "dtype": "int64"}, {"name": "chapter_id", "dtype": "int64"}, {"name": "id", "dtype": "string"}], "splits": [{"name": "train.clean.100", "num_bytes": 6627791685, "num_examples": 28539}, {"name": "train.clean.360", "num_bytes": 23927767570, "num_examples": 104014}, {"name": "train.other.500", "num_bytes": 31852502880, "num_examples": 148688}, {"name": "validation.clean", "num_bytes": 359505691, "num_examples": 2703}, {"name": "validation.other", "num_bytes": 337213112, "num_examples": 2864}, {"name": "test.clean", "num_bytes": 368449831, "num_examples": 2620}, {"name": "test.other", "num_bytes": 353231518, "num_examples": 2939}], "download_size": 61357943031, "dataset_size": 63826462287}]}
2022-12-06T07:28:15+00:00
[]
[ "en" ]
TAGS #task_categories-automatic-speech-recognition #task_categories-audio-classification #task_ids-speaker-identification #annotations_creators-expert-generated #language_creators-crowdsourced #language_creators-expert-generated #multilinguality-monolingual #size_categories-100K<n<1M #source_datasets-original #language-English #license-cc-by-4.0 #region-us
Dataset Card for librispeech\_asr ================================= Table of Contents ----------------- * Dataset Description + Dataset Summary + Supported Tasks and Leaderboards + Languages * Dataset Structure + Data Instances + Data Fields + Data Splits * Dataset Creation + Curation Rationale + Source Data + Annotations + Personal and Sensitive Information * Considerations for Using the Data + Social Impact of Dataset + Discussion of Biases + Other Known Limitations * Additional Information + Dataset Curators + Licensing Information + Citation Information + Contributions Dataset Description ------------------- * Homepage: LibriSpeech ASR corpus * Repository: * Paper: LibriSpeech: An ASR Corpus Based On Public Domain Audio Books * Leaderboard: The Speech Bench * Point of Contact: Daniel Povey ### Dataset Summary LibriSpeech is a corpus of approximately 1000 hours of 16kHz read English speech, prepared by Vassil Panayotov with the assistance of Daniel Povey. The data is derived from read audiobooks from the LibriVox project, and has been carefully segmented and aligned. ### Supported Tasks and Leaderboards * 'automatic-speech-recognition', 'audio-speaker-identification': The dataset can be used to train a model for Automatic Speech Recognition (ASR). The model is presented with an audio file and asked to transcribe the audio file to written text. The most common evaluation metric is the word error rate (WER). The task has an active Hugging Face leaderboard which can be found at URL The leaderboard ranks models uploaded to the Hub based on their WER. An external leaderboard at URL ranks the latest models from research and academia. ### Languages The audio is in English. There are two configurations: 'clean' and 'other'. The speakers in the corpus were ranked according to the WER of the transcripts of a model trained on a different dataset, and were divided roughly in the middle, with the lower-WER speakers designated as "clean" and the higher WER speakers designated as "other". Dataset Structure ----------------- ### Data Instances A typical data point comprises the path to the audio file, usually called 'file' and its transcription, called 'text'. Some additional information about the speaker and the passage which contains the transcription is provided. ### Data Fields * file: A path to the downloaded audio file in .flac format. * audio: A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate. Note that when accessing the audio column: 'dataset[0]["audio"]' the audio file is automatically decoded and resampled to 'dataset.features["audio"].sampling\_rate'. Decoding and resampling of a large number of audio files might take a significant amount of time. Thus it is important to first query the sample index before the '"audio"' column, *i.e.* 'dataset[0]["audio"]' should always be preferred over 'dataset["audio"][0]'. * text: the transcription of the audio file. * id: unique id of the data sample. * speaker\_id: unique id of the speaker. The same speaker id can be found for multiple data samples. * chapter\_id: id of the audiobook chapter which includes the transcription. ### Data Splits The size of the corpus makes it impractical, or at least inconvenient for some users, to distribute it as a single large archive. Thus the training portion of the corpus is split into three subsets, with approximate size 100, 360 and 500 hours respectively. 
A simple automatic procedure was used to select the audio in the first two sets to be, on average, of higher recording quality and with accents closer to US English. An acoustic model was trained on WSJ’s si-84 data subset and was used to recognize the audio in the corpus, using a bigram LM estimated on the text of the respective books. We computed the Word Error Rate (WER) of this automatic transcript relative to our reference transcripts obtained from the book texts. The speakers in the corpus were ranked according to the WER of the WSJ model’s transcripts, and were divided roughly in the middle, with the lower-WER speakers designated as "clean" and the higher-WER speakers designated as "other". For "clean", the data is split into train, validation, and test set. The train set is further split into train.100 and train.360 respectively accounting for 100h and 360h of the training data. For "other", the data is split into train, validation, and test set. The train set contains approximately 500h of recorded speech. Dataset Creation ---------------- ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information The dataset consists of people who have donated their voice online. You agree to not attempt to determine the identity of speakers in this dataset. Considerations for Using the Data --------------------------------- ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations Additional Information ---------------------- ### Dataset Curators The dataset was initially created by Vassil Panayotov, Guoguo Chen, Daniel Povey, and Sanjeev Khudanpur. ### Licensing Information CC BY 4.0 ### Contributions Thanks to @patrickvonplaten for adding this dataset.
[ "### Dataset Summary\n\n\nLibriSpeech is a corpus of approximately 1000 hours of 16kHz read English speech, prepared by Vassil Panayotov with the assistance of Daniel Povey. The data is derived from read audiobooks from the LibriVox project, and has been carefully segmented and aligned.", "### Supported Tasks and Leaderboards\n\n\n* 'automatic-speech-recognition', 'audio-speaker-identification': The dataset can be used to train a model for Automatic Speech Recognition (ASR). The model is presented with an audio file and asked to transcribe the audio file to written text. The most common evaluation metric is the word error rate (WER). The task has an active Hugging Face leaderboard which can be found at URL The leaderboard ranks models uploaded to the Hub based on their WER. An external leaderboard at URL ranks the latest models from research and academia.", "### Languages\n\n\nThe audio is in English. There are two configurations: 'clean' and 'other'.\nThe speakers in the corpus were ranked according to the WER of the transcripts of a model trained on\na different dataset, and were divided roughly in the middle,\nwith the lower-WER speakers designated as \"clean\" and the higher WER speakers designated as \"other\".\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA typical data point comprises the path to the audio file, usually called 'file' and its transcription, called 'text'. Some additional information about the speaker and the passage which contains the transcription is provided.", "### Data Fields\n\n\n* file: A path to the downloaded audio file in .flac format.\n* audio: A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate. Note that when accessing the audio column: 'dataset[0][\"audio\"]' the audio file is automatically decoded and resampled to 'dataset.features[\"audio\"].sampling\\_rate'. Decoding and resampling of a large number of audio files might take a significant amount of time. Thus it is important to first query the sample index before the '\"audio\"' column, *i.e.* 'dataset[0][\"audio\"]' should always be preferred over 'dataset[\"audio\"][0]'.\n* text: the transcription of the audio file.\n* id: unique id of the data sample.\n* speaker\\_id: unique id of the speaker. The same speaker id can be found for multiple data samples.\n* chapter\\_id: id of the audiobook chapter which includes the transcription.", "### Data Splits\n\n\nThe size of the corpus makes it impractical, or at least inconvenient\nfor some users, to distribute it as a single large archive. Thus the\ntraining portion of the corpus is split into three subsets, with approximate size 100, 360 and 500 hours respectively.\nA simple automatic\nprocedure was used to select the audio in the first two sets to be, on\naverage, of higher recording quality and with accents closer to US\nEnglish. An acoustic model was trained on WSJ’s si-84 data subset\nand was used to recognize the audio in the corpus, using a bigram\nLM estimated on the text of the respective books. We computed the\nWord Error Rate (WER) of this automatic transcript relative to our\nreference transcripts obtained from the book texts.\nThe speakers in the corpus were ranked according to the WER of\nthe WSJ model’s transcripts, and were divided roughly in the middle,\nwith the lower-WER speakers designated as \"clean\" and the higher-WER speakers designated as \"other\".\n\n\nFor \"clean\", the data is split into train, validation, and test set. 
The train set is further split into train.100 and train.360\nrespectively accounting for 100h and 360h of the training data.\nFor \"other\", the data is split into train, validation, and test set. The train set contains approximately 500h of recorded speech.\n\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nThe dataset consists of people who have donated their voice online. You agree to not attempt to determine the identity of speakers in this dataset.\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators\n\n\nThe dataset was initially created by Vassil Panayotov, Guoguo Chen, Daniel Povey, and Sanjeev Khudanpur.", "### Licensing Information\n\n\nCC BY 4.0", "### Contributions\n\n\nThanks to @patrickvonplaten for adding this dataset." ]
[ "TAGS\n#task_categories-automatic-speech-recognition #task_categories-audio-classification #task_ids-speaker-identification #annotations_creators-expert-generated #language_creators-crowdsourced #language_creators-expert-generated #multilinguality-monolingual #size_categories-100K<n<1M #source_datasets-original #language-English #license-cc-by-4.0 #region-us \n", "### Dataset Summary\n\n\nLibriSpeech is a corpus of approximately 1000 hours of 16kHz read English speech, prepared by Vassil Panayotov with the assistance of Daniel Povey. The data is derived from read audiobooks from the LibriVox project, and has been carefully segmented and aligned.", "### Supported Tasks and Leaderboards\n\n\n* 'automatic-speech-recognition', 'audio-speaker-identification': The dataset can be used to train a model for Automatic Speech Recognition (ASR). The model is presented with an audio file and asked to transcribe the audio file to written text. The most common evaluation metric is the word error rate (WER). The task has an active Hugging Face leaderboard which can be found at URL The leaderboard ranks models uploaded to the Hub based on their WER. An external leaderboard at URL ranks the latest models from research and academia.", "### Languages\n\n\nThe audio is in English. There are two configurations: 'clean' and 'other'.\nThe speakers in the corpus were ranked according to the WER of the transcripts of a model trained on\na different dataset, and were divided roughly in the middle,\nwith the lower-WER speakers designated as \"clean\" and the higher WER speakers designated as \"other\".\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA typical data point comprises the path to the audio file, usually called 'file' and its transcription, called 'text'. Some additional information about the speaker and the passage which contains the transcription is provided.", "### Data Fields\n\n\n* file: A path to the downloaded audio file in .flac format.\n* audio: A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate. Note that when accessing the audio column: 'dataset[0][\"audio\"]' the audio file is automatically decoded and resampled to 'dataset.features[\"audio\"].sampling\\_rate'. Decoding and resampling of a large number of audio files might take a significant amount of time. Thus it is important to first query the sample index before the '\"audio\"' column, *i.e.* 'dataset[0][\"audio\"]' should always be preferred over 'dataset[\"audio\"][0]'.\n* text: the transcription of the audio file.\n* id: unique id of the data sample.\n* speaker\\_id: unique id of the speaker. The same speaker id can be found for multiple data samples.\n* chapter\\_id: id of the audiobook chapter which includes the transcription.", "### Data Splits\n\n\nThe size of the corpus makes it impractical, or at least inconvenient\nfor some users, to distribute it as a single large archive. Thus the\ntraining portion of the corpus is split into three subsets, with approximate size 100, 360 and 500 hours respectively.\nA simple automatic\nprocedure was used to select the audio in the first two sets to be, on\naverage, of higher recording quality and with accents closer to US\nEnglish. An acoustic model was trained on WSJ’s si-84 data subset\nand was used to recognize the audio in the corpus, using a bigram\nLM estimated on the text of the respective books. 
We computed the\nWord Error Rate (WER) of this automatic transcript relative to our\nreference transcripts obtained from the book texts.\nThe speakers in the corpus were ranked according to the WER of\nthe WSJ model’s transcripts, and were divided roughly in the middle,\nwith the lower-WER speakers designated as \"clean\" and the higher-WER speakers designated as \"other\".\n\n\nFor \"clean\", the data is split into train, validation, and test set. The train set is further split into train.100 and train.360\nrespectively accounting for 100h and 360h of the training data.\nFor \"other\", the data is split into train, validation, and test set. The train set contains approximately 500h of recorded speech.\n\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nThe dataset consists of people who have donated their voice online. You agree to not attempt to determine the identity of speakers in this dataset.\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators\n\n\nThe dataset was initially created by Vassil Panayotov, Guoguo Chen, Daniel Povey, and Sanjeev Khudanpur.", "### Licensing Information\n\n\nCC BY 4.0", "### Contributions\n\n\nThanks to @patrickvonplaten for adding this dataset." ]
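The Data Fields section in the card above spells out how audio decoding works when a row is indexed. As a quick illustration, here is a minimal sketch of loading this copy and reading one decoded sample with the standard `datasets` API; the config name `clean` and split `train.100` are taken from the record's metadata, but their availability under this Hub id is an assumption.

```
from datasets import load_dataset

# Load the "clean" config; the metadata above lists train.100 / train.360 /
# validation / test splits for it. Adjust names if load_dataset reports others.
ds = load_dataset("nguyenvulebinh/libris_clean_100", "clean", split="train.100")

# Index the row first, then read the "audio" column: decoding and resampling
# happen per sample, so ds[0]["audio"] is preferred over ds["audio"][0].
sample = ds[0]
audio = sample["audio"]          # dict with "path", "array", "sampling_rate"
print(sample["text"])            # upper-case transcription
print(audio["sampling_rate"])    # 16000
print(audio["array"].shape)      # 1-D float32 waveform
```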
b93d8a9da47a40a289a5ac0914e16ceb3248bafd
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: facebook/bart-large-cnn * Dataset: gigaword * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@geiright](https://huggingface.co/geiright) for evaluating this model.
autoevaluate/autoeval-eval-gigaword-default-2df74a-2350473902
[ "autotrain", "evaluation", "region:us" ]
2022-12-06T08:07:04+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["gigaword"], "eval_info": {"task": "summarization", "model": "facebook/bart-large-cnn", "metrics": ["rouge"], "dataset_name": "gigaword", "dataset_config": "default", "dataset_split": "test", "col_mapping": {"text": "document", "target": "summary"}}}
2022-12-06T08:12:27+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: facebook/bart-large-cnn * Dataset: gigaword * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @geiright for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: facebook/bart-large-cnn\n* Dataset: gigaword\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @geiright for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: facebook/bart-large-cnn\n* Dataset: gigaword\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @geiright for evaluating this model." ]
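The record above fully specifies the evaluation job (task, model, dataset, metric, column mapping). For readers who want to reproduce something similar outside AutoTrain, a rough sketch with `transformers` and `evaluate` on a small slice of the gigaword test split follows; the column names `document` and `summary` come from the record's `col_mapping`, while the slice size and truncation setting are arbitrary choices, not what the evaluator itself used.

```
from datasets import load_dataset
from transformers import pipeline
import evaluate

# Small slice to keep the sketch cheap; the official job ran on the full test set.
ds = load_dataset("gigaword", split="test[:32]")

summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
rouge = evaluate.load("rouge")

# col_mapping from the record: text -> "document", target -> "summary".
preds = [out["summary_text"] for out in summarizer(ds["document"], truncation=True)]
scores = rouge.compute(predictions=preds, references=ds["summary"])
print(scores)  # rouge1 / rouge2 / rougeL / rougeLsum
```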
01600ce7eabbf42a5ee7c82b82f49a11597b3a5f
# Dataset Card for MultiSports ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** https://deeperaction.github.io/datasets/multisports.html - **Repository:** https://github.com/MCG-NJU/MultiSports - **Paper:** https://arxiv.org/abs/2105.07404 - **Leaderboard:** https://paperswithcode.com/dataset/multisports - **Point of Contact:** mailto: [email protected] ### Dataset Summary Spatio-temporal action localization is an important and challenging problem in video understanding. Previous action detection benchmarks are limited in aspects of small numbers of instances in a trimmed video or low-level atomic actions. MultiSports is a multi-person dataset of spatio-temporal localized sports actions. Please refer to [this paper](https://arxiv.org/abs/2105.07404) for more details. Please refer to [this repository](https://github.com/MCG-NJU/MultiSports) for evaluation. ### Supported Tasks and Leaderboards - `Spatial-temporal action localization` Details about evaluation can be found in the [GitHub Repository](https://github.com/mcG-NJU/MultiSports). Previous challenge results can be found in [this page](https://deeperaction.github.io/results/index.html) and [this CodaLab challenge](https://codalab.lisn.upsaclay.fr/competitions/3736). ### Languages The class labels in the dataset are in English. ## Dataset Structure ### Data Instances Demo is available on [dataset homepage](https://deeperaction.github.io/datasets/multisports.html). The dataset contains ```rawframes.tar``` and ```multisports_GT.pkl```. The GT pkl file is a dictionary with the following structure: ``` { 'labels': ['label1', 'label2', ...], 'train_videos': [['train_vid_1', 'train_vid_2', ...]], 'test_videos': [['test_vid_1', 'test_vid_2', ...]], 'nframes': { 'vid_1': nframes_1, 'vid_2': nframes_2, ... }, 'resolution': { 'vid_1': resolution_1, 'vid_2': resolution_2, ... }, 'gttubes': { 'vid_1': { 'label_1': [tube_1, tube_2, ...], 'label_2': [tube_1, tube_2, ...], ... } ... } } ``` Here a ```tube``` is a ```numpy.ndarray``` with ```nframes``` rows and 5 columns ```<frame number> <x1> <y1> <x2> <y2>```. ### Data Fields Raw frames are organized according to their sport category. The pickle file of GT contains the following fields. 
- labels: list of labels - train_videos: a list with one split element containing the list of training videos - test_videos: a list with one split element containing the list of validation videos - nframes: dictionary that gives the number of frames for each video - resolution: dictionary that output a tuple ```(h,w)``` of the resolution for each video - gttubes: dictionary that contains the gt tubes for each video. Gt tubes are dictionary that associates from each index of label, a list of tubes. A ```tube``` is a ```numpy.ndarray``` with ```nframes``` rows and 5 columns ```<frame number> <x1> <y1> <x2> <y2>```. Please note that the label index starts from 0 and the frame index starts from 1. For the label index ```i```, the label name is ```labels[i]```. <details> <summary> Click here to see the full list of MultiSports class labels mapping: </summary> |id|Class| |--|-----| | 0 | aerobic push up | | 1 | aerobic explosive push up | | 2 | aerobic explosive support | | 3 | aerobic leg circle | | 4 | aerobic helicopter | | 5 | aerobic support | | 6 | aerobic v support | | 7 | aerobic horizontal support | | 8 | aerobic straight jump | | 9 | aerobic illusion | | 10 | aerobic bent leg(s) jump | | 11 | aerobic pike jump | | 12 | aerobic straddle jump | | 13 | aerobic split jump | | 14 | aerobic scissors leap | | 15 | aerobic kick jump | | 16 | aerobic off axis jump | | 17 | aerobic butterfly jump | | 18 | aerobic split | | 19 | aerobic turn | | 20 | aerobic balance turn | | 21 | volleyball serve | | 22 | volleyball block | | 23 | volleyball first pass | | 24 | volleyball defend | | 25 | volleyball protect | | 26 | volleyball second pass | | 27 | volleyball adjust | | 28 | volleyball save | | 29 | volleyball second attack | | 30 | volleyball spike | | 31 | volleyball dink | | 32 | volleyball no offensive attack | | 33 | football shoot | | 34 | football long pass | | 35 | football short pass | | 36 | football through pass | | 37 | football cross | | 38 | football dribble | | 39 | football trap | | 40 | football throw | | 41 | football diving | | 42 | football tackle | | 43 | football steal | | 44 | football clearance | | 45 | football block | | 46 | football press | | 47 | football aerial duels | | 48 | basketball pass | | 49 | basketball drive | | 50 | basketball dribble | | 51 | basketball 3-point shot | | 52 | basketball 2-point shot | | 53 | basketball free throw | | 54 | basketball block | | 55 | basketball offensive rebound | | 56 | basketball defensive rebound | | 57 | basketball pass steal | | 58 | basketball dribble steal | | 59 | basketball interfere shot | | 60 | basketball pick-and-roll defensive | | 61 | basketball sag | | 62 | basketball screen | | 63 | basketball pass-inbound | | 64 | basketball save | | 65 | basketball jump ball | </details> ### Data Splits | |train |validation| test | |-------------|------:|---------:|------:| |# of tubes |28514 |10116 | - | *GT for test split is not provided. Please wait for the new competition to start. Information will be updated in [dataset homepage](https://deeperaction.github.io/datasets/multisports.html).* ## Dataset Creation ### Curation Rationale Spatio-temporal action detection is an important and challenging problem in video understanding. Previous action detection benchmarks are limited in aspects of small numbers of instances in a trimmed video or low-level atomic actions. 
### Source Data #### Initial Data Collection and Normalization > After choosing the four sports, we search for their competition videos by querying the name of sports like volleyball and the name of competition levels like Olympics and World Cup on YouTube, and then down- load videos from top search results. For each video, we only select high-resolution, e.g. 720P or 1080P, competition records and then manually cut them into clips of minutes, with less shot changes in each clip and to be more suitable for action detection. #### Who are the source language producers? The annotators of action categories and temporal boundaries are professional athletes of the corresponding sports. Please refer to [the paper](https://arxiv.org/abs/2105.07404) for more information. ### Annotations #### Annotation process 1. (FIRST STAGE) A team of professional athletes generate records of the action la- bel, the starting and ending frame, and the person box in the starting frame, which can ensure the efficiency, accu- racy and consistency of our annotation results. 2. At least one annotator with domain knowledge double-check the annotations, correct wrong or inaccurate ones and also add missing annotations 3. (SECOND STAGE) With the help of FCOT tracking algorithm, a team of crowd-sourced annotators adjust bounding boxes of tracking results at each frame for each record. 4. Double-check each instance by playing it in 5fps and manually correct the inaccurate bounding boxes. #### Who are the annotators? For the first stage, annotators are professional athletes. For the second stage, annotators are common volunteers. ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators Authors of [this paper](https://arxiv.org/abs/2105.07404) - Yixuan Li - Lei Chen - Runyu He - Zhenzhi Wang - Gangshan Wu - Limin Wang ### Licensing Information <a rel="license" href="http://creativecommons.org/licenses/by-nc/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-nc/4.0/88x31.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc/4.0/">Creative Commons Attribution-NonCommercial 4.0 International License</a>. ### Citation Information If you find this dataset useful, please cite as ``` @InProceedings{Li_2021_ICCV, author = {Li, Yixuan and Chen, Lei and He, Runyu and Wang, Zhenzhi and Wu, Gangshan and Wang, Limin}, title = {MultiSports: A Multi-Person Video Dataset of Spatio-Temporally Localized Sports Actions}, booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)}, month = {October}, year = {2021}, pages = {13536-13545} } ``` ### Contributions Thanks to [@Judie1999](https://github.com/Judie1999) for adding this dataset.
MCG-NJU/MultiSports
[ "task_categories:image-classification", "task_categories:object-detection", "task_categories:other", "task_ids:multi-class-image-classification", "annotations_creators:crowdsourced", "language_creators:expert-generated", "multilinguality:monolingual", "source_datasets:original", "language:en", "license:cc-by-nc-4.0", "video", "action detection", "spatial-temporal action localization", "arxiv:2105.07404", "region:us" ]
2022-12-06T08:32:53+00:00
{"annotations_creators": ["crowdsourced"], "language_creators": ["expert-generated"], "language": ["en"], "license": ["cc-by-nc-4.0"], "multilinguality": ["monolingual"], "size_categories": [], "source_datasets": ["original"], "task_categories": ["image-classification", "object-detection", "other"], "task_ids": ["multi-class-image-classification"], "pretty_name": "MultiSports", "tags": ["video", "action detection", "spatial-temporal action localization"], "extra_gated_heading": "Acknowledge license to accept the repository", "extra_gated_prompt": "This work is licensed under a Creative Commons Attribution-NonCommercial 4.0 International License", "extra_gated_fields": {"I agree to use this dataset for non-commerical use ONLY": "checkbox"}}
2022-12-13T07:47:16+00:00
[ "2105.07404" ]
[ "en" ]
TAGS #task_categories-image-classification #task_categories-object-detection #task_categories-other #task_ids-multi-class-image-classification #annotations_creators-crowdsourced #language_creators-expert-generated #multilinguality-monolingual #source_datasets-original #language-English #license-cc-by-nc-4.0 #video #action detection #spatial-temporal action localization #arxiv-2105.07404 #region-us
Dataset Card for MultiSports ============================ Table of Contents ----------------- * Table of Contents * Dataset Description + Dataset Summary + Supported Tasks and Leaderboards + Languages * Dataset Structure + Data Instances + Data Fields + Data Splits * Dataset Creation + Curation Rationale + Source Data + Annotations + Personal and Sensitive Information * Considerations for Using the Data + Social Impact of Dataset + Discussion of Biases + Other Known Limitations * Additional Information + Dataset Curators + Licensing Information + Citation Information + Contributions Dataset Description ------------------- * Homepage: URL * Repository: URL * Paper: URL * Leaderboard: URL * Point of Contact: mailto: runyu\_he@URL ### Dataset Summary Spatio-temporal action localization is an important and challenging problem in video understanding. Previous action detection benchmarks are limited in aspects of small numbers of instances in a trimmed video or low-level atomic actions. MultiSports is a multi-person dataset of spatio-temporal localized sports actions. Please refer to this paper for more details. Please refer to this repository for evaluation. ### Supported Tasks and Leaderboards * 'Spatial-temporal action localization' Details about evaluation can be found in the GitHub Repository. Previous challenge results can be found in this page and this CodaLab challenge. ### Languages The class labels in the dataset are in English. Dataset Structure ----------------- ### Data Instances Demo is available on dataset homepage. The dataset contains and . The GT pkl file is a dictionary with the following structure: Here a is a with rows and 5 columns . ### Data Fields Raw frames are organized according to their sport category. The pickle file of GT contains the following fields. * labels: list of labels * train\_videos: a list with one split element containing the list of training videos * test\_videos: a list with one split element containing the list of validation videos * nframes: dictionary that gives the number of frames for each video * resolution: dictionary that output a tuple of the resolution for each video * gttubes: dictionary that contains the gt tubes for each video. Gt tubes are dictionary that associates from each index of label, a list of tubes. A is a with rows and 5 columns . Please note that the label index starts from 0 and the frame index starts from 1. For the label index , the label name is . Click here to see the full list of MultiSports class labels mapping: ### Data Splits *GT for test split is not provided. Please wait for the new competition to start. Information will be updated in dataset homepage.* Dataset Creation ---------------- ### Curation Rationale Spatio-temporal action detection is an important and challenging problem in video understanding. Previous action detection benchmarks are limited in aspects of small numbers of instances in a trimmed video or low-level atomic actions. ### Source Data #### Initial Data Collection and Normalization > > After choosing the four sports, we search for their competition videos by querying the name of sports like volleyball and the name of competition levels like Olympics and World Cup on YouTube, and then down- load videos from top search results. For each video, we only select high-resolution, e.g. 720P or 1080P, competition records and then manually cut them into clips of minutes, with less shot changes in each clip and to be more suitable for action detection. > > > #### Who are the source language producers? 
The annotators of action categories and temporal boundaries are professional athletes of the corresponding sports. Please refer to the paper for more information. ### Annotations #### Annotation process 1. (FIRST STAGE) A team of professional athletes generate records of the action la- bel, the starting and ending frame, and the person box in the starting frame, which can ensure the efficiency, accu- racy and consistency of our annotation results. 2. At least one annotator with domain knowledge double-check the annotations, correct wrong or inaccurate ones and also add missing annotations 3. (SECOND STAGE) With the help of FCOT tracking algorithm, a team of crowd-sourced annotators adjust bounding boxes of tracking results at each frame for each record. 4. Double-check each instance by playing it in 5fps and manually correct the inaccurate bounding boxes. #### Who are the annotators? For the first stage, annotators are professional athletes. For the second stage, annotators are common volunteers. ### Personal and Sensitive Information Considerations for Using the Data --------------------------------- ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations Additional Information ---------------------- ### Dataset Curators Authors of this paper * Yixuan Li * Lei Chen * Runyu He * Zhenzhi Wang * Gangshan Wu * Limin Wang ### Licensing Information <a rel="license" href="URL alt="Creative Commons License" style="border-width:0" src="https://i.URL /> This work is licensed under a <a rel="license" href="URL Commons Attribution-NonCommercial 4.0 International License. If you find this dataset useful, please cite as ### Contributions Thanks to @Judie1999 for adding this dataset.
[ "### Dataset Summary\n\n\nSpatio-temporal action localization is an important and challenging problem in video understanding. Previous action detection benchmarks are limited in aspects of small numbers of instances in a trimmed video or low-level atomic actions. MultiSports is a multi-person dataset of spatio-temporal localized sports actions. Please refer to this paper for more details. Please refer to this repository for evaluation.", "### Supported Tasks and Leaderboards\n\n\n* 'Spatial-temporal action localization'\n\n\nDetails about evaluation can be found in the GitHub Repository. Previous challenge results can be found in this page and this CodaLab challenge.", "### Languages\n\n\nThe class labels in the dataset are in English.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nDemo is available on dataset homepage.\n\n\nThe dataset contains and . The GT pkl file is a dictionary with the following structure:\n\n\nHere a is a with rows and 5 columns .", "### Data Fields\n\n\nRaw frames are organized according to their sport category. The pickle file of GT contains the following fields.\n\n\n* labels: list of labels\n* train\\_videos: a list with one split element containing the list of training videos\n* test\\_videos: a list with one split element containing the list of validation videos\n* nframes: dictionary that gives the number of frames for each video\n* resolution: dictionary that output a tuple of the resolution for each video\n* gttubes: dictionary that contains the gt tubes for each video. Gt tubes are dictionary that associates from each index of label, a list of tubes. A is a with rows and 5 columns .\n\n\nPlease note that the label index starts from 0 and the frame index starts from 1. For the label index , the label name is .\n\n\n\n\n Click here to see the full list of MultiSports class labels mapping:", "### Data Splits\n\n\n\n*GT for test split is not provided. Please wait for the new competition to start. Information will be updated in dataset homepage.*\n\n\nDataset Creation\n----------------", "### Curation Rationale\n\n\nSpatio-temporal action detection is an important and challenging problem in video understanding. Previous action detection benchmarks are limited in aspects of small numbers of instances in a trimmed video or low-level atomic actions.", "### Source Data", "#### Initial Data Collection and Normalization\n\n\n\n> \n> After choosing the four sports, we search for their competition videos by querying the name of sports like volleyball and the name of competition levels like Olympics and World Cup on YouTube, and then down- load videos from top search results. For each video, we only select high-resolution, e.g. 720P or 1080P, competition records and then manually cut them into clips of minutes, with less shot changes in each clip and to be more suitable for action detection.\n> \n> \n>", "#### Who are the source language producers?\n\n\nThe annotators of action categories and temporal boundaries are professional athletes of the corresponding sports. Please refer to the paper for more information.", "### Annotations", "#### Annotation process\n\n\n1. (FIRST STAGE) A team of professional athletes generate records of the action la- bel, the starting and ending frame, and the person box in the starting frame, which can ensure the efficiency, accu- racy and consistency of our annotation results.\n2. 
At least one annotator with domain knowledge double-check the annotations, correct wrong or inaccurate ones and also add missing annotations\n3. (SECOND STAGE) With the help of FCOT tracking algorithm, a team of crowd-sourced annotators adjust bounding boxes of tracking results at each frame for each record.\n4. Double-check each instance by playing it in 5fps and manually correct the inaccurate bounding boxes.", "#### Who are the annotators?\n\n\nFor the first stage, annotators are professional athletes. For the second stage, annotators are common volunteers.", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators\n\n\nAuthors of this paper\n\n\n* Yixuan Li\n* Lei Chen\n* Runyu He\n* Zhenzhi Wang\n* Gangshan Wu\n* Limin Wang", "### Licensing Information\n\n\n<a rel=\"license\" href=\"URL alt=\"Creative Commons License\" style=\"border-width:0\" src=\"https://i.URL /> \nThis work is licensed under a <a rel=\"license\" href=\"URL Commons Attribution-NonCommercial 4.0 International License.\n\n\nIf you find this dataset useful, please cite as", "### Contributions\n\n\nThanks to @Judie1999 for adding this dataset." ]
[ "TAGS\n#task_categories-image-classification #task_categories-object-detection #task_categories-other #task_ids-multi-class-image-classification #annotations_creators-crowdsourced #language_creators-expert-generated #multilinguality-monolingual #source_datasets-original #language-English #license-cc-by-nc-4.0 #video #action detection #spatial-temporal action localization #arxiv-2105.07404 #region-us \n", "### Dataset Summary\n\n\nSpatio-temporal action localization is an important and challenging problem in video understanding. Previous action detection benchmarks are limited in aspects of small numbers of instances in a trimmed video or low-level atomic actions. MultiSports is a multi-person dataset of spatio-temporal localized sports actions. Please refer to this paper for more details. Please refer to this repository for evaluation.", "### Supported Tasks and Leaderboards\n\n\n* 'Spatial-temporal action localization'\n\n\nDetails about evaluation can be found in the GitHub Repository. Previous challenge results can be found in this page and this CodaLab challenge.", "### Languages\n\n\nThe class labels in the dataset are in English.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nDemo is available on dataset homepage.\n\n\nThe dataset contains and . The GT pkl file is a dictionary with the following structure:\n\n\nHere a is a with rows and 5 columns .", "### Data Fields\n\n\nRaw frames are organized according to their sport category. The pickle file of GT contains the following fields.\n\n\n* labels: list of labels\n* train\\_videos: a list with one split element containing the list of training videos\n* test\\_videos: a list with one split element containing the list of validation videos\n* nframes: dictionary that gives the number of frames for each video\n* resolution: dictionary that output a tuple of the resolution for each video\n* gttubes: dictionary that contains the gt tubes for each video. Gt tubes are dictionary that associates from each index of label, a list of tubes. A is a with rows and 5 columns .\n\n\nPlease note that the label index starts from 0 and the frame index starts from 1. For the label index , the label name is .\n\n\n\n\n Click here to see the full list of MultiSports class labels mapping:", "### Data Splits\n\n\n\n*GT for test split is not provided. Please wait for the new competition to start. Information will be updated in dataset homepage.*\n\n\nDataset Creation\n----------------", "### Curation Rationale\n\n\nSpatio-temporal action detection is an important and challenging problem in video understanding. Previous action detection benchmarks are limited in aspects of small numbers of instances in a trimmed video or low-level atomic actions.", "### Source Data", "#### Initial Data Collection and Normalization\n\n\n\n> \n> After choosing the four sports, we search for their competition videos by querying the name of sports like volleyball and the name of competition levels like Olympics and World Cup on YouTube, and then down- load videos from top search results. For each video, we only select high-resolution, e.g. 720P or 1080P, competition records and then manually cut them into clips of minutes, with less shot changes in each clip and to be more suitable for action detection.\n> \n> \n>", "#### Who are the source language producers?\n\n\nThe annotators of action categories and temporal boundaries are professional athletes of the corresponding sports. 
Please refer to the paper for more information.", "### Annotations", "#### Annotation process\n\n\n1. (FIRST STAGE) A team of professional athletes generate records of the action la- bel, the starting and ending frame, and the person box in the starting frame, which can ensure the efficiency, accu- racy and consistency of our annotation results.\n2. At least one annotator with domain knowledge double-check the annotations, correct wrong or inaccurate ones and also add missing annotations\n3. (SECOND STAGE) With the help of FCOT tracking algorithm, a team of crowd-sourced annotators adjust bounding boxes of tracking results at each frame for each record.\n4. Double-check each instance by playing it in 5fps and manually correct the inaccurate bounding boxes.", "#### Who are the annotators?\n\n\nFor the first stage, annotators are professional athletes. For the second stage, annotators are common volunteers.", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators\n\n\nAuthors of this paper\n\n\n* Yixuan Li\n* Lei Chen\n* Runyu He\n* Zhenzhi Wang\n* Gangshan Wu\n* Limin Wang", "### Licensing Information\n\n\n<a rel=\"license\" href=\"URL alt=\"Creative Commons License\" style=\"border-width:0\" src=\"https://i.URL /> \nThis work is licensed under a <a rel=\"license\" href=\"URL Commons Attribution-NonCommercial 4.0 International License.\n\n\nIf you find this dataset useful, please cite as", "### Contributions\n\n\nThanks to @Judie1999 for adding this dataset." ]
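The GT pickle layout documented in the card maps directly to a few lines of Python. The sketch below assumes only the structure the card itself describes: the file `multisports_GT.pkl`, tubes stored as `nframes x 5` arrays of `<frame number> <x1> <y1> <x2> <y2>`, label indices starting at 0 and frame numbers starting at 1.

```
import pickle

with open("multisports_GT.pkl", "rb") as f:
    gt = pickle.load(f)

labels = gt["labels"]                    # e.g. labels[33] == "football shoot"
train_videos = gt["train_videos"][0]     # single split element, per the card
video = train_videos[0]

print(gt["nframes"][video], gt["resolution"][video])   # frame count, (h, w)

# gttubes: {video: {label_index: [tube, ...]}}; each tube has one row per frame:
# <frame number> <x1> <y1> <x2> <y2>.
for label_idx, tubes in gt["gttubes"][video].items():
    for tube in tubes:
        first_frame = int(tube[0, 0])    # frame numbering starts at 1
        x1, y1, x2, y2 = tube[0, 1:]
        print(labels[label_idx], first_frame, (x1, y1, x2, y2))
```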
d4dc2845e2a15fbb32b480c5881ec724b81a6705
Over 20,000 512x512 mel spectrograms of 5-second samples of music from my Spotify liked playlist. The code to convert from audio to spectrogram and vice versa can be found in https://github.com/teticio/audio-diffusion along with scripts to train and run inference using Denoising Diffusion Probabilistic Models.
```
x_res = 512
y_res = 512
sample_rate = 22050
n_fft = 2048
hop_length = 512
```
teticio/audio-diffusion-512
[ "task_categories:image-to-image", "size_categories:10K<n<100K", "audio", "spectrograms", "region:us" ]
2022-12-06T09:26:24+00:00
{"size_categories": ["10K<n<100K"], "source_datasets": [], "task_categories": ["image-to-image"], "task_ids": [], "pretty_name": "Mel spectrograms of music", "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "audio_file", "dtype": "string"}, {"name": "slice", "dtype": "int16"}], "splits": [{"name": "train", "num_bytes": 1903861364.293, "num_examples": 10663}], "download_size": 1903696036, "dataset_size": 1903861364.293}, "tags": ["audio", "spectrograms"]}
2023-06-19T19:34:16+00:00
[]
[]
TAGS #task_categories-image-to-image #size_categories-10K<n<100K #audio #spectrograms #region-us
Over 20,000 512x512 mel spectrograms of 5-second samples of music from my Spotify liked playlist. The code to convert from audio to spectrogram and vice versa can be found in URL along with scripts to train and run inference using Denoising Diffusion Probabilistic Models.
[]
[ "TAGS\n#task_categories-image-to-image #size_categories-10K<n<100K #audio #spectrograms #region-us \n" ]
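The parameters in the card are enough to sketch the forward audio-to-mel step with librosa. The snippet below is an approximation under the assumption that the linked repo's conversion is essentially a mel spectrogram followed by a dB conversion and 8-bit quantisation; the exact scaling and slicing in the repo may differ, and `track.mp3` is just a placeholder file name.

```
import numpy as np
import librosa

# Parameters from the card.
x_res, y_res = 512, 512          # time frames x mel bins
sample_rate = 22050
n_fft = 2048
hop_length = 512

# Slicing audio into fixed-length windows is handled by the repo; here we just
# take enough samples to fill x_res frames.
y, sr = librosa.load("track.mp3", sr=sample_rate)
y = y[: (x_res - 1) * hop_length + n_fft]

mel = librosa.feature.melspectrogram(
    y=y, sr=sr, n_fft=n_fft, hop_length=hop_length, n_mels=y_res
)
log_mel = librosa.power_to_db(mel, ref=np.max)

# Map roughly [-80, 0] dB to an 8-bit greyscale image for the diffusion model.
img = np.clip((log_mel + 80.0) / 80.0, 0.0, 1.0)
img = (img[:, :x_res] * 255).astype(np.uint8)
print(img.shape)   # (512, 512)
```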
cd0134d435c080bb352b8b352a799ff35007dbb9
# Dataset Card for "librispeech5k_train" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
CristianaLazar/librispeech5k_train
[ "region:us" ]
2022-12-06T10:51:00+00:00
{"dataset_info": {"features": [{"name": "file", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "text", "dtype": "string"}, {"name": "speaker_id", "dtype": "int64"}, {"name": "chapter_id", "dtype": "int64"}, {"name": "id", "dtype": "string"}, {"name": "input_features", "sequence": {"sequence": "float32"}}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train.360", "num_bytes": 6796635145.0, "num_examples": 5000}], "download_size": 3988908181, "dataset_size": 6796635145.0}}
2022-12-06T11:07:36+00:00
[]
[]
TAGS #region-us
# Dataset Card for "librispeech5k_train" More Information needed
[ "# Dataset Card for \"librispeech5k_train\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"librispeech5k_train\"\n\nMore Information needed" ]
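The card is a stub, but the schema in the record's metadata (raw audio plus precomputed `input_features` and `labels` sequences) already shows how to inspect a row. A short sketch follows; the split name `train.360` comes from the metadata, and reading `input_features` as a 2-D per-example feature matrix is an assumption, since the card gives no further detail.

```
import numpy as np
from datasets import load_dataset

ds = load_dataset("CristianaLazar/librispeech5k_train", split="train.360")

row = ds[0]
print(row["text"])                       # reference transcription
print(row["audio"]["sampling_rate"])     # 16000, per the schema

# Precomputed model inputs stored alongside the audio.
feats = np.array(row["input_features"])  # sequence of float32 frames
print(feats.shape, len(row["labels"]))   # feature matrix, integer label sequence
```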
308e97469d4cc58caaef04f110ebbd65dce628fc
# Dataset Card for "uber-reviews"

## Dataset Description

- **Homepage:** Kaggle Challenge
- **Repository:** https://www.kaggle.com/datasets/jschne61701/uber-rides-costumer-reviews-dataset
- **Paper:** N.A.
- **Leaderboard:** N.A.
- **Point of Contact:** N.A.

### Dataset Summary

Using Python's Beautiful Soup library and the Scrapy framework, the date, star rating, and comment were scraped from all reviews from 2013 to 2019.

### Languages

English

### Citation Information

https://www.kaggle.com/datasets/jschne61701/uber-rides-costumer-reviews-dataset
https://www.sitejabber.com/reviews/uber.com
https://www.consumeraffairs.com/travel/uber.html
https://www.kaggle.com/purvank/uber-rider-reviews-dataset

### Contributions

Thanks to [@davidberenstein1957](https://github.com/davidberenstein1957) for adding this dataset.
argilla/uber-reviews
[ "task_categories:text-classification", "task_ids:sentiment-classification", "size_categories:1K<n<10K", "source_datasets:original", "language:en", "license:unknown", "region:us" ]
2022-12-06T11:47:18+00:00
{"language": ["en"], "license": ["unknown"], "size_categories": ["1K<n<10K"], "source_datasets": ["original"], "task_categories": ["text-classification"], "task_ids": ["sentiment-classification"], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "inputs", "struct": [{"name": "text", "dtype": "string"}]}, {"name": "prediction", "list": [{"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}]}, {"name": "prediction_agent", "dtype": "string"}, {"name": "annotation", "dtype": "null"}, {"name": "annotation_agent", "dtype": "null"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "dtype": "null"}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "timestamp[us]"}, {"name": "metrics", "struct": [{"name": "text_length", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 2761597, "num_examples": 2347}], "download_size": 1691346, "dataset_size": 2761597}}
2022-12-06T12:00:28+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #task_ids-sentiment-classification #size_categories-1K<n<10K #source_datasets-original #language-English #license-unknown #region-us
# Dataset Card for "uber-reviews"

## Dataset Description

- Homepage: Kaggle Challenge
- Repository: URL
- Paper: N.A.
- Leaderboard: N.A.
- Point of Contact: N.A.

### Dataset Summary

Using Python's Beautiful Soup library and the Scrapy framework, the date, star rating, and comment were scraped from all reviews from 2013 to 2019.

### Languages

English

URL
URL
URL
URL

### Contributions

Thanks to @davidberenstein1957 for adding this dataset.
[ "# Dataset Card for \"uber-reviews\"", "## Dataset Description\n\n- Homepage: Kaggle Challenge\n- Repository: URL\n- Paper: N.A.\n- Leaderboard: N.A.\n- Point of Contact: N.A.", "### Dataset Summary\n\nUsing Python's Beautiful Soup library and Scrappy framework, scraped date, star rating, and comment from all reviews from 2013 - 2019.", "### Languages\n\nenglish \n\n\n\nURL\nURL\nURL\nURL", "### Contributions\n\nThanks to @davidberenstein1957 for adding this dataset." ]
[ "TAGS\n#task_categories-text-classification #task_ids-sentiment-classification #size_categories-1K<n<10K #source_datasets-original #language-English #license-unknown #region-us \n", "# Dataset Card for \"uber-reviews\"", "## Dataset Description\n\n- Homepage: Kaggle Challenge\n- Repository: URL\n- Paper: N.A.\n- Leaderboard: N.A.\n- Point of Contact: N.A.", "### Dataset Summary\n\nUsing Python's Beautiful Soup library and Scrappy framework, scraped date, star rating, and comment from all reviews from 2013 - 2019.", "### Languages\n\nenglish \n\n\n\nURL\nURL\nURL\nURL", "### Contributions\n\nThanks to @davidberenstein1957 for adding this dataset." ]
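The metadata above describes an Argilla-style record layout: free-text `text`, a `prediction` list of label/score pairs, and an empty `annotation`. Below is a small sketch of loading the dataset and reading the top predicted sentiment per review; the field names come from the metadata, and everything else (including the assumption that predictions are sentiment labels) is illustrative.

```
from datasets import load_dataset

ds = load_dataset("argilla/uber-reviews", split="train")

row = ds[0]
print(row["text"][:200])                 # the scraped review text

# "prediction" is a list of {"label", "score"} dicts from the pre-annotating
# model; "annotation" is still empty in this dump.
if row["prediction"]:
    top = max(row["prediction"], key=lambda p: p["score"])
    print(top["label"], round(top["score"], 3))
```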
d42d44b7dea25b643c58fc02fc19f5b81ee1d372
This dataset contains NBA games from 2019 to 2022.
KDAM1/BasketballGames
[ "task_categories:other", "multilinguality:monolingual", "language:en", "license:unknown", "region:us" ]
2022-12-06T11:56:33+00:00
{"annotations_creators": [], "language_creators": [], "language": ["en"], "license": ["unknown"], "multilinguality": ["monolingual"], "size_categories": [], "source_datasets": [], "task_categories": ["other"], "task_ids": [], "pretty_name": "BasketballGames", "tags": []}
2022-12-06T12:11:11+00:00
[]
[ "en" ]
TAGS #task_categories-other #multilinguality-monolingual #language-English #license-unknown #region-us
This dataset contains NBA games from 2019 to 2022.
[]
[ "TAGS\n#task_categories-other #multilinguality-monolingual #language-English #license-unknown #region-us \n" ]
f57199dde555a1e4858be6fdb307dc0d5060761f
# Dataset Card for "tripadvisor-hotel-reviews"

## Dataset Description

- **Homepage:** Kaggle Challenge
- **Repository:** https://www.kaggle.com/datasets/andrewmvd/trip-advisor-hotel-reviews
- **Paper:** https://zenodo.org/record/1219899
- **Leaderboard:** N.A.
- **Point of Contact:** N.A.

### Dataset Summary

Hotels play a crucial role in traveling, and with increased access to information, new pathways for selecting the best ones have emerged. With this dataset, consisting of 20k reviews crawled from Tripadvisor, you can explore what makes a great hotel and maybe even use it in your travels! Ratings are on a scale from 1 to 5.

### Languages

English

### Citation Information

If you use this dataset in your research, please credit the authors: Alam, M. H., Ryu, W.-J., Lee, S., 2016. Joint multi-grain topic sentiment: modeling semantic aspects for online reviews. Information Sciences 339, 206–223. License: CC BY NC 4.0.

### Contributions

Thanks to [@davidberenstein1957](https://github.com/davidberenstein1957) for adding this dataset.
argilla/tripadvisor-hotel-reviews
[ "task_categories:text-classification", "task_ids:sentiment-classification", "size_categories:10K<n<100K", "source_datasets:original", "language:en", "license:cc-by-nc-4.0", "region:us" ]
2022-12-06T13:04:42+00:00
{"language": ["en"], "license": ["cc-by-nc-4.0"], "size_categories": ["10K<n<100K"], "source_datasets": ["original"], "task_categories": ["text-classification"], "task_ids": ["sentiment-classification"], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "inputs", "struct": [{"name": "text", "dtype": "string"}]}, {"name": "prediction", "list": [{"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}]}, {"name": "prediction_agent", "dtype": "string"}, {"name": "annotation", "dtype": "null"}, {"name": "annotation_agent", "dtype": "null"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "dtype": "null"}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "timestamp[us]"}, {"name": "metrics", "struct": [{"name": "text_length", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 31840239, "num_examples": 20491}], "download_size": 19678149, "dataset_size": 31840239}}
2022-12-07T07:10:56+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #task_ids-sentiment-classification #size_categories-10K<n<100K #source_datasets-original #language-English #license-cc-by-nc-4.0 #region-us
# Dataset Card for "tripadvisor-hotel-reviews" ## Dataset Description - Homepage: Kaggle Challenge - Repository: URL - Paper: URL - Leaderboard: N.A. - Point of Contact: N.A. ### Dataset Summary Hotels play a crucial role in traveling and with the increased access to information new pathways of selecting the best ones emerged. With this dataset, consisting of 20k reviews crawled from Tripadvisor, you can explore what makes a great hotel and maybe even use this model in your travels! Citations on a scale from 1 to 5. ### Languages english If you use this dataset in your research, please credit the authors. Citation Alam, M. H., Ryu, W.-J., Lee, S., 2016. Joint multi-grain topic sentiment: modeling semantic aspects for online reviews. Information Sciences 339, 206–223. DOI License CC BY NC 4.0 Splash banner ### Contributions Thanks to @davidberenstein1957 for adding this dataset.
[ "# Dataset Card for \"tripadvisor-hotel-reviews\"", "## Dataset Description\n- Homepage: Kaggle Challenge\n- Repository: URL\n- Paper: URL\n- Leaderboard: N.A.\n- Point of Contact: N.A.", "### Dataset Summary\nHotels play a crucial role in traveling and with the increased access to information new pathways of selecting the best ones emerged.\nWith this dataset, consisting of 20k reviews crawled from Tripadvisor, you can explore what makes a great hotel and maybe even use this model in your travels!\nCitations on a scale from 1 to 5.", "### Languages\nenglish \n\nIf you use this dataset in your research, please credit the authors.\nCitation\nAlam, M. H., Ryu, W.-J., Lee, S., 2016. Joint multi-grain topic sentiment: modeling semantic aspects for online reviews. Information Sciences 339, 206–223.\nDOI\nLicense\nCC BY NC 4.0\nSplash banner", "### Contributions\nThanks to @davidberenstein1957 for adding this dataset." ]
[ "TAGS\n#task_categories-text-classification #task_ids-sentiment-classification #size_categories-10K<n<100K #source_datasets-original #language-English #license-cc-by-nc-4.0 #region-us \n", "# Dataset Card for \"tripadvisor-hotel-reviews\"", "## Dataset Description\n- Homepage: Kaggle Challenge\n- Repository: URL\n- Paper: URL\n- Leaderboard: N.A.\n- Point of Contact: N.A.", "### Dataset Summary\nHotels play a crucial role in traveling and with the increased access to information new pathways of selecting the best ones emerged.\nWith this dataset, consisting of 20k reviews crawled from Tripadvisor, you can explore what makes a great hotel and maybe even use this model in your travels!\nCitations on a scale from 1 to 5.", "### Languages\nenglish \n\nIf you use this dataset in your research, please credit the authors.\nCitation\nAlam, M. H., Ryu, W.-J., Lee, S., 2016. Joint multi-grain topic sentiment: modeling semantic aspects for online reviews. Information Sciences 339, 206–223.\nDOI\nLicense\nCC BY NC 4.0\nSplash banner", "### Contributions\nThanks to @davidberenstein1957 for adding this dataset." ]
f88caaaae5963fd57542ed0a0b80eff469cbf9f5
# Dataset Card for "twitter-genderbias" ## Dataset Description - **Homepage:** Kaggle Challenge - **Repository:** https://www.kaggle.com/datasets/kevinmorgado/gender-bias-spanish - **Paper:** N.A. - **Leaderboard:** N.A. - **Point of Contact:** N.A. ### Dataset Summary This dataset contains more than 1900 labeled Spanish tweets with the category biased or non-biased. This was made for a Hackathon to reduce gender bias on the internet. - contents: Text - label: - biased - non-biased ### Languages spanish ### Citation Information https://www.kaggle.com/datasets/kevinmorgado/gender-bias-spanish ### Contributions Thanks to [@davidberenstein1957](https://github.com/davidberenstein1957) for adding this dataset.
argilla/twitter-genderbias
[ "task_categories:text-classification", "task_ids:sentiment-classification", "task_ids:sentiment-analysis", "size_categories:1K<n<10K", "source_datasets:original", "language:es", "license:unknown", "region:us" ]
2022-12-06T13:17:03+00:00
{"language": ["es"], "license": ["unknown"], "size_categories": ["1K<n<10K"], "source_datasets": ["original"], "task_categories": ["text-classification"], "task_ids": ["sentiment-classification", "sentiment-analysis"], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "inputs", "struct": [{"name": "text", "dtype": "string"}]}, {"name": "prediction", "list": [{"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}]}, {"name": "prediction_agent", "dtype": "string"}, {"name": "annotation", "dtype": "null"}, {"name": "annotation_agent", "dtype": "null"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "dtype": "null"}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "timestamp[us]"}, {"name": "metrics", "struct": [{"name": "text_length", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 573508, "num_examples": 1914}], "download_size": 373847, "dataset_size": 573508}}
2022-12-06T16:21:21+00:00
[]
[ "es" ]
TAGS #task_categories-text-classification #task_ids-sentiment-classification #task_ids-sentiment-analysis #size_categories-1K<n<10K #source_datasets-original #language-Spanish #license-unknown #region-us
# Dataset Card for "twitter-genderbias" ## Dataset Description - Homepage: Kaggle Challenge - Repository: URL - Paper: N.A. - Leaderboard: N.A. - Point of Contact: N.A. ### Dataset Summary This dataset contains more than 1900 labeled Spanish tweets with the category biased or non-biased. This was made for a Hackathon to reduce gender bias on the internet. - contents: Text - label: - biased - non-biased ### Languages spanish URL ### Contributions Thanks to @davidberenstein1957 for adding this dataset.
[ "# Dataset Card for \"twitter-genderbias\"", "## Dataset Description\n\n- Homepage: Kaggle Challenge\n- Repository: URL\n- Paper: N.A.\n- Leaderboard: N.A.\n- Point of Contact: N.A.", "### Dataset Summary\n\nThis dataset contains more than 1900 labeled Spanish tweets with the category biased or non-biased. This was made for a Hackathon to reduce gender bias on the internet.\n\n- contents: Text\n- label:\n - biased\n - non-biased", "### Languages\n\nspanish \n\n\n\nURL", "### Contributions\n\nThanks to @davidberenstein1957 for adding this dataset." ]
[ "TAGS\n#task_categories-text-classification #task_ids-sentiment-classification #task_ids-sentiment-analysis #size_categories-1K<n<10K #source_datasets-original #language-Spanish #license-unknown #region-us \n", "# Dataset Card for \"twitter-genderbias\"", "## Dataset Description\n\n- Homepage: Kaggle Challenge\n- Repository: URL\n- Paper: N.A.\n- Leaderboard: N.A.\n- Point of Contact: N.A.", "### Dataset Summary\n\nThis dataset contains more than 1900 labeled Spanish tweets with the category biased or non-biased. This was made for a Hackathon to reduce gender bias on the internet.\n\n- contents: Text\n- label:\n - biased\n - non-biased", "### Languages\n\nspanish \n\n\n\nURL", "### Contributions\n\nThanks to @davidberenstein1957 for adding this dataset." ]
4d56aff3af34b027cace01c0dcc3b1f9445872f4
# Dataset Card for "twitter-coronavirus" ## Dataset Description - **Homepage:** Kaggle Challenge - **Repository:** https://www.kaggle.com/datasets/datatattle/covid-19-nlp-text-classification - **Paper:** N.A. - **Leaderboard:** N.A. - **Point of Contact:** N.A. ### Dataset Summary Perform Text Classification on the data. The tweets have been pulled from Twitter and manual tagging has been done then. The names and usernames have been given codes to avoid any privacy concerns. Columns: 1) Location 2) Tweet At 3) Original Tweet 4) Label - Extremely Negative - Negative - Neutral - Positive - Extremely Positive ### Languages english ### Citation Information https://www.kaggle.com/datasets/datatattle/covid-19-nlp-text-classification ### Contributions Thanks to [@davidberenstein1957](https://github.com/davidberenstein1957) for adding this dataset.
argilla/twitter-coronavirus
[ "task_categories:text-classification", "task_ids:sentiment-classification", "task_ids:sentiment-analysis", "size_categories:10K<n<100K", "source_datasets:original", "language:en", "license:unknown", "region:us" ]
2022-12-06T13:54:07+00:00
{"language": ["en"], "license": ["unknown"], "size_categories": ["10K<n<100K"], "source_datasets": ["original"], "task_categories": ["text-classification"], "task_ids": ["sentiment-classification", "sentiment-analysis"], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "inputs", "struct": [{"name": "text", "dtype": "string"}]}, {"name": "prediction", "list": [{"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}]}, {"name": "prediction_agent", "dtype": "string"}, {"name": "annotation", "dtype": "null"}, {"name": "annotation_agent", "dtype": "null"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "struct": [{"name": "location", "dtype": "string"}, {"name": "screen_name", "dtype": "int64"}, {"name": "split", "dtype": "string"}, {"name": "user_name", "dtype": "int64"}]}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "timestamp[us]"}, {"name": "metrics", "struct": [{"name": "text_length", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 25394534, "num_examples": 44955}], "download_size": 15712627, "dataset_size": 25394534}}
2022-12-06T16:20:31+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #task_ids-sentiment-classification #task_ids-sentiment-analysis #size_categories-10K<n<100K #source_datasets-original #language-English #license-unknown #region-us
# Dataset Card for "twitter-coronavirus" ## Dataset Description - Homepage: Kaggle Challenge - Repository: URL - Paper: N.A. - Leaderboard: N.A. - Point of Contact: N.A. ### Dataset Summary Perform Text Classification on the data. The tweets have been pulled from Twitter and manual tagging has been done then. The names and usernames have been given codes to avoid any privacy concerns. Columns: 1) Location 2) Tweet At 3) Original Tweet 4) Label - Extremely Negative - Negative - Neutral - Positive - Extremely Positive ### Languages english URL ### Contributions Thanks to @davidberenstein1957 for adding this dataset.
[ "# Dataset Card for \"twitter-coronavirus\"", "## Dataset Description\n\n- Homepage: Kaggle Challenge\n- Repository: URL\n- Paper: N.A.\n- Leaderboard: N.A.\n- Point of Contact: N.A.", "### Dataset Summary\n\nPerform Text Classification on the data. The tweets have been pulled from Twitter and manual tagging has been done then.\nThe names and usernames have been given codes to avoid any privacy concerns.\n\nColumns:\n1) Location\n2) Tweet At\n3) Original Tweet\n4) Label\n - Extremely Negative\n - Negative\n - Neutral\n - Positive\n - Extremely Positive", "### Languages\n\nenglish \n\n\n\nURL", "### Contributions\n\nThanks to @davidberenstein1957 for adding this dataset." ]
[ "TAGS\n#task_categories-text-classification #task_ids-sentiment-classification #task_ids-sentiment-analysis #size_categories-10K<n<100K #source_datasets-original #language-English #license-unknown #region-us \n", "# Dataset Card for \"twitter-coronavirus\"", "## Dataset Description\n\n- Homepage: Kaggle Challenge\n- Repository: URL\n- Paper: N.A.\n- Leaderboard: N.A.\n- Point of Contact: N.A.", "### Dataset Summary\n\nPerform Text Classification on the data. The tweets have been pulled from Twitter and manual tagging has been done then.\nThe names and usernames have been given codes to avoid any privacy concerns.\n\nColumns:\n1) Location\n2) Tweet At\n3) Original Tweet\n4) Label\n - Extremely Negative\n - Negative\n - Neutral\n - Positive\n - Extremely Positive", "### Languages\n\nenglish \n\n\n\nURL", "### Contributions\n\nThanks to @davidberenstein1957 for adding this dataset." ]
5f7b5b3276e087b395a0157e5aa8f37ff8679f62
# Dataset Card for "scalableMLDL1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
marvmk/scalableMLDL1
[ "region:us" ]
2022-12-06T14:03:14+00:00
{"dataset_info": {"features": [{"name": "input_features", "sequence": {"sequence": "float32"}}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 5726523552, "num_examples": 5962}, {"name": "test", "num_bytes": 2546311152, "num_examples": 2651}], "download_size": 1397383253, "dataset_size": 8272834704}}
2022-12-06T14:05:51+00:00
[]
[]
TAGS #region-us
# Dataset Card for "scalableMLDL1" More Information needed
[ "# Dataset Card for \"scalableMLDL1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"scalableMLDL1\"\n\nMore Information needed" ]
9c42976fd43cd0dd562b3627e7f2b43419181455
# Dataset Card for "librispeech_validation" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
CristianaLazar/librispeech_validation
[ "region:us" ]
2022-12-06T14:19:25+00:00
{"dataset_info": {"features": [{"name": "file", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "text", "dtype": "string"}, {"name": "speaker_id", "dtype": "int64"}, {"name": "chapter_id", "dtype": "int64"}, {"name": "id", "dtype": "string"}, {"name": "input_features", "sequence": {"sequence": "float32"}}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "validation", "num_bytes": 3218271771.125, "num_examples": 2703}], "download_size": 1286700444, "dataset_size": 3218271771.125}}
2022-12-06T14:28:22+00:00
[]
[]
TAGS #region-us
# Dataset Card for "librispeech_validation" More Information needed
[ "# Dataset Card for \"librispeech_validation\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"librispeech_validation\"\n\nMore Information needed" ]
08ea56d47b74bc85ef202198f1f896d4a18561f1
# Dataset Card for "speech2emotion" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
wofeishenling/speech2emotion
[ "region:us" ]
2022-12-06T14:31:10+00:00
{"dataset_info": {"features": [{"name": "file", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "label", "dtype": {"class_label": {"names": {"0": "neu", "1": "hap", "2": "ang", "3": "sad"}}}}, {"name": "transcription", "dtype": "string"}], "splits": [{"name": "session1", "num_bytes": 164498926.375, "num_examples": 1085}, {"name": "session2", "num_bytes": 153414523.125, "num_examples": 1023}, {"name": "session3", "num_bytes": 163876335.125, "num_examples": 1151}, {"name": "session4", "num_bytes": 146259809.125, "num_examples": 1031}, {"name": "session5", "num_bytes": 178359204.875, "num_examples": 1241}], "download_size": 788677878, "dataset_size": 806408798.625}}
2023-02-15T07:42:52+00:00
[]
[]
TAGS #region-us
# Dataset Card for "speech2emotion" More Information needed
[ "# Dataset Card for \"speech2emotion\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"speech2emotion\"\n\nMore Information needed" ]
390eadd23da82efb8905eb877dda85a73c8a6d0d
# Dataset Card for "leicester_loaded_annotations" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
davanstrien/leicester_loaded_annotations
[ "region:us" ]
2022-12-06T14:55:00+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "choice", "dtype": "string"}, {"name": "annotator", "dtype": "int64"}, {"name": "annotation_id", "dtype": "int64"}, {"name": "created_at", "dtype": "string"}, {"name": "updated_at", "dtype": "string"}, {"name": "lead_time", "dtype": "float64"}, {"name": "image_url", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "loaded_images", "dtype": "image"}, {"name": "labels", "dtype": {"class_label": {"names": {"0": "local_desc", "1": "county_desc", "2": "major_residences_index", "3": "advert", "4": "county_trades", "5": "county_residence_alpha", "6": "index_general_or_place", "7": "title_page", "8": "adverts_index_alpha", "9": "adverts_index_business_cat", "10": "prefatory_text"}}}}], "splits": [{"name": "train", "num_bytes": 1096673288.0, "num_examples": 525}], "download_size": 1064406432, "dataset_size": 1096673288.0}}
2022-12-06T20:17:19+00:00
[]
[]
TAGS #region-us
# Dataset Card for "leicester_loaded_annotations" More Information needed
[ "# Dataset Card for \"leicester_loaded_annotations\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"leicester_loaded_annotations\"\n\nMore Information needed" ]
04a26f61e5df35b37f763582d1157b4c763470ea
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: knkarthick/MEETING_SUMMARY * Dataset: bazzhangz/sumdataset * Config: bazzhangz--sumdataset * Split: train To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@bazzhangz](https://huggingface.co/bazzhangz) for evaluating this model.
autoevaluate/autoeval-eval-bazzhangz__sumdataset-bazzhangz__sumdataset-18687b-2355774138
[ "autotrain", "evaluation", "region:us" ]
2022-12-06T15:26:09+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["bazzhangz/sumdataset"], "eval_info": {"task": "summarization", "model": "knkarthick/MEETING_SUMMARY", "metrics": [], "dataset_name": "bazzhangz/sumdataset", "dataset_config": "bazzhangz--sumdataset", "dataset_split": "train", "col_mapping": {"text": "dialogue", "target": "summary"}}}
2022-12-06T15:57:43+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: knkarthick/MEETING_SUMMARY * Dataset: bazzhangz/sumdataset * Config: bazzhangz--sumdataset * Split: train To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @bazzhangz for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: knkarthick/MEETING_SUMMARY\n* Dataset: bazzhangz/sumdataset\n* Config: bazzhangz--sumdataset\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @bazzhangz for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: knkarthick/MEETING_SUMMARY\n* Dataset: bazzhangz/sumdataset\n* Config: bazzhangz--sumdataset\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @bazzhangz for evaluating this model." ]
4111ff2c9a6086993cbadf8e57cb0c4178494282
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: google/pegasus-cnn_dailymail * Dataset: cnn_dailymail * Config: 3.0.0 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@AkankshaK](https://huggingface.co/AkankshaK) for evaluating this model.
autoevaluate/autoeval-eval-cnn_dailymail-3.0.0-5c4aa4-2355874139
[ "autotrain", "evaluation", "region:us" ]
2022-12-06T15:26:15+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["cnn_dailymail"], "eval_info": {"task": "summarization", "model": "google/pegasus-cnn_dailymail", "metrics": ["meteor"], "dataset_name": "cnn_dailymail", "dataset_config": "3.0.0", "dataset_split": "test", "col_mapping": {"text": "article", "target": "highlights"}}}
2022-12-06T17:12:47+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: google/pegasus-cnn_dailymail * Dataset: cnn_dailymail * Config: 3.0.0 * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @AkankshaK for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: google/pegasus-cnn_dailymail\n* Dataset: cnn_dailymail\n* Config: 3.0.0\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @AkankshaK for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: google/pegasus-cnn_dailymail\n* Dataset: cnn_dailymail\n* Config: 3.0.0\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @AkankshaK for evaluating this model." ]
dd64a3ea4fda4d360487cf28888ffeb65b91fd32
# Dataset Card for "leicester_loaded_annotations_binary" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
davanstrien/leicester_loaded_annotations_binary
[ "region:us" ]
2022-12-06T15:57:06+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "labels", "dtype": {"class_label": {"names": {"0": "other", "1": "county_trades"}}}}], "splits": [{"name": "train", "num_bytes": 1090143420.0, "num_examples": 525}], "download_size": 0, "dataset_size": 1090143420.0}}
2022-12-07T13:59:43+00:00
[]
[]
TAGS #region-us
# Dataset Card for "leicester_loaded_annotations_binary" More Information needed
[ "# Dataset Card for \"leicester_loaded_annotations_binary\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"leicester_loaded_annotations_binary\"\n\nMore Information needed" ]
1aa02586aa87aa4123fc625a03b6a8ecfb45aac8
# Dataset Card for "librispeech_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
CristianaLazar/librispeech_test
[ "region:us" ]
2022-12-06T17:15:36+00:00
{"dataset_info": {"features": [{"name": "file", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "text", "dtype": "string"}, {"name": "speaker_id", "dtype": "int64"}, {"name": "chapter_id", "dtype": "int64"}, {"name": "id", "dtype": "string"}, {"name": "input_features", "sequence": {"sequence": "float32"}}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "test", "num_bytes": 3140310938.5, "num_examples": 2620}], "download_size": 1297324022, "dataset_size": 3140310938.5}}
2022-12-06T17:22:31+00:00
[]
[]
TAGS #region-us
# Dataset Card for "librispeech_test" More Information needed
[ "# Dataset Card for \"librispeech_test\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"librispeech_test\"\n\nMore Information needed" ]
f2dd0c74d63d03f3932d0f9b85bc827ec2779d99
# ImageNet-1k (Test set only) Original paper: [ImageNet Large Scale Visual Recognition Challenge](https://arxiv.org/abs/1409.0575) Homepage: https://www.image-net.org/ Bibtex: ``` @article{ILSVRC15, Author = {Olga Russakovsky and Jia Deng and Hao Su and Jonathan Krause and Sanjeev Satheesh and Sean Ma and Zhiheng Huang and Andrej Karpathy and Aditya Khosla and Michael Bernstein and Alexander C. Berg and Li Fei-Fei}, Title = {{ImageNet Large Scale Visual Recognition Challenge}}, Year = {2015}, journal = {International Journal of Computer Vision (IJCV)}, doi = {10.1007/s11263-015-0816-y}, volume={115}, number={3}, pages={211-252} } ```
djghosh/wds_imagenet1k_test
[ "arxiv:1409.0575", "region:us" ]
2022-12-06T18:43:25+00:00
{}
2022-12-12T21:01:44+00:00
[ "1409.0575" ]
[]
TAGS #arxiv-1409.0575 #region-us
# ImageNet-1k (Test set only) Original paper: ImageNet Large Scale Visual Recognition Challenge Homepage: URL Bibtex:
[ "# ImageNet-1k (Test set only)\n\nOriginal paper: ImageNet Large Scale Visual Recognition Challenge\n\nHomepage: URL\n\nBibtex:" ]
[ "TAGS\n#arxiv-1409.0575 #region-us \n", "# ImageNet-1k (Test set only)\n\nOriginal paper: ImageNet Large Scale Visual Recognition Challenge\n\nHomepage: URL\n\nBibtex:" ]
5e2ef70db24ea5ac74fd863ba2a416a2dc698379
# Araina Text Corpus Text corpus in [Aranese variety of Gascon dialect of Occitan](https://en.wikipedia.org/wiki/Aranese_dialect). ## Corpora - `_nogues`: Literary texts translated by Antòni Nogués. Sourced from [institutestudisaranesi.cat](http://www.institutestudisaranesi.cat/colleccion-antoni-nogues/#1541013646532-338ed5f5-a3aa) - `_suils`: Language educational material by Jordi Suïls Subirà - `_conselh`: Administrative proceedings from Conselh Generau d'Aran ## Project Araina This corpus was prepared as part of [Project Araina](https://www.projecte-araina.org) with support from Culture Department of the Catalan autonomous government. Aquest corpus s'ha elaborat en el marc del [Projecte Araina](https://www.projecte-araina.org) amb el suport del Departament de Cultura de la Generalitat de Catalunya. <img src="https://github.com/collectivat/cmusphinx-models/raw/master/img/logo_generalitat.png" width="400"/>
collectivat/araina-text-corpus
[ "task_categories:text-generation", "task_ids:language-modeling", "annotations_creators:found", "language_creators:found", "multilinguality:monolingual", "size_categories:1M<n<10M", "source_datasets:original", "language:oc", "license:cc0-1.0", "region:us" ]
2022-12-06T18:59:41+00:00
{"annotations_creators": ["found"], "language_creators": ["found"], "language": ["oc"], "license": "cc0-1.0", "multilinguality": ["monolingual"], "size_categories": ["1M<n<10M"], "source_datasets": ["original"], "task_categories": ["text-generation"], "task_ids": ["language-modeling"]}
2022-12-30T15:42:30+00:00
[]
[ "oc" ]
TAGS #task_categories-text-generation #task_ids-language-modeling #annotations_creators-found #language_creators-found #multilinguality-monolingual #size_categories-1M<n<10M #source_datasets-original #language-Occitan (post 1500) #license-cc0-1.0 #region-us
# Araina Text Corpus Text corpus in Aranese variety of Gascon dialect of Occitan. ## Corpora - '_nogues': Literary texts translated by Antòni Nogués. Sourced from URL - '_suils': Language educational material by Jordi Suïls Subirà - '_conselh': Administrative proceedings from Conselh Generau d'Aran ## Project Araina This corpus was prepared as part of Project Araina with support from Culture Department of the Catalan autonomous government. Aquest corpus s'ha elaborat en el marc del Projecte Araina amb el suport del Departament de Cultura de la Generalitat de Catalunya. <img src="URL width="400"/>
[ "# Araina Text Corpus\n\nText corpus in Aranese variety of Gascon dialect of Occitan.", "## Corpora\n\n- '_nogues': Literary texts translated by Antòni Nogués. Sourced from URL\n- '_suils': Language educational material by Jordi Suïls Subirà\n- '_conselh': Administrative proceedings from Conselh Generau d'Aran", "## Project Araina\n\nThis corpus was prepared as part of Project Araina with support from Culture Department of the Catalan autonomous government.\n\nAquest corpus s'ha elaborat en el marc del Projecte Araina amb el suport del Departament de Cultura de la Generalitat de Catalunya.\n\n<img src=\"URL width=\"400\"/>" ]
[ "TAGS\n#task_categories-text-generation #task_ids-language-modeling #annotations_creators-found #language_creators-found #multilinguality-monolingual #size_categories-1M<n<10M #source_datasets-original #language-Occitan (post 1500) #license-cc0-1.0 #region-us \n", "# Araina Text Corpus\n\nText corpus in Aranese variety of Gascon dialect of Occitan.", "## Corpora\n\n- '_nogues': Literary texts translated by Antòni Nogués. Sourced from URL\n- '_suils': Language educational material by Jordi Suïls Subirà\n- '_conselh': Administrative proceedings from Conselh Generau d'Aran", "## Project Araina\n\nThis corpus was prepared as part of Project Araina with support from Culture Department of the Catalan autonomous government.\n\nAquest corpus s'ha elaborat en el marc del Projecte Araina amb el suport del Departament de Cultura de la Generalitat de Catalunya.\n\n<img src=\"URL width=\"400\"/>" ]
070a7c8bdf18d2e9c2f2552f593a18673548f66c
Custom Marist QA dataset to train Kevin - version 12/01/22
Bonorinoa/kevin_train_12_6
[ "region:us" ]
2022-12-06T19:03:46+00:00
{}
2022-12-06T19:05:08+00:00
[]
[]
TAGS #region-us
Custom Marist QA dataset to train Kevin - version 12/01/22
[]
[ "TAGS\n#region-us \n" ]
eab24cf63fc482f610795ecc93c6e5bc40317a68
# Dataset Card for "librispeech_augm_validation-tiny" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
CristianaLazar/librispeech_augm_validation-tiny
[ "region:us" ]
2022-12-06T19:11:18+00:00
{"dataset_info": {"features": [{"name": "file", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "text", "dtype": "string"}, {"name": "speaker_id", "dtype": "int64"}, {"name": "chapter_id", "dtype": "int64"}, {"name": "id", "dtype": "string"}, {"name": "input_features", "sequence": {"sequence": "float32"}}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "validation", "num_bytes": 3218271771.125, "num_examples": 2703}], "download_size": 1320733851, "dataset_size": 3218271771.125}}
2022-12-06T19:23:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for "librispeech_augm_validation-tiny" More Information needed
[ "# Dataset Card for \"librispeech_augm_validation-tiny\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"librispeech_augm_validation-tiny\"\n\nMore Information needed" ]
ee99e225418ab025716b664a25f535cee4bee363
# Dataset Card for "starter2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) Generated by a fine-tuned GPT3 curie model over 2020 and 2022 on 10.000 conversation starters. Generation pipeline: - generate 3 conversation starters - fine-tuned curie classify 0-1 - take top 1 Humans have reviewed and fixed/deleted these conversation starters afterwards
Langame/starter2
[ "region:us" ]
2022-12-06T19:26:51+00:00
{"viewer": true, "dataset_info": {"features": [{"name": "content", "dtype": "string"}, {"name": "state", "dtype": "string"}, {"name": "apiCompletionModel", "dtype": "string"}, {"name": "createdAt", "dtype": "timestamp[ns, tz=UTC]"}, {"name": "completionType", "dtype": "float64"}, {"name": "apiClassificationModel", "dtype": "string"}, {"name": "fixGrammar", "dtype": "bool"}, {"name": "shard", "dtype": "float64"}, {"name": "parallelCompletions", "dtype": "float64"}, {"name": "disabled", "dtype": "bool"}, {"name": "brokenGrammar", "dtype": "string"}, {"name": "profanityThreshold", "dtype": "float64"}, {"name": "tweet", "dtype": "bool"}, {"name": "conversationStarters", "list": [{"name": "aiTopics", "sequence": "string"}, {"name": "broken_grammar", "dtype": "string"}, {"name": "classification", "dtype": "string"}, {"name": "conversation_starter", "dtype": "string"}]}, {"name": "topics", "sequence": "string"}, {"name": "embedding", "sequence": "float64"}, {"name": "error", "dtype": "string"}, {"name": "developer_message", "dtype": "string"}, {"name": "aiTopics", "sequence": "string"}, {"name": "tags", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 8891417, "num_examples": 3072}], "download_size": 6983130, "dataset_size": 8891417}}
2023-02-23T08:40:29+00:00
[]
[]
TAGS #region-us
# Dataset Card for "starter2" More Information needed Generated by a fine-tuned GPT3 curie model over 2020 and 2022 on 10.000 conversation starters. Generation pipeline: - generate 3 conversation starters - fine-tuned curie classify 0-1 - take top 1 Humans have reviewed and fixed/deleted these conversation starters afterwards
[ "# Dataset Card for \"starter2\"\n\nMore Information needed\n\nGenerated by a fine-tuned GPT3 curie model over 2020 and 2022 on 10.000 conversation starters.\n\nGeneration pipeline:\n- generate 3 conversation starters\n- fine-tuned curie classify 0-1\n- take top 1\n\nHumans have reviewed and fixed/deleted these conversation starters afterwards" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"starter2\"\n\nMore Information needed\n\nGenerated by a fine-tuned GPT3 curie model over 2020 and 2022 on 10.000 conversation starters.\n\nGeneration pipeline:\n- generate 3 conversation starters\n- fine-tuned curie classify 0-1\n- take top 1\n\nHumans have reviewed and fixed/deleted these conversation starters afterwards" ]
ede3b50f99f5a4bdf343ef3a0bbea3482198d790
# Pastel Style Embedding / Textual Inversion <img alt="Showcase" src="https://huggingface.co/datasets/Nerfgun3/pastel_style/resolve/main/pastel_style.jpg"/> ## Usage To use this embedding you have to download the file aswell as drop it into the "\stable-diffusion-webui\embeddings" folder To use it in a prompt: ```"pastel_style"``` Personally, I would recommend to use my embeddings with a strength of 0.8, like ```"(pastel_style:0.8)"``` I trained the embedding two epochs until 6000 steps. I hope you enjoy the embedding. If you have any questions, you can ask me anything via Discord: "Nerfgun3#7508" ## License This embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies: 1. You can't use the embedding to deliberately produce nor share illegal or harmful outputs or content 2. The authors claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license 3. You may re-distribute the weights and use the embedding commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully) [Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)
Nerfgun3/pastel_style
[ "language:en", "license:creativeml-openrail-m", "stable-diffusion", "text-to-image", "image-to-image", "region:us" ]
2022-12-06T19:33:11+00:00
{"language": ["en"], "license": "creativeml-openrail-m", "thumbnail": "https://huggingface.co/datasets/Nerfgun3/pastel_style/resolve/main/pastel_style.jpg", "tags": ["stable-diffusion", "text-to-image", "image-to-image"], "inference": false}
2022-12-06T19:38:55+00:00
[]
[ "en" ]
TAGS #language-English #license-creativeml-openrail-m #stable-diffusion #text-to-image #image-to-image #region-us
# Pastel Style Embedding / Textual Inversion <img alt="Showcase" src="URL ## Usage To use this embedding you have to download the file aswell as drop it into the "\stable-diffusion-webui\embeddings" folder To use it in a prompt: Personally, I would recommend to use my embeddings with a strength of 0.8, like I trained the embedding two epochs until 6000 steps. I hope you enjoy the embedding. If you have any questions, you can ask me anything via Discord: "Nerfgun3#7508" ## License This embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies: 1. You can't use the embedding to deliberately produce nor share illegal or harmful outputs or content 2. The authors claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license 3. You may re-distribute the weights and use the embedding commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully) Please read the full license here
[ "# Pastel Style Embedding / Textual Inversion\n\n<img alt=\"Showcase\" src=\"URL", "## Usage\n\nTo use this embedding you have to download the file aswell as drop it into the \"\\stable-diffusion-webui\\embeddings\" folder\n\nTo use it in a prompt: \n\nPersonally, I would recommend to use my embeddings with a strength of 0.8, like \n\nI trained the embedding two epochs until 6000 steps.\n\nI hope you enjoy the embedding. If you have any questions, you can ask me anything via Discord: \"Nerfgun3#7508\"", "## License\n\nThis embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.\nThe CreativeML OpenRAIL License specifies: \n\n1. You can't use the embedding to deliberately produce nor share illegal or harmful outputs or content \n2. The authors claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license\n3. You may re-distribute the weights and use the embedding commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully)\nPlease read the full license here" ]
[ "TAGS\n#language-English #license-creativeml-openrail-m #stable-diffusion #text-to-image #image-to-image #region-us \n", "# Pastel Style Embedding / Textual Inversion\n\n<img alt=\"Showcase\" src=\"URL", "## Usage\n\nTo use this embedding you have to download the file aswell as drop it into the \"\\stable-diffusion-webui\\embeddings\" folder\n\nTo use it in a prompt: \n\nPersonally, I would recommend to use my embeddings with a strength of 0.8, like \n\nI trained the embedding two epochs until 6000 steps.\n\nI hope you enjoy the embedding. If you have any questions, you can ask me anything via Discord: \"Nerfgun3#7508\"", "## License\n\nThis embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.\nThe CreativeML OpenRAIL License specifies: \n\n1. You can't use the embedding to deliberately produce nor share illegal or harmful outputs or content \n2. The authors claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license\n3. You may re-distribute the weights and use the embedding commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully)\nPlease read the full license here" ]
d00c469f1dc9f5b8f968b95321eedeebe6bd35ea
# Dataset Card for "gal_yair_large" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
galman33/gal_yair_166000_1664x832_fixed
[ "region:us" ]
2022-12-06T19:49:12+00:00
{"dataset_info": {"features": [{"name": "lat", "dtype": "float64"}, {"name": "lon", "dtype": "float64"}, {"name": "country_code", "dtype": {"class_label": {"names": {"0": "ad", "1": "ae", "2": "al", "3": "aq", "4": "ar", "5": "au", "6": "bd", "7": "be", "8": "bg", "9": "bm", "10": "bo", "11": "br", "12": "bt", "13": "bw", "14": "ca", "15": "ch", "16": "cl", "17": "co", "18": "cz", "19": "de", "20": "dk", "21": "ec", "22": "ee", "23": "es", "24": "fi", "25": "fr", "26": "gb", "27": "gh", "28": "gl", "29": "gr", "30": "gt", "31": "hk", "32": "hr", "33": "hu", "34": "id", "35": "ie", "36": "il", "37": "is", "38": "it", "39": "ix", "40": "jp", "41": "kg", "42": "kh", "43": "kr", "44": "la", "45": "lk", "46": "ls", "47": "lt", "48": "lu", "49": "lv", "50": "me", "51": "mg", "52": "mk", "53": "mn", "54": "mo", "55": "mt", "56": "mx", "57": "my", "58": "nl", "59": "no", "60": "nz", "61": "pe", "62": "ph", "63": "pl", "64": "pt", "65": "ro", "66": "rs", "67": "ru", "68": "se", "69": "sg", "70": "si", "71": "sk", "72": "sn", "73": "sz", "74": "th", "75": "tn", "76": "tr", "77": "tw", "78": "ua", "79": "ug", "80": "us", "81": "uy", "82": "za"}}}}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 8079449515.0, "num_examples": 166000}], "download_size": 22205924633, "dataset_size": 8079449515.0}}
2022-12-06T20:36:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for "gal_yair_large" More Information needed
[ "# Dataset Card for \"gal_yair_large\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"gal_yair_large\"\n\nMore Information needed" ]
63b1d0965e3a941703e33272b61ba508411376d0
# Dataset Card for "scalableMLDL2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
marvmk/scalableMLDL2
[ "region:us" ]
2022-12-06T20:53:05+00:00
{"dataset_info": {"features": [{"name": "input_features", "sequence": {"sequence": "float32"}}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 5726523552, "num_examples": 5962}, {"name": "test", "num_bytes": 2546311152, "num_examples": 2651}], "download_size": 1397392104, "dataset_size": 8272834704}}
2022-12-06T22:08:42+00:00
[]
[]
TAGS #region-us
# Dataset Card for "scalableMLDL2" More Information needed
[ "# Dataset Card for \"scalableMLDL2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"scalableMLDL2\"\n\nMore Information needed" ]
6e48c15a2ea7559cac10e5c5a453a2ef5913577e
# Splash Art Style Embedding / Textual Inversion <img alt="Showcase" src="https://huggingface.co/datasets/Nerfgun3/splash_art/resolve/main/splashart.jpg"/> ## Usage I uploaded two different versions. Both embeddings create splash art images, although the splash_art2 is more consistent, splash_art generates more generic images than splash_art2. Enjoy! To use this embedding you have to download the file aswell as drop it into the "\stable-diffusion-webui\embeddings" folder To use it in a prompt: ```"splash_art"``` or ```"splash_art2"``` depending on which version you use Personally, I would recommend to use my embeddings with a strength of 0.8, like ```"(splash_art:0.8)"``` or ```"(splash_art2:0.8)"``` I trained the embedding two epochs until 6800 steps. I hope you enjoy the embedding. If you have any questions, you can ask me anything via Discord: "Nerfgun3#7508" ## License This embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies: 1. You can't use the embedding to deliberately produce nor share illegal or harmful outputs or content 2. The authors claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license 3. You may re-distribute the weights and use the embedding commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully) [Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)
Nerfgun3/splash_art
[ "language:en", "license:creativeml-openrail-m", "stable-diffusion", "text-to-image", "image-to-image", "region:us" ]
2022-12-06T20:55:26+00:00
{"language": ["en"], "license": "creativeml-openrail-m", "thumbnail": "https://huggingface.co/datasets/Nerfgun3/splash_art/resolve/main/splashart.jpg", "tags": ["stable-diffusion", "text-to-image", "image-to-image"], "inference": false}
2022-12-06T21:01:28+00:00
[]
[ "en" ]
TAGS #language-English #license-creativeml-openrail-m #stable-diffusion #text-to-image #image-to-image #region-us
# Splash Art Style Embedding / Textual Inversion <img alt="Showcase" src="URL ## Usage I uploaded two different versions. Both embeddings create splash art images, although the splash_art2 is more consistent, splash_art generates more generic images than splash_art2. Enjoy! To use this embedding you have to download the file aswell as drop it into the "\stable-diffusion-webui\embeddings" folder To use it in a prompt: or depending on which version you use Personally, I would recommend to use my embeddings with a strength of 0.8, like or I trained the embedding two epochs until 6800 steps. I hope you enjoy the embedding. If you have any questions, you can ask me anything via Discord: "Nerfgun3#7508" ## License This embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies: 1. You can't use the embedding to deliberately produce nor share illegal or harmful outputs or content 2. The authors claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license 3. You may re-distribute the weights and use the embedding commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully) Please read the full license here
[ "# Splash Art Style Embedding / Textual Inversion\n\n<img alt=\"Showcase\" src=\"URL", "## Usage\n\nI uploaded two different versions. Both embeddings create splash art images, although the splash_art2 is more consistent, splash_art generates more generic images than splash_art2. Enjoy! \n\nTo use this embedding you have to download the file aswell as drop it into the \"\\stable-diffusion-webui\\embeddings\" folder\n\nTo use it in a prompt: or depending on which version you use\n\nPersonally, I would recommend to use my embeddings with a strength of 0.8, like or \n\nI trained the embedding two epochs until 6800 steps.\n\nI hope you enjoy the embedding. If you have any questions, you can ask me anything via Discord: \"Nerfgun3#7508\"", "## License\n\nThis embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.\nThe CreativeML OpenRAIL License specifies: \n\n1. You can't use the embedding to deliberately produce nor share illegal or harmful outputs or content \n2. The authors claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license\n3. You may re-distribute the weights and use the embedding commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully)\nPlease read the full license here" ]
[ "TAGS\n#language-English #license-creativeml-openrail-m #stable-diffusion #text-to-image #image-to-image #region-us \n", "# Splash Art Style Embedding / Textual Inversion\n\n<img alt=\"Showcase\" src=\"URL", "## Usage\n\nI uploaded two different versions. Both embeddings create splash art images, although the splash_art2 is more consistent, splash_art generates more generic images than splash_art2. Enjoy! \n\nTo use this embedding you have to download the file aswell as drop it into the \"\\stable-diffusion-webui\\embeddings\" folder\n\nTo use it in a prompt: or depending on which version you use\n\nPersonally, I would recommend to use my embeddings with a strength of 0.8, like or \n\nI trained the embedding two epochs until 6800 steps.\n\nI hope you enjoy the embedding. If you have any questions, you can ask me anything via Discord: \"Nerfgun3#7508\"", "## License\n\nThis embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.\nThe CreativeML OpenRAIL License specifies: \n\n1. You can't use the embedding to deliberately produce nor share illegal or harmful outputs or content \n2. The authors claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license\n3. You may re-distribute the weights and use the embedding commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully)\nPlease read the full license here" ]
36ab7c5086b4abfb1f13576b048abac0ceb4fea5
# Dataset Card for "cantonese_processed" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tilos/cantonese_processed
[ "region:us" ]
2022-12-06T21:53:29+00:00
{"dataset_info": {"features": [{"name": "input_features", "sequence": {"sequence": "float32"}}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 18830225280, "num_examples": 19605}], "download_size": 1276665418, "dataset_size": 18830225280}}
2022-12-06T22:26:24+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cantonese_processed" More Information needed
[ "# Dataset Card for \"cantonese_processed\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cantonese_processed\"\n\nMore Information needed" ]
e560d7c1989174b135a17e69e68889dbf38e0628
# Dataset Card for "image50" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Steven0633/image50
[ "region:us" ]
2022-12-06T22:19:21+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "arrange chairs", "1": "arrange flowers", "2": "bake potato", "3": "beat eggs", "4": "bend knee", "5": "bend tree", "6": "bind hair", "7": "bite apple", "8": "block door", "9": "block window", "10": "boil egg", "11": "boil potato", "12": "break bowl", "13": "break cup", "14": "break door", "15": "break egg", "16": "break glass", "17": "break window", "18": "burn book", "19": "burn paper", "20": "burn tree", "21": "burn wood", "22": "burst balloon", "23": "burst door", "24": "carry bag", "25": "carry book", "26": "carry umbrella", "27": "chop carrot", "28": "chop meat", "29": "chop onion", "30": "chop tree", "31": "chop wood", "32": "close book", "33": "close cabinet", "34": "close door", "35": "close drawer", "36": "close window", "37": "coil rope", "38": "cook egg", "39": "cook meat", "40": "cook onion", "41": "cook potato", "42": "crack bottle", "43": "crack egg", "44": "crack glass", "45": "crack window", "46": "crash car", "47": "crop hair", "48": "cut apple", "49": "cut meat", "50": "cut onion", "51": "cut potato", "52": "cut tree", "53": "cut wood", "54": "fasten door", "55": "fasten window", "56": "fold paper", "57": "fry egg", "58": "fry meat", "59": "fry potato", "60": "grate carrot", "61": "grate potato", "62": "grind meat", "63": "hang bag", "64": "hang shirt", "65": "ignite paper", "66": "ignite wood", "67": "insert key", "68": "kick door", "69": "kick football", "70": "knot rope", "71": "label bottle", "72": "label box", "73": "lock cabinet", "74": "lock door", "75": "lock drawer", "76": "lock window", "77": "mash potato", "78": "mix eggs", "79": "open bottle", "80": "open box", "81": "open cabinet", "82": "open door", "83": "open drawer", "84": "open umbrella", "85": "open window", "86": "park car", "87": "peel apple", "88": "peel banana", "89": "peel carrot", "90": "peel orange", "91": "peel potato", "92": "pile books", "93": "pile boxes", "94": "pile wood", "95": "pitch baseball", "96": "ride bicycle", "97": "rip paper", "98": "roll paper", "99": "roll umbrella", "100": "saw tree", "101": "saw wood", "102": "scratch car", "103": "scratch knee", "104": "shave hair", "105": "shut door", "106": "shut window", "107": "skin knee", "108": "slice apple", "109": "slice meat", "110": "slice onion", "111": "slice potato", "112": "smash door", "113": "smash window", "114": "soak hair", "115": "soak shirt", "116": "spill coffee", "117": "split tree", "118": "split wood", "119": "squeeze bottle", "120": "squeeze orange", "121": "stain paper", "122": "stain shirt", "123": "stir coffee", "124": "stir soup", "125": "strip tree", "126": "tear book", "127": "tear paper", "128": "tear shirt", "129": "throw apple", "130": "throw baseball", "131": "throw football", "132": "throw frisbee", "133": "tie shoe", "134": "trim hair", "135": "trim tree", "136": "twist hair", "137": "twist rope", "138": "wrap book", "139": "wrap box"}}}}], "splits": [{"name": "train", "num_bytes": 191648684.53815603, "num_examples": 6126}, {"name": "test", "num_bytes": 20857643.465843983, "num_examples": 681}], "download_size": 213918792, "dataset_size": 212506328.004}}
2022-12-06T22:38:39+00:00
[]
[]
TAGS #region-us
# Dataset Card for "image50" More Information needed
[ "# Dataset Card for \"image50\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"image50\"\n\nMore Information needed" ]
d1b52887a64651e934e908daba767954f8299ac0
# Dataset Card for "595Gao" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
epigone707/595Gao
[ "region:us" ]
2022-12-06T22:22:59+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "arrange+chairs", "1": "arrange+flowers", "2": "bake+potato", "3": "beat+eggs", "4": "bend+knee", "5": "bend+tree", "6": "bind+hair", "7": "bite+apple", "8": "block+door", "9": "block+window", "10": "boil+egg", "11": "boil+potato", "12": "break+bowl", "13": "break+cup", "14": "break+door", "15": "break+egg", "16": "break+glass", "17": "break+window", "18": "burn+book", "19": "burn+paper", "20": "burn+tree", "21": "burn+wood", "22": "burst+balloon", "23": "burst+door", "24": "carry+bag", "25": "carry+book", "26": "carry+umbrella", "27": "chop+carrot", "28": "chop+meat", "29": "chop+onion", "30": "chop+tree", "31": "chop+wood", "32": "close+book", "33": "close+cabinet", "34": "close+door", "35": "close+drawer", "36": "close+window", "37": "coil+rope", "38": "cook+egg", "39": "cook+meat", "40": "cook+onion", "41": "cook+potato", "42": "crack+bottle", "43": "crack+egg", "44": "crack+glass", "45": "crack+window", "46": "crash+car", "47": "crop+hair", "48": "cut+apple", "49": "cut+meat", "50": "cut+onion", "51": "cut+potato", "52": "cut+tree", "53": "cut+wood", "54": "fasten+door", "55": "fasten+window", "56": "fold+paper", "57": "fry+egg", "58": "fry+meat", "59": "fry+potato", "60": "grate+carrot", "61": "grate+potato", "62": "grind+meat", "63": "hang+bag", "64": "hang+shirt", "65": "ignite+paper", "66": "ignite+wood", "67": "insert+key", "68": "kick+door", "69": "kick+football", "70": "knot+rope", "71": "label+bottle", "72": "label+box", "73": "lock+cabinet", "74": "lock+door", "75": "lock+drawer", "76": "lock+window", "77": "mash+potato", "78": "mix+eggs", "79": "open+bottle", "80": "open+box", "81": "open+cabinet", "82": "open+door", "83": "open+drawer", "84": "open+umbrella", "85": "open+window", "86": "park+car", "87": "peel+apple", "88": "peel+banana", "89": "peel+carrot", "90": "peel+orange", "91": "peel+potato", "92": "pile+books", "93": "pile+boxes", "94": "pile+wood", "95": "pitch+baseball", "96": "ride+bicycle", "97": "rip+paper", "98": "roll+paper", "99": "roll+umbrella", "100": "saw+tree", "101": "saw+wood", "102": "scratch+car", "103": "scratch+knee", "104": "shave+hair", "105": "shut+door", "106": "shut+window", "107": "skin+knee", "108": "slice+apple", "109": "slice+meat", "110": "slice+onion", "111": "slice+potato", "112": "smash+door", "113": "smash+window", "114": "soak+hair", "115": "soak+shirt", "116": "spill+coffee", "117": "split+tree", "118": "split+wood", "119": "squeeze+bottle", "120": "squeeze+orange", "121": "stain+paper", "122": "stain+shirt", "123": "stir+coffee", "124": "stir+soup", "125": "strip+tree", "126": "tear+book", "127": "tear+paper", "128": "tear+shirt", "129": "throw+apple", "130": "throw+baseball", "131": "throw+football", "132": "throw+frisbee", "133": "tie+shoe", "134": "trim+hair", "135": "trim+tree", "136": "twist+hair", "137": "twist+rope", "138": "wrap+book", "139": "wrap+box"}}}}], "splits": [{"name": "train", "num_bytes": 165337731.7298711, "num_examples": 1843}, {"name": "test", "num_bytes": 20775526.807128906, "num_examples": 205}], "download_size": 187898542, "dataset_size": 186113258.537}}
2022-12-07T00:40:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "595Gao" More Information needed
[ "# Dataset Card for \"595Gao\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"595Gao\"\n\nMore Information needed" ]
d590bb4a2b1c16c734401837fced22716efdcdaf
# Dataset Card for "tagesschau" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tillschwoerer/tagesschau
[ "task_categories:text-classification", "task_ids:topic-classification", "annotations_creators:found", "language_creators:found", "multilinguality:monolingual", "size_categories:1K<n<10K", "language:de", "newspapers", "germany", "2022", "region:us" ]
2022-12-06T23:08:19+00:00
{"annotations_creators": ["found"], "language_creators": ["found"], "language": ["de"], "license": [], "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "source_datasets": [], "task_categories": ["text-classification"], "task_ids": ["topic-classification"], "pretty_name": "tagesschau", "tags": ["newspapers", "germany", "2022"], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "amerika", "1": "asien", "2": "finanzen", "3": "innenpolitik", "4": "sportschau", "5": "unternehmen", "6": "verbraucher"}}}}], "splits": [{"name": "train", "num_bytes": 4400114, "num_examples": 1200}, {"name": "validation", "num_bytes": 555716, "num_examples": 150}, {"name": "test", "num_bytes": 555716, "num_examples": 150}], "download_size": 3412287, "dataset_size": 5511546}}
2022-12-06T23:21:09+00:00
[]
[ "de" ]
TAGS #task_categories-text-classification #task_ids-topic-classification #annotations_creators-found #language_creators-found #multilinguality-monolingual #size_categories-1K<n<10K #language-German #newspapers #germany #2022 #region-us
# Dataset Card for "tagesschau" More Information needed
[ "# Dataset Card for \"tagesschau\"\n\n\n\nMore Information needed" ]
[ "TAGS\n#task_categories-text-classification #task_ids-topic-classification #annotations_creators-found #language_creators-found #multilinguality-monolingual #size_categories-1K<n<10K #language-German #newspapers #germany #2022 #region-us \n", "# Dataset Card for \"tagesschau\"\n\n\n\nMore Information needed" ]
4aa60b5519c07adf32aec7f2417b73de0238c351
# Dataset Card for "sheet_music_ede2110" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EmileEsmaili/sheet_music_ede2110
[ "region:us" ]
2022-12-07T00:03:50+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 2229356112.491, "num_examples": 9219}], "download_size": 1211789844, "dataset_size": 2229356112.491}}
2022-12-09T06:41:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sheet_music_ede2110" More Information needed
[ "# Dataset Card for \"sheet_music_ede2110\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sheet_music_ede2110\"\n\nMore Information needed" ]
49ec483e772970fbf6f46919372c4e9b6a60bcee
# Dataset Card for "lyoko-ultimate" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Madiator2011/lyoko-ultimate
[ "region:us" ]
2022-12-07T00:14:50+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 24808769.89, "num_examples": 1435}], "download_size": 24242906, "dataset_size": 24808769.89}}
2022-12-07T00:20:36+00:00
[]
[]
TAGS #region-us
# Dataset Card for "lyoko-ultimate" More Information needed
[ "# Dataset Card for \"lyoko-ultimate\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"lyoko-ultimate\"\n\nMore Information needed" ]
4299deb1ab6aa43c277fb79d8da1c3e2e52a144b
# Dataset Card for "news_corpus_v2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
hieule/news_corpus_v2
[ "region:us" ]
2022-12-07T04:59:58+00:00
{"dataset_info": {"features": [{"name": "source", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "sapo", "dtype": "string"}, {"name": "cates", "sequence": "string"}, {"name": "publish", "dtype": "timestamp[us]"}, {"name": "text_content", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 3228940922, "num_examples": 1000001}], "download_size": 1616424455, "dataset_size": 3228940922}}
2022-12-07T07:27:40+00:00
[]
[]
TAGS #region-us
# Dataset Card for "news_corpus_v2" More Information needed
[ "# Dataset Card for \"news_corpus_v2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"news_corpus_v2\"\n\nMore Information needed" ]
96123958828d02684be0db371e5876c0bbe0f2de
# Dataset Card for "news-summary" ## Dataset Description - **Homepage:** Kaggle Challenge - **Repository:** https://www.kaggle.com/datasets/clmentbisaillon/fake-and-real-news-dataset?select=True.csv - **Paper:** N.A. - **Leaderboard:** N.A. - **Point of Contact:** N.A. ### Dataset Summary Officially it was supposed to be used for classification but, can you use this data set to summarize news articles? ### Languages english ### Citation Information Acknowledgements Ahmed H, Traore I, Saad S. “Detecting opinion spams and fake news using text classification”, Journal of Security and Privacy, Volume 1, Issue 1, Wiley, January/February 2018. Ahmed H, Traore I, Saad S. (2017) “Detection of Online Fake News Using N-Gram Analysis and Machine Learning Techniques. In: Traore I., Woungang I., Awad A. (eds) Intelligent, Secure, and Dependable Systems in Distributed and Cloud Environments. ISDDC 2017. Lecture Notes in Computer Science, vol 10618. Springer, Cham (pp. 127-138). ### Contributions Thanks to [@davidberenstein1957](https://github.com/davidberenstein1957) for adding this dataset.
argilla/news-summary
[ "task_categories:summarization", "task_ids:news-articles-summarization", "size_categories:10K<n<100K", "source_datasets:original", "language:en", "license:cc-by-nc-4.0", "region:us" ]
2022-12-07T05:39:38+00:00
{"language": ["en"], "license": ["cc-by-nc-4.0"], "size_categories": ["10K<n<100K"], "source_datasets": ["original"], "task_categories": ["summarization"], "task_ids": ["news-articles-summarization"], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "prediction", "list": [{"name": "score", "dtype": "float64"}, {"name": "text", "dtype": "string"}]}, {"name": "prediction_agent", "dtype": "string"}, {"name": "annotation", "dtype": "null"}, {"name": "annotation_agent", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "dtype": "null"}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "timestamp[us]"}, {"name": "metrics", "struct": [{"name": "text_length", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 2563132.0446374374, "num_examples": 1000}, {"name": "test", "num_bytes": 52331466.955362566, "num_examples": 20417}], "download_size": 33207109, "dataset_size": 54894599.0}}
2023-03-16T09:36:12+00:00
[]
[ "en" ]
TAGS #task_categories-summarization #task_ids-news-articles-summarization #size_categories-10K<n<100K #source_datasets-original #language-English #license-cc-by-nc-4.0 #region-us
# Dataset Card for "news-summary" ## Dataset Description - Homepage: Kaggle Challenge - Repository: URL - Paper: N.A. - Leaderboard: N.A. - Point of Contact: N.A. ### Dataset Summary Officially it was supposed to be used for classification but, can you use this data set to summarize news articles? ### Languages english Acknowledgements Ahmed H, Traore I, Saad S. “Detecting opinion spams and fake news using text classification”, Journal of Security and Privacy, Volume 1, Issue 1, Wiley, January/February 2018. Ahmed H, Traore I, Saad S. (2017) “Detection of Online Fake News Using N-Gram Analysis and Machine Learning Techniques. In: Traore I., Woungang I., Awad A. (eds) Intelligent, Secure, and Dependable Systems in Distributed and Cloud Environments. ISDDC 2017. Lecture Notes in Computer Science, vol 10618. Springer, Cham (pp. 127-138). ### Contributions Thanks to @davidberenstein1957 for adding this dataset.
[ "# Dataset Card for \"news-summary\"", "## Dataset Description\n\n- Homepage: Kaggle Challenge\n- Repository: URL\n- Paper: N.A.\n- Leaderboard: N.A.\n- Point of Contact: N.A.", "### Dataset Summary\n\nOfficially it was supposed to be used for classification but, can you use this data set to summarize news articles?", "### Languages\n\nenglish \n\n\n\nAcknowledgements\n\nAhmed H, Traore I, Saad S. “Detecting opinion spams and fake news using text classification”, Journal of Security and Privacy, Volume 1, Issue 1, Wiley, January/February 2018.\nAhmed H, Traore I, Saad S. (2017) “Detection of Online Fake News Using N-Gram Analysis and Machine Learning Techniques. In: Traore I., Woungang I., Awad A. (eds) Intelligent, Secure, and Dependable Systems in Distributed and Cloud Environments. ISDDC 2017. Lecture Notes in Computer Science, vol 10618. Springer, Cham (pp. 127-138).", "### Contributions\n\nThanks to @davidberenstein1957 for adding this dataset." ]
[ "TAGS\n#task_categories-summarization #task_ids-news-articles-summarization #size_categories-10K<n<100K #source_datasets-original #language-English #license-cc-by-nc-4.0 #region-us \n", "# Dataset Card for \"news-summary\"", "## Dataset Description\n\n- Homepage: Kaggle Challenge\n- Repository: URL\n- Paper: N.A.\n- Leaderboard: N.A.\n- Point of Contact: N.A.", "### Dataset Summary\n\nOfficially it was supposed to be used for classification but, can you use this data set to summarize news articles?", "### Languages\n\nenglish \n\n\n\nAcknowledgements\n\nAhmed H, Traore I, Saad S. “Detecting opinion spams and fake news using text classification”, Journal of Security and Privacy, Volume 1, Issue 1, Wiley, January/February 2018.\nAhmed H, Traore I, Saad S. (2017) “Detection of Online Fake News Using N-Gram Analysis and Machine Learning Techniques. In: Traore I., Woungang I., Awad A. (eds) Intelligent, Secure, and Dependable Systems in Distributed and Cloud Environments. ISDDC 2017. Lecture Notes in Computer Science, vol 10618. Springer, Cham (pp. 127-138).", "### Contributions\n\nThanks to @davidberenstein1957 for adding this dataset." ]
9eadedec22e78a74591874b01d1472d6a7f4a02a
# AutoTrain Dataset for project: enzydg ## Dataset Description This dataset has been automatically processed by AutoTrain for project enzydg. ### Languages The BCP-47 code for the dataset's language is unk. ## Dataset Structure ### Data Instances A sample from this dataset looks as follows: ```json [ { "id": 155, "feat_compound_iso_smiles": "CC1(C2CC(=O)C1(C(=O)C2)C)C", "feat_target_sequence": "MKQLATPFQEYSQKYENIRLERDGGVLLVTVHTEGKSLVWTSTAHDELAYCFHDIACDRENKVVILTGTGPSFCNEIDFTSFNLGTPHDWDEIIFEGQRLLNNLLSIEVPVIAAVNGPVTNAPEIPVMSDIVLAAESATFQDGPHFPSGIVPGDGAHVVWPHVLGSNRGRYFLLTGQELDARTALDYGAVNEVLSEQELLPRAWELARGIAEKPLLARRYARKVLTRQLRRVMEADLSLGLAHEALAAIDLGMESEQ", "target": 13.621903419494629 }, { "id": 180, "feat_compound_iso_smiles": "C1=CC(=CC=C1C2=COC3=C(C2=O)C=CC(=C3)O[C@H]4[C@@H]([C@H]([C@@H]([C@H](O4)CO)O)O)O)O", "feat_target_sequence": "MAFPAGFGWAAATAAYQVEGGWDADGKGPCVWDTFTHQGGERVFKNQTGDVACGSYTLWEEDLKCIKQLGLTHYRFSLSWSRLLPDGTTGFINQKGIDYYNKIIDDLLKNGVTPIVTLYHFDLPQTLEDQGGWLSEAIIESFDKYAQFCFSTFGDRVKQWITINEANVLSVMSYDLGMFPPGIPHFGTGGYQAAHNLIKAHARSWHSYDSLFRKKQKGMVSLSLFAVWLEPADPNSVSDQEAAKRAITFHLDLFAKPIFIDGDYPEVVKSQIASMSQKQGYPSSRLPEFTEEEKKMIKGTADFFAVQYYTTRLIKYQENKKGELGILQDAEIEFFPDPSWKNVDAIYVVPWGVCKLLKYIKDTYNNPVIYITENGFPQSDPAPLDDTQRWEYFRQTFQELFKAIQLDKVNLQVYCAWSLLDNFEWNQGYSSRFGLFHVDFEDPARPRVPYTSAKEYAKIIRNNGLEAHL", "target": 17.67270851135254 } ] ``` ### Dataset Fields The dataset has the following fields (also called "features"): ```json { "id": "Value(dtype='int64', id=None)", "feat_compound_iso_smiles": "Value(dtype='string', id=None)", "feat_target_sequence": "Value(dtype='string', id=None)", "target": "Value(dtype='float32', id=None)" } ``` ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follow: | Split name | Num samples | | ------------ | ------------------- | | train | 160 | | valid | 44 |
Shone/autotrain-data-enzydg
[ "region:us" ]
2022-12-07T05:53:14+00:00
{}
2022-12-07T05:54:50+00:00
[]
[]
TAGS #region-us
AutoTrain Dataset for project: enzydg ===================================== Dataset Description ------------------- This dataset has been automatically processed by AutoTrain for project enzydg. ### Languages The BCP-47 code for the dataset's language is unk. Dataset Structure ----------------- ### Data Instances A sample from this dataset looks as follows: ### Dataset Fields The dataset has the following fields (also called "features"): ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follow:
[ "### Languages\n\n\nThe BCP-47 code for the dataset's language is unk.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
[ "TAGS\n#region-us \n", "### Languages\n\n\nThe BCP-47 code for the dataset's language is unk.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
2d2d471b4fcbb11b3486bbc9ffef249db55d6b15
# Dataset Card for "news-fakenews" ## Dataset Description - **Homepage:** Kaggle Challenge - **Repository:** https://www.kaggle.com/datasets/clmentbisaillon/fake-and-real-news-dataset?select=True.csv - **Paper:** N.A. - **Leaderboard:** N.A. - **Point of Contact:** N.A. ### Dataset Summary Can you use this data set to make an algorithm able to determine if an article is fake news or not ? ### Languages english ### Citation Information Acknowledgements Ahmed H, Traore I, Saad S. “Detecting opinion spams and fake news using text classification”, Journal of Security and Privacy, Volume 1, Issue 1, Wiley, January/February 2018. Ahmed H, Traore I, Saad S. (2017) “Detection of Online Fake News Using N-Gram Analysis and Machine Learning Techniques. In: Traore I., Woungang I., Awad A. (eds) Intelligent, Secure, and Dependable Systems in Distributed and Cloud Environments. ISDDC 2017. Lecture Notes in Computer Science, vol 10618. Springer, Cham (pp. 127-138). ### Contributions Thanks to [@davidberenstein1957](https://github.com/davidberenstein1957) for adding this dataset.
argilla/news-fakenews
[ "task_categories:text-classification", "size_categories:10K<n<100K", "source_datasets:original", "language:en", "region:us" ]
2022-12-07T06:37:24+00:00
{"language": ["en"], "size_categories": ["10K<n<100K"], "source_datasets": ["original"], "task_categories": ["text-classification"], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "inputs", "struct": [{"name": "text", "dtype": "string"}]}, {"name": "prediction", "list": [{"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}]}, {"name": "prediction_agent", "dtype": "string"}, {"name": "annotation", "dtype": "null"}, {"name": "annotation_agent", "dtype": "null"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "dtype": "null"}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "timestamp[us]"}, {"name": "metrics", "struct": [{"name": "text_length", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 227222498, "num_examples": 44898}], "download_size": 138350597, "dataset_size": 227222498}}
2022-12-07T07:09:34+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #size_categories-10K<n<100K #source_datasets-original #language-English #region-us
# Dataset Card for "news-fakenews" ## Dataset Description - Homepage: Kaggle Challenge - Repository: URL - Paper: N.A. - Leaderboard: N.A. - Point of Contact: N.A. ### Dataset Summary Can you use this data set to make an algorithm able to determine if an article is fake news or not ? ### Languages english Acknowledgements Ahmed H, Traore I, Saad S. “Detecting opinion spams and fake news using text classification”, Journal of Security and Privacy, Volume 1, Issue 1, Wiley, January/February 2018. Ahmed H, Traore I, Saad S. (2017) “Detection of Online Fake News Using N-Gram Analysis and Machine Learning Techniques. In: Traore I., Woungang I., Awad A. (eds) Intelligent, Secure, and Dependable Systems in Distributed and Cloud Environments. ISDDC 2017. Lecture Notes in Computer Science, vol 10618. Springer, Cham (pp. 127-138). ### Contributions Thanks to @davidberenstein1957 for adding this dataset.
[ "# Dataset Card for \"news-fakenews\"", "## Dataset Description\n\n- Homepage: Kaggle Challenge\n- Repository: URL\n- Paper: N.A.\n- Leaderboard: N.A.\n- Point of Contact: N.A.", "### Dataset Summary\n\nCan you use this data set to make an algorithm able to determine if an article is fake news or not ?", "### Languages\n\nenglish \n\n\n\nAcknowledgements\n\nAhmed H, Traore I, Saad S. “Detecting opinion spams and fake news using text classification”, Journal of Security and Privacy, Volume 1, Issue 1, Wiley, January/February 2018.\nAhmed H, Traore I, Saad S. (2017) “Detection of Online Fake News Using N-Gram Analysis and Machine Learning Techniques. In: Traore I., Woungang I., Awad A. (eds) Intelligent, Secure, and Dependable Systems in Distributed and Cloud Environments. ISDDC 2017. Lecture Notes in Computer Science, vol 10618. Springer, Cham (pp. 127-138).", "### Contributions\n\nThanks to @davidberenstein1957 for adding this dataset." ]
[ "TAGS\n#task_categories-text-classification #size_categories-10K<n<100K #source_datasets-original #language-English #region-us \n", "# Dataset Card for \"news-fakenews\"", "## Dataset Description\n\n- Homepage: Kaggle Challenge\n- Repository: URL\n- Paper: N.A.\n- Leaderboard: N.A.\n- Point of Contact: N.A.", "### Dataset Summary\n\nCan you use this data set to make an algorithm able to determine if an article is fake news or not ?", "### Languages\n\nenglish \n\n\n\nAcknowledgements\n\nAhmed H, Traore I, Saad S. “Detecting opinion spams and fake news using text classification”, Journal of Security and Privacy, Volume 1, Issue 1, Wiley, January/February 2018.\nAhmed H, Traore I, Saad S. (2017) “Detection of Online Fake News Using N-Gram Analysis and Machine Learning Techniques. In: Traore I., Woungang I., Awad A. (eds) Intelligent, Secure, and Dependable Systems in Distributed and Cloud Environments. ISDDC 2017. Lecture Notes in Computer Science, vol 10618. Springer, Cham (pp. 127-138).", "### Contributions\n\nThanks to @davidberenstein1957 for adding this dataset." ]
7e9e5c457c4a57965c113925d2d94cc885861821
# Dataset Card for "olm-october-2022-tokenized-1024-exact-dedup-only" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Tristan/olm-october-2022-tokenized-1024-exact-dedup-only
[ "region:us" ]
2022-12-07T07:01:16+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "special_tokens_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 81122930784.0, "num_examples": 13177864}], "download_size": 21799520270, "dataset_size": 81122930784.0}}
2022-12-07T07:49:28+00:00
[]
[]
TAGS #region-us
# Dataset Card for "olm-october-2022-tokenized-1024-exact-dedup-only" More Information needed
[ "# Dataset Card for \"olm-october-2022-tokenized-1024-exact-dedup-only\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"olm-october-2022-tokenized-1024-exact-dedup-only\"\n\nMore Information needed" ]
2e8f1098ddeaca92b7300156a5a9395662992eda
# Dataset Card for "python-code-ds-mini" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dipesh/python-code-ds-mini
[ "region:us" ]
2022-12-07T07:07:26+00:00
{"dataset_info": {"features": [{"name": "code", "dtype": "string"}, {"name": "code_length", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1455104.6452533905, "num_examples": 2521}, {"name": "validation", "num_bytes": 162191.35474660958, "num_examples": 281}], "download_size": 742200, "dataset_size": 1617296.0}}
2022-12-09T23:33:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "python-code-ds-mini" More Information needed
[ "# Dataset Card for \"python-code-ds-mini\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"python-code-ds-mini\"\n\nMore Information needed" ]
58103080018cf8568802b39651be1e008765d6d6
# Dataset Card for "olm-CC-MAIN-2022-40-sampling-ratio-0.15894621295-no-bigscience-filters" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Tristan/olm-CC-MAIN-2022-40-sampling-ratio-0.15894621295-no-bigscience-filters
[ "region:us" ]
2022-12-07T07:11:06+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "crawl_timestamp", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 33670789930.82474, "num_examples": 16442332}], "download_size": 21113002013, "dataset_size": 33670789930.82474}}
2022-12-07T07:36:23+00:00
[]
[]
TAGS #region-us
# Dataset Card for "olm-CC-MAIN-2022-40-sampling-ratio-0.15894621295-no-bigscience-filters" More Information needed
[ "# Dataset Card for \"olm-CC-MAIN-2022-40-sampling-ratio-0.15894621295-no-bigscience-filters\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"olm-CC-MAIN-2022-40-sampling-ratio-0.15894621295-no-bigscience-filters\"\n\nMore Information needed" ]
15d1d222788d13a1db7f17992ad4bef5aff06dad
# Dataset Card for "Process_tested" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Shularp/Process_tested
[ "region:us" ]
2022-12-07T08:18:22+00:00
{"dataset_info": {"features": [{"name": "sentence_arb_Arab", "dtype": "string"}, {"name": "sentence_eng_Latn", "dtype": "string"}], "splits": [{"name": "dev", "num_bytes": 333842, "num_examples": 997}, {"name": "devtest", "num_bytes": 351455, "num_examples": 1012}], "download_size": 411360, "dataset_size": 685297}}
2022-12-07T08:20:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Process_tested" More Information needed
[ "# Dataset Card for \"Process_tested\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Process_tested\"\n\nMore Information needed" ]
5fbf1af3b9112e7ec74ca4fd94bf92fd9b8abaf0
# Dataset Card for "Process_tested_02" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Shularp/Process_tested_02
[ "region:us" ]
2022-12-07T08:20:24+00:00
{"dataset_info": {"features": [{"name": "translation", "struct": [{"name": "ar", "dtype": "string"}, {"name": "en", "dtype": "string"}]}, {"name": "id", "sequence": "int64"}], "splits": [{"name": "dev", "num_bytes": 361758, "num_examples": 997}], "download_size": 199462, "dataset_size": 361758}}
2022-12-07T08:20:28+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Process_tested_02" More Information needed
[ "# Dataset Card for \"Process_tested_02\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Process_tested_02\"\n\nMore Information needed" ]
0f3e7aab79bf370979764275e3d12d74d8235bc6
# Dataset Card for "medical-domain" ## Dataset Description - **Homepage:** Kaggle Challenge - **Repository:** https://www.kaggle.com/datasets/tboyle10/medicaltranscriptions - **Paper:** N.A. - **Leaderboard:** N.A. - **Point of Contact:** N.A. ### Dataset Summary Medical transcription data scraped from mtsamples.com Medical data is extremely hard to find due to HIPAA privacy regulations. This dataset offers a solution by providing medical transcription samples. This dataset contains sample medical transcriptions for various medical specialties. ### Languages english ### Citation Information Acknowledgements Medical transcription data scraped from mtsamples.com ### Contributions Thanks to [@davidberenstein1957](https://github.com/davidberenstein1957) for adding this dataset.
argilla/medical-domain
[ "task_categories:text-classification", "size_categories:10K<n<100K", "source_datasets:original", "language:en", "region:us" ]
2022-12-07T08:47:29+00:00
{"language": ["en"], "size_categories": ["10K<n<100K"], "source_datasets": ["original"], "task_categories": ["text-classification"], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "inputs", "struct": [{"name": "text", "dtype": "string"}]}, {"name": "prediction", "list": [{"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}]}, {"name": "prediction_agent", "dtype": "string"}, {"name": "annotation", "dtype": "null"}, {"name": "annotation_agent", "dtype": "null"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "dtype": "null"}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "timestamp[us]"}, {"name": "metrics", "struct": [{"name": "text_length", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 30903523, "num_examples": 4966}], "download_size": 14846569, "dataset_size": 30903523}}
2022-12-07T11:57:58+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #size_categories-10K<n<100K #source_datasets-original #language-English #region-us
# Dataset Card for "medical-domain" ## Dataset Description - Homepage: Kaggle Challenge - Repository: URL - Paper: N.A. - Leaderboard: N.A. - Point of Contact: N.A. ### Dataset Summary Medical transcription data scraped from URL Medical data is extremely hard to find due to HIPAA privacy regulations. This dataset offers a solution by providing medical transcription samples. This dataset contains sample medical transcriptions for various medical specialties. ### Languages english Acknowledgements Medical transcription data scraped from URL ### Contributions Thanks to @davidberenstein1957 for adding this dataset.
[ "# Dataset Card for \"medical-domain\"", "## Dataset Description\n\n- Homepage: Kaggle Challenge\n- Repository: URL\n- Paper: N.A.\n- Leaderboard: N.A.\n- Point of Contact: N.A.", "### Dataset Summary\n\nMedical transcription data scraped from URL\nMedical data is extremely hard to find due to HIPAA privacy regulations. This dataset offers a solution by providing medical transcription samples.\nThis dataset contains sample medical transcriptions for various medical specialties.", "### Languages\n\nenglish \n\n\n\nAcknowledgements\n\nMedical transcription data scraped from URL", "### Contributions\n\nThanks to @davidberenstein1957 for adding this dataset." ]
[ "TAGS\n#task_categories-text-classification #size_categories-10K<n<100K #source_datasets-original #language-English #region-us \n", "# Dataset Card for \"medical-domain\"", "## Dataset Description\n\n- Homepage: Kaggle Challenge\n- Repository: URL\n- Paper: N.A.\n- Leaderboard: N.A.\n- Point of Contact: N.A.", "### Dataset Summary\n\nMedical transcription data scraped from URL\nMedical data is extremely hard to find due to HIPAA privacy regulations. This dataset offers a solution by providing medical transcription samples.\nThis dataset contains sample medical transcriptions for various medical specialties.", "### Languages\n\nenglish \n\n\n\nAcknowledgements\n\nMedical transcription data scraped from URL", "### Contributions\n\nThanks to @davidberenstein1957 for adding this dataset." ]
1f2a465dcf1201ead498edc5051e35225b0c479c
# Dataset Card for "banking_sentiment_setfit" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
argilla/banking_sentiment_setfit
[ "region:us" ]
2022-12-07T09:03:18+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "negative", "1": "neutral"}}}}], "splits": [{"name": "train", "num_bytes": 7433.25, "num_examples": 108}, {"name": "test", "num_bytes": 2477.75, "num_examples": 36}], "download_size": 8087, "dataset_size": 9911.0}}
2022-12-07T09:08:25+00:00
[]
[]
TAGS #region-us
# Dataset Card for "banking_sentiment_setfit" More Information needed
[ "# Dataset Card for \"banking_sentiment_setfit\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"banking_sentiment_setfit\"\n\nMore Information needed" ]
7bb3b21e604df9388b7525812d3f723ef9e677b3
# AutoTrain Dataset for project: boolq ## Dataset Description This dataset has been automatically processed by AutoTrain for project boolq. ### Languages The BCP-47 code for the dataset's language is en. ## Dataset Structure ### Data Instances A sample from this dataset looks as follows: ```json [ { "context": "is an abstract the same as a summary", "question": "Abstract (summary) -- An abstract is a brief summary of a research article, thesis, review, conference proceeding, or any in-depth analysis of a particular subject and is often used to help the reader quickly ascertain the paper's purpose. When used, an abstract always appears at the beginning of a manuscript or typescript, acting as the point-of-entry for any given academic paper or patent application. Abstracting and indexing services for various academic disciplines are aimed at compiling a body of literature for that particular subject.", "answers.text": [ "757" ], "answers.answer_start": [ -1 ], "feat_id": null, "feat_title": null }, { "context": "was the opening of jumeirah beach park in 2009", "question": "Jumeirah Beach Hotel -- Jumeirah Beach Hotel is a hotel in Dubai, United Arab Emirates. The hotel, which opened in 1997, is operated by the Dubai-based hotelier Jumeirah. The hotel contains 598 rooms and suites, 19 beachfront villas, and 20 restaurants and bars. This wave-shaped hotel complements the sail-shaped Burj Al Arab, which is adjacent to the Jumeirah Beach Hotel.", "answers.text": [ "2817" ], "answers.answer_start": [ -1 ], "feat_id": null, "feat_title": null } ] ``` ### Dataset Fields The dataset has the following fields (also called "features"): ```json { "context": "Value(dtype='string', id=None)", "question": "Value(dtype='string', id=None)", "answers.text": "Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)", "answers.answer_start": "Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None)", "feat_id": "Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)", "feat_title": "Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)" } ``` ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follow: | Split name | Num samples | | ------------ | ------------------- | | train | 10786 | | valid | 135411 |
kn0w1dge/BoolQTrueFalse
[ "language:en", "doi:10.57967/hf/0175", "region:us" ]
2022-12-07T09:29:37+00:00
{"language": ["en"]}
2022-12-07T09:34:15+00:00
[]
[ "en" ]
TAGS #language-English #doi-10.57967/hf/0175 #region-us
AutoTrain Dataset for project: boolq ==================================== Dataset Description ------------------- This dataset has been automatically processed by AutoTrain for project boolq. ### Languages The BCP-47 code for the dataset's language is en. Dataset Structure ----------------- ### Data Instances A sample from this dataset looks as follows: ### Dataset Fields The dataset has the following fields (also called "features"): ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follow:
[ "### Languages\n\n\nThe BCP-47 code for the dataset's language is en.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
[ "TAGS\n#language-English #doi-10.57967/hf/0175 #region-us \n", "### Languages\n\n\nThe BCP-47 code for the dataset's language is en.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
d5578010787ddb0996fba9bc1b136ad011c590da
# Dataset Card for "Process_tested-facebook-flores" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Shularp/Process_tested-facebook-flores
[ "region:us" ]
2022-12-07T09:43:46+00:00
{"dataset_info": {"features": [{"name": "translation", "struct": [{"name": "ar", "dtype": "string"}, {"name": "en", "dtype": "string"}]}, {"name": "id", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 361758, "num_examples": 997}, {"name": "test", "num_bytes": 379791, "num_examples": 1012}], "download_size": 412821, "dataset_size": 741549}}
2022-12-07T10:09:20+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Process_tested-facebook-flores" More Information needed
[ "# Dataset Card for \"Process_tested-facebook-flores\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Process_tested-facebook-flores\"\n\nMore Information needed" ]
24f3995831700f0ee85092eb40f0a8d0be57d50c
# Dataset Card for "librispeech15k_augm_train-tiny" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
CristianaLazar/librispeech15k_augm_train-tiny
[ "region:us" ]
2022-12-07T09:51:40+00:00
{"dataset_info": {"features": [{"name": "file", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "text", "dtype": "string"}, {"name": "speaker_id", "dtype": "int64"}, {"name": "chapter_id", "dtype": "int64"}, {"name": "id", "dtype": "string"}, {"name": "input_features", "sequence": {"sequence": "float32"}}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train.360", "num_bytes": 20473737704.0, "num_examples": 15000}], "download_size": 12376533972, "dataset_size": 20473737704.0}}
2022-12-07T11:13:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for "librispeech15k_augm_train-tiny" More Information needed
[ "# Dataset Card for \"librispeech15k_augm_train-tiny\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"librispeech15k_augm_train-tiny\"\n\nMore Information needed" ]
f25de9b018404f9cc708b6835a326d1c38b923a3
# Dataset Card for "Process_tested-Shularp-Process_tested-facebook-floresarb_Arab_to_eng_Latn" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Shularp/Process_tested-Shularp-Process_tested-facebook-floresarb_Arab_to_eng_Latn
[ "region:us" ]
2022-12-07T10:18:20+00:00
{"dataset_info": {"features": [{"name": "translation", "struct": [{"name": "ar", "dtype": "string"}, {"name": "en", "dtype": "string"}]}, {"name": "id", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 361758, "num_examples": 997}, {"name": "test", "num_bytes": 379791, "num_examples": 1012}], "download_size": 412821, "dataset_size": 741549}}
2022-12-07T10:18:42+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Process_tested-Shularp-Process_tested-facebook-floresarb_Arab_to_eng_Latn" More Information needed
[ "# Dataset Card for \"Process_tested-Shularp-Process_tested-facebook-floresarb_Arab_to_eng_Latn\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Process_tested-Shularp-Process_tested-facebook-floresarb_Arab_to_eng_Latn\"\n\nMore Information needed" ]
12141ed192361f8a2721aaae8c391b435a397365
# Dataset Card for "news_commentary" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Shularp/news_commentary
[ "region:us" ]
2022-12-07T10:29:05+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "translation", "dtype": {"translation": {"languages": ["ar", "en"]}}}], "splits": [{"name": "train", "num_bytes": 72589357.6306394, "num_examples": 74868}, {"name": "test", "num_bytes": 8065807.369360597, "num_examples": 8319}], "download_size": 45743247, "dataset_size": 80655165.0}}
2022-12-07T10:29:34+00:00
[]
[]
TAGS #region-us
# Dataset Card for "news_commentary" More Information needed
[ "# Dataset Card for \"news_commentary\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"news_commentary\"\n\nMore Information needed" ]
f61ed293f976a0b025b4004c06d41b12ecfd799c
About Dataset This is a subset of the ArXiv dataset from Kaggle https://www.kaggle.com/datasets/Cornell-University/arxiv About ArXiv For nearly 30 years, ArXiv has served the public and research communities by providing open access to scholarly articles, from the vast branches of physics to the many subdisciplines of computer science to everything in between, including math, statistics, electrical engineering, quantitative biology, and economics. This rich corpus of information offers significant, but sometimes overwhelming depth. In these times of unique global challenges, efficient extraction of insights from data is essential. To help make the arXiv more accessible, we present a free, open pipeline on Kaggle to the machine-readable arXiv dataset: a repository of 1.7 million articles, with relevant features such as article titles, authors, categories, abstracts, full text PDFs, and more. Our hope is to empower new use cases that can lead to the exploration of richer machine learning techniques that combine multi-modal features towards applications like trend analysis, paper recommender engines, category prediction, co-citation networks, knowledge graph construction and semantic search interfaces. The dataset is freely available via Google Cloud Storage buckets (more info here). Stay tuned for weekly updates to the dataset! ArXiv is a collaboratively funded, community-supported resource founded by Paul Ginsparg in 1991 and maintained and operated by Cornell University. The release of this dataset was featured further in a Kaggle blog post here. See here for more information. ArXiv On Kaggle Metadata This dataset is a mirror of the original ArXiv data. Because the full dataset is rather large (1.1TB and growing), this dataset provides only a metadata file in the json format. This file contains an entry for each paper, containing: id: ArXiv ID (can be used to access the paper, see below) submitter: Who submitted the paper authors: Authors of the paper title: Title of the paper comments: Additional info, such as number of pages and figures journal-ref: Information about the journal the paper was published in doi: [https://www.doi.org](Digital Object Identifier) abstract: The abstract of the paper categories: Categories / tags in the ArXiv system versions: A version history You can access each paper directly on ArXiv using these links: https://arxiv.org/abs/{id}: Page for this paper including its abstract and further links https://arxiv.org/pdf/{id}: Direct link to download the PDF License Creative Commons CC0 1.0 Universal Public Domain Dedication applies to the metadata in this dataset. See https://arxiv.org/help/license for further details and licensing on individual papers. Acknowledgements The original data is maintained by ArXiv, huge thanks to the team for building and maintaining this dataset. We're using https://github.com/mattbierbaum/arxiv-public-datasets to pull the original data, thanks to Matt Bierbaum for providing this tool.
mwitiderrick/arXiv
[ "license:cc0-1.0", "region:us" ]
2022-12-07T10:30:20+00:00
{"license": "cc0-1.0"}
2022-12-07T10:46:56+00:00
[]
[]
TAGS #license-cc0-1.0 #region-us
About Dataset This is a subset of the ArXiv dataset from Kaggle URL About ArXiv For nearly 30 years, ArXiv has served the public and research communities by providing open access to scholarly articles, from the vast branches of physics to the many subdisciplines of computer science to everything in between, including math, statistics, electrical engineering, quantitative biology, and economics. This rich corpus of information offers significant, but sometimes overwhelming depth. In these times of unique global challenges, efficient extraction of insights from data is essential. To help make the arXiv more accessible, we present a free, open pipeline on Kaggle to the machine-readable arXiv dataset: a repository of 1.7 million articles, with relevant features such as article titles, authors, categories, abstracts, full text PDFs, and more. Our hope is to empower new use cases that can lead to the exploration of richer machine learning techniques that combine multi-modal features towards applications like trend analysis, paper recommender engines, category prediction, co-citation networks, knowledge graph construction and semantic search interfaces. The dataset is freely available via Google Cloud Storage buckets (more info here). Stay tuned for weekly updates to the dataset! ArXiv is a collaboratively funded, community-supported resource founded by Paul Ginsparg in 1991 and maintained and operated by Cornell University. The release of this dataset was featured further in a Kaggle blog post here. See here for more information. ArXiv On Kaggle Metadata This dataset is a mirror of the original ArXiv data. Because the full dataset is rather large (1.1TB and growing), this dataset provides only a metadata file in the json format. This file contains an entry for each paper, containing: id: ArXiv ID (can be used to access the paper, see below) submitter: Who submitted the paper authors: Authors of the paper title: Title of the paper comments: Additional info, such as number of pages and figures journal-ref: Information about the journal the paper was published in doi: URL abstract: The abstract of the paper categories: Categories / tags in the ArXiv system versions: A version history You can access each paper directly on ArXiv using these links: URL Page for this paper including its abstract and further links URL Direct link to download the PDF License Creative Commons CC0 1.0 Universal Public Domain Dedication applies to the metadata in this dataset. See URL for further details and licensing on individual papers. Acknowledgements The original data is maintained by ArXiv, huge thanks to the team for building and maintaining this dataset. We're using URL to pull the original data, thanks to Matt Bierbaum for providing this tool.
[]
[ "TAGS\n#license-cc0-1.0 #region-us \n" ]
d9f5eae4f7a61c89806edec33e450230b880091e
# Dataset Card for "un_multi-ar-en" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Shularp/un_multi-ar-en
[ "region:us" ]
2022-12-07T10:56:27+00:00
{"dataset_info": {"features": [{"name": "translation", "dtype": {"translation": {"languages": ["ar", "en"]}}}], "splits": [{"name": "train", "num_bytes": 4189844561, "num_examples": 9759125}], "download_size": 1926773979, "dataset_size": 4189844561}}
2022-12-07T11:00:47+00:00
[]
[]
TAGS #region-us
# Dataset Card for "un_multi-ar-en" More Information needed
[ "# Dataset Card for \"un_multi-ar-en\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"un_multi-ar-en\"\n\nMore Information needed" ]
11ab6c4fb880495f2e3ef4298b57e36cd0b2cfb5
# Dataset Card for "medical-keywords" ## Dataset Description - **Homepage:** Kaggle Challenge - **Repository:** https://www.kaggle.com/datasets/tboyle10/medicaltranscriptions - **Paper:** N.A. - **Leaderboard:** N.A. - **Point of Contact:** N.A. ### Dataset Summary Medical transcription data scraped from mtsamples.com Medical data is extremely hard to find due to HIPAA privacy regulations. This dataset offers a solution by providing medical transcription samples. This dataset contains sample medical transcriptions for various medical specialties. ### Languages english ### Citation Information Acknowledgements Medical transcription data scraped from mtsamples.com ### Contributions Thanks to [@davidberenstein1957](https://github.com/davidberenstein1957) for adding this dataset.
argilla/medical-keywords
[ "task_categories:token-classification", "task_ids:named-entity-recognition", "size_categories:100K<n<1M", "source_datasets:original", "language:en", "region:us" ]
2022-12-07T11:49:17+00:00
{"language": ["en"], "size_categories": ["100K<n<1M"], "source_datasets": ["original"], "task_categories": ["token-classification"], "task_ids": ["keyphrase-extraction", "named-entity-recognition"], "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "prediction", "list": [{"name": "end", "dtype": "int64"}, {"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}, {"name": "start", "dtype": "int64"}]}, {"name": "prediction_agent", "dtype": "string"}, {"name": "annotation", "dtype": "null"}, {"name": "annotation_agent", "dtype": "null"}, {"name": "id", "dtype": "null"}, {"name": "metadata", "struct": [{"name": "medical_specialty", "dtype": "string"}]}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "timestamp[us]"}, {"name": "metrics", "dtype": "null"}], "splits": [{"name": "train", "num_bytes": 58986555, "num_examples": 148699}], "download_size": 17498377, "dataset_size": 58986555}}
2022-12-07T12:00:34+00:00
[]
[ "en" ]
TAGS #task_categories-token-classification #task_ids-named-entity-recognition #size_categories-100K<n<1M #source_datasets-original #language-English #region-us
# Dataset Card for "medical-keywords" ## Dataset Description - Homepage: Kaggle Challenge - Repository: URL - Paper: N.A. - Leaderboard: N.A. - Point of Contact: N.A. ### Dataset Summary Medical transcription data scraped from URL Medical data is extremely hard to find due to HIPAA privacy regulations. This dataset offers a solution by providing medical transcription samples. This dataset contains sample medical transcriptions for various medical specialties. ### Languages english Acknowledgements Medical transcription data scraped from URL ### Contributions Thanks to @davidberenstein1957 for adding this dataset.
[ "# Dataset Card for \"medical-keywords\"", "## Dataset Description\n\n- Homepage: Kaggle Challenge\n- Repository: URL\n- Paper: N.A.\n- Leaderboard: N.A.\n- Point of Contact: N.A.", "### Dataset Summary\n\nMedical transcription data scraped from URL\nMedical data is extremely hard to find due to HIPAA privacy regulations. This dataset offers a solution by providing medical transcription samples.\nThis dataset contains sample medical transcriptions for various medical specialties.", "### Languages\n\nenglish \n\n\n\nAcknowledgements\n\nMedical transcription data scraped from URL", "### Contributions\n\nThanks to @davidberenstein1957 for adding this dataset." ]
[ "TAGS\n#task_categories-token-classification #task_ids-named-entity-recognition #size_categories-100K<n<1M #source_datasets-original #language-English #region-us \n", "# Dataset Card for \"medical-keywords\"", "## Dataset Description\n\n- Homepage: Kaggle Challenge\n- Repository: URL\n- Paper: N.A.\n- Leaderboard: N.A.\n- Point of Contact: N.A.", "### Dataset Summary\n\nMedical transcription data scraped from URL\nMedical data is extremely hard to find due to HIPAA privacy regulations. This dataset offers a solution by providing medical transcription samples.\nThis dataset contains sample medical transcriptions for various medical specialties.", "### Languages\n\nenglish \n\n\n\nAcknowledgements\n\nMedical transcription data scraped from URL", "### Contributions\n\nThanks to @davidberenstein1957 for adding this dataset." ]
660af60fe25551275f1f67914a152ed359e8b935
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Multi-class Text Classification * Model: lewtun/minilm-finetuned-emotion * Dataset: emotion * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate/autoeval-staging-eval-emotion-default-e0ea2e-17426357
[ "autotrain", "evaluation", "region:us" ]
2022-12-07T12:27:28+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["emotion"], "eval_info": {"task": "multi_class_classification", "model": "lewtun/minilm-finetuned-emotion", "metrics": [], "dataset_name": "emotion", "dataset_config": "default", "dataset_split": "test", "col_mapping": {"text": "text", "target": "label"}}}
2022-12-07T12:27:54+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Multi-class Text Classification * Model: lewtun/minilm-finetuned-emotion * Dataset: emotion * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @lewtun for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Multi-class Text Classification\n* Model: lewtun/minilm-finetuned-emotion\n* Dataset: emotion\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Multi-class Text Classification\n* Model: lewtun/minilm-finetuned-emotion\n* Dataset: emotion\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
782f20d6e116a526d592b5cf249d429dee73719f
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Multi-class Text Classification * Model: lewtun/sagemaker-distilbert-emotion * Dataset: emotion * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate/autoeval-staging-eval-emotion-default-e0ea2e-17426358
[ "autotrain", "evaluation", "region:us" ]
2022-12-07T12:27:53+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["emotion"], "eval_info": {"task": "multi_class_classification", "model": "lewtun/sagemaker-distilbert-emotion", "metrics": [], "dataset_name": "emotion", "dataset_config": "default", "dataset_split": "test", "col_mapping": {"text": "text", "target": "label"}}}
2022-12-07T12:28:22+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Multi-class Text Classification * Model: lewtun/sagemaker-distilbert-emotion * Dataset: emotion * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @lewtun for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Multi-class Text Classification\n* Model: lewtun/sagemaker-distilbert-emotion\n* Dataset: emotion\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Multi-class Text Classification\n* Model: lewtun/sagemaker-distilbert-emotion\n* Dataset: emotion\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
c16fc003e832c68a86ce8f451915f04ac3883817
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Multi-class Text Classification * Model: lewiswatson/distilbert-base-uncased-finetuned-emotion * Dataset: emotion * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate/autoeval-staging-eval-emotion-default-e0ea2e-17426359
[ "autotrain", "evaluation", "region:us" ]
2022-12-07T12:27:53+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["emotion"], "eval_info": {"task": "multi_class_classification", "model": "lewiswatson/distilbert-base-uncased-finetuned-emotion", "metrics": [], "dataset_name": "emotion", "dataset_config": "default", "dataset_split": "test", "col_mapping": {"text": "text", "target": "label"}}}
2022-12-07T12:28:20+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Multi-class Text Classification * Model: lewiswatson/distilbert-base-uncased-finetuned-emotion * Dataset: emotion * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @lewtun for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Multi-class Text Classification\n* Model: lewiswatson/distilbert-base-uncased-finetuned-emotion\n* Dataset: emotion\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Multi-class Text Classification\n* Model: lewiswatson/distilbert-base-uncased-finetuned-emotion\n* Dataset: emotion\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
3c96f3e1bb9fe73d0100d8680b5e84f6984c2186
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: autoevaluate/roberta-base-squad2 * Dataset: autoevaluate/squad-sample * Config: autoevaluate--squad-sample * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate/autoeval-staging-eval-autoevaluate__squad-sample-autoevaluate__squad-sample-778ba0-17436361
[ "autotrain", "evaluation", "region:us" ]
2022-12-07T12:28:20+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["autoevaluate/squad-sample"], "eval_info": {"task": "extractive_question_answering", "model": "autoevaluate/roberta-base-squad2", "metrics": [], "dataset_name": "autoevaluate/squad-sample", "dataset_config": "autoevaluate--squad-sample", "dataset_split": "test", "col_mapping": {"context": "context", "question": "question", "answers-text": "answers.text", "answers-answer_start": "answers.answer_start"}}}
2022-12-07T12:28:50+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Question Answering * Model: autoevaluate/roberta-base-squad2 * Dataset: autoevaluate/squad-sample * Config: autoevaluate--squad-sample * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @lewtun for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Question Answering\n* Model: autoevaluate/roberta-base-squad2\n* Dataset: autoevaluate/squad-sample\n* Config: autoevaluate--squad-sample\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Question Answering\n* Model: autoevaluate/roberta-base-squad2\n* Dataset: autoevaluate/squad-sample\n* Config: autoevaluate--squad-sample\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
e617dfb7c33abc745d247e8155215edcf32b3a8d
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: autoevaluate/extractive-question-answering * Dataset: autoevaluate/squad-sample * Config: autoevaluate--squad-sample * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate/autoeval-staging-eval-autoevaluate__squad-sample-autoevaluate__squad-sample-778ba0-17436362
[ "autotrain", "evaluation", "region:us" ]
2022-12-07T12:28:20+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["autoevaluate/squad-sample"], "eval_info": {"task": "extractive_question_answering", "model": "autoevaluate/extractive-question-answering", "metrics": [], "dataset_name": "autoevaluate/squad-sample", "dataset_config": "autoevaluate--squad-sample", "dataset_split": "test", "col_mapping": {"context": "context", "question": "question", "answers-text": "answers.text", "answers-answer_start": "answers.answer_start"}}}
2022-12-07T12:28:43+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Question Answering * Model: autoevaluate/extractive-question-answering * Dataset: autoevaluate/squad-sample * Config: autoevaluate--squad-sample * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @lewtun for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Question Answering\n* Model: autoevaluate/extractive-question-answering\n* Dataset: autoevaluate/squad-sample\n* Config: autoevaluate--squad-sample\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Question Answering\n* Model: autoevaluate/extractive-question-answering\n* Dataset: autoevaluate/squad-sample\n* Config: autoevaluate--squad-sample\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
aa68f82199d3686be94fe56eb7097dc979573a6a
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: autoevaluate/extractive-question-answering-not-evaluated * Dataset: autoevaluate/squad-sample * Config: autoevaluate--squad-sample * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate/autoeval-staging-eval-autoevaluate__squad-sample-autoevaluate__squad-sample-778ba0-17436360
[ "autotrain", "evaluation", "region:us" ]
2022-12-07T12:28:20+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["autoevaluate/squad-sample"], "eval_info": {"task": "extractive_question_answering", "model": "autoevaluate/extractive-question-answering-not-evaluated", "metrics": [], "dataset_name": "autoevaluate/squad-sample", "dataset_config": "autoevaluate--squad-sample", "dataset_split": "test", "col_mapping": {"context": "context", "question": "question", "answers-text": "answers.text", "answers-answer_start": "answers.answer_start"}}}
2022-12-07T12:28:44+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Question Answering * Model: autoevaluate/extractive-question-answering-not-evaluated * Dataset: autoevaluate/squad-sample * Config: autoevaluate--squad-sample * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @lewtun for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Question Answering\n* Model: autoevaluate/extractive-question-answering-not-evaluated\n* Dataset: autoevaluate/squad-sample\n* Config: autoevaluate--squad-sample\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Question Answering\n* Model: autoevaluate/extractive-question-answering-not-evaluated\n* Dataset: autoevaluate/squad-sample\n* Config: autoevaluate--squad-sample\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
41aef9650901fa2169213f38d49bd6491eb9b77a
# CLUE-NER Named Entity Recognition Dataset Field descriptions + `text`: the text + `entities`: the entities contained in the text + `id`: the entity `id` + `entity`: the string corresponding to the entity + `start_offset`: start position of the entity + `end_offset`: the position one past the end of the entity + `label`: the label of the entity
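The field list above mirrors the features declared in the repository metadata. As a minimal, illustrative sketch only (assuming the repository loads directly with the `datasets` library and that `end_offset` is exclusive, as described), the entity spans can be recovered like this:

```python
from datasets import load_dataset

# Load the CLUE-NER training split from the Hugging Face Hub.
ds = load_dataset("xusenlin/clue-ner", split="train")

example = ds[0]
print(example["text"])

# Entity offsets index into `text`; `end_offset` is one past the last character.
for ent in example["entities"]:
    span = example["text"][ent["start_offset"]:ent["end_offset"]]
    print(ent["label"], span, ent["entity"])
```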
xusenlin/clue-ner
[ "language:zh", "license:apache-2.0", "named entity recognition", "clue", "region:us" ]
2022-12-07T13:14:03+00:00
{"language": ["zh"], "license": "apache-2.0", "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "entities", "list": [{"name": "id", "dtype": "int64"}, {"name": "entity", "dtype": "string"}, {"name": "start_offset", "dtype": "int64"}, {"name": "end_offset", "dtype": "int64"}, {"name": "label", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 2443356, "num_examples": 10748}, {"name": "test", "num_bytes": 154492, "num_examples": 1345}, {"name": "validation", "num_bytes": 309106, "num_examples": 1343}], "download_size": 1658426, "dataset_size": 2906954}, "tags": ["named entity recognition", "clue"]}
2022-12-07T14:22:37+00:00
[]
[ "zh" ]
TAGS #language-Chinese #license-apache-2.0 #named entity recognition #clue #region-us
# CLUE-NER Named Entity Recognition Dataset Field descriptions + 'text': the text + 'entities': the entities contained in the text + 'id': the entity 'id' + 'entity': the string corresponding to the entity + 'start_offset': start position of the entity + 'end_offset': the position one past the end of the entity + 'label': the label of the entity
[ "# CLUE-NER Named Entity Recognition Dataset\n\nField descriptions\n\n+ 'text': the text\n\n+ 'entities': the entities contained in the text\n\n + 'id': the entity 'id'\n\n + 'entity': the string corresponding to the entity\n\n + 'start_offset': start position of the entity\n\n + 'end_offset': the position one past the end of the entity\n\n + 'label': the label of the entity" ]
[ "TAGS\n#language-Chinese #license-apache-2.0 #named entity recognition #clue #region-us \n", "# CLUE-NER Named Entity Recognition Dataset\n\nField descriptions\n\n+ 'text': the text\n\n+ 'entities': the entities contained in the text\n\n + 'id': the entity 'id'\n\n + 'entity': the string corresponding to the entity\n\n + 'start_offset': start position of the entity\n\n + 'end_offset': the position one past the end of the entity\n\n + 'label': the label of the entity" ]
f3208919bc8a6695b6612a77e990db4180c0b000
# Dataset Card for "arxiv-pyserini" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
cakiki/arxiv-pyserini
[ "region:us" ]
2022-12-07T13:27:11+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "submitter", "dtype": "string"}, {"name": "authors", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "comments", "dtype": "string"}, {"name": "journal-ref", "dtype": "string"}, {"name": "doi", "dtype": "string"}, {"name": "report-no", "dtype": "string"}, {"name": "categories", "dtype": "string"}, {"name": "license", "dtype": "string"}, {"name": "abstract", "dtype": "string"}, {"name": "versions", "list": [{"name": "created", "dtype": "string"}, {"name": "version", "dtype": "string"}]}, {"name": "update_date", "dtype": "string"}, {"name": "authors_parsed", "sequence": {"sequence": "string"}}], "splits": [{"name": "train", "num_bytes": 3217788413, "num_examples": 2171090}], "download_size": 1801274080, "dataset_size": 3217788413}}
2022-12-07T15:31:56+00:00
[]
[]
TAGS #region-us
# Dataset Card for "arxiv-pyserini" More Information needed
[ "# Dataset Card for \"arxiv-pyserini\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"arxiv-pyserini\"\n\nMore Information needed" ]
9bbf3992d70b4282f5f98b09b56bc43160fe45ed
# Dataset Card for "small-oscar-dedup" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ola13/small-oscar-dedup
[ "region:us" ]
2022-12-07T13:44:16+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "domain", "dtype": "string"}, {"name": "perplexity", "dtype": "float64"}, {"name": "dup_ratio", "dtype": "float64"}, {"name": "pairs", "sequence": {"sequence": "int64"}}, {"name": "repetitions", "sequence": "binary"}, {"name": "cluster", "sequence": {"sequence": "int64"}}], "splits": [{"name": "train", "num_bytes": 323557137, "num_examples": 43200}], "download_size": 0, "dataset_size": 323557137}}
2022-12-07T15:48:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "small-oscar-dedup" More Information needed
[ "# Dataset Card for \"small-oscar-dedup\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"small-oscar-dedup\"\n\nMore Information needed" ]
870af1f0f8f400666648265478d86b6dd1524bbb
# Dataset Card for "small-oscar-repetitions" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ola13/small-oscar-repetitions
[ "region:us" ]
2022-12-07T13:44:34+00:00
{"dataset_info": {"features": [{"name": "repetition", "dtype": "binary"}, {"name": "ids", "sequence": "int64"}, {"name": "num_docs", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 55598378, "num_examples": 166667}], "download_size": 0, "dataset_size": 55598378}}
2022-12-07T15:49:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for "small-oscar-repetitions" More Information needed
[ "# Dataset Card for \"small-oscar-repetitions\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"small-oscar-repetitions\"\n\nMore Information needed" ]
05c3d12fab6ce69c72e9288a0fee86c021e40f71
# Dataset Card for "olm-october-2022-tokenized-1024-no-bigscience-filters" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Tristan/olm-october-2022-tokenized-1024-no-bigscience-filters
[ "region:us" ]
2022-12-07T14:01:41+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "special_tokens_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 79176169656.0, "num_examples": 12861626}], "download_size": 21440888036, "dataset_size": 79176169656.0}}
2022-12-07T14:49:33+00:00
[]
[]
TAGS #region-us
# Dataset Card for "olm-october-2022-tokenized-1024-no-bigscience-filters" More Information needed
[ "# Dataset Card for \"olm-october-2022-tokenized-1024-no-bigscience-filters\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"olm-october-2022-tokenized-1024-no-bigscience-filters\"\n\nMore Information needed" ]
7b405c185b5d8ce3bc0d74767e2c9107cc69a58a
# CMeEE Chinese Medical Named Entity Recognition Dataset Field descriptions + `text`: the text + `entities`: the entities contained in the text + `id`: the entity `id` + `entity`: the string corresponding to the entity + `start_offset`: start position of the entity + `end_offset`: the position one past the end of the entity + `label`: the label of the entity
xusenlin/cmeee
[ "region:us" ]
2022-12-07T14:16:08+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "entities", "list": [{"name": "id", "dtype": "int64"}, {"name": "entity", "dtype": "string"}, {"name": "start_offset", "dtype": "int64"}, {"name": "end_offset", "dtype": "int64"}, {"name": "label", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 5289666, "num_examples": 15000}, {"name": "test", "num_bytes": 461472, "num_examples": 3000}, {"name": "validation", "num_bytes": 1752698, "num_examples": 5000}], "download_size": 3359069, "dataset_size": 7503836}}
2022-12-07T14:24:00+00:00
[]
[]
TAGS #region-us
# CMeEE Chinese Medical Named Entity Recognition Dataset Field descriptions + 'text': the text + 'entities': the entities contained in the text + 'id': the entity 'id' + 'entity': the string corresponding to the entity + 'start_offset': start position of the entity + 'end_offset': the position one past the end of the entity + 'label': the label of the entity
[ "# CMeEE Chinese Medical Named Entity Recognition Dataset\n\nField descriptions\n\n+ 'text': the text\n\n+ 'entities': the entities contained in the text\n\n + 'id': the entity 'id'\n\n + 'entity': the string corresponding to the entity\n\n + 'start_offset': start position of the entity\n\n + 'end_offset': the position one past the end of the entity\n\n + 'label': the label of the entity" ]
[ "TAGS\n#region-us \n", "# CMeEE Chinese Medical Named Entity Recognition Dataset\n\nField descriptions\n\n+ 'text': the text\n\n+ 'entities': the entities contained in the text\n\n + 'id': the entity 'id'\n\n + 'entity': the string corresponding to the entity\n\n + 'start_offset': start position of the entity\n\n + 'end_offset': the position one past the end of the entity\n\n + 'label': the label of the entity" ]
0b87dc36a9332970a119ee6d719c0729b1ced900
# People's Daily Named Entity Recognition Dataset Field descriptions + `text`: the text + `entities`: the entities contained in the text + `id`: the entity `id` + `entity`: the string corresponding to the entity + `start_offset`: start position of the entity + `end_offset`: the position one past the end of the entity + `label`: the label of the entity
xusenlin/people-daily-ner
[ "region:us" ]
2022-12-07T14:28:09+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "entities", "list": [{"name": "id", "dtype": "int64"}, {"name": "entity", "dtype": "string"}, {"name": "start_offset", "dtype": "int64"}, {"name": "end_offset", "dtype": "int64"}, {"name": "label", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 4564472, "num_examples": 20864}, {"name": "test", "num_bytes": 1025142, "num_examples": 4636}, {"name": "validation", "num_bytes": 510546, "num_examples": 2318}], "download_size": 3891711, "dataset_size": 6100160}}
2022-12-07T14:31:42+00:00
[]
[]
TAGS #region-us
# People's Daily Named Entity Recognition Dataset Field descriptions + 'text': the text + 'entities': the entities contained in the text + 'id': the entity 'id' + 'entity': the string corresponding to the entity + 'start_offset': start position of the entity + 'end_offset': the position one past the end of the entity + 'label': the label of the entity
[ "# People's Daily Named Entity Recognition Dataset\n\nField descriptions\n\n+ 'text': the text\n\n+ 'entities': the entities contained in the text\n\n + 'id': the entity 'id'\n\n + 'entity': the string corresponding to the entity\n\n + 'start_offset': start position of the entity\n\n + 'end_offset': the position one past the end of the entity\n\n + 'label': the label of the entity" ]
[ "TAGS\n#region-us \n", "# People's Daily Named Entity Recognition Dataset\n\nField descriptions\n\n+ 'text': the text\n\n+ 'entities': the entities contained in the text\n\n + 'id': the entity 'id'\n\n + 'entity': the string corresponding to the entity\n\n + 'start_offset': start position of the entity\n\n + 'end_offset': the position one past the end of the entity\n\n + 'label': the label of the entity" ]
2efa75f1087ac76dc82534a615a9467b0e9b9d8e
# DuIE Relation Extraction Dataset Field descriptions + `text`: the text + `spo_list`: the relation triples contained in the text + `subject`: the head entity (subject) + `subject_type`: the type of the head entity (subject) + `object`: the tail entity (object) + `object_type`: the type of the tail entity (object) + `predicate`: the relation
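As an illustrative sketch only (assuming the repository loads directly with the `datasets` library), the triples can be iterated like this:

```python
from datasets import load_dataset

# Load the DuIE training split from the Hugging Face Hub.
ds = load_dataset("xusenlin/duie", split="train")

example = ds[0]
print(example["text"])

# Each element of spo_list is one (subject, predicate, object) triple plus entity types.
for spo in example["spo_list"]:
    print((spo["subject"], spo["predicate"], spo["object"]),
          "types:", spo["subject_type"], "->", spo["object_type"])
```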
xusenlin/duie
[ "region:us" ]
2022-12-07T14:41:25+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "spo_list", "list": [{"name": "predicate", "dtype": "string"}, {"name": "object_type", "dtype": "string"}, {"name": "subject_type", "dtype": "string"}, {"name": "object", "dtype": "string"}, {"name": "subject", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 51849478, "num_examples": 172983}, {"name": "validation", "num_bytes": 6512116, "num_examples": 21626}], "download_size": 32568292, "dataset_size": 58361594}}
2022-12-07T14:49:54+00:00
[]
[]
TAGS #region-us
# DuIE Relation Extraction Dataset Field descriptions + 'text': the text + 'spo_list': the relation triples contained in the text + 'subject': the head entity (subject) + 'subject_type': the type of the head entity (subject) + 'object': the tail entity (object) + 'object_type': the type of the tail entity (object) + 'predicate': the relation
[ "# DuIE Relation Extraction Dataset\n\nField descriptions\n\n+ 'text': the text\n\n+ 'spo_list': the relation triples contained in the text\n\n + 'subject': the head entity (subject)\n\n + 'subject_type': the type of the head entity (subject)\n\n + 'object': the tail entity (object)\n\n + 'object_type': the type of the tail entity (object)\n\n + 'predicate': the relation" ]
[ "TAGS\n#region-us \n", "# DuIE Relation Extraction Dataset\n\nField descriptions\n\n+ 'text': the text\n\n+ 'spo_list': the relation triples contained in the text\n\n + 'subject': the head entity (subject)\n\n + 'subject_type': the type of the head entity (subject)\n\n + 'object': the tail entity (object)\n\n + 'object_type': the type of the tail entity (object)\n\n + 'predicate': the relation" ]
f124af73444fb36f19a8c4cab2436c2079d61a94
# Dataset Card for "banking_with_vectors" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dvilasuero/banking_with_vectors
[ "region:us" ]
2022-12-07T14:52:52+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "activate_my_card", "1": "age_limit", "2": "apple_pay_or_google_pay", "3": "atm_support", "4": "automatic_top_up", "5": "balance_not_updated_after_bank_transfer", "6": "balance_not_updated_after_cheque_or_cash_deposit", "7": "beneficiary_not_allowed", "8": "cancel_transfer", "9": "card_about_to_expire", "10": "card_acceptance", "11": "card_arrival", "12": "card_delivery_estimate", "13": "card_linking", "14": "card_not_working", "15": "card_payment_fee_charged", "16": "card_payment_not_recognised", "17": "card_payment_wrong_exchange_rate", "18": "card_swallowed", "19": "cash_withdrawal_charge", "20": "cash_withdrawal_not_recognised", "21": "change_pin", "22": "compromised_card", "23": "contactless_not_working", "24": "country_support", "25": "declined_card_payment", "26": "declined_cash_withdrawal", "27": "declined_transfer", "28": "direct_debit_payment_not_recognised", "29": "disposable_card_limits", "30": "edit_personal_details", "31": "exchange_charge", "32": "exchange_rate", "33": "exchange_via_app", "34": "extra_charge_on_statement", "35": "failed_transfer", "36": "fiat_currency_support", "37": "get_disposable_virtual_card", "38": "get_physical_card", "39": "getting_spare_card", "40": "getting_virtual_card", "41": "lost_or_stolen_card", "42": "lost_or_stolen_phone", "43": "order_physical_card", "44": "passcode_forgotten", "45": "pending_card_payment", "46": "pending_cash_withdrawal", "47": "pending_top_up", "48": "pending_transfer", "49": "pin_blocked", "50": "receiving_money", "51": "Refund_not_showing_up", "52": "request_refund", "53": "reverted_card_payment?", "54": "supported_cards_and_currencies", "55": "terminate_account", "56": "top_up_by_bank_transfer_charge", "57": "top_up_by_card_charge", "58": "top_up_by_cash_or_cheque", "59": "top_up_failed", "60": "top_up_limits", "61": "top_up_reverted", "62": "topping_up_by_card", "63": "transaction_charged_twice", "64": "transfer_fee_charged", "65": "transfer_into_account", "66": "transfer_not_received_by_recipient", "67": "transfer_timing", "68": "unable_to_verify_identity", "69": "verify_my_identity", "70": "verify_source_of_funds", "71": "verify_top_up", "72": "virtual_card_not_working", "73": "visa_or_mastercard", "74": "why_verify_identity", "75": "wrong_amount_of_cash_received", "76": "wrong_exchange_rate_for_cash_withdrawal"}}}}], "splits": [{"name": "test", "num_bytes": 204010, "num_examples": 3080}], "download_size": 89116, "dataset_size": 204010}}
2022-12-07T14:53:01+00:00
[]
[]
TAGS #region-us
# Dataset Card for "banking_with_vectors" More Information needed
[ "# Dataset Card for \"banking_with_vectors\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"banking_with_vectors\"\n\nMore Information needed" ]
a37e2f0d19c749948fd7e0a090936832a3dae865
# Dataset Card for "banking77_vectors" Install `pip install fast-sentence-transformers` ```python from fast_sentence_transformers import FastSentenceTransformer as SentenceTransformer from datasets import load_dataset # use any sentence-transformer encoder = SentenceTransformer("all-MiniLM-L6-v2", device="cpu") dataset = load_dataset("banking77", split="test") dataset = dataset.map(lambda batch: {"vector": encoder.encode(batch["text"])}, batch_size=32, batched=True) ``` [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dvilasuero/banking77_vectors
[ "region:us" ]
2022-12-07T14:54:00+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "activate_my_card", "1": "age_limit", "2": "apple_pay_or_google_pay", "3": "atm_support", "4": "automatic_top_up", "5": "balance_not_updated_after_bank_transfer", "6": "balance_not_updated_after_cheque_or_cash_deposit", "7": "beneficiary_not_allowed", "8": "cancel_transfer", "9": "card_about_to_expire", "10": "card_acceptance", "11": "card_arrival", "12": "card_delivery_estimate", "13": "card_linking", "14": "card_not_working", "15": "card_payment_fee_charged", "16": "card_payment_not_recognised", "17": "card_payment_wrong_exchange_rate", "18": "card_swallowed", "19": "cash_withdrawal_charge", "20": "cash_withdrawal_not_recognised", "21": "change_pin", "22": "compromised_card", "23": "contactless_not_working", "24": "country_support", "25": "declined_card_payment", "26": "declined_cash_withdrawal", "27": "declined_transfer", "28": "direct_debit_payment_not_recognised", "29": "disposable_card_limits", "30": "edit_personal_details", "31": "exchange_charge", "32": "exchange_rate", "33": "exchange_via_app", "34": "extra_charge_on_statement", "35": "failed_transfer", "36": "fiat_currency_support", "37": "get_disposable_virtual_card", "38": "get_physical_card", "39": "getting_spare_card", "40": "getting_virtual_card", "41": "lost_or_stolen_card", "42": "lost_or_stolen_phone", "43": "order_physical_card", "44": "passcode_forgotten", "45": "pending_card_payment", "46": "pending_cash_withdrawal", "47": "pending_top_up", "48": "pending_transfer", "49": "pin_blocked", "50": "receiving_money", "51": "Refund_not_showing_up", "52": "request_refund", "53": "reverted_card_payment?", "54": "supported_cards_and_currencies", "55": "terminate_account", "56": "top_up_by_bank_transfer_charge", "57": "top_up_by_card_charge", "58": "top_up_by_cash_or_cheque", "59": "top_up_failed", "60": "top_up_limits", "61": "top_up_reverted", "62": "topping_up_by_card", "63": "transaction_charged_twice", "64": "transfer_fee_charged", "65": "transfer_into_account", "66": "transfer_not_received_by_recipient", "67": "transfer_timing", "68": "unable_to_verify_identity", "69": "verify_my_identity", "70": "verify_source_of_funds", "71": "verify_top_up", "72": "virtual_card_not_working", "73": "visa_or_mastercard", "74": "why_verify_identity", "75": "wrong_amount_of_cash_received", "76": "wrong_exchange_rate_for_cash_withdrawal"}}}}, {"name": "vector", "sequence": "float32"}], "splits": [{"name": "test", "num_bytes": 4947210, "num_examples": 3080}], "download_size": 6749950, "dataset_size": 4947210}}
2022-12-07T14:56:59+00:00
[]
[]
TAGS #region-us
# Dataset Card for "banking77_vectors" Install 'pip install fast-sentence-transformers' More Information needed
[ "# Dataset Card for \"banking77_vectors\"\n\n\nInstall 'pip install fast-sentence-transformers'\n\n\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"banking77_vectors\"\n\n\nInstall 'pip install fast-sentence-transformers'\n\n\n\nMore Information needed" ]
f2e1c4c7628e20d6919b9192a7cd8f576c3a0603
Each line includes one example, represented as a JSON object. The critical fields are:
- `sentence`: the natural language sentence describing the pair of images for this example.
- `left_url`: the URL of the left image in the pair.
- `right_url`: the URL of the right image in the pair.
- `label`: the label, true or false.
- `identifier`: the unique identifier of the example, in the format split-set_id-pair_id-sentence-id. split is the split of the data (train, test, or development). set_id is the unique identifier of the original eight-image set used in the sentence-writing task. pair_id indicates which of the pairs in the set it corresponds to (and is between 0 and 3). sentence-id indicates which of the sentences is associated with this pair (and is either 0 or 1 -- each image pair is associated with at most two sentences).
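A minimal parsing sketch (the field values below are invented for illustration; only the field names and the identifier layout come from the description above):

```python
import json

# One made-up line in the JSON Lines format described above.
line = ('{"sentence": "There are two dogs in total.", '
        '"left_url": "http://example.com/left.jpg", '
        '"right_url": "http://example.com/right.jpg", '
        '"label": "true", '
        '"identifier": "train-10-1-0"}')

example = json.loads(line)

# The identifier packs split, set_id, pair_id and sentence-id, joined by hyphens.
split, set_id, pair_id, sentence_id = example["identifier"].split("-")
print(split, set_id, pair_id, sentence_id, example["label"])
```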
HuggingFaceM4/NLVR2
[ "license:cc-by-4.0", "region:us" ]
2022-12-07T15:44:41+00:00
{"license": "cc-by-4.0"}
2022-12-21T15:45:19+00:00
[]
[]
TAGS #license-cc-by-4.0 #region-us
Each line includes one example, represented as a JSON object. The critical fields are: sentence: The natural language sentence describing the pair of images for this example.\ left_url: The URL of the left image in the pair.\ right_url: The URL of the right image in the pair.\ label: The label: true or false.\ identifier: The unique identifier for the image, in the format: split-set_id-pair_id-sentence-id. split is the split of the data (train, test, or development). set_id is the unique identifier of the original eight-image set used in the sentence-writing task. pair_id indicates which of the pairs in the set it corresponds to (and is between 0 and 3). sentence-id indicates which of the sentences is associated with this pair (and is either 0 or 1 -- each image pair is associated with at most two sentences).
[]
[ "TAGS\n#license-cc-by-4.0 #region-us \n" ]
50836aef3cb9e4b7a4b040ece9eabe1f71ca4f39
# Dataset Card for "testcocotrade" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
davanstrien/testcocotrade
[ "region:us" ]
2022-12-07T16:02:52+00:00
{"dataset_info": {"features": [{"name": "image_id", "dtype": "int64"}, {"name": "image", "dtype": "image"}, {"name": "width", "dtype": "int32"}, {"name": "height", "dtype": "int32"}, {"name": "objects", "list": [{"name": "category_id", "dtype": {"class_label": {"names": {"0": "Image", "1": "Main heading (CAPS)", "2": "Page header (TRADES)", "3": "Running heads", "4": "Section title", "5": "Text Box"}}}}, {"name": "image_id", "dtype": "string"}, {"name": "id", "dtype": "int64"}, {"name": "area", "dtype": "int64"}, {"name": "bbox", "sequence": "float32", "length": 4}, {"name": "segmentation", "list": {"list": "float32"}}, {"name": "iscrowd", "dtype": "bool"}, {"name": "ignore", "dtype": "int32"}]}], "splits": [{"name": "train", "num_bytes": 11022954.0, "num_examples": 6}], "download_size": 10923350, "dataset_size": 11022954.0}}
2022-12-07T16:03:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for "testcocotrade" More Information needed
[ "# Dataset Card for \"testcocotrade\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"testcocotrade\"\n\nMore Information needed" ]
ec1b0fb9b9b7741fb241f4379ff6d2be4d1fbf6c
# Dataset Card for [Dataset Name] ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary [More Information Needed] ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@parambharat](https://github.com/parambharat) for adding this dataset.
parambharat/tamil_asr_corpus
[ "task_categories:automatic-speech-recognition", "annotations_creators:found", "language_creators:found", "multilinguality:monolingual", "size_categories:100K<n<1M", "source_datasets:extended|common_voice", "source_datasets:extended|openslr", "language:ta", "license:cc-by-4.0", "region:us" ]
2022-12-07T16:36:05+00:00
{"annotations_creators": ["found"], "language_creators": ["found"], "language": ["ta"], "license": ["cc-by-4.0"], "multilinguality": ["monolingual"], "size_categories": ["100K<n<1M"], "source_datasets": ["extended|common_voice", "extended|openslr"], "task_categories": ["automatic-speech-recognition"], "task_ids": [], "pretty_name": "Tamil ASR Corpus", "tags": []}
2022-12-07T17:32:59+00:00
[]
[ "ta" ]
TAGS #task_categories-automatic-speech-recognition #annotations_creators-found #language_creators-found #multilinguality-monolingual #size_categories-100K<n<1M #source_datasets-extended|common_voice #source_datasets-extended|openslr #language-Tamil #license-cc-by-4.0 #region-us
# Dataset Card for [Dataset Name] ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: - Repository: - Paper: - Leaderboard: - Point of Contact: ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions Thanks to @parambharat for adding this dataset.
[ "# Dataset Card for [Dataset Name]", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @parambharat for adding this dataset." ]
[ "TAGS\n#task_categories-automatic-speech-recognition #annotations_creators-found #language_creators-found #multilinguality-monolingual #size_categories-100K<n<1M #source_datasets-extended|common_voice #source_datasets-extended|openslr #language-Tamil #license-cc-by-4.0 #region-us \n", "# Dataset Card for [Dataset Name]", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @parambharat for adding this dataset." ]
20b5dcbdbc4776c1412131c0b06b319eac97ef8b
# Dataset Card for NKJP1M – The manually annotated subcorpus of the National Corpus of Polish ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [NKJP1M](http://clip.ipipan.waw.pl/NationalCorpusOfPolish) - **Repository:** [NKJP1M-SGJP](http://download.sgjp.pl/morfeusz/current/) - **Paper:** [NKJP book](http://nkjp.pl/settings/papers/NKJP_ksiazka.pdf) - **Point of Contact:** mailto:[email protected] ### Dataset Summary This is the official dataset for NKJP1M – the 1-million token balanced subcorpus of the National Corpus of Polish (Narodowy Korpus Języka Polskiego) Besides the text (divided into paragraphs/samples and sentences) the set contains lemmas and morpho-syntactic tags for all tokens in the corpus. This release, known as NKJP1M-SGJP, corresponds to the version 1.2 of the corpus with later corrections and improvements. In particular the morpho-syntactic annotation has been aligned with the present version of Morfeusz2 SGJP morphological analyser (as of 2022.12.04). ### Supported Tasks and Leaderboards The main use of this resource lays in training models for lemmatisation and part of speech tagging of Polish. ### Languages Polish (monolingual) ## Dataset Structure ### Data Instances ``` {'nkjp_text': 'NKJP_1M_1102000002', 'nkjp_par': 'morph_1-p', 'nkjp_sent': 'morph_1.18-s', 'tokens': ['-', 'Nie', 'mam', 'pieniędzy', ',', 'da', 'mi', 'pani', 'wywiad', '?'], 'lemmas': ['-', 'nie', 'mieć', 'pieniądz', ',', 'dać', 'ja', 'pani', 'wywiad', '?'], 'cposes': [8, 11, 10, 9, 8, 10, 9, 9, 9, 8], 'poses': [19, 25, 12, 35, 19, 12, 28, 35, 35, 19], 'tags': [266, 464, 213, 923, 266, 218, 692, 988, 961, 266], 'nps': [False, False, False, False, True, False, False, False, False, True], 'nkjp_ids': ['morph_1.9-seg', 'morph_1.10-seg', 'morph_1.11-seg', 'morph_1.12-seg', 'morph_1.13-seg', 'morph_1.14-seg', 'morph_1.15-seg', 'morph_1.16-seg', 'morph_1.17-seg', 'morph_1.18-seg']} ``` ### Data Fields - `nkjp_text`, `nkjp_par`, `nkjp_sent` (strings): XML identifiers of the present text (document), paragraph and sentence in NKJP. (These allow to map the data point back to the source corpus and to identify paragraphs/samples.) - `tokens` (sequence of strings): tokens of the text defined as in NKJP. - `lemmas` (sequence of strings): lemmas corresponding to the tokens. - `tags` (sequence of labels): morpho-syntactic tags according to Morfeusz2 tagset (1019 distinct tags). 
- `poses` (sequence of labels): flexemic class (detailed part of speech, 40 classes) – the first element of the corresponding tag. - `cposes` (sequence of labels): coarse part of speech (13 classes): all verbal and deverbal flexemic classes get mapped to a `V`, nominal – `N`, adjectival – `A`, “strange” (abbreviations, alien elements, symbols, emojis…) – `X`, rest as in `poses`. - `nps` (sequence of booleans): `True` means that the corresponding token is not preceded by a space in the source text. - `nkjp_ids` (sequence of strings): XML identifiers of particular tokens in NKJP (probably an overkill). ### Data Splits | | Train | Validation | Test | | ----- | ------ | ----- | ---- | | sentences | 68943 | 7755 | 8964 | | tokens | 978368 | 112454 | 125059 | ## Dataset Creation ### Curation Rationale The National Corpus of Polish (NKJP) was envisioned as the reference corpus of contemporary Polish. The manually annotated subcorpus (NKJP1M) was thought of as the training data for various NLP tasks. ### Source Data NKJP is balanced with respect to Polish readership. The detailed rationale is described in Chapter 3 of the [NKJP book](http://nkjp.pl/settings/papers/NKJP_ksiazka.pdf) (roughly: 50% press, 30% books, 10% speech, 10% other). The corpus contains texts from the years 1945–2010 (with 80% of the text in the range 1990–2010). Only original Polish texts were gathered (no translations from other languages). The composition of NKJP1M follows this schema (see Chapter 5). ### Annotations The rules of morphosyntactic annotation used for NKJP are discussed in Chapter 6 of the [NKJP book](http://nkjp.pl/settings/papers/NKJP_ksiazka.pdf). Presently (2020), the corpus uses a common tagset with the morphological analyzer [Morfeusz 2](http://morfeusz.sgjp.pl/). #### Annotation process The texts were processed with Morfeusz and then the resulting annotations were manually disambiguated and validated/corrected. Each text sample was independently processed by two annotators. In case of annotation conflicts an adjudicator stepped in. ### Licensing Information ![Creative Commons License](https://i.creativecommons.org/l/by/4.0/80x15.png) This work is licensed under a [Creative Commons Attribution 4.0 International License](http://creativecommons.org/licenses/by/4.0/). ### Citation Information Info on the source corpus: [link](http://nkjp.pl/settings/papers/NKJP_ksiazka.pdf) ``` @Book{nkjp:12, editor = "Adam Przepiórkowski and Mirosław Bańko and Rafał L. Górski and Barbara Lewandowska-Tomaszczyk", title = "Narodowy Korpus Języka Polskiego", year = 2012, address = "Warszawa", pdf = "http://nkjp.pl/settings/papers/NKJP_ksiazka.pdf", publisher = "Wydawnictwo Naukowe PWN"} ``` Current annotation scheme: [link](https://jezyk-polski.pl/index.php/jp/article/view/72) ``` @article{ kie:etal:21, author = "Kieraś, Witold and Woliński, Marcin and Nitoń, Bartłomiej", doi = "https://doi.org/10.31286/JP.101.2.5", title = "Nowe wielowarstwowe znakowanie lingwistyczne zrównoważonego {N}arodowego {K}orpusu {J}ęzyka {P}olskiego", url = "https://jezyk-polski.pl/index.php/jp/article/view/72", journal = "Język Polski", number = "2", volume = "CI", year = "2021", pages = "59--70" } ``` <!-- ### Contributions Thanks to [@github-username](https://github.com/<github-username>) for adding this dataset. -->
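As a small, illustrative sketch only (assuming the dataset loads directly with the `datasets` library under the split names given above), the integer-coded tags can be decoded back to Morfeusz2 tag strings like this:

```python
from datasets import load_dataset

# Load the training split of NKJP1M.
ds = load_dataset("ipipan/nkjp1m", split="train")

# `tags` (like `poses` and `cposes`) is stored as integer class labels;
# the ClassLabel feature maps them back to tag strings.
tag_labels = ds.features["tags"].feature
example = ds[0]
for token, lemma, tag_id in zip(example["tokens"], example["lemmas"], example["tags"]):
    print(token, lemma, tag_labels.int2str(tag_id))
```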
ipipan/nkjp1m
[ "task_categories:token-classification", "task_ids:part-of-speech", "task_ids:lemmatization", "annotations_creators:expert-generated", "language_creators:expert-generated", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:original", "language:pl", "license:cc-by-4.0", "National Corpus of Polish", "Narodowy Korpus Języka Polskiego", "region:us" ]
2022-12-07T16:41:20+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["expert-generated"], "language": ["pl"], "license": ["cc-by-4.0"], "multilinguality": ["monolingual"], "size_categories": ["10K<n<100K"], "source_datasets": ["original"], "task_categories": ["token-classification"], "task_ids": ["part-of-speech", "lemmatization"], "pretty_name": "NKJP1M", "tags": ["National Corpus of Polish", "Narodowy Korpus J\u0119zyka Polskiego"], "dataset_info": {"features": [{"name": "nkjp_text", "dtype": "string"}, {"name": "nkjp_par", "dtype": "string"}, {"name": "nkjp_sent", "dtype": "string"}, {"name": "tokens", "sequence": "string"}, {"name": "lemmas", "sequence": "string"}, {"name": "cposes", "sequence": {"class_label": {"names": {"0": "A", "1": "Adv", "2": "Comp", "3": "Conj", "4": "Dig", "5": "Interj", "6": "N", "7": "Num", "8": "Part", "9": "Prep", "10": "Punct", "11": "V", "12": "X"}}}}, {"name": "poses", "sequence": {"class_label": {"names": {"0": "adj", "1": "adja", "2": "adjc", "3": "adjp", "4": "adv", "5": "aglt", "6": "bedzie", "7": "brev", "8": "comp", "9": "conj", "10": "depr", "11": "dig", "12": "fin", "13": "frag", "14": "ger", "15": "imps", "16": "impt", "17": "inf", "18": "interj", "19": "interp", "20": "num", "21": "numcomp", "22": "pact", "23": "pacta", "24": "pant", "25": "part", "26": "pcon", "27": "ppas", "28": "ppron12", "29": "ppron3", "30": "praet", "31": "pred", "32": "prep", "33": "romandig", "34": "siebie", "35": "subst", "36": "sym", "37": "winien", "38": "xxs", "39": "xxx"}}}}, {"name": "tags", "sequence": {"class_label": {"names": {"0": "adj:pl:acc:f:com", "1": "adj:pl:acc:f:pos", "2": "adj:pl:acc:f:sup", "3": "adj:pl:acc:m1:com", "4": "adj:pl:acc:m1:pos", "5": "adj:pl:acc:m1:sup", "6": "adj:pl:acc:m2:com", "7": "adj:pl:acc:m2:pos", "8": "adj:pl:acc:m2:sup", "9": "adj:pl:acc:m3:com", "10": "adj:pl:acc:m3:pos", "11": "adj:pl:acc:m3:sup", "12": "adj:pl:acc:n:com", "13": "adj:pl:acc:n:pos", "14": "adj:pl:acc:n:sup", "15": "adj:pl:dat:f:com", "16": "adj:pl:dat:f:pos", "17": "adj:pl:dat:f:sup", "18": "adj:pl:dat:m1:com", "19": "adj:pl:dat:m1:pos", "20": "adj:pl:dat:m1:sup", "21": "adj:pl:dat:m2:pos", "22": "adj:pl:dat:m3:com", "23": "adj:pl:dat:m3:pos", "24": "adj:pl:dat:n:pos", "25": "adj:pl:dat:n:sup", "26": "adj:pl:gen:f:com", "27": "adj:pl:gen:f:pos", "28": "adj:pl:gen:f:sup", "29": "adj:pl:gen:m1:com", "30": "adj:pl:gen:m1:pos", "31": "adj:pl:gen:m1:sup", "32": "adj:pl:gen:m2:com", "33": "adj:pl:gen:m2:pos", "34": "adj:pl:gen:m2:sup", "35": "adj:pl:gen:m3:com", "36": "adj:pl:gen:m3:pos", "37": "adj:pl:gen:m3:sup", "38": "adj:pl:gen:n:com", "39": "adj:pl:gen:n:pos", "40": "adj:pl:gen:n:sup", "41": "adj:pl:inst:f:com", "42": "adj:pl:inst:f:pos", "43": "adj:pl:inst:f:sup", "44": "adj:pl:inst:m1:com", "45": "adj:pl:inst:m1:pos", "46": "adj:pl:inst:m1:sup", "47": "adj:pl:inst:m2:pos", "48": "adj:pl:inst:m3:com", "49": "adj:pl:inst:m3:pos", "50": "adj:pl:inst:m3:sup", "51": "adj:pl:inst:n:com", "52": "adj:pl:inst:n:pos", "53": "adj:pl:inst:n:sup", "54": "adj:pl:loc:f:com", "55": "adj:pl:loc:f:pos", "56": "adj:pl:loc:f:sup", "57": "adj:pl:loc:m1:com", "58": "adj:pl:loc:m1:pos", "59": "adj:pl:loc:m1:sup", "60": "adj:pl:loc:m2:pos", "61": "adj:pl:loc:m3:com", "62": "adj:pl:loc:m3:pos", "63": "adj:pl:loc:m3:sup", "64": "adj:pl:loc:n:com", "65": "adj:pl:loc:n:pos", "66": "adj:pl:loc:n:sup", "67": "adj:pl:nom:f:com", "68": "adj:pl:nom:f:pos", "69": "adj:pl:nom:f:sup", "70": "adj:pl:nom:m1:com", "71": "adj:pl:nom:m1:pos", "72": "adj:pl:nom:m1:sup", "73": "adj:pl:nom:m2:com", 
"74": "adj:pl:nom:m2:pos", "75": "adj:pl:nom:m2:sup", "76": "adj:pl:nom:m3:com", "77": "adj:pl:nom:m3:pos", "78": "adj:pl:nom:m3:sup", "79": "adj:pl:nom:n:com", "80": "adj:pl:nom:n:pos", "81": "adj:pl:nom:n:sup", "82": "adj:pl:voc:f:pos", "83": "adj:pl:voc:m1:pos", "84": "adj:pl:voc:m2:pos", "85": "adj:pl:voc:n:pos", "86": "adj:sg:acc:f:com", "87": "adj:sg:acc:f:pos", "88": "adj:sg:acc:f:sup", "89": "adj:sg:acc:m1:com", "90": "adj:sg:acc:m1:pos", "91": "adj:sg:acc:m1:sup", "92": "adj:sg:acc:m2:com", "93": "adj:sg:acc:m2:pos", "94": "adj:sg:acc:m2:sup", "95": "adj:sg:acc:m3:com", "96": "adj:sg:acc:m3:pos", "97": "adj:sg:acc:m3:sup", "98": "adj:sg:acc:n:com", "99": "adj:sg:acc:n:pos", "100": "adj:sg:acc:n:sup", "101": "adj:sg:dat:f:com", "102": "adj:sg:dat:f:pos", "103": "adj:sg:dat:f:sup", "104": "adj:sg:dat:m1:com", "105": "adj:sg:dat:m1:pos", "106": "adj:sg:dat:m1:sup", "107": "adj:sg:dat:m2:pos", "108": "adj:sg:dat:m3:com", "109": "adj:sg:dat:m3:pos", "110": "adj:sg:dat:m3:sup", "111": "adj:sg:dat:n:com", "112": "adj:sg:dat:n:pos", "113": "adj:sg:dat:n:sup", "114": "adj:sg:gen:f:com", "115": "adj:sg:gen:f:pos", "116": "adj:sg:gen:f:sup", "117": "adj:sg:gen:m1:com", "118": "adj:sg:gen:m1:pos", "119": "adj:sg:gen:m1:sup", "120": "adj:sg:gen:m2:pos", "121": "adj:sg:gen:m2:sup", "122": "adj:sg:gen:m3:com", "123": "adj:sg:gen:m3:pos", "124": "adj:sg:gen:m3:sup", "125": "adj:sg:gen:n:com", "126": "adj:sg:gen:n:pos", "127": "adj:sg:gen:n:sup", "128": "adj:sg:inst:f:com", "129": "adj:sg:inst:f:pos", "130": "adj:sg:inst:f:sup", "131": "adj:sg:inst:m1:com", "132": "adj:sg:inst:m1:pos", "133": "adj:sg:inst:m1:sup", "134": "adj:sg:inst:m2:com", "135": "adj:sg:inst:m2:pos", "136": "adj:sg:inst:m2:sup", "137": "adj:sg:inst:m3:com", "138": "adj:sg:inst:m3:pos", "139": "adj:sg:inst:m3:sup", "140": "adj:sg:inst:n:com", "141": "adj:sg:inst:n:pos", "142": "adj:sg:inst:n:sup", "143": "adj:sg:loc:f:com", "144": "adj:sg:loc:f:pos", "145": "adj:sg:loc:f:sup", "146": "adj:sg:loc:m1:com", "147": "adj:sg:loc:m1:pos", "148": "adj:sg:loc:m1:sup", "149": "adj:sg:loc:m2:com", "150": "adj:sg:loc:m2:pos", "151": "adj:sg:loc:m3:com", "152": "adj:sg:loc:m3:pos", "153": "adj:sg:loc:m3:sup", "154": "adj:sg:loc:n:com", "155": "adj:sg:loc:n:pos", "156": "adj:sg:loc:n:sup", "157": "adj:sg:nom:f:com", "158": "adj:sg:nom:f:pos", "159": "adj:sg:nom:f:sup", "160": "adj:sg:nom:m1:com", "161": "adj:sg:nom:m1:pos", "162": "adj:sg:nom:m1:sup", "163": "adj:sg:nom:m2:com", "164": "adj:sg:nom:m2:pos", "165": "adj:sg:nom:m2:sup", "166": "adj:sg:nom:m3:com", "167": "adj:sg:nom:m3:pos", "168": "adj:sg:nom:m3:sup", "169": "adj:sg:nom:n:com", "170": "adj:sg:nom:n:pos", "171": "adj:sg:nom:n:sup", "172": "adj:sg:voc:f:pos", "173": "adj:sg:voc:f:sup", "174": "adj:sg:voc:m1:pos", "175": "adj:sg:voc:m1:sup", "176": "adj:sg:voc:m2:pos", "177": "adj:sg:voc:m3:pos", "178": "adj:sg:voc:n:pos", "179": "adja", "180": "adjc", "181": "adjp:dat", "182": "adjp:gen", "183": "adv", "184": "adv:com", "185": "adv:pos", "186": "adv:sup", "187": "aglt:pl:pri:imperf:nwok", "188": "aglt:pl:sec:imperf:nwok", "189": "aglt:sg:pri:imperf:nwok", "190": "aglt:sg:pri:imperf:wok", "191": "aglt:sg:sec:imperf:nwok", "192": "aglt:sg:sec:imperf:wok", "193": "bedzie:pl:pri:imperf", "194": "bedzie:pl:sec:imperf", "195": "bedzie:pl:ter:imperf", "196": "bedzie:sg:pri:imperf", "197": "bedzie:sg:sec:imperf", "198": "bedzie:sg:ter:imperf", "199": "brev:npun", "200": "brev:pun", "201": "comp", "202": "conj", "203": "depr:pl:acc:m2", "204": "depr:pl:nom:m2", "205": "depr:pl:voc:m2", 
"206": "dig", "207": "fin:pl:pri:imperf", "208": "fin:pl:pri:perf", "209": "fin:pl:sec:imperf", "210": "fin:pl:sec:perf", "211": "fin:pl:ter:imperf", "212": "fin:pl:ter:perf", "213": "fin:sg:pri:imperf", "214": "fin:sg:pri:perf", "215": "fin:sg:sec:imperf", "216": "fin:sg:sec:perf", "217": "fin:sg:ter:imperf", "218": "fin:sg:ter:perf", "219": "frag", "220": "ger:pl:acc:n:imperf:aff", "221": "ger:pl:acc:n:perf:aff", "222": "ger:pl:dat:n:perf:aff", "223": "ger:pl:gen:n:imperf:aff", "224": "ger:pl:gen:n:perf:aff", "225": "ger:pl:inst:n:imperf:aff", "226": "ger:pl:inst:n:perf:aff", "227": "ger:pl:loc:n:imperf:aff", "228": "ger:pl:loc:n:perf:aff", "229": "ger:pl:nom:n:imperf:aff", "230": "ger:pl:nom:n:perf:aff", "231": "ger:sg:acc:n:imperf:aff", "232": "ger:sg:acc:n:imperf:neg", "233": "ger:sg:acc:n:perf:aff", "234": "ger:sg:acc:n:perf:neg", "235": "ger:sg:dat:n:imperf:aff", "236": "ger:sg:dat:n:perf:aff", "237": "ger:sg:dat:n:perf:neg", "238": "ger:sg:gen:n:imperf:aff", "239": "ger:sg:gen:n:imperf:neg", "240": "ger:sg:gen:n:perf:aff", "241": "ger:sg:gen:n:perf:neg", "242": "ger:sg:inst:n:imperf:aff", "243": "ger:sg:inst:n:imperf:neg", "244": "ger:sg:inst:n:perf:aff", "245": "ger:sg:inst:n:perf:neg", "246": "ger:sg:loc:n:imperf:aff", "247": "ger:sg:loc:n:imperf:neg", "248": "ger:sg:loc:n:perf:aff", "249": "ger:sg:loc:n:perf:neg", "250": "ger:sg:nom:n:imperf:aff", "251": "ger:sg:nom:n:imperf:neg", "252": "ger:sg:nom:n:perf:aff", "253": "ger:sg:nom:n:perf:neg", "254": "imps:imperf", "255": "imps:perf", "256": "impt:pl:pri:imperf", "257": "impt:pl:pri:perf", "258": "impt:pl:sec:imperf", "259": "impt:pl:sec:perf", "260": "impt:sg:pri:imperf", "261": "impt:sg:sec:imperf", "262": "impt:sg:sec:perf", "263": "inf:imperf", "264": "inf:perf", "265": "interj", "266": "interp", "267": "num:pl:acc:f:congr:ncol", "268": "num:pl:acc:f:rec", "269": "num:pl:acc:f:rec:ncol", "270": "num:pl:acc:m1:rec", "271": "num:pl:acc:m1:rec:col", "272": "num:pl:acc:m1:rec:ncol", "273": "num:pl:acc:m2:congr:ncol", "274": "num:pl:acc:m2:rec", "275": "num:pl:acc:m2:rec:ncol", "276": "num:pl:acc:m3:congr", "277": "num:pl:acc:m3:congr:ncol", "278": "num:pl:acc:m3:rec", "279": "num:pl:acc:m3:rec:ncol", "280": "num:pl:acc:n:congr:ncol", "281": "num:pl:acc:n:rec", "282": "num:pl:acc:n:rec:col", "283": "num:pl:acc:n:rec:ncol", "284": "num:pl:dat:f:congr", "285": "num:pl:dat:f:congr:ncol", "286": "num:pl:dat:m1:congr", "287": "num:pl:dat:m1:congr:col", "288": "num:pl:dat:m1:congr:ncol", "289": "num:pl:dat:m2:congr", "290": "num:pl:dat:m3:congr:ncol", "291": "num:pl:dat:n:congr", "292": "num:pl:dat:n:congr:ncol", "293": "num:pl:gen:f:congr", "294": "num:pl:gen:f:congr:ncol", "295": "num:pl:gen:f:rec", "296": "num:pl:gen:f:rec:ncol", "297": "num:pl:gen:m1:congr", "298": "num:pl:gen:m1:congr:ncol", "299": "num:pl:gen:m1:rec", "300": "num:pl:gen:m1:rec:col", "301": "num:pl:gen:m2:congr", "302": "num:pl:gen:m2:congr:ncol", "303": "num:pl:gen:m2:rec", "304": "num:pl:gen:m3:congr", "305": "num:pl:gen:m3:congr:ncol", "306": "num:pl:gen:m3:rec", "307": "num:pl:gen:m3:rec:ncol", "308": "num:pl:gen:n:congr", "309": "num:pl:gen:n:congr:ncol", "310": "num:pl:gen:n:rec", "311": "num:pl:gen:n:rec:col", "312": "num:pl:inst:f:congr", "313": "num:pl:inst:f:congr:ncol", "314": "num:pl:inst:m1:congr", "315": "num:pl:inst:m1:congr:ncol", "316": "num:pl:inst:m1:rec:col", "317": "num:pl:inst:m2:congr", "318": "num:pl:inst:m2:congr:ncol", "319": "num:pl:inst:m3:congr", "320": "num:pl:inst:m3:congr:ncol", "321": "num:pl:inst:n:congr", "322": 
"num:pl:inst:n:congr:ncol", "323": "num:pl:inst:n:rec:col", "324": "num:pl:loc:f:congr", "325": "num:pl:loc:f:congr:ncol", "326": "num:pl:loc:m1:congr", "327": "num:pl:loc:m1:congr:ncol", "328": "num:pl:loc:m2:congr", "329": "num:pl:loc:m2:congr:ncol", "330": "num:pl:loc:m3:congr", "331": "num:pl:loc:m3:congr:ncol", "332": "num:pl:loc:n:congr", "333": "num:pl:loc:n:congr:ncol", "334": "num:pl:nom:f:congr:ncol", "335": "num:pl:nom:f:rec", "336": "num:pl:nom:f:rec:ncol", "337": "num:pl:nom:m1:congr:ncol", "338": "num:pl:nom:m1:rec", "339": "num:pl:nom:m1:rec:col", "340": "num:pl:nom:m1:rec:ncol", "341": "num:pl:nom:m2:congr:ncol", "342": "num:pl:nom:m2:rec", "343": "num:pl:nom:m2:rec:ncol", "344": "num:pl:nom:m3:congr:ncol", "345": "num:pl:nom:m3:rec", "346": "num:pl:nom:m3:rec:ncol", "347": "num:pl:nom:n:congr", "348": "num:pl:nom:n:congr:ncol", "349": "num:pl:nom:n:rec", "350": "num:pl:nom:n:rec:col", "351": "num:pl:nom:n:rec:ncol", "352": "num:sg:acc:f:rec", "353": "num:sg:acc:f:rec:ncol", "354": "num:sg:acc:m1:rec:ncol", "355": "num:sg:acc:m2:rec", "356": "num:sg:acc:m3:rec", "357": "num:sg:acc:m3:rec:ncol", "358": "num:sg:acc:n:rec", "359": "num:sg:gen:f:rec", "360": "num:sg:gen:m3:rec", "361": "num:sg:gen:n:rec", "362": "num:sg:inst:m3:rec", "363": "num:sg:loc:f:rec", "364": "num:sg:loc:m3:congr", "365": "num:sg:loc:m3:rec", "366": "num:sg:nom:f:rec", "367": "num:sg:nom:m2:rec", "368": "num:sg:nom:m3:rec", "369": "num:sg:nom:m3:rec:ncol", "370": "num:sg:nom:n:rec", "371": "numcomp", "372": "pact:pl:acc:f:imperf:aff", "373": "pact:pl:acc:f:imperf:neg", "374": "pact:pl:acc:m1:imperf:aff", "375": "pact:pl:acc:m2:imperf:aff", "376": "pact:pl:acc:m3:imperf:aff", "377": "pact:pl:acc:m3:imperf:neg", "378": "pact:pl:acc:n:imperf:aff", "379": "pact:pl:acc:n:imperf:neg", "380": "pact:pl:dat:f:imperf:aff", "381": "pact:pl:dat:m1:imperf:aff", "382": "pact:pl:dat:m2:imperf:aff", "383": "pact:pl:dat:m3:imperf:aff", "384": "pact:pl:dat:n:imperf:aff", "385": "pact:pl:gen:f:imperf:aff", "386": "pact:pl:gen:f:imperf:neg", "387": "pact:pl:gen:m1:imperf:aff", "388": "pact:pl:gen:m1:imperf:neg", "389": "pact:pl:gen:m2:imperf:aff", "390": "pact:pl:gen:m3:imperf:aff", "391": "pact:pl:gen:m3:imperf:neg", "392": "pact:pl:gen:n:imperf:aff", "393": "pact:pl:inst:f:imperf:aff", "394": "pact:pl:inst:m1:imperf:aff", "395": "pact:pl:inst:m2:imperf:aff", "396": "pact:pl:inst:m3:imperf:aff", "397": "pact:pl:inst:m3:imperf:neg", "398": "pact:pl:inst:n:imperf:aff", "399": "pact:pl:inst:n:imperf:neg", "400": "pact:pl:loc:f:imperf:aff", "401": "pact:pl:loc:m1:imperf:aff", "402": "pact:pl:loc:m3:imperf:aff", "403": "pact:pl:loc:m3:imperf:neg", "404": "pact:pl:loc:n:imperf:aff", "405": "pact:pl:loc:n:imperf:neg", "406": "pact:pl:nom:f:imperf:aff", "407": "pact:pl:nom:f:imperf:neg", "408": "pact:pl:nom:m1:imperf:aff", "409": "pact:pl:nom:m2:imperf:aff", "410": "pact:pl:nom:m3:imperf:aff", "411": "pact:pl:nom:n:imperf:aff", "412": "pact:pl:nom:n:imperf:neg", "413": "pact:pl:voc:f:imperf:aff", "414": "pact:sg:acc:f:imperf:aff", "415": "pact:sg:acc:f:imperf:neg", "416": "pact:sg:acc:m1:imperf:aff", "417": "pact:sg:acc:m2:imperf:aff", "418": "pact:sg:acc:m3:imperf:aff", "419": "pact:sg:acc:n:imperf:aff", "420": "pact:sg:acc:n:imperf:neg", "421": "pact:sg:dat:f:imperf:aff", "422": "pact:sg:dat:m1:imperf:aff", "423": "pact:sg:dat:m2:imperf:aff", "424": "pact:sg:dat:m3:imperf:aff", "425": "pact:sg:dat:n:imperf:aff", "426": "pact:sg:gen:f:imperf:aff", "427": "pact:sg:gen:f:imperf:neg", "428": "pact:sg:gen:m1:imperf:aff", "429": 
"pact:sg:gen:m1:imperf:neg", "430": "pact:sg:gen:m2:imperf:aff", "431": "pact:sg:gen:m3:imperf:aff", "432": "pact:sg:gen:m3:imperf:neg", "433": "pact:sg:gen:n:imperf:aff", "434": "pact:sg:gen:n:imperf:neg", "435": "pact:sg:inst:f:imperf:aff", "436": "pact:sg:inst:f:imperf:neg", "437": "pact:sg:inst:m1:imperf:aff", "438": "pact:sg:inst:m1:imperf:neg", "439": "pact:sg:inst:m2:imperf:aff", "440": "pact:sg:inst:m2:imperf:neg", "441": "pact:sg:inst:m3:imperf:aff", "442": "pact:sg:inst:m3:imperf:neg", "443": "pact:sg:inst:n:imperf:aff", "444": "pact:sg:loc:f:imperf:aff", "445": "pact:sg:loc:f:imperf:neg", "446": "pact:sg:loc:m1:imperf:aff", "447": "pact:sg:loc:m2:imperf:aff", "448": "pact:sg:loc:m3:imperf:aff", "449": "pact:sg:loc:m3:imperf:neg", "450": "pact:sg:loc:n:imperf:aff", "451": "pact:sg:loc:n:imperf:neg", "452": "pact:sg:nom:f:imperf:aff", "453": "pact:sg:nom:f:imperf:neg", "454": "pact:sg:nom:m1:imperf:aff", "455": "pact:sg:nom:m1:imperf:neg", "456": "pact:sg:nom:m2:imperf:aff", "457": "pact:sg:nom:m3:imperf:aff", "458": "pact:sg:nom:m3:imperf:neg", "459": "pact:sg:nom:n:imperf:aff", "460": "pact:sg:nom:n:imperf:neg", "461": "pact:sg:voc:m1:imperf:aff", "462": "pacta", "463": "pant:perf", "464": "part", "465": "part:nwok", "466": "part:wok", "467": "pcon:imperf", "468": "ppas:pl:acc:f:imperf:aff", "469": "ppas:pl:acc:f:perf:aff", "470": "ppas:pl:acc:f:perf:neg", "471": "ppas:pl:acc:m1:imperf:aff", "472": "ppas:pl:acc:m1:imperf:neg", "473": "ppas:pl:acc:m1:perf:aff", "474": "ppas:pl:acc:m1:perf:neg", "475": "ppas:pl:acc:m2:imperf:aff", "476": "ppas:pl:acc:m2:perf:aff", "477": "ppas:pl:acc:m3:imperf:aff", "478": "ppas:pl:acc:m3:perf:aff", "479": "ppas:pl:acc:m3:perf:neg", "480": "ppas:pl:acc:n:imperf:aff", "481": "ppas:pl:acc:n:imperf:neg", "482": "ppas:pl:acc:n:perf:aff", "483": "ppas:pl:acc:n:perf:neg", "484": "ppas:pl:dat:f:imperf:aff", "485": "ppas:pl:dat:f:perf:aff", "486": "ppas:pl:dat:f:perf:neg", "487": "ppas:pl:dat:m1:imperf:aff", "488": "ppas:pl:dat:m1:perf:aff", "489": "ppas:pl:dat:m1:perf:neg", "490": "ppas:pl:dat:m2:imperf:aff", "491": "ppas:pl:dat:m3:imperf:aff", "492": "ppas:pl:dat:m3:perf:aff", "493": "ppas:pl:dat:n:imperf:aff", "494": "ppas:pl:dat:n:perf:aff", "495": "ppas:pl:gen:f:imperf:aff", "496": "ppas:pl:gen:f:imperf:neg", "497": "ppas:pl:gen:f:perf:aff", "498": "ppas:pl:gen:f:perf:neg", "499": "ppas:pl:gen:m1:imperf:aff", "500": "ppas:pl:gen:m1:imperf:neg", "501": "ppas:pl:gen:m1:perf:aff", "502": "ppas:pl:gen:m1:perf:neg", "503": "ppas:pl:gen:m2:imperf:aff", "504": "ppas:pl:gen:m2:perf:aff", "505": "ppas:pl:gen:m3:imperf:aff", "506": "ppas:pl:gen:m3:imperf:neg", "507": "ppas:pl:gen:m3:perf:aff", "508": "ppas:pl:gen:m3:perf:neg", "509": "ppas:pl:gen:n:imperf:aff", "510": "ppas:pl:gen:n:perf:aff", "511": "ppas:pl:gen:n:perf:neg", "512": "ppas:pl:inst:f:imperf:aff", "513": "ppas:pl:inst:f:perf:aff", "514": "ppas:pl:inst:m1:imperf:aff", "515": "ppas:pl:inst:m1:perf:aff", "516": "ppas:pl:inst:m2:perf:aff", "517": "ppas:pl:inst:m3:imperf:aff", "518": "ppas:pl:inst:m3:perf:aff", "519": "ppas:pl:inst:n:imperf:aff", "520": "ppas:pl:inst:n:perf:aff", "521": "ppas:pl:loc:f:imperf:aff", "522": "ppas:pl:loc:f:imperf:neg", "523": "ppas:pl:loc:f:perf:aff", "524": "ppas:pl:loc:f:perf:neg", "525": "ppas:pl:loc:m1:imperf:aff", "526": "ppas:pl:loc:m1:perf:aff", "527": "ppas:pl:loc:m2:imperf:aff", "528": "ppas:pl:loc:m3:imperf:aff", "529": "ppas:pl:loc:m3:perf:aff", "530": "ppas:pl:loc:m3:perf:neg", "531": "ppas:pl:loc:n:imperf:aff", "532": "ppas:pl:loc:n:perf:aff", "533": 
"ppas:pl:loc:n:perf:neg", "534": "ppas:pl:nom:f:imperf:aff", "535": "ppas:pl:nom:f:imperf:neg", "536": "ppas:pl:nom:f:perf:aff", "537": "ppas:pl:nom:f:perf:neg", "538": "ppas:pl:nom:m1:imperf:aff", "539": "ppas:pl:nom:m1:imperf:neg", "540": "ppas:pl:nom:m1:perf:aff", "541": "ppas:pl:nom:m1:perf:neg", "542": "ppas:pl:nom:m2:imperf:aff", "543": "ppas:pl:nom:m2:perf:aff", "544": "ppas:pl:nom:m3:imperf:aff", "545": "ppas:pl:nom:m3:imperf:neg", "546": "ppas:pl:nom:m3:perf:aff", "547": "ppas:pl:nom:m3:perf:neg", "548": "ppas:pl:nom:n:imperf:aff", "549": "ppas:pl:nom:n:perf:aff", "550": "ppas:pl:nom:n:perf:neg", "551": "ppas:pl:voc:f:imperf:aff", "552": "ppas:sg:acc:f:imperf:aff", "553": "ppas:sg:acc:f:imperf:neg", "554": "ppas:sg:acc:f:perf:aff", "555": "ppas:sg:acc:f:perf:neg", "556": "ppas:sg:acc:m1:imperf:aff", "557": "ppas:sg:acc:m1:perf:aff", "558": "ppas:sg:acc:m2:imperf:aff", "559": "ppas:sg:acc:m2:perf:aff", "560": "ppas:sg:acc:m3:imperf:aff", "561": "ppas:sg:acc:m3:imperf:neg", "562": "ppas:sg:acc:m3:perf:aff", "563": "ppas:sg:acc:m3:perf:neg", "564": "ppas:sg:acc:n:imperf:aff", "565": "ppas:sg:acc:n:perf:aff", "566": "ppas:sg:acc:n:perf:neg", "567": "ppas:sg:dat:f:imperf:aff", "568": "ppas:sg:dat:f:imperf:neg", "569": "ppas:sg:dat:f:perf:aff", "570": "ppas:sg:dat:f:perf:neg", "571": "ppas:sg:dat:m1:imperf:aff", "572": "ppas:sg:dat:m1:perf:aff", "573": "ppas:sg:dat:m2:perf:aff", "574": "ppas:sg:dat:m3:imperf:aff", "575": "ppas:sg:dat:m3:perf:aff", "576": "ppas:sg:dat:n:perf:aff", "577": "ppas:sg:gen:f:imperf:aff", "578": "ppas:sg:gen:f:imperf:neg", "579": "ppas:sg:gen:f:perf:aff", "580": "ppas:sg:gen:f:perf:neg", "581": "ppas:sg:gen:m1:imperf:aff", "582": "ppas:sg:gen:m1:perf:aff", "583": "ppas:sg:gen:m1:perf:neg", "584": "ppas:sg:gen:m2:imperf:aff", "585": "ppas:sg:gen:m2:perf:aff", "586": "ppas:sg:gen:m3:imperf:aff", "587": "ppas:sg:gen:m3:imperf:neg", "588": "ppas:sg:gen:m3:perf:aff", "589": "ppas:sg:gen:m3:perf:neg", "590": "ppas:sg:gen:n:imperf:aff", "591": "ppas:sg:gen:n:imperf:neg", "592": "ppas:sg:gen:n:perf:aff", "593": "ppas:sg:gen:n:perf:neg", "594": "ppas:sg:inst:f:imperf:aff", "595": "ppas:sg:inst:f:imperf:neg", "596": "ppas:sg:inst:f:perf:aff", "597": "ppas:sg:inst:f:perf:neg", "598": "ppas:sg:inst:m1:imperf:aff", "599": "ppas:sg:inst:m1:imperf:neg", "600": "ppas:sg:inst:m1:perf:aff", "601": "ppas:sg:inst:m1:perf:neg", "602": "ppas:sg:inst:m2:imperf:aff", "603": "ppas:sg:inst:m2:perf:aff", "604": "ppas:sg:inst:m3:imperf:aff", "605": "ppas:sg:inst:m3:imperf:neg", "606": "ppas:sg:inst:m3:perf:aff", "607": "ppas:sg:inst:m3:perf:neg", "608": "ppas:sg:inst:n:imperf:aff", "609": "ppas:sg:inst:n:imperf:neg", "610": "ppas:sg:inst:n:perf:aff", "611": "ppas:sg:inst:n:perf:neg", "612": "ppas:sg:loc:f:imperf:aff", "613": "ppas:sg:loc:f:perf:aff", "614": "ppas:sg:loc:f:perf:neg", "615": "ppas:sg:loc:m1:imperf:aff", "616": "ppas:sg:loc:m1:perf:aff", "617": "ppas:sg:loc:m2:imperf:aff", "618": "ppas:sg:loc:m3:imperf:aff", "619": "ppas:sg:loc:m3:imperf:neg", "620": "ppas:sg:loc:m3:perf:aff", "621": "ppas:sg:loc:m3:perf:neg", "622": "ppas:sg:loc:n:imperf:aff", "623": "ppas:sg:loc:n:perf:aff", "624": "ppas:sg:loc:n:perf:neg", "625": "ppas:sg:nom:f:imperf:aff", "626": "ppas:sg:nom:f:imperf:neg", "627": "ppas:sg:nom:f:perf:aff", "628": "ppas:sg:nom:f:perf:neg", "629": "ppas:sg:nom:m1:imperf:aff", "630": "ppas:sg:nom:m1:imperf:neg", "631": "ppas:sg:nom:m1:perf:aff", "632": "ppas:sg:nom:m1:perf:neg", "633": "ppas:sg:nom:m2:imperf:aff", "634": "ppas:sg:nom:m2:perf:aff", "635": 
"ppas:sg:nom:m3:imperf:aff", "636": "ppas:sg:nom:m3:imperf:neg", "637": "ppas:sg:nom:m3:perf:aff", "638": "ppas:sg:nom:m3:perf:neg", "639": "ppas:sg:nom:n:imperf:aff", "640": "ppas:sg:nom:n:imperf:neg", "641": "ppas:sg:nom:n:perf:aff", "642": "ppas:sg:nom:n:perf:neg", "643": "ppas:sg:voc:m1:perf:aff", "644": "ppas:sg:voc:m2:imperf:aff", "645": "ppas:sg:voc:m3:perf:aff", "646": "ppron12:pl:acc:f:pri", "647": "ppron12:pl:acc:f:sec", "648": "ppron12:pl:acc:m1:pri", "649": "ppron12:pl:acc:m1:sec", "650": "ppron12:pl:acc:m2:sec", "651": "ppron12:pl:acc:n:sec", "652": "ppron12:pl:dat:f:pri", "653": "ppron12:pl:dat:f:sec", "654": "ppron12:pl:dat:m1:pri", "655": "ppron12:pl:dat:m1:sec", "656": "ppron12:pl:dat:m3:sec", "657": "ppron12:pl:gen:f:pri", "658": "ppron12:pl:gen:f:sec", "659": "ppron12:pl:gen:m1:pri", "660": "ppron12:pl:gen:m1:sec", "661": "ppron12:pl:gen:m2:pri", "662": "ppron12:pl:inst:f:pri", "663": "ppron12:pl:inst:m1:pri", "664": "ppron12:pl:inst:m1:sec", "665": "ppron12:pl:inst:n:pri", "666": "ppron12:pl:loc:f:sec", "667": "ppron12:pl:loc:m1:pri", "668": "ppron12:pl:loc:m1:sec", "669": "ppron12:pl:loc:m3:sec", "670": "ppron12:pl:nom:f:pri", "671": "ppron12:pl:nom:f:sec", "672": "ppron12:pl:nom:m1:pri", "673": "ppron12:pl:nom:m1:sec", "674": "ppron12:pl:nom:m2:pri", "675": "ppron12:pl:nom:n:sec", "676": "ppron12:pl:voc:m1:sec", "677": "ppron12:pl:voc:m2:sec", "678": "ppron12:sg:acc:f:pri:akc", "679": "ppron12:sg:acc:f:sec:akc", "680": "ppron12:sg:acc:f:sec:nakc", "681": "ppron12:sg:acc:m1:pri:akc", "682": "ppron12:sg:acc:m1:pri:nakc", "683": "ppron12:sg:acc:m1:sec:akc", "684": "ppron12:sg:acc:m1:sec:nakc", "685": "ppron12:sg:acc:m2:pri:akc", "686": "ppron12:sg:acc:m2:sec:nakc", "687": "ppron12:sg:acc:m3:pri:akc", "688": "ppron12:sg:acc:m3:sec:nakc", "689": "ppron12:sg:acc:n:pri:akc", "690": "ppron12:sg:acc:n:sec:nakc", "691": "ppron12:sg:dat:f:pri:akc", "692": "ppron12:sg:dat:f:pri:nakc", "693": "ppron12:sg:dat:f:sec:akc", "694": "ppron12:sg:dat:f:sec:nakc", "695": "ppron12:sg:dat:m1:pri:akc", "696": "ppron12:sg:dat:m1:pri:nakc", "697": "ppron12:sg:dat:m1:sec:akc", "698": "ppron12:sg:dat:m1:sec:nakc", "699": "ppron12:sg:dat:m2:pri:nakc", "700": "ppron12:sg:dat:m2:sec:akc", "701": "ppron12:sg:dat:m2:sec:nakc", "702": "ppron12:sg:gen:f:pri:akc", "703": "ppron12:sg:gen:f:sec:akc", "704": "ppron12:sg:gen:f:sec:nakc", "705": "ppron12:sg:gen:m1:pri:akc", "706": "ppron12:sg:gen:m1:sec:akc", "707": "ppron12:sg:gen:m1:sec:nakc", "708": "ppron12:sg:gen:m2:pri:akc", "709": "ppron12:sg:gen:m2:sec:akc", "710": "ppron12:sg:gen:m2:sec:nakc", "711": "ppron12:sg:gen:n:pri:akc", "712": "ppron12:sg:inst:f:pri", "713": "ppron12:sg:inst:f:sec", "714": "ppron12:sg:inst:m1:pri", "715": "ppron12:sg:inst:m1:pri:nakc", "716": "ppron12:sg:inst:m1:sec", "717": "ppron12:sg:inst:n:sec", "718": "ppron12:sg:loc:f:pri", "719": "ppron12:sg:loc:f:sec", "720": "ppron12:sg:loc:m1:pri", "721": "ppron12:sg:loc:m1:sec", "722": "ppron12:sg:loc:m3:pri", "723": "ppron12:sg:nom:f:pri", "724": "ppron12:sg:nom:f:sec", "725": "ppron12:sg:nom:m1:pri", "726": "ppron12:sg:nom:m1:pri:nakc", "727": "ppron12:sg:nom:m1:sec", "728": "ppron12:sg:nom:m2:pri", "729": "ppron12:sg:nom:m2:sec", "730": "ppron12:sg:nom:m3:pri", "731": "ppron12:sg:nom:m3:sec", "732": "ppron12:sg:nom:n:sec", "733": "ppron12:sg:voc:f:sec", "734": "ppron12:sg:voc:m1:sec", "735": "ppron12:sg:voc:m2:sec", "736": "ppron12:sg:voc:n:sec", "737": "ppron3:pl:acc:f:ter:akc:npraep", "738": "ppron3:pl:acc:f:ter:akc:praep", "739": "ppron3:pl:acc:m1:ter:akc:npraep", "740": 
"ppron3:pl:acc:m1:ter:akc:praep", "741": "ppron3:pl:acc:m2:ter:akc:npraep", "742": "ppron3:pl:acc:m2:ter:akc:praep", "743": "ppron3:pl:acc:m3:ter:akc:npraep", "744": "ppron3:pl:acc:m3:ter:akc:praep", "745": "ppron3:pl:acc:n:ter:akc:npraep", "746": "ppron3:pl:acc:n:ter:akc:praep", "747": "ppron3:pl:dat:f:ter:akc:npraep", "748": "ppron3:pl:dat:f:ter:akc:praep", "749": "ppron3:pl:dat:m1:ter:akc:npraep", "750": "ppron3:pl:dat:m1:ter:akc:praep", "751": "ppron3:pl:dat:m2:ter:akc:npraep", "752": "ppron3:pl:dat:m3:ter:akc:npraep", "753": "ppron3:pl:dat:m3:ter:akc:praep", "754": "ppron3:pl:dat:n:ter:akc:npraep", "755": "ppron3:pl:gen:f:ter:akc:npraep", "756": "ppron3:pl:gen:f:ter:akc:praep", "757": "ppron3:pl:gen:m1:ter:akc:npraep", "758": "ppron3:pl:gen:m1:ter:akc:praep", "759": "ppron3:pl:gen:m2:ter:akc:npraep", "760": "ppron3:pl:gen:m2:ter:akc:praep", "761": "ppron3:pl:gen:m3:ter:akc:npraep", "762": "ppron3:pl:gen:m3:ter:akc:praep", "763": "ppron3:pl:gen:n:ter:akc:npraep", "764": "ppron3:pl:gen:n:ter:akc:praep", "765": "ppron3:pl:inst:f:ter:akc:npraep", "766": "ppron3:pl:inst:f:ter:akc:praep", "767": "ppron3:pl:inst:m1:ter:akc:npraep", "768": "ppron3:pl:inst:m1:ter:akc:praep", "769": "ppron3:pl:inst:m2:ter:akc:npraep", "770": "ppron3:pl:inst:m2:ter:akc:praep", "771": "ppron3:pl:inst:m3:ter:akc:npraep", "772": "ppron3:pl:inst:m3:ter:akc:praep", "773": "ppron3:pl:inst:n:ter:akc:npraep", "774": "ppron3:pl:inst:n:ter:akc:praep", "775": "ppron3:pl:loc:f:ter:akc:praep", "776": "ppron3:pl:loc:m1:ter:akc:praep", "777": "ppron3:pl:loc:m2:ter:akc:praep", "778": "ppron3:pl:loc:m3:ter:akc:praep", "779": "ppron3:pl:loc:n:ter:akc:praep", "780": "ppron3:pl:nom:f:ter:akc:npraep", "781": "ppron3:pl:nom:f:ter:nakc:npraep", "782": "ppron3:pl:nom:m1:ter:akc:npraep", "783": "ppron3:pl:nom:m2:ter:akc:npraep", "784": "ppron3:pl:nom:m3:ter:akc:npraep", "785": "ppron3:pl:nom:n:ter:akc:npraep", "786": "ppron3:sg:acc:f:ter:akc:npraep", "787": "ppron3:sg:acc:f:ter:akc:praep", "788": "ppron3:sg:acc:m1:ter:akc:npraep", "789": "ppron3:sg:acc:m1:ter:akc:praep", "790": "ppron3:sg:acc:m1:ter:nakc:npraep", "791": "ppron3:sg:acc:m1:ter:nakc:praep", "792": "ppron3:sg:acc:m2:ter:akc:praep", "793": "ppron3:sg:acc:m2:ter:nakc:npraep", "794": "ppron3:sg:acc:m2:ter:nakc:praep", "795": "ppron3:sg:acc:m3:ter:akc:npraep", "796": "ppron3:sg:acc:m3:ter:akc:praep", "797": "ppron3:sg:acc:m3:ter:nakc:npraep", "798": "ppron3:sg:acc:m3:ter:nakc:praep", "799": "ppron3:sg:acc:n:ter:akc:npraep", "800": "ppron3:sg:acc:n:ter:akc:praep", "801": "ppron3:sg:dat:f:ter:akc:npraep", "802": "ppron3:sg:dat:f:ter:akc:praep", "803": "ppron3:sg:dat:m1:ter:akc:npraep", "804": "ppron3:sg:dat:m1:ter:akc:praep", "805": "ppron3:sg:dat:m1:ter:nakc:npraep", "806": "ppron3:sg:dat:m2:ter:akc:npraep", "807": "ppron3:sg:dat:m2:ter:nakc:npraep", "808": "ppron3:sg:dat:m3:ter:akc:npraep", "809": "ppron3:sg:dat:m3:ter:akc:praep", "810": "ppron3:sg:dat:m3:ter:nakc:npraep", "811": "ppron3:sg:dat:n:ter:akc:npraep", "812": "ppron3:sg:dat:n:ter:akc:praep", "813": "ppron3:sg:dat:n:ter:nakc:npraep", "814": "ppron3:sg:gen:f:ter:akc:npraep", "815": "ppron3:sg:gen:f:ter:akc:praep", "816": "ppron3:sg:gen:m1:ter:akc:npraep", "817": "ppron3:sg:gen:m1:ter:akc:praep", "818": "ppron3:sg:gen:m1:ter:nakc:npraep", "819": "ppron3:sg:gen:m1:ter:nakc:praep", "820": "ppron3:sg:gen:m2:ter:akc:npraep", "821": "ppron3:sg:gen:m2:ter:akc:praep", "822": "ppron3:sg:gen:m2:ter:nakc:npraep", "823": "ppron3:sg:gen:m3:ter:akc:npraep", "824": "ppron3:sg:gen:m3:ter:akc:praep", "825": 
"ppron3:sg:gen:m3:ter:nakc:npraep", "826": "ppron3:sg:gen:m3:ter:nakc:praep", "827": "ppron3:sg:gen:n:ter:akc:npraep", "828": "ppron3:sg:gen:n:ter:akc:praep", "829": "ppron3:sg:gen:n:ter:nakc:npraep", "830": "ppron3:sg:inst:f:ter:akc:praep", "831": "ppron3:sg:inst:m1:ter:akc:npraep", "832": "ppron3:sg:inst:m1:ter:akc:praep", "833": "ppron3:sg:inst:m2:ter:akc:npraep", "834": "ppron3:sg:inst:m2:ter:akc:praep", "835": "ppron3:sg:inst:m3:ter:akc:npraep", "836": "ppron3:sg:inst:m3:ter:akc:praep", "837": "ppron3:sg:inst:n:ter:akc:npraep", "838": "ppron3:sg:inst:n:ter:akc:praep", "839": "ppron3:sg:loc:f:ter:akc:praep", "840": "ppron3:sg:loc:m1:ter:akc:praep", "841": "ppron3:sg:loc:m2:ter:akc:praep", "842": "ppron3:sg:loc:m3:ter:akc:praep", "843": "ppron3:sg:loc:n:ter:akc:praep", "844": "ppron3:sg:nom:f:ter:akc:npraep", "845": "ppron3:sg:nom:f:ter:akc:praep", "846": "ppron3:sg:nom:m1:ter:akc:npraep", "847": "ppron3:sg:nom:m2:ter:akc:npraep", "848": "ppron3:sg:nom:m2:ter:akc:praep", "849": "ppron3:sg:nom:m3:ter:akc:npraep", "850": "ppron3:sg:nom:n:ter:akc:npraep", "851": "praet:pl:f:imperf", "852": "praet:pl:f:perf", "853": "praet:pl:m1:imperf", "854": "praet:pl:m1:imperf:agl", "855": "praet:pl:m1:perf", "856": "praet:pl:m2:imperf", "857": "praet:pl:m2:perf", "858": "praet:pl:m3:imperf", "859": "praet:pl:m3:perf", "860": "praet:pl:n:imperf", "861": "praet:pl:n:perf", "862": "praet:sg:f:imperf", "863": "praet:sg:f:imperf:agl", "864": "praet:sg:f:imperf:nagl", "865": "praet:sg:f:perf", "866": "praet:sg:m1:imperf", "867": "praet:sg:m1:imperf:agl", "868": "praet:sg:m1:imperf:nagl", "869": "praet:sg:m1:perf", "870": "praet:sg:m1:perf:agl", "871": "praet:sg:m1:perf:nagl", "872": "praet:sg:m2:imperf", "873": "praet:sg:m2:imperf:nagl", "874": "praet:sg:m2:perf", "875": "praet:sg:m2:perf:nagl", "876": "praet:sg:m3:imperf", "877": "praet:sg:m3:imperf:nagl", "878": "praet:sg:m3:perf", "879": "praet:sg:m3:perf:nagl", "880": "praet:sg:n:imperf", "881": "praet:sg:n:perf", "882": "pred", "883": "prep:acc", "884": "prep:acc:nwok", "885": "prep:acc:wok", "886": "prep:dat", "887": "prep:gen", "888": "prep:gen:nwok", "889": "prep:gen:wok", "890": "prep:inst", "891": "prep:inst:nwok", "892": "prep:inst:wok", "893": "prep:loc", "894": "prep:loc:nwok", "895": "prep:loc:wok", "896": "prep:nom", "897": "romandig", "898": "siebie:acc", "899": "siebie:dat", "900": "siebie:gen", "901": "siebie:inst", "902": "siebie:loc", "903": "subst:pl:acc:f", "904": "subst:pl:acc:m1", "905": "subst:pl:acc:m1:pt", "906": "subst:pl:acc:m2", "907": "subst:pl:acc:m3", "908": "subst:pl:acc:n:col", "909": "subst:pl:acc:n:ncol", "910": "subst:pl:acc:n:pt", "911": "subst:pl:dat:f", "912": "subst:pl:dat:m1", "913": "subst:pl:dat:m1:pt", "914": "subst:pl:dat:m2", "915": "subst:pl:dat:m3", "916": "subst:pl:dat:n:col", "917": "subst:pl:dat:n:ncol", "918": "subst:pl:dat:n:pt", "919": "subst:pl:gen:f", "920": "subst:pl:gen:m1", "921": "subst:pl:gen:m1:pt", "922": "subst:pl:gen:m2", "923": "subst:pl:gen:m3", "924": "subst:pl:gen:n:col", "925": "subst:pl:gen:n:ncol", "926": "subst:pl:gen:n:pt", "927": "subst:pl:inst:f", "928": "subst:pl:inst:m1", "929": "subst:pl:inst:m1:pt", "930": "subst:pl:inst:m2", "931": "subst:pl:inst:m3", "932": "subst:pl:inst:n:col", "933": "subst:pl:inst:n:ncol", "934": "subst:pl:inst:n:pt", "935": "subst:pl:loc:f", "936": "subst:pl:loc:m1", "937": "subst:pl:loc:m1:pt", "938": "subst:pl:loc:m2", "939": "subst:pl:loc:m3", "940": "subst:pl:loc:n:col", "941": "subst:pl:loc:n:ncol", "942": "subst:pl:loc:n:pt", "943": 
"subst:pl:nom:f", "944": "subst:pl:nom:m1", "945": "subst:pl:nom:m1:pt", "946": "subst:pl:nom:m2", "947": "subst:pl:nom:m3", "948": "subst:pl:nom:n:col", "949": "subst:pl:nom:n:ncol", "950": "subst:pl:nom:n:pt", "951": "subst:pl:voc:f", "952": "subst:pl:voc:m1", "953": "subst:pl:voc:m1:pt", "954": "subst:pl:voc:m3", "955": "subst:pl:voc:n:col", "956": "subst:pl:voc:n:ncol", "957": "subst:pl:voc:n:pt", "958": "subst:sg:acc:f", "959": "subst:sg:acc:m1", "960": "subst:sg:acc:m2", "961": "subst:sg:acc:m3", "962": "subst:sg:acc:n:col", "963": "subst:sg:acc:n:ncol", "964": "subst:sg:dat:f", "965": "subst:sg:dat:m1", "966": "subst:sg:dat:m2", "967": "subst:sg:dat:m3", "968": "subst:sg:dat:n:col", "969": "subst:sg:dat:n:ncol", "970": "subst:sg:gen:f", "971": "subst:sg:gen:m1", "972": "subst:sg:gen:m2", "973": "subst:sg:gen:m3", "974": "subst:sg:gen:n:col", "975": "subst:sg:gen:n:ncol", "976": "subst:sg:inst:f", "977": "subst:sg:inst:m1", "978": "subst:sg:inst:m2", "979": "subst:sg:inst:m3", "980": "subst:sg:inst:n:col", "981": "subst:sg:inst:n:ncol", "982": "subst:sg:loc:f", "983": "subst:sg:loc:m1", "984": "subst:sg:loc:m2", "985": "subst:sg:loc:m3", "986": "subst:sg:loc:n:col", "987": "subst:sg:loc:n:ncol", "988": "subst:sg:nom:f", "989": "subst:sg:nom:m1", "990": "subst:sg:nom:m2", "991": "subst:sg:nom:m3", "992": "subst:sg:nom:n:col", "993": "subst:sg:nom:n:ncol", "994": "subst:sg:voc:f", "995": "subst:sg:voc:m1", "996": "subst:sg:voc:m2", "997": "subst:sg:voc:m3", "998": "subst:sg:voc:n:col", "999": "subst:sg:voc:n:ncol", "1000": "sym", "1001": "winien:pl:f:imperf", "1002": "winien:pl:m1:imperf", "1003": "winien:pl:m2:imperf", "1004": "winien:pl:m3:imperf", "1005": "winien:pl:n:imperf", "1006": "winien:sg:f:imperf", "1007": "winien:sg:m1:imperf", "1008": "winien:sg:m2:imperf", "1009": "winien:sg:m3:imperf", "1010": "winien:sg:n:imperf", "1011": "xxs:acc", "1012": "xxs:dat", "1013": "xxs:gen", "1014": "xxs:inst", "1015": "xxs:loc", "1016": "xxs:nom", "1017": "xxs:voc", "1018": "xxx"}}}}, {"name": "nps", "sequence": "bool"}, {"name": "nkjp_ids", "sequence": "string"}], "config_name": "nkjp1m", "splits": [{"name": "test", "num_bytes": 8324533, "num_examples": 8964}, {"name": "train", "num_bytes": 65022406, "num_examples": 68943}, {"name": "validation", "num_bytes": 7465442, "num_examples": 7755}], "download_size": 16167009, "dataset_size": 80812381}}
2022-12-07T16:47:51+00:00
[]
[ "pl" ]
TAGS #task_categories-token-classification #task_ids-part-of-speech #task_ids-lemmatization #annotations_creators-expert-generated #language_creators-expert-generated #multilinguality-monolingual #size_categories-10K<n<100K #source_datasets-original #language-Polish #license-cc-by-4.0 #National Corpus of Polish #Narodowy Korpus Języka Polskiego #region-us
Dataset Card for NKJP1M – The manually annotated subcorpus of the National Corpus of Polish =========================================================================================== Table of Contents ----------------- * Table of Contents * Dataset Description + Dataset Summary + Supported Tasks and Leaderboards + Languages * Dataset Structure + Data Instances + Data Fields + Data Splits * Dataset Creation + Curation Rationale + Source Data + Annotations + Personal and Sensitive Information * Considerations for Using the Data + Social Impact of Dataset + Discussion of Biases + Other Known Limitations * Additional Information + Dataset Curators + Licensing Information + Citation Information + Contributions Dataset Description ------------------- * Homepage: NKJP1M * Repository: NKJP1M-SGJP * Paper: NKJP book * Point of Contact: mailto:morfeusz@URL ### Dataset Summary This is the official dataset for NKJP1M – the 1-million token balanced subcorpus of the National Corpus of Polish (Narodowy Korpus Języka Polskiego) Besides the text (divided into paragraphs/samples and sentences) the set contains lemmas and morpho-syntactic tags for all tokens in the corpus. This release, known as NKJP1M-SGJP, corresponds to the version 1.2 of the corpus with later corrections and improvements. In particular the morpho-syntactic annotation has been aligned with the present version of Morfeusz2 SGJP morphological analyser (as of 2022.12.04). ### Supported Tasks and Leaderboards The main use of this resource lays in training models for lemmatisation and part of speech tagging of Polish. ### Languages Polish (monolingual) Dataset Structure ----------------- ### Data Instances ### Data Fields * 'nkjp\_text', 'nkjp\_par', 'nkjp\_sent' (strings): XML identifiers of the present text (document), paragraph and sentence in NKJP. (These allow to map the data point back to the source corpus and to identify paragraphs/samples.) * 'tokens' (sequence of strings): tokens of the text defined as in NKJP. * 'lemmas' (sequence of strings): lemmas corresponding to the tokens. * 'tags' (sequence of labels): morpho-syntactic tags according to Morfeusz2 tagset (1019 distinct tags). * 'poses' (sequence of labels): flexemic class (detailed part of speech, 40 classes) – the first element of the corresponding tag. * 'cposes' (sequence of labels): coarse part of speech (13 classes): all verbal and deverbal flexemic classes get mapped to a 'V', nominal – 'N', adjectival – 'A', “strange” (abbreviations, alien elements, symbols, emojis…) – 'X', rest as in 'poses'. * 'nps' (sequence of booleans): 'True' means that the corresponding token is not preceded by a space in the source text. * 'nkjp\_ids' (sequence of strings): XML identifiers of particular tokens in NKJP (probably an overkill). ### Data Splits Dataset Creation ---------------- ### Curation Rationale The National Corpus of Polish (NKJP) was envisioned as the reference corpus of contemporary Polish. The manually annotated subcorpus (NKJP1M) was thought of as the training data for various NLP tasks. ### Source Data NKJP is balanced with respect to Polish readership. The detailed rationale is described in Chapter 3 of the NKJP book (roughly: 50% press, 30% books, 10% speech, 10% other). The corpus contains texts from the years 1945–2010 (with 80% of the text in the range 1990–2010). Only original Polish texts were gathered (no translations from other languages). The composition of NKJP1M follows this schema (see Chapter 5). 
### Annotations The rules of morphosyntactic annotation used for NKJP are discussed in Chapter 6 of the NKJP book. Presently (2020), the corpus uses a common tagset with the morphological analyzer Morfeusz 2. #### Annotation process The texts were processed with Morfeusz and then the resulting annotations were manually disambiguated and validated/corrected. Each text sample was independently processed by two annotators. In case of annotation conflicts an adjudicator stepped in. ### Licensing Information !Creative Commons License This work is licensed under a Creative Commons Attribution 4.0 International License. Info on the source corpus: link Current annotation scheme: link
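The field description and dataset_info metadata quoted above map directly onto the Hugging Face `datasets` API. Below is a minimal, hedged sketch of loading the corpus and reading one annotated sentence: the repository path is an assumption (substitute the actual Hub id for this record), while the config name ("nkjp1m"), split names, and field names follow the schema and metadata shown in this record.

```python
# Hedged sketch: loading NKJP1M and inspecting its fields.
# The repository path below is an assumption -- substitute the Hub id of this
# record; the config name, split names, and feature names follow the metadata
# and field description quoted above.
from datasets import load_dataset

ds = load_dataset("ipipan/nkjp1m-sgjp", "nkjp1m", split="train")  # hypothetical path
ex = ds[0]

# 'tags' and 'poses' are stored as label ids; decode them to their string names.
tag_names = ds.features["tags"].feature.int2str(ex["tags"])
pos_names = ds.features["poses"].feature.int2str(ex["poses"])

# 'nps' marks tokens NOT preceded by a space, so the surface sentence can be rebuilt.
sentence = "".join(
    tok if nps else " " + tok for tok, nps in zip(ex["tokens"], ex["nps"])
).strip()

for tok, lemma, tag in zip(ex["tokens"], ex["lemmas"], tag_names):
    print(f"{tok}\t{lemma}\t{tag}")
print(sentence)
```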
[ "### Dataset Summary\n\n\nThis is the official dataset for NKJP1M – the 1-million token balanced subcorpus of the National Corpus of Polish (Narodowy Korpus Języka Polskiego)\n\n\nBesides the text (divided into paragraphs/samples and sentences) the set contains lemmas and morpho-syntactic tags for all tokens in the corpus.\n\n\nThis release, known as NKJP1M-SGJP, corresponds to the version 1.2 of the corpus with later corrections and improvements. In particular the morpho-syntactic annotation has been aligned with the present version of Morfeusz2 SGJP morphological analyser (as of 2022.12.04).", "### Supported Tasks and Leaderboards\n\n\nThe main use of this resource lays in training models for lemmatisation and part of speech tagging of Polish.", "### Languages\n\n\nPolish (monolingual)\n\n\nDataset Structure\n-----------------", "### Data Instances", "### Data Fields\n\n\n* 'nkjp\\_text', 'nkjp\\_par', 'nkjp\\_sent' (strings): XML identifiers of the present text (document), paragraph and sentence in NKJP. (These allow to map the data point back to the source corpus and to identify paragraphs/samples.)\n* 'tokens' (sequence of strings): tokens of the text defined as in NKJP.\n* 'lemmas' (sequence of strings): lemmas corresponding to the tokens.\n* 'tags' (sequence of labels): morpho-syntactic tags according to Morfeusz2 tagset (1019 distinct tags).\n* 'poses' (sequence of labels): flexemic class (detailed part of speech, 40 classes) – the first element of the corresponding tag.\n* 'cposes' (sequence of labels): coarse part of speech (13 classes): all verbal and deverbal flexemic classes get mapped to a 'V', nominal – 'N', adjectival – 'A', “strange” (abbreviations, alien elements, symbols, emojis…) – 'X', rest as in 'poses'.\n* 'nps' (sequence of booleans): 'True' means that the corresponding token is not preceded by a space in the source text.\n* 'nkjp\\_ids' (sequence of strings): XML identifiers of particular tokens in NKJP (probably an overkill).", "### Data Splits\n\n\n\nDataset Creation\n----------------", "### Curation Rationale\n\n\nThe National Corpus of Polish (NKJP) was envisioned as the reference corpus of contemporary Polish.\n\n\nThe manually annotated subcorpus (NKJP1M) was thought of as the training data for various NLP tasks.", "### Source Data\n\n\nNKJP is balanced with respect to Polish readership. The detailed rationale is described in Chapter 3 of the NKJP book (roughly: 50% press, 30% books, 10% speech, 10% other). The corpus contains texts from the years 1945–2010 (with 80% of the text in the range 1990–2010). Only original Polish texts were gathered (no translations from other languages). The composition of NKJP1M follows this schema (see Chapter 5).", "### Annotations\n\n\nThe rules of morphosyntactic annotation used for NKJP are discussed in Chapter 6 of the NKJP book. Presently (2020), the corpus uses a common tagset with the morphological analyzer Morfeusz 2.", "#### Annotation process\n\n\nThe texts were processed with Morfeusz and then the resulting annotations were manually disambiguated and validated/corrected. Each text sample was independently processed by two annotators. In case of annotation conflicts an adjudicator stepped in.", "### Licensing Information\n\n\n!Creative Commons License This work is licensed under a Creative Commons Attribution 4.0 International License.\n\n\nInfo on the source corpus: link\n\n\nCurrent annotation scheme: link" ]
[ "TAGS\n#task_categories-token-classification #task_ids-part-of-speech #task_ids-lemmatization #annotations_creators-expert-generated #language_creators-expert-generated #multilinguality-monolingual #size_categories-10K<n<100K #source_datasets-original #language-Polish #license-cc-by-4.0 #National Corpus of Polish #Narodowy Korpus Języka Polskiego #region-us \n", "### Dataset Summary\n\n\nThis is the official dataset for NKJP1M – the 1-million token balanced subcorpus of the National Corpus of Polish (Narodowy Korpus Języka Polskiego)\n\n\nBesides the text (divided into paragraphs/samples and sentences) the set contains lemmas and morpho-syntactic tags for all tokens in the corpus.\n\n\nThis release, known as NKJP1M-SGJP, corresponds to the version 1.2 of the corpus with later corrections and improvements. In particular the morpho-syntactic annotation has been aligned with the present version of Morfeusz2 SGJP morphological analyser (as of 2022.12.04).", "### Supported Tasks and Leaderboards\n\n\nThe main use of this resource lays in training models for lemmatisation and part of speech tagging of Polish.", "### Languages\n\n\nPolish (monolingual)\n\n\nDataset Structure\n-----------------", "### Data Instances", "### Data Fields\n\n\n* 'nkjp\\_text', 'nkjp\\_par', 'nkjp\\_sent' (strings): XML identifiers of the present text (document), paragraph and sentence in NKJP. (These allow to map the data point back to the source corpus and to identify paragraphs/samples.)\n* 'tokens' (sequence of strings): tokens of the text defined as in NKJP.\n* 'lemmas' (sequence of strings): lemmas corresponding to the tokens.\n* 'tags' (sequence of labels): morpho-syntactic tags according to Morfeusz2 tagset (1019 distinct tags).\n* 'poses' (sequence of labels): flexemic class (detailed part of speech, 40 classes) – the first element of the corresponding tag.\n* 'cposes' (sequence of labels): coarse part of speech (13 classes): all verbal and deverbal flexemic classes get mapped to a 'V', nominal – 'N', adjectival – 'A', “strange” (abbreviations, alien elements, symbols, emojis…) – 'X', rest as in 'poses'.\n* 'nps' (sequence of booleans): 'True' means that the corresponding token is not preceded by a space in the source text.\n* 'nkjp\\_ids' (sequence of strings): XML identifiers of particular tokens in NKJP (probably an overkill).", "### Data Splits\n\n\n\nDataset Creation\n----------------", "### Curation Rationale\n\n\nThe National Corpus of Polish (NKJP) was envisioned as the reference corpus of contemporary Polish.\n\n\nThe manually annotated subcorpus (NKJP1M) was thought of as the training data for various NLP tasks.", "### Source Data\n\n\nNKJP is balanced with respect to Polish readership. The detailed rationale is described in Chapter 3 of the NKJP book (roughly: 50% press, 30% books, 10% speech, 10% other). The corpus contains texts from the years 1945–2010 (with 80% of the text in the range 1990–2010). Only original Polish texts were gathered (no translations from other languages). The composition of NKJP1M follows this schema (see Chapter 5).", "### Annotations\n\n\nThe rules of morphosyntactic annotation used for NKJP are discussed in Chapter 6 of the NKJP book. Presently (2020), the corpus uses a common tagset with the morphological analyzer Morfeusz 2.", "#### Annotation process\n\n\nThe texts were processed with Morfeusz and then the resulting annotations were manually disambiguated and validated/corrected. Each text sample was independently processed by two annotators. 
In case of annotation conflicts an adjudicator stepped in.", "### Licensing Information\n\n\n!Creative Commons License This work is licensed under a Creative Commons Attribution 4.0 International License.\n\n\nInfo on the source corpus: link\n\n\nCurrent annotation scheme: link" ]
a09f12672eb54455d73f6c4dd23f728900100cf7
# Dataset Card for "fa-paraphrase" This dataset contains over 1.1 million rows. Each row contains a pair of Farsi sentences which are a paraphrase of each other. The datasets used to create this dataset can be found here: * [tapaco](https://huggingface.co/datasets/tapaco) * [kaggle](https://www.kaggle.com/datasets/armannikkhah/persian-paraphrase-dataset) [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
alighasemi/fa-paraphrase
[ "region:us" ]
2022-12-07T17:13:10+00:00
{"Tasks": ["Text2Text Generation"], "Fine-Grained Tasks": ["paraphrase", "query-paraphrasing"], "Languages": ["Persian"], "Multilinguality": ["monolingual", "fa", "fa-IR"], "Sizes": ["n>1M"], "dataset_info": {"features": [{"name": "sentence1", "dtype": "string"}, {"name": "sentence2", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 139373682.4, "num_examples": 881408}, {"name": "test", "num_bytes": 17421710.3, "num_examples": 110176}, {"name": "validation", "num_bytes": 17421710.3, "num_examples": 110176}], "download_size": 98032993, "dataset_size": 174217103.00000003}}
2022-12-07T17:33:45+00:00
[]
[]
TAGS #region-us
# Dataset Card for "fa-paraphrase" This dataset contains over 1.1 million rows. Each row contains a pair of Farsi sentences which are a paraphrase of each other. The datasets used to create this dataset can be found here: * tapaco * kaggle More Information needed
[ "# Dataset Card for \"fa-paraphrase\"\n\nThis dataset contains over 1.1 million rows. Each row contains a pair of Farsi sentences which are a paraphrase of each other. The datasets used to create this dataset can be found here:\n\n* tapaco\n* kaggle\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"fa-paraphrase\"\n\nThis dataset contains over 1.1 million rows. Each row contains a pair of Farsi sentences which are a paraphrase of each other. The datasets used to create this dataset can be found here:\n\n* tapaco\n* kaggle\n\nMore Information needed" ]
e9c5fce8b29f5d64e1ee45ce9222b1269ac6e729
# Dataset Card for "medmcqa_age_gender_custom" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
society-ethics/medmcqa_age_gender_custom
[ "region:us" ]
2022-12-07T18:29:43+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "opa", "dtype": "string"}, {"name": "opb", "dtype": "string"}, {"name": "opc", "dtype": "string"}, {"name": "opd", "dtype": "string"}, {"name": "cop", "dtype": {"class_label": {"names": {"0": "a", "1": "b", "2": "c", "3": "d"}}}}, {"name": "choice_type", "dtype": "string"}, {"name": "exp", "dtype": "string"}, {"name": "subject_name", "dtype": "string"}, {"name": "topic_name", "dtype": "string"}, {"name": "age.infant", "dtype": "bool"}, {"name": "age.child_preschool", "dtype": "bool"}, {"name": "age.child", "dtype": "bool"}, {"name": "age.adolescent", "dtype": "bool"}, {"name": "age.adult", "dtype": "bool"}, {"name": "age.middle_aged", "dtype": "bool"}, {"name": "age.aged", "dtype": "bool"}, {"name": "age.aged_80_over", "dtype": "bool"}, {"name": "gender.male", "dtype": "bool"}, {"name": "gender.female", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 132131827, "num_examples": 182822}], "download_size": 86345498, "dataset_size": 132131827}}
2022-12-07T18:30:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for "medmcqa_age_gender_custom" More Information needed
[ "# Dataset Card for \"medmcqa_age_gender_custom\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"medmcqa_age_gender_custom\"\n\nMore Information needed" ]
620b1279d40e6081d8f782318268dc082b9c945b
# Dataset Card for "olm-bookcorpus-tokenized-1024" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Tristan/olm-bookcorpus-tokenized-1024
[ "region:us" ]
2022-12-07T19:28:08+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "special_tokens_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 8534733804.0, "num_examples": 1386409}], "download_size": 2291578601, "dataset_size": 8534733804.0}}
2022-12-07T19:33:36+00:00
[]
[]
TAGS #region-us
# Dataset Card for "olm-bookcorpus-tokenized-1024" More Information needed
[ "# Dataset Card for \"olm-bookcorpus-tokenized-1024\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"olm-bookcorpus-tokenized-1024\"\n\nMore Information needed" ]