| Column | Type | Min | Max |
|---|---|---|---|
| sha | string | 40 | 40 |
| text | string | 1 | 13.4M |
| id | string | 2 | 117 |
| tags | sequence | 1 | 7.91k |
| created_at | string | 25 | 25 |
| metadata | string | 2 | 875k |
| last_modified | string | 25 | 25 |
| arxiv | sequence | 0 | 25 |
| languages | sequence | 0 | 7.91k |
| tags_str | string | 17 | 159k |
| text_str | string | 1 | 447k |
| text_lists | sequence | 0 | 352 |
| processed_texts | sequence | 1 | 353 |
6bf472660d9a2fe3b880326cceaa0a345a98e2b4
# Dataset Card for "alignments" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
taejunkim/alignments
[ "region:us" ]
2022-12-28T07:48:57+00:00
{"dataset_info": {"features": [{"name": "mix_id", "dtype": "string"}, {"name": "track_id", "dtype": "string"}, {"name": "case_name", "dtype": "string"}, {"name": "feature", "dtype": "string"}, {"name": "metric", "dtype": "string"}, {"name": "key_change", "dtype": "int64"}, {"name": "match_rate", "dtype": "float64"}, {"name": "match_rate_raw", "dtype": "float64"}, {"name": "matched_beats", "dtype": "int64"}, {"name": "matched_beats_raw", "dtype": "int64"}, {"name": "matched_time_mix", "dtype": "float64"}, {"name": "matched_time_track", "dtype": "float64"}, {"name": "mix_cue_in_beat", "dtype": "float64"}, {"name": "mix_cue_out_beat", "dtype": "float64"}, {"name": "track_cue_in_beat", "dtype": "float64"}, {"name": "track_cue_out_beat", "dtype": "float64"}, {"name": "mix_cue_in_time", "dtype": "float64"}, {"name": "mix_cue_out_time", "dtype": "float64"}, {"name": "track_cue_in_time", "dtype": "float64"}, {"name": "track_cue_out_time", "dtype": "float64"}, {"name": "cost", "dtype": "float64"}, {"name": "__index_level_0__", "dtype": "int64"}, {"name": "wp", "sequence": {"sequence": "int64"}}], "splits": [{"name": "train", "num_bytes": 22961341, "num_examples": 6600}], "download_size": 3089520, "dataset_size": 22961341}}
2022-12-28T07:49:17+00:00
[]
[]
TAGS #region-us
# Dataset Card for "alignments" More Information needed
[ "# Dataset Card for \"alignments\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"alignments\"\n\nMore Information needed" ]
c5ea3f8bc1df7c02ab2516cf1aeff73a6f3b71ec
# Dataset Card for "SmokeFire" Wildfires or forest fires are unpredictable catastrophic and destructive events that affect rural areas. The impact of these events affects both vegetation and wildlife. This dataset can be used to train networks able to detect smoke and/or fire in forest environments. ## Data Sources & Description - **This dataset consist of sample from two datasets hosted on Kaggle:** - [Forest Fire](https://www.kaggle.com/datasets/kutaykutlu/forest-fire?select=train_fire) - [Forest Fire Images](https://www.kaggle.com/datasets/mohnishsaiprasad/forest-fire-images) - **The datasets consist of:** - 2525 **Fire** samples - 2525 **Smoke** samples - 2525 **Normal** samples - **The dataset is splitted into:** - Train Set -> 6060 samples - Validation Set -> 756 samples - Test Set -> 759 samples
EdBianchi/SmokeFire
[ "region:us" ]
2022-12-28T09:21:45+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "Fire", "1": "Normal", "2": "Smoke"}}}}], "splits": [{"name": "train", "num_bytes": 166216842.46, "num_examples": 6060}, {"name": "test", "num_bytes": 89193578.0, "num_examples": 759}, {"name": "validation", "num_bytes": 75838884.0, "num_examples": 756}], "download_size": 890673915, "dataset_size": 331249304.46000004}}
2022-12-29T14:45:31+00:00
[]
[]
TAGS #region-us
# Dataset Card for "SmokeFire" Wildfires or forest fires are unpredictable catastrophic and destructive events that affect rural areas. The impact of these events affects both vegetation and wildlife. This dataset can be used to train networks able to detect smoke and/or fire in forest environments. ## Data Sources & Description - This dataset consist of sample from two datasets hosted on Kaggle: - Forest Fire - Forest Fire Images - The datasets consist of: - 2525 Fire samples - 2525 Smoke samples - 2525 Normal samples - The dataset is splitted into: - Train Set -> 6060 samples - Validation Set -> 756 samples - Test Set -> 759 samples
[ "# Dataset Card for \"SmokeFire\"\nWildfires or forest fires are unpredictable catastrophic and destructive events that affect rural areas. The impact of these events affects both vegetation and wildlife.\nThis dataset can be used to train networks able to detect smoke and/or fire in forest environments.", "## Data Sources & Description\n- This dataset consist of sample from two datasets hosted on Kaggle:\n - Forest Fire\n - Forest Fire Images\n- The datasets consist of:\n - 2525 Fire samples\n - 2525 Smoke samples\n - 2525 Normal samples\n- The dataset is splitted into:\n - Train Set -> 6060 samples\n - Validation Set -> 756 samples\n - Test Set -> 759 samples" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"SmokeFire\"\nWildfires or forest fires are unpredictable catastrophic and destructive events that affect rural areas. The impact of these events affects both vegetation and wildlife.\nThis dataset can be used to train networks able to detect smoke and/or fire in forest environments.", "## Data Sources & Description\n- This dataset consist of sample from two datasets hosted on Kaggle:\n - Forest Fire\n - Forest Fire Images\n- The datasets consist of:\n - 2525 Fire samples\n - 2525 Smoke samples\n - 2525 Normal samples\n- The dataset is splitted into:\n - Train Set -> 6060 samples\n - Validation Set -> 756 samples\n - Test Set -> 759 samples" ]
89e629172b5896f83ce0c0f0a7d0dc88179d373b
# Dataset Card for "kira-dog" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
fabiochiu/kira-dog
[ "region:us" ]
2022-12-28T09:33:51+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 1300183.0, "num_examples": 5}], "download_size": 1301094, "dataset_size": 1300183.0}}
2022-12-28T09:41:49+00:00
[]
[]
TAGS #region-us
# Dataset Card for "kira-dog" More Information needed
[ "# Dataset Card for \"kira-dog\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"kira-dog\"\n\nMore Information needed" ]
60208e25d934da68e9f0e0b69f4e71354b62c4a7
# Introduction The Face Synthetics dataset is a collection of diverse synthetic face images with ground truth labels. It was introduced in our paper Fake It Till You Make It: Face analysis in the wild using synthetic data alone. Our dataset contains: 100,000 images of faces at 512 x 512 pixel resolution; 70 standard facial landmark annotations; per-pixel semantic class annotations. It can be used to train machine learning systems for face-related tasks such as landmark localization and face parsing, showing that synthetic data can both match real data in accuracy and open up new approaches where manual labelling would be impossible. Some images also include hands and off-center distractor faces in addition to primary faces centered in the image. The Face Synthetics dataset can be used for non-commercial research, and is licensed under the license found in LICENSE.txt. # Dataset Layout The Face Synthetics dataset is a single .zip file containing color images, segmentation images, and 2D landmark coordinates in a text file.
```markdown
dataset.zip
|
|- {frame_id}.png       # Rendered image of a face
|- {frame_id}_seg.png   # Segmentation image
|- {frame_id}_ldmks.txt # Landmark annotations for 70 facial landmarks (x,y)
```
# Download A small subset of the original dataset can be found here; in order to train models on the entire dataset, please refer to the [Microsoft original repo](https://github.com/microsoft/FaceSynthetics).
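A minimal sketch of reading one frame's landmark file, assuming the layout above and that each of the 70 lines holds a space-separated `x y` pair; the frame id below is hypothetical:

```python
import numpy as np

def load_landmarks(path: str) -> np.ndarray:
    # Parses a {frame_id}_ldmks.txt file into an array of shape (70, 2).
    return np.loadtxt(path)

landmarks = load_landmarks("000000_ldmks.txt")  # hypothetical frame id
print(landmarks.shape)  # expected: (70, 2)
```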
hedrergudene/FakeItTillYouMakeIt
[ "region:us" ]
2022-12-28T11:20:38+00:00
{}
2022-12-28T11:34:26+00:00
[]
[]
TAGS #region-us
# Introduction The Face Synthetics dataset is a collection of diverse synthetic face images with ground truth labels. It was introduced in our paper Fake It Till You Make It: Face analysis in the wild using synthetic data alone. Our dataset contains: 100,000 images of faces at 512 x 512 pixel resolution; 70 standard facial landmark annotations; per-pixel semantic class annotations. It can be used to train machine learning systems for face-related tasks such as landmark localization and face parsing, showing that synthetic data can both match real data in accuracy and open up new approaches where manual labelling would be impossible. Some images also include hands and off-center distractor faces in addition to primary faces centered in the image. The Face Synthetics dataset can be used for non-commercial research, and is licensed under the license found in URL. # Dataset Layout The Face Synthetics dataset is a single .zip file containing color images, segmentation images, and 2D landmark coordinates in a text file. # Download A small subset of the original dataset can be found here; in order to train models on the entire dataset, please refer to the Microsoft original repo.
[ "# Introduction\n\nFace Synthetics dataset is a collection of diverse synthetic face images with ground truth labels.\n\nIt was introduced in our paper Fake It Till You Make It: Face analysis in the wild using synthetic data alone.\n\nOur dataset contains:\n\n100,000 images of faces at 512 x 512 pixel resolution\n70 standard facial landmark annotations\nper-pixel semantic class anotations\nIt can be used to train machine learning systems for face-related tasks such as landmark localization and face parsing, showing that synthetic data can both match real data in accuracy as well as open up new approaches where manual labelling would be impossible.\n\nSome images also include hands and off-center distractor faces in addition to primary faces centered in the image.\n\nThe Face Synthetics dataset can be used for non-commercial research, and is licensed under the license found in URL.", "# Dataset Layout\nThe Face Synthetics dataset is a single .zip file containing color images, segmentation images, and 2D landmark coordinates in a text file.", "# Download\nA small subset of the original dataset can be found here; in order to train models in the entire dataset, please refer to Microsoft original repo." ]
[ "TAGS\n#region-us \n", "# Introduction\n\nFace Synthetics dataset is a collection of diverse synthetic face images with ground truth labels.\n\nIt was introduced in our paper Fake It Till You Make It: Face analysis in the wild using synthetic data alone.\n\nOur dataset contains:\n\n100,000 images of faces at 512 x 512 pixel resolution\n70 standard facial landmark annotations\nper-pixel semantic class anotations\nIt can be used to train machine learning systems for face-related tasks such as landmark localization and face parsing, showing that synthetic data can both match real data in accuracy as well as open up new approaches where manual labelling would be impossible.\n\nSome images also include hands and off-center distractor faces in addition to primary faces centered in the image.\n\nThe Face Synthetics dataset can be used for non-commercial research, and is licensed under the license found in URL.", "# Dataset Layout\nThe Face Synthetics dataset is a single .zip file containing color images, segmentation images, and 2D landmark coordinates in a text file.", "# Download\nA small subset of the original dataset can be found here; in order to train models in the entire dataset, please refer to Microsoft original repo." ]
e36371eed2075cb024898054fd380b9bb4577da4
# Dataset Card for "processed_roberta_EHR_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
enpassant/processed_roberta_EHR_dataset
[ "region:us" ]
2022-12-28T11:55:43+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "special_tokens_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 73729188.0, "num_examples": 23907}, {"name": "test", "num_bytes": 18414564.0, "num_examples": 5971}], "download_size": 23660173, "dataset_size": 92143752.0}}
2022-12-28T14:56:25+00:00
[]
[]
TAGS #region-us
# Dataset Card for "processed_roberta_EHR_dataset" More Information needed
[ "# Dataset Card for \"processed_roberta_EHR_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"processed_roberta_EHR_dataset\"\n\nMore Information needed" ]
4113b6719a283cc09af083532482f2fb5b71ab99
# Dataset Card for "agnews_weak_labeling" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
argilla/agnews_weak_labeling
[ "language:en", "region:us" ]
2022-12-28T14:16:31+00:00
{"language": "en", "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "inputs", "struct": [{"name": "text", "dtype": "string"}]}, {"name": "prediction", "dtype": "null"}, {"name": "prediction_agent", "dtype": "null"}, {"name": "annotation", "dtype": "string"}, {"name": "annotation_agent", "dtype": "null"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "null"}, {"name": "metadata", "struct": [{"name": "split", "dtype": "string"}]}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "null"}, {"name": "metrics", "dtype": "null"}, {"name": "vectors", "struct": [{"name": "mini-lm-sentence-transformers", "sequence": "float64"}]}], "splits": [{"name": "train", "num_bytes": 25212139, "num_examples": 7000}], "download_size": 20872343, "dataset_size": 25212139}}
2023-07-13T10:46:28+00:00
[]
[ "en" ]
TAGS #language-English #region-us
# Dataset Card for "agnews_weak_labeling" More Information needed
[ "# Dataset Card for \"agnews_weak_labeling\"\n\nMore Information needed" ]
[ "TAGS\n#language-English #region-us \n", "# Dataset Card for \"agnews_weak_labeling\"\n\nMore Information needed" ]
606ad92803e43a887e4d57b71a2ca8a61075ce39
# Dataset Card for "test-16722377061524" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
albertvillanova/bad-request
[ "region:us" ]
2022-12-28T14:57:24+00:00
{"dataset_info": {"features": [{"name": "x", "dtype": "int64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 48, "num_examples": 3}], "download_size": 950, "dataset_size": 48}}
2022-12-28T14:57:56+00:00
[]
[]
TAGS #region-us
# Dataset Card for "test-16722377061524" More Information needed
[ "# Dataset Card for \"test-16722377061524\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"test-16722377061524\"\n\nMore Information needed" ]
f8f76ee688abff1197ccc21a305e9c22c5a195f7
# Dataset Card for "ehr-roberta-tokenized_datasets-12-2022" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
enpassant/ehr-roberta-tokenized_datasets-12-2022
[ "region:us" ]
2022-12-28T16:05:47+00:00
{"dataset_info": {"features": [{"name": "text", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 2740313917.0, "num_examples": 1067816}, {"name": "test", "num_bytes": 144100283.0, "num_examples": 56152}], "download_size": 478603077, "dataset_size": 2884414200.0}}
2022-12-28T16:30:47+00:00
[]
[]
TAGS #region-us
# Dataset Card for "ehr-roberta-tokenized_datasets-12-2022" More Information needed
[ "# Dataset Card for \"ehr-roberta-tokenized_datasets-12-2022\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"ehr-roberta-tokenized_datasets-12-2022\"\n\nMore Information needed" ]
96861cc20719df721e7300949c0817e853725274
# Dataset Card for "dreambooth-hackathon-rick-and-morty-images" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Kayvane/dreambooth-hackathon-rick-and-morty-images
[ "region:us" ]
2022-12-28T16:06:54+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 3505542.0, "num_examples": 17}], "download_size": 3500492, "dataset_size": 3505542.0}}
2022-12-28T16:07:01+00:00
[]
[]
TAGS #region-us
# Dataset Card for "dreambooth-hackathon-rick-and-morty-images" More Information needed
[ "# Dataset Card for \"dreambooth-hackathon-rick-and-morty-images\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"dreambooth-hackathon-rick-and-morty-images\"\n\nMore Information needed" ]
5855fc5968b3b9db7b1cabb465646acd91bc7c67
### imagenette-160px-facebook-convnext-tiny-224.mk.tar.gz
```python
import meerkat as mk

# Load the 160px Imagenette images, then merge in the stored
# ConvNeXt-Tiny logits and predictions on the shared "img_id" key.
data = mk.get("imagenette", version="160px")
df = mk.DataFrame.read("https://huggingface.co/datasets/meerkat-ml/meerkat-dataframes/resolve/main/imagenette-160px-facebook-convnext-tiny-224.mk.tar.gz")
df = data.merge(df[["img_id", "logits", "pred"]], on="img_id")
```
meerkat-ml/meerkat-dataframes
[ "region:us" ]
2022-12-28T16:17:09+00:00
{}
2023-03-12T03:48:46+00:00
[]
[]
TAGS #region-us
### URL
[ "### URL" ]
[ "TAGS\n#region-us \n", "### URL" ]
2469f5c61089862475618b921e5690297bccaa21
# Dataset Card for "bookcorpus_deduplicated" ## Dataset Summary This is a deduplicated version of the original [Book Corpus dataset](https://huggingface.co/datasets/bookcorpus). The Book Corpus (Zhu et al., 2015), which was used to train popular models such as BERT, has a substantial amount of exact-duplicate documents according to [Bandy and Vincent (2021)](https://arxiv.org/abs/2105.05241) [Bandy and Vincent (2021)](https://arxiv.org/abs/2105.05241) find that thousands of books in BookCorpus are duplicated, with only 7,185 unique books out of 11,038 total. Effect of deduplication - Num of lines: 38832894 VS 74004228 - Dataset size: 2.91GB VS 4.63GB The duplicate text has been droped and only the first appearance is kept. The order of text appearance is kept. ## Why deduplicate? Deduplication of training data has showed various advantages, including: - require fewer training steps to achieve the same or better accuracy - train models that emit memorized text ten times less frequently - reduce carbon emission and energy consumption cf [Deduplicating Training Data Makes Language Models Better](https://arxiv.org/abs/2107.06499) ## Deduplication script ```python import pandas as pd from datasets import load_dataset dataset = load_dataset("bookcorpus")["train"]["text"] df = pd.Dataframe({"text":dataset}) # drop duplicates(exact match) df_filtered = df["text"].drop_duplicates() df_filtered.to_csv("bookcorpus_filtered.csv","index"=False,"header"=False) new_dataset = load_dataset("text",data_files={"train":"bookcorpus_filtered.csv"}) ``` The running time is short, less than several minutes. More sophicated deduplication algorithms can be applied to improve the performance, such as https://github.com/google-research/deduplicate-text-datasets ## Reference ```bib @misc{https://doi.org/10.48550/arxiv.2105.05241, doi = {10.48550/ARXIV.2105.05241}, url = {https://arxiv.org/abs/2105.05241}, author = {Bandy, Jack and Vincent, Nicholas}, keywords = {Computation and Language (cs.CL), Computers and Society (cs.CY), Machine Learning (cs.LG), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Addressing "Documentation Debt" in Machine Learning Research: A Retrospective Datasheet for BookCorpus}, publisher = {arXiv}, year = {2021}, copyright = {arXiv.org perpetual, non-exclusive license} } ``` ```bib @misc{https://doi.org/10.48550/arxiv.2107.06499, doi = {10.48550/ARXIV.2107.06499}, url = {https://arxiv.org/abs/2107.06499}, author = {Lee, Katherine and Ippolito, Daphne and Nystrom, Andrew and Zhang, Chiyuan and Eck, Douglas and Callison-Burch, Chris and Carlini, Nicholas}, keywords = {Computation and Language (cs.CL), Machine Learning (cs.LG), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Deduplicating Training Data Makes Language Models Better}, publisher = {arXiv}, year = {2021}, copyright = {arXiv.org perpetual, non-exclusive license} } ``` ```bib @misc{https://doi.org/10.48550/arxiv.2209.00099, doi = {10.48550/ARXIV.2209.00099}, url = {https://arxiv.org/abs/2209.00099}, author = {Treviso, Marcos and Ji, Tianchu and Lee, Ji-Ung and van Aken, Betty and Cao, Qingqing and Ciosici, Manuel R. and Hassid, Michael and Heafield, Kenneth and Hooker, Sara and Martins, Pedro H. and Martins, André F. T. 
and Milder, Peter and Raffel, Colin and Simpson, Edwin and Slonim, Noam and Balasubramanian, Niranjan and Derczynski, Leon and Schwartz, Roy}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Methods for Natural Language Processing: A Survey}, publisher = {arXiv}, year = {2022}, copyright = {arXiv.org perpetual, non-exclusive license} } ``` [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
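A minimal sketch of loading the deduplicated corpus itself; the repo id comes from this card, and streaming is an assumption used here to avoid downloading the full ~2.9 GB up front:

```python
from datasets import load_dataset

# Stream the deduplicated BookCorpus instead of materializing it on disk.
ds = load_dataset("saibo/bookcorpus_deduplicated", split="train", streaming=True)
print(next(iter(ds))["text"])
```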
saibo/bookcorpus_deduplicated
[ "arxiv:2105.05241", "arxiv:2107.06499", "arxiv:2209.00099", "region:us" ]
2022-12-28T16:41:10+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2867856394, "num_examples": 38832894}], "download_size": 1794567875, "dataset_size": 2867856394}}
2022-12-29T16:24:22+00:00
[ "2105.05241", "2107.06499", "2209.00099" ]
[]
TAGS #arxiv-2105.05241 #arxiv-2107.06499 #arxiv-2209.00099 #region-us
# Dataset Card for "bookcorpus_deduplicated" ## Dataset Summary This is a deduplicated version of the original Book Corpus dataset. The Book Corpus (Zhu et al., 2015), which was used to train popular models such as BERT, has a substantial amount of exact-duplicate documents according to Bandy and Vincent (2021) Bandy and Vincent (2021) find that thousands of books in BookCorpus are duplicated, with only 7,185 unique books out of 11,038 total. Effect of deduplication - Num of lines: 38832894 VS 74004228 - Dataset size: 2.91GB VS 4.63GB The duplicate text has been droped and only the first appearance is kept. The order of text appearance is kept. ## Why deduplicate? Deduplication of training data has showed various advantages, including: - require fewer training steps to achieve the same or better accuracy - train models that emit memorized text ten times less frequently - reduce carbon emission and energy consumption cf Deduplicating Training Data Makes Language Models Better ## Deduplication script The running time is short, less than several minutes. More sophicated deduplication algorithms can be applied to improve the performance, such as URL ## Reference More Information needed
[ "# Dataset Card for \"bookcorpus_deduplicated\"", "## Dataset Summary\nThis is a deduplicated version of the original Book Corpus dataset.\nThe Book Corpus (Zhu et al., 2015), which was used to train popular models such as BERT, has a substantial amount of exact-duplicate documents according to Bandy and Vincent (2021)\nBandy and Vincent (2021) find that thousands of books in BookCorpus are duplicated, with only 7,185 unique books out of 11,038 total.\n\nEffect of deduplication\n- Num of lines: 38832894 VS 74004228 \n- Dataset size: 2.91GB VS 4.63GB\n\nThe duplicate text has been droped and only the first appearance is kept. \nThe order of text appearance is kept.", "## Why deduplicate?\nDeduplication of training data has showed various advantages, including:\n- require fewer training steps to achieve the same or better accuracy\n- train models that emit memorized text ten times less frequently\n- reduce carbon emission and energy consumption\n\ncf Deduplicating Training Data Makes Language Models Better", "## Deduplication script\n\nThe running time is short, less than several minutes.\nMore sophicated deduplication algorithms can be applied to improve the performance, such as URL", "## Reference\n\n\n\n\n\n\nMore Information needed" ]
[ "TAGS\n#arxiv-2105.05241 #arxiv-2107.06499 #arxiv-2209.00099 #region-us \n", "# Dataset Card for \"bookcorpus_deduplicated\"", "## Dataset Summary\nThis is a deduplicated version of the original Book Corpus dataset.\nThe Book Corpus (Zhu et al., 2015), which was used to train popular models such as BERT, has a substantial amount of exact-duplicate documents according to Bandy and Vincent (2021)\nBandy and Vincent (2021) find that thousands of books in BookCorpus are duplicated, with only 7,185 unique books out of 11,038 total.\n\nEffect of deduplication\n- Num of lines: 38832894 VS 74004228 \n- Dataset size: 2.91GB VS 4.63GB\n\nThe duplicate text has been droped and only the first appearance is kept. \nThe order of text appearance is kept.", "## Why deduplicate?\nDeduplication of training data has showed various advantages, including:\n- require fewer training steps to achieve the same or better accuracy\n- train models that emit memorized text ten times less frequently\n- reduce carbon emission and energy consumption\n\ncf Deduplicating Training Data Makes Language Models Better", "## Deduplication script\n\nThe running time is short, less than several minutes.\nMore sophicated deduplication algorithms can be applied to improve the performance, such as URL", "## Reference\n\n\n\n\n\n\nMore Information needed" ]
0510ee55922a550615bf58c10d65c7856f534863
# Dataset Card for "EHR-roberta-base-tokenized-dataset-2022-12" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
enpassant/EHR-roberta-base-tokenized-dataset-2022-12
[ "region:us" ]
2022-12-28T16:48:58+00:00
{"dataset_info": {"features": [{"name": "text", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 2738869059.0, "num_examples": 1067255}, {"name": "test", "num_bytes": 145534885.0, "num_examples": 56709}], "download_size": 477844625, "dataset_size": 2884403944.0}}
2022-12-28T16:50:39+00:00
[]
[]
TAGS #region-us
# Dataset Card for "EHR-roberta-base-tokenized-dataset-2022-12" More Information needed
[ "# Dataset Card for \"EHR-roberta-base-tokenized-dataset-2022-12\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"EHR-roberta-base-tokenized-dataset-2022-12\"\n\nMore Information needed" ]
beabfde25ee24531ac93f03f317e737c7ec45945
|Dataset|Bytes|Samples|Capping|
|-------|-----|-------|-------|
|[Unnatural Instructions](https://huggingface.co/datasets/mrm8488/unnatural-instructions-full) | 27M | 66010 | / |
|[Big-Bench](https://huggingface.co/datasets/bigbench) | 1.7G | 2631238 | / |
|[FLAN](https://huggingface.co/datasets/Muennighoff/flan) | 3.1G | 3354260 | [30K examples per dataset max with 10 templates total (So 3K / template)](https://github.com/Muennighoff/FLAN/blob/main/flan/tasks.py) |
|[SuperNatural-Instructions](https://huggingface.co/datasets/Muennighoff/natural-instructions) | 7.4G | 7101558 | / |
|[StackOverflow](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_titlebody_best_voted_answer_jsonl) | 9.0G | 4730542 | / |
|[xP3-EN](https://huggingface.co/datasets/bigscience/xP3) | 37G | 31495184 | [100K examples per data subset per prompt allowed (So 100K / template)](https://github.com/bigscience-workshop/bigscience/blob/e848657707a549dda35c8b3cc63a96d2064b2983/data/xp3/prepare_xp3_train.py#L15) |
|Total|58G|49378792| |
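The per-dataset caps in the last column can be reproduced with a simple counter over (dataset, template) pairs; this is an illustrative sketch, not the original preprocessing code, and the field names are assumptions:

```python
from collections import defaultdict
from typing import Dict, Iterable, Iterator

def cap_examples(examples: Iterable[Dict], max_per_template: int = 3_000) -> Iterator[Dict]:
    """Keep at most max_per_template examples per (dataset, template) pair,
    e.g. FLAN's 30K-per-dataset cap with 10 templates -> 3K per template."""
    counts = defaultdict(int)
    for ex in examples:  # each ex is assumed to carry "dataset" and "template" keys
        key = (ex["dataset"], ex["template"])
        if counts[key] < max_per_template:
            counts[key] += 1
            yield ex
```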
taskydata/realtasky
[ "language:en", "region:us" ]
2022-12-28T16:55:33+00:00
{"language": ["en"]}
2023-03-22T10:46:54+00:00
[]
[ "en" ]
TAGS #language-English #region-us
[]
[ "TAGS\n#language-English #region-us \n" ]
332ffe76d488b894416602afc03392070d601394
# Dataset Card for "dreambooth-hackathon-rick-and-morty-images-2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Kayvane/dreambooth-hackathon-rick-and-morty-images-2
[ "region:us" ]
2022-12-28T17:11:56+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 3482571.0, "num_examples": 24}], "download_size": 3481016, "dataset_size": 3482571.0}}
2022-12-28T17:12:00+00:00
[]
[]
TAGS #region-us
# Dataset Card for "dreambooth-hackathon-rick-and-morty-images-2" More Information needed
[ "# Dataset Card for \"dreambooth-hackathon-rick-and-morty-images-2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"dreambooth-hackathon-rick-and-morty-images-2\"\n\nMore Information needed" ]
09fc389f0dd7de7d4fc331930bfd2ce434b2b9f9
# Dataset Card for "bookcorpus_deduplicated_small" First 10K(0.25%) examples of [bookcorpus_deduplicated](https://huggingface.co/datasets/saibo/bookcorpus_deduplicated) size: 7.4MB [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
saibo/bookcorpus_deduplicated_small
[ "region:us" ]
2022-12-28T18:55:18+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 7321888, "num_examples": 100000}], "download_size": 4495653, "dataset_size": 7321888}}
2022-12-29T16:14:32+00:00
[]
[]
TAGS #region-us
# Dataset Card for "bookcorpus_deduplicated_small" First 10K(0.25%) examples of bookcorpus_deduplicated size: 7.4MB More Information needed
[ "# Dataset Card for \"bookcorpus_deduplicated_small\"\n\nFirst 10K(0.25%) examples of bookcorpus_deduplicated\n\nsize: 7.4MB\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"bookcorpus_deduplicated_small\"\n\nFirst 10K(0.25%) examples of bookcorpus_deduplicated\n\nsize: 7.4MB\n\nMore Information needed" ]
788fb2722316ee7cad1ace2f6c94e563556a1d3e
### Roboflow Dataset Page [https://universe.roboflow.com/augmented-startups/football-player-detection-kucab](https://universe.roboflow.com/augmented-startups/football-player-detection-kucab?ref=roboflow2huggingface) ### Citation
```
@misc{ football-player-detection-kucab_dataset,
    title = { Football-Player-Detection Dataset },
    type = { Open Source Dataset },
    author = { Augmented Startups },
    howpublished = { \url{ https://universe.roboflow.com/augmented-startups/football-player-detection-kucab } },
    url = { https://universe.roboflow.com/augmented-startups/football-player-detection-kucab },
    journal = { Roboflow Universe },
    publisher = { Roboflow },
    year = { 2022 },
    month = { nov },
    note = { visited on 2022-12-29 },
}
```
### License CC BY 4.0 ### Dataset Summary This dataset was exported via roboflow.com on November 21, 2022 at 6:50 PM GMT. Roboflow is an end-to-end computer vision platform that helps you * collaborate with your team on computer vision projects * collect & organize images * understand unstructured image data * annotate, and create datasets * export, train, and deploy computer vision models * use active learning to improve your dataset over time It includes 1232 images. Track-players-and-football are annotated in COCO format. The following pre-processing was applied to each image: * Auto-orientation of pixel data (with EXIF-orientation stripping) No image augmentation techniques were applied.
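A minimal sketch of loading this dataset with 🤗 `datasets`; the repo id is from this page, while the `name="full"` config is an assumption (Roboflow exports on the Hub are often published with full/mini configs), so check the repo if it fails:

```python
from datasets import load_dataset

# "full" is assumed here; the repo may expose different config names.
ds = load_dataset("keremberke/football-object-detection", name="full")
example = ds["train"][0]
print(example.keys())  # expect an image plus COCO-style object annotations
```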
keremberke/football-object-detection
[ "task_categories:object-detection", "roboflow", "region:us" ]
2022-12-28T20:09:47+00:00
{"task_categories": ["object-detection"], "tags": ["roboflow"]}
2023-01-04T20:39:21+00:00
[]
[]
TAGS #task_categories-object-detection #roboflow #region-us
### Roboflow Dataset Page URL ### License CC BY 4.0 ### Dataset Summary This dataset was exported via URL on November 21, 2022 at 6:50 PM GMT. Roboflow is an end-to-end computer vision platform that helps you * collaborate with your team on computer vision projects * collect & organize images * understand unstructured image data * annotate, and create datasets * export, train, and deploy computer vision models * use active learning to improve your dataset over time It includes 1232 images. Track-players-and-football are annotated in COCO format. The following pre-processing was applied to each image: * Auto-orientation of pixel data (with EXIF-orientation stripping) No image augmentation techniques were applied.
[ "### Roboflow Dataset Page\nURL", "### License\nCC BY 4.0", "### Dataset Summary\nThis dataset was exported via URL on November 21, 2022 at 6:50 PM GMT\n\nRoboflow is an end-to-end computer vision platform that helps you\n* collaborate with your team on computer vision projects\n* collect & organize images\n* understand unstructured image data\n* annotate, and create datasets\n* export, train, and deploy computer vision models\n* use active learning to improve your dataset over time\n\nIt includes 1232 images.\nTrack-players-and-football are annotated in COCO format.\n\nThe following pre-processing was applied to each image:\n* Auto-orientation of pixel data (with EXIF-orientation stripping)\n\nNo image augmentation techniques were applied." ]
[ "TAGS\n#task_categories-object-detection #roboflow #region-us \n", "### Roboflow Dataset Page\nURL", "### License\nCC BY 4.0", "### Dataset Summary\nThis dataset was exported via URL on November 21, 2022 at 6:50 PM GMT\n\nRoboflow is an end-to-end computer vision platform that helps you\n* collaborate with your team on computer vision projects\n* collect & organize images\n* understand unstructured image data\n* annotate, and create datasets\n* export, train, and deploy computer vision models\n* use active learning to improve your dataset over time\n\nIt includes 1232 images.\nTrack-players-and-football are annotated in COCO format.\n\nThe following pre-processing was applied to each image:\n* Auto-orientation of pixel data (with EXIF-orientation stripping)\n\nNo image augmentation techniques were applied." ]
0d66f0f14193eeeb497776a71e21bcf0b5777cb7
lol
vukrosic/derambooth-vuk-512-images
[ "region:us" ]
2022-12-28T20:28:33+00:00
{}
2022-12-28T20:29:35+00:00
[]
[]
TAGS #region-us
lol
[]
[ "TAGS\n#region-us \n" ]
33232aa37505411d5d74d0e6bd31f44d7fe1ba9f
# Dataset Card for "dreambooth-hackathon-rick-and-morty-images-square" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Kayvane/dreambooth-hackathon-rick-and-morty-images-square
[ "region:us" ]
2022-12-28T22:46:25+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 2583462.0, "num_examples": 20}], "download_size": 2582753, "dataset_size": 2583462.0}}
2022-12-28T22:46:35+00:00
[]
[]
TAGS #region-us
# Dataset Card for "dreambooth-hackathon-rick-and-morty-images-square" More Information needed
[ "# Dataset Card for \"dreambooth-hackathon-rick-and-morty-images-square\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"dreambooth-hackathon-rick-and-morty-images-square\"\n\nMore Information needed" ]
3374e9e6adbbcf37e3917bc754245129a7d2ef84
# Dataset Card for "medspeech" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
arnepeine/medspeech
[ "region:us" ]
2022-12-28T23:41:53+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 133517.0, "num_examples": 3}], "download_size": 126401, "dataset_size": 133517.0}}
2023-01-03T10:35:40+00:00
[]
[]
TAGS #region-us
# Dataset Card for "medspeech" More Information needed
[ "# Dataset Card for \"medspeech\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"medspeech\"\n\nMore Information needed" ]
11744ece8c44cefe140dfbac6be7f2de66600523
# Dataset Card for [Dataset Name] ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - jampatoisnli.github.io - **Repository:** - https://github.com/ruth-ann/jampatoisnli - **Paper:** - https://arxiv.org/abs/2212.03419 - **Point of Contact:** - Ruth-Ann Armstrong: [email protected] ### Dataset Summary JamPatoisNLI provides the first dataset for natural language inference in a creole language, Jamaican Patois. Many of the most-spoken low-resource languages are creoles. These languages commonly have a lexicon derived from a major world language and a distinctive grammar reflecting the languages of the original speakers and the process of language birth by creolization. This gives them a distinctive place in exploring the effectiveness of transfer from large monolingual or multilingual pretrained models. ### Supported Tasks and Leaderboards Natural language inference ### Languages Jamaican Patois ### Data Fields premise, hypothesis, label ### Data Splits Train: 250 Val: 200 Test: 200 ### Data set creation + Annotations Premise collection: 97% of examples from Twitter; remaining pulled from literature and online cultural website Hypothesis construction: For each premise, hypothesis written by native speaker (our first author) so that pair’s classification would be E, N or C Label validation: Random sample of 100 sentence pairs double annotated by fluent speakers ### Social Impact of Dataset JamPatoisNLI is a low-resource language dataset in an English-based Creole spoken in the Caribbean, Jamaican Patois. The creation of the dataset contributes to expanding the scope of NLP research to under-explored languages across the world. ### Dataset Curators [@ruth-ann](https://github.com/ruth-ann) ### Citation Information @misc{https://doi.org/10.48550/arxiv.2212.03419, doi = {10.48550/ARXIV.2212.03419}, url = {https://arxiv.org/abs/2212.03419}, author = {Armstrong, Ruth-Ann and Hewitt, John and Manning, Christopher}, keywords = {Computation and Language (cs.CL), Machine Learning (cs.LG), FOS: Computer and information sciences, FOS: Computer and information sciences, I.2.7}, title = {JamPatoisNLI: A Jamaican Patois Natural Language Inference Dataset}, publisher = {arXiv}, year = {2022}, copyright = {arXiv.org perpetual, non-exclusive license} } ### Contributions Thanks to Prof. Christopher Manning and John Hewitt for their contributions, guidance, facilitation and support related to the creation of this dataset.
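A minimal sketch of loading the dataset with 🤗 `datasets`; the repo id and the premise/hypothesis/label fields come from this card, while the split name is an assumption:

```python
from datasets import load_dataset

# Each example pairs a Jamaican Patois premise with a hypothesis and an
# entailment / neutral / contradiction label.
ds = load_dataset("Ruth-Ann/jampatoisnli", split="train")
ex = ds[0]
print(ex["premise"], "|", ex["hypothesis"], "|", ex["label"])
```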
Ruth-Ann/jampatoisnli
[ "task_categories:text-classification", "task_ids:natural-language-inference", "annotations_creators:expert-generated", "language_creators:expert-generated", "language_creators:found", "multilinguality:monolingual", "multilinguality:other-english-based-creole", "size_categories:n<1K", "source_datasets:original", "language:jam", "license:other", "creole", "low-resource-language", "arxiv:2212.03419", "region:us" ]
2022-12-29T05:22:50+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["expert-generated", "found"], "language": ["jam"], "license": ["other"], "multilinguality": ["monolingual", "other-english-based-creole"], "size_categories": ["n<1K"], "source_datasets": ["original"], "task_categories": ["text-classification"], "task_ids": ["natural-language-inference"], "pretty_name": "JamPatoisNLI", "tags": ["creole", "low-resource-language"]}
2022-12-31T03:25:34+00:00
[ "2212.03419" ]
[ "jam" ]
TAGS #task_categories-text-classification #task_ids-natural-language-inference #annotations_creators-expert-generated #language_creators-expert-generated #language_creators-found #multilinguality-monolingual #multilinguality-other-english-based-creole #size_categories-n<1K #source_datasets-original #language-Jamaican Creole English #license-other #creole #low-resource-language #arxiv-2212.03419 #region-us
# Dataset Card for [Dataset Name] ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: - URL - Repository: - URL - Paper: - URL - Point of Contact: - Ruth-Ann Armstrong: armstrongruthanna@URL ### Dataset Summary JamPatoisNLI provides the first dataset for natural language inference in a creole language, Jamaican Patois. Many of the most-spoken low-resource languages are creoles. These languages commonly have a lexicon derived from a major world language and a distinctive grammar reflecting the languages of the original speakers and the process of language birth by creolization. This gives them a distinctive place in exploring the effectiveness of transfer from large monolingual or multilingual pretrained models. ### Supported Tasks and Leaderboards Natural language inference ### Languages Jamaican Patois ### Data Fields premise, hypothesis, label ### Data Splits Train: 250 Val: 200 Test: 200 ### Data set creation + Annotations Premise collection: 97% of examples from Twitter; remaining pulled from literature and online cultural website Hypothesis construction: For each premise, hypothesis written by native speaker (our first author) so that pair’s classification would be E, N or C Label validation: Random sample of 100 sentence pairs double annotated by fluent speakers ### Social Impact of Dataset JamPatoisNLI is a low-resource language dataset in an English-based Creole spoken in the Caribbean, Jamaican Patois. The creation of the dataset contributes to expanding the scope of NLP research to under-explored languages across the world. ### Dataset Curators @ruth-ann @misc{URL doi = {10.48550/ARXIV.2212.03419}, url = {URL author = {Armstrong, Ruth-Ann and Hewitt, John and Manning, Christopher}, keywords = {Computation and Language (cs.CL), Machine Learning (cs.LG), FOS: Computer and information sciences, FOS: Computer and information sciences, I.2.7}, title = {JamPatoisNLI: A Jamaican Patois Natural Language Inference Dataset}, publisher = {arXiv}, year = {2022}, copyright = {URL perpetual, non-exclusive license} } ### Contributions Thanks to Prof. Christopher Manning and John Hewitt for their contributions, guidance, facilitation and support related to the creation of this dataset.
[ "# Dataset Card for [Dataset Name]", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- URL\n- Repository:\n- URL\n- Paper:\n- URL\n- Point of Contact:\n- Ruth-Ann Armsrong: armstrongruthanna@URL", "### Dataset Summary\n\nJamPatoisNLI provides the first dataset for natural language inference in a creole language, Jamaican Patois. \nMany of the most-spoken low-resource languages are creoles. These languages commonly have a lexicon derived from \na major world language and a distinctive grammar reflecting the languages of the original speakers and the process\nof language birth by creolization. This gives them a distinctive place in exploring the effectiveness of transfer\nfrom large monolingual or multilingual pretrained models.", "### Supported Tasks and Leaderboards\n\nNatural language inference", "### Languages\n\nJamaican Patois", "### Data Fields\n\npremise, hypothesis, label", "### Data Splits\n\nTrain: 250\nVal: 200\nTest: 200", "### Data set creation + Annotations\n\nPremise collection: \n97% of examples from Twitter; remaining pulled from literature and online cultural website\n\nHypothesis construction: \nFor each premise, hypothesis written by native speaker (our first author) so that pair’s classification would be E, N or C\n\nLabel validation: \nRandom sample of 100 sentence pairs double annotated by fluent speakers", "### Social Impact of Dataset\n\nJamPatoisNLI is a low-resource language dataset in an English-based Creole spoken in the Caribbean,\nJamaican Patois. The creation of the dataset contributes to expanding the scope of NLP research\nto under-explored languages across the world.", "### Dataset Curators\n\n@ruth-ann\n\n\n\n\n@misc{URL\n doi = {10.48550/ARXIV.2212.03419},\n \n url = {URL\n \n author = {Armstrong, Ruth-Ann and Hewitt, John and Manning, Christopher},\n \n keywords = {Computation and Language (cs.CL), Machine Learning (cs.LG), FOS: Computer and information sciences, FOS: Computer and information sciences, I.2.7},\n \n title = {JamPatoisNLI: A Jamaican Patois Natural Language Inference Dataset},\n \n publisher = {arXiv},\n \n year = {2022},\n \n copyright = {URL perpetual, non-exclusive license}\n}", "### Contributions\n\nThanks to Prof. Christopher Manning and John Hewitt for their contributions, guidance, facilitation and support related to the creation of this dataset." ]
[ "TAGS\n#task_categories-text-classification #task_ids-natural-language-inference #annotations_creators-expert-generated #language_creators-expert-generated #language_creators-found #multilinguality-monolingual #multilinguality-other-english-based-creole #size_categories-n<1K #source_datasets-original #language-Jamaican Creole English #license-other #creole #low-resource-language #arxiv-2212.03419 #region-us \n", "# Dataset Card for [Dataset Name]", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- URL\n- Repository:\n- URL\n- Paper:\n- URL\n- Point of Contact:\n- Ruth-Ann Armsrong: armstrongruthanna@URL", "### Dataset Summary\n\nJamPatoisNLI provides the first dataset for natural language inference in a creole language, Jamaican Patois. \nMany of the most-spoken low-resource languages are creoles. These languages commonly have a lexicon derived from \na major world language and a distinctive grammar reflecting the languages of the original speakers and the process\nof language birth by creolization. This gives them a distinctive place in exploring the effectiveness of transfer\nfrom large monolingual or multilingual pretrained models.", "### Supported Tasks and Leaderboards\n\nNatural language inference", "### Languages\n\nJamaican Patois", "### Data Fields\n\npremise, hypothesis, label", "### Data Splits\n\nTrain: 250\nVal: 200\nTest: 200", "### Data set creation + Annotations\n\nPremise collection: \n97% of examples from Twitter; remaining pulled from literature and online cultural website\n\nHypothesis construction: \nFor each premise, hypothesis written by native speaker (our first author) so that pair’s classification would be E, N or C\n\nLabel validation: \nRandom sample of 100 sentence pairs double annotated by fluent speakers", "### Social Impact of Dataset\n\nJamPatoisNLI is a low-resource language dataset in an English-based Creole spoken in the Caribbean,\nJamaican Patois. The creation of the dataset contributes to expanding the scope of NLP research\nto under-explored languages across the world.", "### Dataset Curators\n\n@ruth-ann\n\n\n\n\n@misc{URL\n doi = {10.48550/ARXIV.2212.03419},\n \n url = {URL\n \n author = {Armstrong, Ruth-Ann and Hewitt, John and Manning, Christopher},\n \n keywords = {Computation and Language (cs.CL), Machine Learning (cs.LG), FOS: Computer and information sciences, FOS: Computer and information sciences, I.2.7},\n \n title = {JamPatoisNLI: A Jamaican Patois Natural Language Inference Dataset},\n \n publisher = {arXiv},\n \n year = {2022},\n \n copyright = {URL perpetual, non-exclusive license}\n}", "### Contributions\n\nThanks to Prof. Christopher Manning and John Hewitt for their contributions, guidance, facilitation and support related to the creation of this dataset." ]
1ef34fc1b87fe5f5d99e89da477c37499d69866a
# Dataset Card for [opus] ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description **Disclaimer.** Loading this dataset is slow, so it may not be feasible at scale. I'd suggest using the other OPUS datasets on Huggingface, each of which loads a specific corpus. Loads [OPUS](https://opus.nlpl.eu/) as a HuggingFace dataset. OPUS is an open parallel corpus covering 700+ languages and 1100+ datasets. Given a `src` and `tgt` language, this repository can load *all* available parallel corpora. To my knowledge, the other OPUS datasets on Huggingface each load a specific corpus. **Requirements**.
```
pip install pandas
# pip install my fork of `opustools`
git clone https://github.com/larrylawl/OpusTools.git
pip install -e OpusTools/opustools_pkg
```
**Example Usage**.
```python
from datasets import load_dataset

# args follow `opustools`: https://pypi.org/project/opustools/
src = "en"
tgt = "id"
download_dir = "data"  # dir to save downloaded files
corpus = "bible-uedin"  # corpus name; leave as `None` to download all available corpora for the src-tgt pair

dataset = load_dataset("larrylawl/opus", src=src, tgt=tgt, download_dir=download_dir, corpus=corpus)
```
**Disclaimer**. This repository is still in active development. Do make a PR if there are any issues! ### Dataset Summary [More Information Needed] ### Supported Tasks and Leaderboards [More Information Needed] ### Languages Available languages can be viewed on the [OPUS API](https://opus.nlpl.eu/opusapi/?languages=True) ## Dataset Structure ### Data Instances
```
{'src': 'In the beginning God created the heavens and the earth .',
 'tgt': 'Pada mulanya , waktu Allah mulai menciptakan alam semesta'}
```
### Data Fields
```python
features = {
    "src": datasets.Value("string"),
    "tgt": datasets.Value("string"),
}
```
### Data Splits All data is merged into the train split. ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators?
[More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@larrylawl](https://larrylawl.github.io/) for adding this dataset.
larrylawl/opus
[ "task_categories:translation", "annotations_creators:expert-generated", "annotations_creators:found", "language_creators:found", "language_creators:expert-generated", "multilinguality:translation", "parallel-corpus", "region:us" ]
2022-12-29T06:08:54+00:00
{"annotations_creators": ["expert-generated", "found"], "language_creators": ["found", "expert-generated"], "license": [], "multilinguality": ["translation"], "size_categories": [], "source_datasets": [], "task_categories": ["translation"], "task_ids": [], "pretty_name": "opus", "tags": ["parallel-corpus"]}
2023-01-17T03:03:16+00:00
[]
[]
TAGS #task_categories-translation #annotations_creators-expert-generated #annotations_creators-found #language_creators-found #language_creators-expert-generated #multilinguality-translation #parallel-corpus #region-us
# Dataset Card for [opus] ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description Disclaimer. Loading this dataset is slow, so it may not be feasible at scale. I'd suggest using the other OPUS datasets on Huggingface, each of which loads a specific corpus. Loads OPUS as a HuggingFace dataset. OPUS is an open parallel corpus covering 700+ languages and 1100+ datasets. Given a 'src' and 'tgt' language, this repository can load *all* available parallel corpora. To my knowledge, the other OPUS datasets on Huggingface each load a specific corpus. Requirements. Example Usage. Disclaimer. This repository is still in active development. Do make a PR if there are any issues! ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages Available languages can be viewed on the OPUS API ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits All data is merged into the train split. ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions Thanks to @larrylawl for adding this dataset.
[ "# Dataset Card for [opus]", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\nDisclaimer. Loading of dataset is slow, thus it may not be feasible when loading at scale. I'd suggest to use the other OPUS datasets on Huggingface which loads a specific corpus.\n\nLoads OPUS as HuggingFace dataset. OPUS is an open parallel corpus covering 700+ languages and 1100+ datasets.\n\nGiven a 'src' and 'tgt' language, this repository can load *all* available parallel corpus. To my knowledge, other OPUS datasets on Huggingface loads a specific corpus\n\n\nRequirements. \n\n\n\nExample Usage.\n\n\n\nDisclaimer.\n\nThis repository is still in active development. Do make a PR if there're any issues!", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages\n\nAvailable languages can be viewed on the OPUS API", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits\n\nMerged all data into train split.", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @larrylawl for adding this dataset." ]
[ "TAGS\n#task_categories-translation #annotations_creators-expert-generated #annotations_creators-found #language_creators-found #language_creators-expert-generated #multilinguality-translation #parallel-corpus #region-us \n", "# Dataset Card for [opus]", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\nDisclaimer. Loading of dataset is slow, thus it may not be feasible when loading at scale. I'd suggest to use the other OPUS datasets on Huggingface which loads a specific corpus.\n\nLoads OPUS as HuggingFace dataset. OPUS is an open parallel corpus covering 700+ languages and 1100+ datasets.\n\nGiven a 'src' and 'tgt' language, this repository can load *all* available parallel corpus. To my knowledge, other OPUS datasets on Huggingface loads a specific corpus\n\n\nRequirements. \n\n\n\nExample Usage.\n\n\n\nDisclaimer.\n\nThis repository is still in active development. Do make a PR if there're any issues!", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages\n\nAvailable languages can be viewed on the OPUS API", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits\n\nMerged all data into train split.", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @larrylawl for adding this dataset." ]
ad0eefef4ed9f64ec797d4d6281629062d5f4100
# Dataset Card for "beats-mixes" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
djmix/beats-mixes
[ "region:us" ]
2022-12-29T06:16:48+00:00
{"dataset_info": {"features": [{"name": "mix_id", "dtype": "string"}, {"name": "beats", "sequence": "float64"}], "splits": [{"name": "train", "num_bytes": 425961256, "num_examples": 5040}], "download_size": 244903841, "dataset_size": 425961256}}
2022-12-29T06:17:19+00:00
[]
[]
TAGS #region-us
# Dataset Card for "beats-mixes" More Information needed
[ "# Dataset Card for \"beats-mixes\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"beats-mixes\"\n\nMore Information needed" ]
02d61b31c87b7fb5857c86540f5c4b5fcd76bfd9
# Dataset Card for "beats-tracks" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
djmix/beats-tracks
[ "region:us" ]
2022-12-29T06:17:48+00:00
{"dataset_info": {"features": [{"name": "track_id", "dtype": "string"}, {"name": "beats", "sequence": "float64"}], "splits": [{"name": "train", "num_bytes": 402775482, "num_examples": 63038}], "download_size": 118607513, "dataset_size": 402775482}}
2022-12-29T06:18:17+00:00
[]
[]
TAGS #region-us
# Dataset Card for "beats-tracks" More Information needed
[ "# Dataset Card for \"beats-tracks\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"beats-tracks\"\n\nMore Information needed" ]
8fb1fe1c91f7c8ac8cfc4dec4ca6ebc13a16efc5
# Dataset Card for "transitions" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
djmix/transitions
[ "region:us" ]
2022-12-29T06:34:38+00:00
{"dataset_info": {"features": [{"name": "tran_id", "dtype": "string"}, {"name": "mix_id", "dtype": "string"}, {"name": "i_tran", "dtype": "int32"}, {"name": "i_track_prev", "dtype": "int32"}, {"name": "i_track_next", "dtype": "int32"}, {"name": "track_id_prev", "dtype": "string"}, {"name": "track_id_next", "dtype": "string"}, {"name": "match_rate_prev", "dtype": "float32"}, {"name": "match_rate_next", "dtype": "float32"}, {"name": "matched_beats_prev", "dtype": "int32"}, {"name": "matched_beats_next", "dtype": "int32"}, {"name": "overlap_wpts", "dtype": "int32"}, {"name": "overlap_beats", "dtype": "float32"}, {"name": "tran_wpts", "dtype": "int32"}, {"name": "extra_wpts_prev", "dtype": "int32"}, {"name": "extra_wpts_next", "dtype": "int32"}, {"name": "extra_beats_prev", "dtype": "float32"}, {"name": "extra_beats_next", "dtype": "float32"}, {"name": "last_wpt_prev", "dtype": "int32"}, {"name": "last_wpt_next", "dtype": "int32"}, {"name": "total_wpt_prev", "dtype": "int32"}, {"name": "total_wpt_next", "dtype": "int32"}, {"name": "matched_time_mix_prev", "dtype": "float32"}, {"name": "matched_time_mix_next", "dtype": "float32"}, {"name": "matched_time_track_prev", "dtype": "float32"}, {"name": "matched_time_track_next", "dtype": "float32"}, {"name": "timestamp_prev", "dtype": "float32"}, {"name": "timestamp_next", "dtype": "float32"}, {"name": "case_name_prev", "dtype": "string"}, {"name": "case_name_next", "dtype": "string"}, {"name": "feature_prev", "dtype": "string"}, {"name": "feature_next", "dtype": "string"}, {"name": "metric_prev", "dtype": "string"}, {"name": "metric_next", "dtype": "string"}, {"name": "key_change_prev", "dtype": "int32"}, {"name": "key_change_next", "dtype": "int32"}, {"name": "mix_cue_in_beat_prev", "dtype": "int32"}, {"name": "mix_cue_in_beat_next", "dtype": "int32"}, {"name": "mix_cue_out_beat_prev", "dtype": "int32"}, {"name": "mix_cue_out_beat_next", "dtype": "int32"}, {"name": "track_cue_in_beat_prev", "dtype": "int32"}, {"name": "track_cue_in_beat_next", "dtype": "int32"}, {"name": "track_cue_out_beat_prev", "dtype": "int32"}, {"name": "track_cue_out_beat_next", "dtype": "int32"}, {"name": "mix_cue_in_time_prev", "dtype": "float32"}, {"name": "mix_cue_in_time_next", "dtype": "float32"}, {"name": "mix_cue_out_time_prev", "dtype": "float32"}, {"name": "mix_cue_out_time_next", "dtype": "float32"}, {"name": "track_cue_in_time_prev", "dtype": "float32"}, {"name": "track_cue_in_time_next", "dtype": "float32"}, {"name": "track_cue_out_time_prev", "dtype": "float32"}, {"name": "track_cue_out_time_next", "dtype": "float32"}, {"name": "cost_prev", "dtype": "float32"}, {"name": "cost_next", "dtype": "float32"}, {"name": "wp_prev", "sequence": {"sequence": "int32"}}, {"name": "wp_next", "sequence": {"sequence": "int32"}}, {"name": "wp_raw_prev", "sequence": {"sequence": "int32"}}, {"name": "wp_raw_next", "sequence": {"sequence": "int32"}}], "splits": [{"name": "train", "num_bytes": 3980668452, "num_examples": 64748}], "download_size": 1355715395, "dataset_size": 3980668452}}
2022-12-29T06:37:52+00:00
[]
[]
TAGS #region-us
# Dataset Card for "transitions" More Information needed
[ "# Dataset Card for \"transitions\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"transitions\"\n\nMore Information needed" ]
87519c39b1886a35e6c94dd1e39211a8587cd394
# Dataset Card for XAlign

## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Known Limitations](#known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)
  - [Contributions](#contributions)

## Dataset Description

- **Homepage:** [XAlign homepage](https://github.com/tushar117/XAlign)
- **Repository:** [XAlign repo](https://github.com/tushar117/XAlign)
- **Paper:** [XAlign: Cross-lingual Fact-to-Text Alignment and Generation for Low-Resource Languages](https://arxiv.org/abs/2202.00291)
- **Leaderboard:** [Papers With Code Leaderboard for XAlign](https://paperswithcode.com/sota/data-to-text-generation-on-xalign)
- **Point of Contact:** [Tushar Abhishek]([email protected])

### Dataset Summary

It is an extensive, high-quality cross-lingual fact-to-text dataset in which facts are in English and the corresponding sentences are in the native language, covering person biographies. The train & validation splits are created using distant supervision methods, and the test data is generated through human annotations.

### Supported Tasks and Leaderboards

- 'Data-to-text Generation': The XAlign dataset can be used to train cross-lingual data-to-text generation models. Model performance can be measured with any text generation evaluation metric by taking the average across all the languages. [Sagare et al. (2022)](https://arxiv.org/abs/2209.11252) reported an average BLEU score of 29.27 and an average METEOR score of 53.64 over the test set.
- 'Relation Extraction': XAlign could also be used for cross-lingual relation extraction, where relations in English can be extracted from the associated native-language sentence.

See the [Papers With Code Leaderboard](https://paperswithcode.com/sota/data-to-text-generation-on-xalign) for more models.

### Languages

Assamese (as), Bengali (bn), Gujarati (gu), Hindi (hi), Kannada (kn), Malayalam (ml), Marathi (mr), Oriya (or), Punjabi (pa), Tamil (ta), Telugu (te), and English (en).

## Dataset Structure

### Data Fields

Each record consists of the following entries:

- sentence (string) : Native language Wikipedia sentence. (Non-native language strings were removed.)
- `facts` (List[Dict]) : List of facts associated with the sentence, where each fact is stored as a dictionary.
- language (string) : Language identifier.

The `facts` key contains a list of facts, where each fact is stored as a dictionary. A single record within the fact list contains the following entries:

- subject (string) : central entity.
- object (string) : entity or a piece of information about the subject.
- predicate (string) : relationship that connects the subject and the object.
- qualifiers (List[Dict]) : Provides additional information about the fact; stored as a list of qualifiers, where each record is a dictionary. The dictionary contains two keys: qualifier_predicate to represent the property of the qualifier and qualifier_object to store the value for the qualifier's predicate.

### Data Instances

Example from English
```
{
    "sentence": "Mark Paul Briers (born 21 April 1968) is a former English cricketer.",
    "facts": [
        {
            "subject": "Mark Briers",
            "predicate": "date of birth",
            "object": "21 April 1968",
            "qualifiers": []
        },
        {
            "subject": "Mark Briers",
            "predicate": "occupation",
            "object": "cricketer",
            "qualifiers": []
        },
        {
            "subject": "Mark Briers",
            "predicate": "country of citizenship",
            "object": "United Kingdom",
            "qualifiers": []
        }
    ],
    "language": "en"
}
```

Example from one of the low-resource languages (i.e. Hindi)
```
{
    "sentence": "बोरिस पास्तेरनाक १९५८ में साहित्य के क्षेत्र में नोबेल पुरस्कार विजेता रहे हैं।",
    "facts": [
        {
            "subject": "Boris Pasternak",
            "predicate": "nominated for",
            "object": "Nobel Prize in Literature",
            "qualifiers": [
                {
                    "qualifier_predicate": "point in time",
                    "qualifier_subject": "1958"
                }
            ]
        }
    ],
    "language": "hi"
}
```

### Data Splits

The XAlign dataset has 3 splits: train, validation, and test. Below are the statistics of the dataset.

| Dataset splits | Number of Instances in Split |
| --- | --- |
| Train | 499155 |
| Validation | 55469 |
| Test | 7425 |

## Dataset Creation

### Curation Rationale

Most of the existing Data-to-Text datasets are available in English. Also, the structured Wikidata entries for person entities in low resource languages are minuscule in number compared to those in English. Thus, monolingual Data-to-Text for low resource languages suffers from data sparsity. The XAlign dataset would be useful in the creation of cross-lingual Data-to-Text generation systems that take a set of English facts as input and generate a sentence capturing the fact-semantics in the specified language.

### Source Data

#### Initial Data Collection and Normalization

The dataset creation process starts with an initial list of ~95K person entities selected from Wikidata, each of which has a link to a corresponding Wikipedia page in at least one of our 11 low resource languages. This leads to a dataset where every instance is a tuple containing entityID, English Wikidata facts, language identifier, and Wikipedia URL for the entityID. The facts (in English) are extracted from the 20201221 WikiData dump for each entity using the [WikiData](https://query.wikidata.org) APIs. The facts are gathered only for the specified Wikidata property (or relation) types that capture the most useful factual information for person entities: WikibaseItem, Time, Quantity, and Monolingualtext. This leads to ~0.55M data instances overall across all the 12 languages. Also, for each language, the sentences (along with section information) are extracted from the 20210520 Wikipedia XML dump using the pre-processing steps as described [here](https://arxiv.org/abs/2202.00291).

For every (entity, language) pair, the pre-processed dataset contains a set of English Wikidata facts and a set of Wikipedia sentences in that language. In order to create the train and validation datasets, these are later passed through a two-stage automatic aligner as proposed in [Abhishek et al. (2022)](https://arxiv.org/abs/2202.00291) to associate a sentence with a subset of facts.

#### Who are the source language producers?

The text is extracted from Wikipedia and the facts are retrieved from Wikidata.

### Annotations

#### Annotation process

The manual annotation of the test dataset was done in two phases. For both phases, the annotators were presented with (low resource language sentence, list of English facts). They were asked to mark facts present in the given sentence. There were also specific guidelines to ignore redundant facts, handle abbreviations, etc. More detailed annotation guidelines and an ethical statement are mentioned [here](https://docs.google.com/document/d/1ucGlf-Jm1ywQ_Fjw9f2UqPeMWPlBnlZA46UY7KuZ0EE/edit). In the first phase, we got 60 instances labeled per language by a set of 8 expert annotators (trusted graduate students who understood the task very well). In phase 2, we selected 8 annotators per language from the [National Register of Translators](https://www.ntm.org.in/languages/english/nrtdb.aspx). We tested these annotators using the phase 1 data as a golden control set, and shortlisted up to 4 annotators per language who scored highest (on Kappa score with the golden annotations).

#### Who are the annotators?

Human annotators were selected appropriately (after screening) from the [National Translation Mission](https://www.ntm.org.in) for test set creation.

### Personal and Sensitive Information

The dataset does not involve collection or storage of any personally identifiable information or offensive information at any stage.

## Considerations for Using the Data

### Social Impact of Dataset

The purpose of this dataset is to help develop cross-lingual Data-to-Text generation systems that are vital in many downstream Natural Language Processing (NLP) applications like automated dialog systems, domain-specific chatbots, open domain question answering, authoring sports reports, etc. These systems will be useful for powering business applications like Wikipedia text generation given English Infoboxes, automated generation of non-English product descriptions using English product attributes, etc.

### Known Limitations

The XAlign dataset focuses only on person biographies, and systems developed on this dataset might not generalize to other domains.

## Additional Information

### Dataset Curators

This dataset was collected by Tushar Abhishek, Shivprasad Sagare, Bhavyajeet Singh, Anubhav Sharma, Manish Gupta and Vasudeva Varma of the Information Retrieval and Extraction Lab (IREL), Hyderabad, India. They released [scripts](https://github.com/tushar117/xalign) to collect and process the data into the Data-to-Text format.

### Licensing Information

The XAlign dataset is released under the [MIT License](https://github.com/tushar117/XAlign/blob/main/LICENSE).

### Citation Information

```
@article{abhishek2022xalign,
  title={XAlign: Cross-lingual Fact-to-Text Alignment and Generation for Low-Resource Languages},
  author={Abhishek, Tushar and Sagare, Shivprasad and Singh, Bhavyajeet and Sharma, Anubhav and Gupta, Manish and Varma, Vasudeva},
  journal={arXiv preprint arXiv:2202.00291},
  year={2022}
}
```

### Contributions

Thanks to [Tushar Abhishek](https://github.com/tushar117), [Shivprasad Sagare](https://github.com/ShivprasadSagare), [Bhavyajeet Singh](https://github.com/bhavyajeet), [Anubhav Sharma](https://github.com/anubhav-sharma13), [Manish Gupta](https://github.com/blitzprecision) and [Vasudeva Varma]([email protected]) for adding this dataset.

Additional thanks to the annotators from the National Translation Mission for their crucial contributions to the creation of the test dataset: Bhaswati Bhattacharya, Aditi Sarkar, Raghunandan B. S., Satish M., Rashmi G.Rao, Vidyarashmi PN, Neelima Bhide, Anand Bapat, Krishna Rao N V, Nagalakshmi DV, Aditya Bhardwaj Vuppula, Nirupama Patel, Asir. T, Sneha Gupta, Dinesh Kumar, Jasmin Gilani, Vivek R, Sivaprasad S, Pranoy J, Ashutosh Bharadwaj, Balaji Venkateshwar, Vinkesh Bansal, Vaishnavi Udyavara, Ramandeep Singh, Khushi Goyal, Yashasvi LN Pasumarthy and Naren Akash.
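A short loading sketch for the structure described above. The "release_v1" config name comes from this card's metadata, and the split names follow the Data Splits section; treat the exact call as an assumption rather than a confirmed interface.

```python
from datasets import load_dataset

ds = load_dataset("tushar117/xalign", "release_v1", split="train")

for row in ds.select(range(2)):
    print(row["language"], "|", row["sentence"])
    # Each fact is a dict with subject/predicate/object plus optional qualifiers,
    # matching the Data Fields section above.
    for fact in row["facts"]:
        print("   ", fact["subject"], "--", fact["predicate"], "->", fact["object"])
```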
tushar117/xalign
[ "task_categories:table-to-text", "task_ids:rdf-to-text", "annotations_creators:found", "language_creators:crowdsourced", "multilinguality:multilingual", "size_categories:100K<n<1M", "source_datasets:original", "language:as", "language:bn", "language:gu", "language:hi", "language:kn", "language:ml", "language:mr", "language:or", "language:pa", "language:ta", "language:te", "language:en", "license:cc-by-nc-sa-4.0", "license:mit", "xalign", "NLG", "low-resource", "LRL", "arxiv:2202.00291", "arxiv:2209.11252", "region:us" ]
2022-12-29T06:50:10+00:00
{"annotations_creators": ["found"], "language_creators": ["crowdsourced"], "language": ["as", "bn", "gu", "hi", "kn", "ml", "mr", "or", "pa", "ta", "te", "en"], "license": ["cc-by-nc-sa-4.0", "mit"], "multilinguality": ["multilingual"], "size_categories": ["100K<n<1M"], "source_datasets": ["original"], "task_categories": ["table-to-text"], "task_ids": ["rdf-to-text"], "paperswithcode_id": "xalign", "pretty_name": "XAlign", "configs": ["release_v1"], "tags": ["xalign", "NLG", "low-resource", "LRL"]}
2023-01-01T20:39:30+00:00
[ "2202.00291", "2209.11252" ]
[ "as", "bn", "gu", "hi", "kn", "ml", "mr", "or", "pa", "ta", "te", "en" ]
TAGS #task_categories-table-to-text #task_ids-rdf-to-text #annotations_creators-found #language_creators-crowdsourced #multilinguality-multilingual #size_categories-100K<n<1M #source_datasets-original #language-Assamese #language-Bengali #language-Gujarati #language-Hindi #language-Kannada #language-Malayalam #language-Marathi #language-Oriya (macrolanguage) #language-Panjabi #language-Tamil #language-Telugu #language-English #license-cc-by-nc-sa-4.0 #license-mit #xalign #NLG #low-resource #LRL #arxiv-2202.00291 #arxiv-2209.11252 #region-us
Dataset Card for XAlign
=======================

Table of Contents
-----------------

* Table of Contents
* Dataset Description
	+ Dataset Summary
	+ Supported Tasks and Leaderboards
	+ Languages
* Dataset Structure
	+ Data Instances
	+ Data Fields
	+ Data Splits
* Dataset Creation
	+ Curation Rationale
	+ Source Data
	+ Annotations
	+ Personal and Sensitive Information
* Considerations for Using the Data
	+ Social Impact of Dataset
	+ Known Limitations
* Additional Information
	+ Dataset Curators
	+ Licensing Information
	+ Citation Information
	+ Contributions

Dataset Description
-------------------

* Homepage: XAlign homepage
* Repository: XAlign repo
* Paper: XAlign: Cross-lingual Fact-to-Text Alignment and Generation for Low-Resource Languages
* Leaderboard: Papers With Code Leaderboard for XAlign
* Point of Contact: Tushar Abhishek

### Dataset Summary

It is an extensive, high-quality cross-lingual fact-to-text dataset in which facts are in English and the corresponding sentences are in the native language, covering person biographies. The train & validation splits are created using distant supervision methods, and the test data is generated through human annotations.

### Supported Tasks and Leaderboards

* 'Data-to-text Generation': The XAlign dataset can be used to train cross-lingual data-to-text generation models. Model performance can be measured with any text generation evaluation metric by taking the average across all the languages. Sagare et al. (2022) reported an average BLEU score of 29.27 and an average METEOR score of 53.64 over the test set.
* 'Relation Extraction': XAlign could also be used for cross-lingual relation extraction, where relations in English can be extracted from the associated native-language sentence.

See the Papers With Code Leaderboard for more models.

### Languages

Assamese (as), Bengali (bn), Gujarati (gu), Hindi (hi), Kannada (kn), Malayalam (ml), Marathi (mr), Oriya (or), Punjabi (pa), Tamil (ta), Telugu (te), and English (en).

Dataset Structure
-----------------

### Data Fields

Each record consists of the following entries:

* sentence (string) : Native language Wikipedia sentence. (Non-native language strings were removed.)
* 'facts' (List[Dict]) : List of facts associated with the sentence, where each fact is stored as a dictionary.
* language (string) : Language identifier.

The 'facts' key contains a list of facts, where each fact is stored as a dictionary. A single record within the fact list contains the following entries:

* subject (string) : central entity.
* object (string) : entity or a piece of information about the subject.
* predicate (string) : relationship that connects the subject and the object.
* qualifiers (List[Dict]) : Provides additional information about the fact; stored as a list of qualifiers, where each record is a dictionary. The dictionary contains two keys: qualifier\_predicate to represent the property of the qualifier and qualifier\_object to store the value for the qualifier's predicate.

### Data Instances

Example from English

Example from one of the low-resource languages (i.e. Hindi)

### Data Splits

The XAlign dataset has 3 splits: train, validation, and test. Below are the statistics of the dataset.

Dataset Creation
----------------

### Curation Rationale

Most of the existing Data-to-Text datasets are available in English. Also, the structured Wikidata entries for person entities in low resource languages are minuscule in number compared to those in English. Thus, monolingual Data-to-Text for low resource languages suffers from data sparsity. The XAlign dataset would be useful in the creation of cross-lingual Data-to-Text generation systems that take a set of English facts as input and generate a sentence capturing the fact-semantics in the specified language.

### Source Data

#### Initial Data Collection and Normalization

The dataset creation process starts with an initial list of ~95K person entities selected from Wikidata, each of which has a link to a corresponding Wikipedia page in at least one of our 11 low resource languages. This leads to a dataset where every instance is a tuple containing entityID, English Wikidata facts, language identifier, and Wikipedia URL for the entityID. The facts (in English) are extracted from the 20201221 WikiData dump for each entity using the WikiData APIs. The facts are gathered only for the specified Wikidata property (or relation) types that capture the most useful factual information for person entities: WikibaseItem, Time, Quantity, and Monolingualtext. This leads to ~0.55M data instances overall across all the 12 languages. Also, for each language, the sentences (along with section information) are extracted from the 20210520 Wikipedia XML dump using the pre-processing steps as described here.

For every (entity, language) pair, the pre-processed dataset contains a set of English Wikidata facts and a set of Wikipedia sentences in that language. In order to create the train and validation datasets, these are later passed through a two-stage automatic aligner as proposed in Abhishek et al. (2022) to associate a sentence with a subset of facts.

#### Who are the source language producers?

The text is extracted from Wikipedia and the facts are retrieved from Wikidata.

### Annotations

#### Annotation process

The manual annotation of the test dataset was done in two phases. For both phases, the annotators were presented with (low resource language sentence, list of English facts). They were asked to mark facts present in the given sentence. There were also specific guidelines to ignore redundant facts, handle abbreviations, etc. More detailed annotation guidelines and an ethical statement are mentioned here. In the first phase, we got 60 instances labeled per language by a set of 8 expert annotators (trusted graduate students who understood the task very well). In phase 2, we selected 8 annotators per language from the National Register of Translators. We tested these annotators using the phase 1 data as a golden control set, and shortlisted up to 4 annotators per language who scored highest (on Kappa score with the golden annotations).

#### Who are the annotators?

Human annotators were selected appropriately (after screening) from the National Translation Mission for test set creation.

### Personal and Sensitive Information

The dataset does not involve collection or storage of any personally identifiable information or offensive information at any stage.

Considerations for Using the Data
---------------------------------

### Social Impact of Dataset

The purpose of this dataset is to help develop cross-lingual Data-to-Text generation systems that are vital in many downstream Natural Language Processing (NLP) applications like automated dialog systems, domain-specific chatbots, open domain question answering, authoring sports reports, etc. These systems will be useful for powering business applications like Wikipedia text generation given English Infoboxes, automated generation of non-English product descriptions using English product attributes, etc.

### Known Limitations

The XAlign dataset focuses only on person biographies, and systems developed on this dataset might not generalize to other domains.

Additional Information
----------------------

### Dataset Curators

This dataset was collected by Tushar Abhishek, Shivprasad Sagare, Bhavyajeet Singh, Anubhav Sharma, Manish Gupta and Vasudeva Varma of the Information Retrieval and Extraction Lab (IREL), Hyderabad, India. They released scripts to collect and process the data into the Data-to-Text format.

### Licensing Information

The XAlign dataset is released under the MIT License.

### Contributions

Thanks to Tushar Abhishek, Shivprasad Sagare, Bhavyajeet Singh, Anubhav Sharma, Manish Gupta and Vasudeva Varma for adding this dataset.

Additional thanks to the annotators from the National Translation Mission for their crucial contributions to the creation of the test dataset: Bhaswati Bhattacharya, Aditi Sarkar, Raghunandan B. S., Satish M., Rashmi G.Rao, Vidyarashmi PN, Neelima Bhide, Anand Bapat, Krishna Rao N V, Nagalakshmi DV, Aditya Bhardwaj Vuppula, Nirupama Patel, Asir. T, Sneha Gupta, Dinesh Kumar, Jasmin Gilani, Vivek R, Sivaprasad S, Pranoy J, Ashutosh Bharadwaj, Balaji Venkateshwar, Vinkesh Bansal, Vaishnavi Udyavara, Ramandeep Singh, Khushi Goyal, Yashasvi LN Pasumarthy and Naren Akash.
[ "### Dataset Summary\n\n\nIt consists of an extensive collection of a high quality cross-lingual fact-to-text dataset where facts are in English and corresponding sentences are in native language for person biographies. The Train & validation splits are created using distant supervision methods and Test data is generated through human annotations.", "### Supported Tasks and Leaderboards\n\n\n* 'Data-to-text Generation': XAlign dataset can be used to train cross-lingual data-to-text generation models. The model performance can measured through any text generation evaluation metrics by taking average across all the languages. Sagare et al. (2022) reported average BLEU score of 29.27 and average METEOR score of 53.64 over the test set.\n* 'Relation Extraction': XAlign could also be used for cross-lingual relation extraction where relations in English can be extracted from associated native sentence.\n\n\nSee Papers With Code Leaderboard for more models.", "### Languages\n\n\nAssamese (as), Bengali (bn), Gujarati (gu), Hindi (hi), Kannada (kn), Malayalam (ml), Marathi (mr), Oriya (or), Punjabi (pa), Tamil (ta), Telugu (te), and English (en).\n\n\nDataset Structure\n-----------------", "### Data Fields\n\n\nEach record consist of the following entries:\n\n\n* sentence (string) : Native language wikipedia sentence. (non-native language strings were removed.)\n* 'facts' (List[Dict]) : List of facts associated with the sentence where each fact is stored as dictionary.\n* language (string) : Language identifier.\n\n\nThe 'facts' key contains list of facts where each facts is stored as dictionary. A single record within fact list contains following entries:\n\n\n* subject (string) : central entity.\n* object (string) : entity or a piece of information about the subject.\n* predicate (string) : relationship that connects the subject and the object.\n* qualifiers (List[Dict]) : It provide additional information about the fact, is stored as list of qualifier where each record is a dictionary. The dictionary contains two keys: qualifier\\_predicate to represent property of qualifer and qualifier\\_object to store value for the qualifier's predicate.", "### Data Instances\n\n\nExample from English\n\n\nExample from one of the low-resource languages (i.e. Hindi)", "### Data Splits\n\n\nThe XAlign dataset has 3 splits: train, validation, and test. Below are the statistics the dataset.\n\n\n\nDataset Creation\n----------------", "### Curation Rationale\n\n\nMost of the existing Data-to-Text datasets are available in English. Also, the structured Wikidata entries for person entities in low resource languages are minuscule in number compared to that in English. Thus, monolingual Data-to-Text for low resource languages suffers from data sparsity. XAlign dataset would be useful in creation of cross-lingual Data-to-Text generation systems that take a set of English facts as input and generates a sentence capturing the fact-semantics in the specified language.", "### Source Data", "#### Initial Data Collection and Normalization\n\n\nThe dataset creation process starts with an intial list of ~95K person entities selected from Wikidata and each of which has a link to a corresponding Wikipedia page in at least one of our 11 low resource languages. This leads to a dataset where every instance is a tuple containing entityID, English Wikidata facts, language identifier, Wikipedia URL for the entityID. The facts (in English) are extracted from the 20201221 WikiData dump for each entity using the WikiData APIs. 
The facts are gathered only for the speficied Wikidata property (or relation) types that captures most useful factual information for person entities: WikibaseItem, Time, Quantity, and Monolingualtext.This leads to overall ~0.55M data instances across all the 12 languages. Also, for each language, the sentences (along with section information) are extracted from 20210520 Wikipedia XML dump using the pre-processing steps as described here.\n\n\nFor every (entity, language) pair, the pre-processed dataset contains a set of English Wikidata facts and a set of Wikipedia sentences in that language. In order to create train and validation dataset, these are later passed through a two-stage automatic aligner as proposed in abhishek et al. (2022) to associate a sentence with a subset of facts.", "#### Who are the source language producers?\n\n\nThe text are extracted from Wikipedia and facts are retrieved from Wikidata.", "### Annotations", "#### Annotation process\n\n\nThe Manual annotation of Test dataset was done in two phases. For both the phases, the annotators were presented with (low resource language sentence, list of English facts). They were asked to mark facts present in the given sentence. There were also specific guidelines to ignore redundant facts, handle abbreviations, etc. More detailed annotation guidelines and ethical statement are mentioned here\n. In the first phase, we got 60 instances labeled per language by a set of 8 expert annotators (trusted graduate students who understood the task very well). In phase 2, we selected 8 annotators per language from the National Register of Translators. We tested these annotators using phase 1 data as golden control set, and shortlisted up to 4 annotators per language who scored highest (on Kappa score with golden annotations).", "#### Who are the annotators?\n\n\nHuman annotators were selected appropriately (after screening) from National Translation Mission for Test set creation.", "### Personal and Sensitive Information\n\n\nThe dataset does not involve collection or storage of any personally identifiable information or offensive information at any stage.\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset\n\n\nThe purpose of the this dataset is to help develop cross-lingual Data-to-Text generation systems that are vital in many downstream Natural Language Processing (NLP) applications like automated dialog systems, domain-specific chatbots, open domain question answering, authoring sports reports, etc. These systems will be useful for powering business applications like Wikipedia text generation given English Infoboxes, automated generation of non-English product descriptions using English product attributes, etc.", "### Known Limitations\n\n\nThe XAlign dataset focus only on person biographies and system developed on this dataset might not be generalized to other domains.\n\n\nAdditional Information\n----------------------", "### Dataset Curators\n\n\nThis dataset is collected by Tushar Abhishek, Shivprasad Sagare, Bhavyajeet Singh, Anubhav Sharma, Manish Gupta and Vasudeva Varma of Information Retrieval and Extraction Lab (IREL), Hyderabad, India. 
They released scripts to collect and process the data into the Data-to-Text format.", "### Licensing Information\n\n\nThe XAlign dataset is released under the MIT License.", "### Contributions\n\n\nThanks to Tushar Abhishek, Shivprasad Sagare, Bhavyajeet Singh, Anubhav Sharma, Manish Gupta and Vasudeva Varma for adding this dataset.\n\n\nAdditional thanks to the annotators from National Translation Mission for their crucial contributions to creation of the test dataset: Bhaswati Bhattacharya, Aditi Sarkar, Raghunandan B. S., Satish M., Rashmi G.Rao, Vidyarashmi PN, Neelima Bhide, Anand Bapat, Krishna Rao N V, Nagalakshmi DV, Aditya Bhardwaj\nVuppula, Nirupama Patel, Asir. T, Sneha Gupta, Dinesh Kumar, Jasmin Gilani, Vivek R, Sivaprasad S, Pranoy J, Ashutosh Bharadwaj, Balaji Venkateshwar, Vinkesh Bansal, Vaishnavi Udyavara, Ramandeep Singh, Khushi Goyal, Yashasvi LN Pasumarthy and Naren Akash." ]
[ "TAGS\n#task_categories-table-to-text #task_ids-rdf-to-text #annotations_creators-found #language_creators-crowdsourced #multilinguality-multilingual #size_categories-100K<n<1M #source_datasets-original #language-Assamese #language-Bengali #language-Gujarati #language-Hindi #language-Kannada #language-Malayalam #language-Marathi #language-Oriya (macrolanguage) #language-Panjabi #language-Tamil #language-Telugu #language-English #license-cc-by-nc-sa-4.0 #license-mit #xalign #NLG #low-resource #LRL #arxiv-2202.00291 #arxiv-2209.11252 #region-us \n", "### Dataset Summary\n\n\nIt consists of an extensive collection of a high quality cross-lingual fact-to-text dataset where facts are in English and corresponding sentences are in native language for person biographies. The Train & validation splits are created using distant supervision methods and Test data is generated through human annotations.", "### Supported Tasks and Leaderboards\n\n\n* 'Data-to-text Generation': XAlign dataset can be used to train cross-lingual data-to-text generation models. The model performance can measured through any text generation evaluation metrics by taking average across all the languages. Sagare et al. (2022) reported average BLEU score of 29.27 and average METEOR score of 53.64 over the test set.\n* 'Relation Extraction': XAlign could also be used for cross-lingual relation extraction where relations in English can be extracted from associated native sentence.\n\n\nSee Papers With Code Leaderboard for more models.", "### Languages\n\n\nAssamese (as), Bengali (bn), Gujarati (gu), Hindi (hi), Kannada (kn), Malayalam (ml), Marathi (mr), Oriya (or), Punjabi (pa), Tamil (ta), Telugu (te), and English (en).\n\n\nDataset Structure\n-----------------", "### Data Fields\n\n\nEach record consist of the following entries:\n\n\n* sentence (string) : Native language wikipedia sentence. (non-native language strings were removed.)\n* 'facts' (List[Dict]) : List of facts associated with the sentence where each fact is stored as dictionary.\n* language (string) : Language identifier.\n\n\nThe 'facts' key contains list of facts where each facts is stored as dictionary. A single record within fact list contains following entries:\n\n\n* subject (string) : central entity.\n* object (string) : entity or a piece of information about the subject.\n* predicate (string) : relationship that connects the subject and the object.\n* qualifiers (List[Dict]) : It provide additional information about the fact, is stored as list of qualifier where each record is a dictionary. The dictionary contains two keys: qualifier\\_predicate to represent property of qualifer and qualifier\\_object to store value for the qualifier's predicate.", "### Data Instances\n\n\nExample from English\n\n\nExample from one of the low-resource languages (i.e. Hindi)", "### Data Splits\n\n\nThe XAlign dataset has 3 splits: train, validation, and test. Below are the statistics the dataset.\n\n\n\nDataset Creation\n----------------", "### Curation Rationale\n\n\nMost of the existing Data-to-Text datasets are available in English. Also, the structured Wikidata entries for person entities in low resource languages are minuscule in number compared to that in English. Thus, monolingual Data-to-Text for low resource languages suffers from data sparsity. 
XAlign dataset would be useful in creation of cross-lingual Data-to-Text generation systems that take a set of English facts as input and generates a sentence capturing the fact-semantics in the specified language.", "### Source Data", "#### Initial Data Collection and Normalization\n\n\nThe dataset creation process starts with an intial list of ~95K person entities selected from Wikidata and each of which has a link to a corresponding Wikipedia page in at least one of our 11 low resource languages. This leads to a dataset where every instance is a tuple containing entityID, English Wikidata facts, language identifier, Wikipedia URL for the entityID. The facts (in English) are extracted from the 20201221 WikiData dump for each entity using the WikiData APIs. The facts are gathered only for the speficied Wikidata property (or relation) types that captures most useful factual information for person entities: WikibaseItem, Time, Quantity, and Monolingualtext.This leads to overall ~0.55M data instances across all the 12 languages. Also, for each language, the sentences (along with section information) are extracted from 20210520 Wikipedia XML dump using the pre-processing steps as described here.\n\n\nFor every (entity, language) pair, the pre-processed dataset contains a set of English Wikidata facts and a set of Wikipedia sentences in that language. In order to create train and validation dataset, these are later passed through a two-stage automatic aligner as proposed in abhishek et al. (2022) to associate a sentence with a subset of facts.", "#### Who are the source language producers?\n\n\nThe text are extracted from Wikipedia and facts are retrieved from Wikidata.", "### Annotations", "#### Annotation process\n\n\nThe Manual annotation of Test dataset was done in two phases. For both the phases, the annotators were presented with (low resource language sentence, list of English facts). They were asked to mark facts present in the given sentence. There were also specific guidelines to ignore redundant facts, handle abbreviations, etc. More detailed annotation guidelines and ethical statement are mentioned here\n. In the first phase, we got 60 instances labeled per language by a set of 8 expert annotators (trusted graduate students who understood the task very well). In phase 2, we selected 8 annotators per language from the National Register of Translators. We tested these annotators using phase 1 data as golden control set, and shortlisted up to 4 annotators per language who scored highest (on Kappa score with golden annotations).", "#### Who are the annotators?\n\n\nHuman annotators were selected appropriately (after screening) from National Translation Mission for Test set creation.", "### Personal and Sensitive Information\n\n\nThe dataset does not involve collection or storage of any personally identifiable information or offensive information at any stage.\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset\n\n\nThe purpose of the this dataset is to help develop cross-lingual Data-to-Text generation systems that are vital in many downstream Natural Language Processing (NLP) applications like automated dialog systems, domain-specific chatbots, open domain question answering, authoring sports reports, etc. 
These systems will be useful for powering business applications like Wikipedia text generation given English Infoboxes, automated generation of non-English product descriptions using English product attributes, etc.", "### Known Limitations\n\n\nThe XAlign dataset focus only on person biographies and system developed on this dataset might not be generalized to other domains.\n\n\nAdditional Information\n----------------------", "### Dataset Curators\n\n\nThis dataset is collected by Tushar Abhishek, Shivprasad Sagare, Bhavyajeet Singh, Anubhav Sharma, Manish Gupta and Vasudeva Varma of Information Retrieval and Extraction Lab (IREL), Hyderabad, India. They released scripts to collect and process the data into the Data-to-Text format.", "### Licensing Information\n\n\nThe XAlign dataset is released under the MIT License.", "### Contributions\n\n\nThanks to Tushar Abhishek, Shivprasad Sagare, Bhavyajeet Singh, Anubhav Sharma, Manish Gupta and Vasudeva Varma for adding this dataset.\n\n\nAdditional thanks to the annotators from National Translation Mission for their crucial contributions to creation of the test dataset: Bhaswati Bhattacharya, Aditi Sarkar, Raghunandan B. S., Satish M., Rashmi G.Rao, Vidyarashmi PN, Neelima Bhide, Anand Bapat, Krishna Rao N V, Nagalakshmi DV, Aditya Bhardwaj\nVuppula, Nirupama Patel, Asir. T, Sneha Gupta, Dinesh Kumar, Jasmin Gilani, Vivek R, Sivaprasad S, Pranoy J, Ashutosh Bharadwaj, Balaji Venkateshwar, Vinkesh Bansal, Vaishnavi Udyavara, Ramandeep Singh, Khushi Goyal, Yashasvi LN Pasumarthy and Naren Akash." ]
5d5e4a5187d8f54ace4d58e275f9df4cccc3ff59
# Naver Movie Rating Dataset
Blpeng/nsmc
[ "region:us" ]
2022-12-29T07:18:03+00:00
{}
2022-12-29T07:27:12+00:00
[]
[]
TAGS #region-us
# Naver Movie Rating Dataset
[ "# Naver 영화 평점 데이터셋" ]
[ "TAGS\n#region-us \n", "# Naver 영화 평점 데이터셋" ]
77828c01279313d129743cbc8bdc71f6931873eb
<div align="center"> <img width="640" alt="keremberke/csgo-object-detection" src="https://huggingface.co/datasets/keremberke/csgo-object-detection/resolve/main/thumbnail.jpg"> </div> ### Dataset Labels ``` ['ct', 'cthead', 't', 'thead'] ``` ### Number of Images ```json {'train': 3879, 'valid': 383, 'test': 192} ``` ### How to Use - Install [datasets](https://pypi.org/project/datasets/): ```bash pip install datasets ``` - Load the dataset: ```python from datasets import load_dataset ds = load_dataset("keremberke/csgo-object-detection", name="full") example = ds['train'][0] ``` ### Roboflow Dataset Page [https://universe.roboflow.com/asd-culfr/wlots/dataset/1](https://universe.roboflow.com/asd-culfr/wlots/dataset/1?ref=roboflow2huggingface) ### Citation ``` @misc{ wlots_dataset, title = { wlots Dataset }, type = { Open Source Dataset }, author = { asd }, howpublished = { \\url{ https://universe.roboflow.com/asd-culfr/wlots } }, url = { https://universe.roboflow.com/asd-culfr/wlots }, journal = { Roboflow Universe }, publisher = { Roboflow }, year = { 2022 }, month = { may }, note = { visited on 2023-01-27 }, } ``` ### License CC BY 4.0 ### Dataset Summary This dataset was exported via roboflow.com on December 28, 2022 at 8:08 PM GMT Roboflow is an end-to-end computer vision platform that helps you * collaborate with your team on computer vision projects * collect & organize images * understand unstructured image data * annotate, and create datasets * export, train, and deploy computer vision models * use active learning to improve your dataset over time It includes 4454 images. Ct-cthead-t-thead are annotated in COCO format. The following pre-processing was applied to each image: * Auto-orientation of pixel data (with EXIF-orientation stripping) * Resize to 416x416 (Fill (with center crop)) The following augmentation was applied to create 3 versions of each source image: * Random brigthness adjustment of between -15 and +15 percent
keremberke/csgo-object-detection
[ "task_categories:object-detection", "roboflow", "roboflow2huggingface", "region:us" ]
2022-12-29T07:37:55+00:00
{"task_categories": ["object-detection"], "tags": ["roboflow", "roboflow2huggingface"]}
2023-01-27T13:39:19+00:00
[]
[]
TAGS #task_categories-object-detection #roboflow #roboflow2huggingface #region-us
<div align="center"> <img width="640" alt="keremberke/csgo-object-detection" src="URL </div> ### Dataset Labels ### Number of Images ### How to Use - Install datasets: - Load the dataset: ### Roboflow Dataset Page URL ### License CC BY 4.0 ### Dataset Summary This dataset was exported via URL on December 28, 2022 at 8:08 PM GMT Roboflow is an end-to-end computer vision platform that helps you * collaborate with your team on computer vision projects * collect & organize images * understand unstructured image data * annotate, and create datasets * export, train, and deploy computer vision models * use active learning to improve your dataset over time It includes 4454 images. Ct-cthead-t-thead are annotated in COCO format. The following pre-processing was applied to each image: * Auto-orientation of pixel data (with EXIF-orientation stripping) * Resize to 416x416 (Fill (with center crop)) The following augmentation was applied to create 3 versions of each source image: * Random brigthness adjustment of between -15 and +15 percent
[ "### Dataset Labels", "### Number of Images", "### How to Use\n\n- Install datasets:\n\n\n\n- Load the dataset:", "### Roboflow Dataset Page\nURL", "### License\nCC BY 4.0", "### Dataset Summary\nThis dataset was exported via URL on December 28, 2022 at 8:08 PM GMT\n\nRoboflow is an end-to-end computer vision platform that helps you\n* collaborate with your team on computer vision projects\n* collect & organize images\n* understand unstructured image data\n* annotate, and create datasets\n* export, train, and deploy computer vision models\n* use active learning to improve your dataset over time\n\nIt includes 4454 images.\nCt-cthead-t-thead are annotated in COCO format.\n\nThe following pre-processing was applied to each image:\n* Auto-orientation of pixel data (with EXIF-orientation stripping)\n* Resize to 416x416 (Fill (with center crop))\n\nThe following augmentation was applied to create 3 versions of each source image:\n* Random brigthness adjustment of between -15 and +15 percent" ]
[ "TAGS\n#task_categories-object-detection #roboflow #roboflow2huggingface #region-us \n", "### Dataset Labels", "### Number of Images", "### How to Use\n\n- Install datasets:\n\n\n\n- Load the dataset:", "### Roboflow Dataset Page\nURL", "### License\nCC BY 4.0", "### Dataset Summary\nThis dataset was exported via URL on December 28, 2022 at 8:08 PM GMT\n\nRoboflow is an end-to-end computer vision platform that helps you\n* collaborate with your team on computer vision projects\n* collect & organize images\n* understand unstructured image data\n* annotate, and create datasets\n* export, train, and deploy computer vision models\n* use active learning to improve your dataset over time\n\nIt includes 4454 images.\nCt-cthead-t-thead are annotated in COCO format.\n\nThe following pre-processing was applied to each image:\n* Auto-orientation of pixel data (with EXIF-orientation stripping)\n* Resize to 416x416 (Fill (with center crop))\n\nThe following augmentation was applied to create 3 versions of each source image:\n* Random brigthness adjustment of between -15 and +15 percent" ]
57aa02d263a6a82015660874b25765e58cb765be
# Dataset Card for reddit_one_ups_2014

## Dataset Description

- **Homepage:** https://github.com/Georeactor/reddit-one-ups

### Dataset Summary

Reddit 'one-ups' or 'clapbacks' - replies which scored higher than the original comments. This dataset makes the one-up task easier by focusing on a set of common, often meme-like replies (e.g. 'yes', 'nope', '(͡°͜ʖ͡°)').

For commentary on predictions with a previous version of the dataset, see https://blog.goodaudience.com/can-deepclapback-learn-when-to-lol-e4a2092a8f2c

For the unique / non-meme seq2seq version of this dataset, see https://huggingface.co/datasets/georeactor/reddit_one_ups_seq2seq_2014

Replies were selected from PushShift's archive of posts from 2014.

### Supported Tasks

Text classification task: finding the common reply (out of ~37) to match the parent comment text.

Text prediction task: estimating the vote score, or parent:reply ratio, of a meme response, as a measure of the relevance/cleverness of the reply.

### Languages

Primarily English - includes some emoticons such as ┬─┬ノ(ಠ_ಠノ)

## Dataset Structure

### Data Instances

29,375 rows

### Data Fields

- id: the Reddit alphanumeric ID for the reply
- body: the content of the original reply
- score: the net vote score of the original reply
- parent_id: the Reddit alphanumeric ID for the parent
- author: the Reddit username of the reply
- subreddit: the Reddit community where the discussion occurred
- parent_score: the net vote score of the parent comment
- cleantext: the simplified reply (one of 37 classes)
- tstamp: the timestamp of the reply
- parent_body: the content of the original parent

## Dataset Creation

### Source Data

Reddit comments collected through PushShift.io archives for 2014.

#### Initial Data Collection and Normalization

- Removed deleted or empty comments.
- Selected only replies which scored 1.5x higher than a parent comment, where both have a positive score.
- Found the top/repeating phrases common to these one-ups/clapback comments.
- Selected only replies which had one of these top/repeating phrases.
- Made rows in PostgreSQL and output them as CSV.

## Considerations for Using the Data

Comments and responses in the Reddit archives and output datasets all include NSFW and otherwise toxic language and links!

- You can use the subreddit and score columns to filter content.
- Imbalanced dataset: replies 'yes' and 'no' are more common than others.
- Overlap of labels: replies such as 'yes', 'yep', and 'yup' serve similar purposes; in other cases 'no' vs. 'nope' may be interesting.
- Timestamps: the given timestamp may help identify trends in meme replies.
- Usernames: a username was included to identify the 'username checks out' meme, but this was not common enough in 2014, and the included username is from the reply.

Reddit comments are the property of Reddit and the comment owners under Reddit's Terms of Service.
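A sketch of working with the fields described above. How the CSV is exposed on the hub is not stated, so the plain `load_dataset` call is an assumption; you may need to point `data_files=` at the CSV instead. The 1.5x check mirrors the selection rule from the collection notes, under my reading of "scored 1.5x higher".

```python
from collections import Counter
from datasets import load_dataset

ds = load_dataset("georeactor/reddit_one_ups_2014", split="train")

# Class balance over the ~37 meme-reply labels in `cleantext`
# (the card notes 'yes' and 'no' dominate).
print(Counter(ds["cleantext"]).most_common(10))

# Sanity-check the selection rule from "Initial Data Collection and
# Normalization": both scores positive, reply scoring 1.5x the parent.
row = ds[0]
assert row["score"] > 0 and row["parent_score"] > 0
assert row["score"] >= 1.5 * row["parent_score"]
```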
georeactor/reddit_one_ups_2014
[ "task_categories:text-classification", "language:en", "reddit", "not-for-all-eyes", "not-for-all-audiences", "region:us" ]
2022-12-29T08:23:42+00:00
{"language": "en", "task_categories": ["text-classification"], "tags": ["reddit", "not-for-all-eyes", "not-for-all-audiences"]}
2023-03-28T21:02:40+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #language-English #reddit #not-for-all-eyes #not-for-all-audiences #region-us
# Dataset Card for reddit_one_ups_2014

## Dataset Description

- Homepage: URL

### Dataset Summary

Reddit 'one-ups' or 'clapbacks' - replies which scored higher than the original comments. This dataset makes the one-up task easier by focusing on a set of common, often meme-like replies (e.g. 'yes', 'nope', '(͡°͜ʖ͡°)').

For commentary on predictions with a previous version of the dataset, see URL

For the unique / non-meme seq2seq version of this dataset, see URL

Replies were selected from PushShift's archive of posts from 2014.

### Supported Tasks

Text classification task: finding the common reply (out of ~37) to match the parent comment text.

Text prediction task: estimating the vote score, or parent:reply ratio, of a meme response, as a measure of the relevance/cleverness of the reply.

### Languages

Primarily English - includes some emoticons such as ┬─┬ノ(ಠ_ಠノ)

## Dataset Structure

### Data Instances

29,375 rows

### Data Fields

- id: the Reddit alphanumeric ID for the reply
- body: the content of the original reply
- score: the net vote score of the original reply
- parent_id: the Reddit alphanumeric ID for the parent
- author: the Reddit username of the reply
- subreddit: the Reddit community where the discussion occurred
- parent_score: the net vote score of the parent comment
- cleantext: the simplified reply (one of 37 classes)
- tstamp: the timestamp of the reply
- parent_body: the content of the original parent

## Dataset Creation

### Source Data

Reddit comments collected through URL archives for 2014.

#### Initial Data Collection and Normalization

- Removed deleted or empty comments.
- Selected only replies which scored 1.5x higher than a parent comment, where both have a positive score.
- Found the top/repeating phrases common to these one-ups/clapback comments.
- Selected only replies which had one of these top/repeating phrases.
- Made rows in PostgreSQL and output them as CSV.

## Considerations for Using the Data

Comments and responses in the Reddit archives and output datasets all include NSFW and otherwise toxic language and links!

- You can use the subreddit and score columns to filter content.
- Imbalanced dataset: replies 'yes' and 'no' are more common than others.
- Overlap of labels: replies such as 'yes', 'yep', and 'yup' serve similar purposes; in other cases 'no' vs. 'nope' may be interesting.
- Timestamps: the given timestamp may help identify trends in meme replies.
- Usernames: a username was included to identify the 'username checks out' meme, but this was not common enough in 2014, and the included username is from the reply.

Reddit comments are the property of Reddit and the comment owners under Reddit's Terms of Service.
[ "# Dataset Card for reddit_one_ups_2014", "## Dataset Description\n\n- Homepage: URL", "### Dataset Summary\n\nReddit 'one-ups' or 'clapbacks' - replies which scored higher than the original comments. This task makes one-ups easier by focusing on a set of common, often meme-like replies (e.g. 'yes', 'nope', '(͡°͜ʖ͡°)').\n\nFor commentary on predictions with a previous version of the dataset, see URL\n\nFor unique / non-meme seq2seq version of this dataset, see URL\n\nReplies were selected from PushShift's archive of posts from 2014.", "### Supported Tasks\n\nText classification task: finding the common reply (out of ~37) to match the parent comment text.\n\nText prediction task: estimating the vote score, or parent:reply ratio, of a meme response, as a measure of relevancy/cleverness of reply.", "### Languages\n\nPrimarily English - includes some emoticons such as ┬─┬ノ(ಠ_ಠノ)", "## Dataset Structure", "### Data Instances\n\n29,375 rows", "### Data Fields\n\n- id: the Reddit alphanumeric ID for the reply\n- body: the content of the original reply\n- score: the net vote score of the original reply\n- parent_id: the Reddit alphanumeric ID for the parent\n- author: the Reddit username of the reply\n- subreddit: the Reddit community where the discussion occurred\n- parent_score: the net vote score of the parent comment\n- cleantext: the simplified reply (one of 37 classes)\n- tstamp: the timestamp of the reply\n- parent_body: the content of the original parent", "## Dataset Creation", "### Source Data\n\nReddit comments collected through URL archives for 2014.", "#### Initial Data Collection and Normalization\n\n- Removed deleted or empty comments.\n- Selected only replies which scored 1.5x higher than a parent comment, where both have a positive score.\n- Found the top/repeating phrases common to these one-ups/clapback comments.\n- Selected only replies which had one of these top/repeating phrases.\n- Made rows in PostgreSQL and output as CSV.", "## Considerations for Using the Data\n\nComments and responses in the Reddit archives and output datasets all include NSFW and otherwise toxic language and links!\n\n- You can use the subreddit and score columns to filter content.\n- Imbalanced dataset: replies 'yes' and 'no' are more common than others.\n- Overlap of labels: replies such as 'yes', 'yep', and 'yup' serve similar purposes; in other cases 'no' vs. 'nope' may be interesting.\n- Timestamps: the given timestamp may help identify trends in meme replies\n- Usernames: a username was included to identify the 'username checks out' meme, but this was not common enough in 2014, and the included username is from the reply.\n\nReddit comments are properties of Reddit and comment owners using their Terms of Service." ]
[ "TAGS\n#task_categories-text-classification #language-English #reddit #not-for-all-eyes #not-for-all-audiences #region-us \n", "# Dataset Card for reddit_one_ups_2014", "## Dataset Description\n\n- Homepage: URL", "### Dataset Summary\n\nReddit 'one-ups' or 'clapbacks' - replies which scored higher than the original comments. This task makes one-ups easier by focusing on a set of common, often meme-like replies (e.g. 'yes', 'nope', '(͡°͜ʖ͡°)').\n\nFor commentary on predictions with a previous version of the dataset, see URL\n\nFor unique / non-meme seq2seq version of this dataset, see URL\n\nReplies were selected from PushShift's archive of posts from 2014.", "### Supported Tasks\n\nText classification task: finding the common reply (out of ~37) to match the parent comment text.\n\nText prediction task: estimating the vote score, or parent:reply ratio, of a meme response, as a measure of relevancy/cleverness of reply.", "### Languages\n\nPrimarily English - includes some emoticons such as ┬─┬ノ(ಠ_ಠノ)", "## Dataset Structure", "### Data Instances\n\n29,375 rows", "### Data Fields\n\n- id: the Reddit alphanumeric ID for the reply\n- body: the content of the original reply\n- score: the net vote score of the original reply\n- parent_id: the Reddit alphanumeric ID for the parent\n- author: the Reddit username of the reply\n- subreddit: the Reddit community where the discussion occurred\n- parent_score: the net vote score of the parent comment\n- cleantext: the simplified reply (one of 37 classes)\n- tstamp: the timestamp of the reply\n- parent_body: the content of the original parent", "## Dataset Creation", "### Source Data\n\nReddit comments collected through URL archives for 2014.", "#### Initial Data Collection and Normalization\n\n- Removed deleted or empty comments.\n- Selected only replies which scored 1.5x higher than a parent comment, where both have a positive score.\n- Found the top/repeating phrases common to these one-ups/clapback comments.\n- Selected only replies which had one of these top/repeating phrases.\n- Made rows in PostgreSQL and output as CSV.", "## Considerations for Using the Data\n\nComments and responses in the Reddit archives and output datasets all include NSFW and otherwise toxic language and links!\n\n- You can use the subreddit and score columns to filter content.\n- Imbalanced dataset: replies 'yes' and 'no' are more common than others.\n- Overlap of labels: replies such as 'yes', 'yep', and 'yup' serve similar purposes; in other cases 'no' vs. 'nope' may be interesting.\n- Timestamps: the given timestamp may help identify trends in meme replies\n- Usernames: a username was included to identify the 'username checks out' meme, but this was not common enough in 2014, and the included username is from the reply.\n\nReddit comments are properties of Reddit and comment owners using their Terms of Service." ]
48a29533105e0f6bd59070cdc9ceda2723183d5c
# Dataset Card for reddit_one_ups_seq2seq_2014

## Dataset Description

- **Homepage:** https://github.com/Georeactor/reddit-one-ups

### Dataset Summary

Reddit 'one-ups' or 'clapbacks' - replies which scored higher than the original comments.

This dataset contains freeform replies, i.e. replies that do not follow the repetitive meme patterns. The IAmA subreddit was excluded to avoid an issue where its answers frequently score higher than the questions.

For commentary on predictions with a previous version of the dataset, see https://blog.goodaudience.com/can-deepclapback-learn-when-to-lol-e4a2092a8f2c

For the meme / text-classification version of this dataset, see https://huggingface.co/datasets/georeactor/reddit_one_ups_2014

Replies were selected from PushShift's archive of posts from 2014.

### Supported Tasks

seq2seq writing of replies to Reddit comments

### Languages

Primarily English - includes some emoticons such as ┬─┬ノ(ಠ_ಠノ)

## Dataset Structure

### Data Instances

19,992 rows

### Data Fields

- id: the Reddit alphanumeric ID for the reply
- body: the content of the original reply
- score: the net vote score of the original reply
- parent_id: the Reddit alphanumeric ID for the parent
- author: the Reddit username of the reply
- subreddit: the Reddit community where the discussion occurred
- parent_score: the net vote score of the parent comment
- tstamp: the timestamp of the reply
- parent_body: the content of the original parent

## Dataset Creation

### Source Data

Reddit comments collected through PushShift.io archives for 2014.

#### Initial Data Collection and Normalization

- Removed deleted or empty comments.
- Selected only replies which scored 1.5x higher than a parent comment, where both have a positive score.
- Found top/repeating phrases common to these one-ups/clapback comments; selected only replies which DID NOT have these phrases.
- Selected the top-scored ~1,667 replies from each month in 2014, avoiding /r/IAmA.
- Made rows in PostgreSQL and output as CSV.

## Considerations for Using the Data

Comments and responses in the Reddit archives and output datasets all include NSFW and otherwise toxic language and links!

You can use the subreddit and score columns to filter, and the subreddit and timestamp columns to improve predictions of reply content; see the loading sketch after this card.

Reddit comments remain the property of Reddit and of the comment owners, per Reddit's Terms of Service.
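A minimal loading-and-filtering sketch along those lines (hedged assumptions: the repository's CSV is auto-detected by `datasets`, the split is named `train`, and the score threshold and subreddit blocklist are purely illustrative placeholders):

```python
from datasets import load_dataset

ds = load_dataset("georeactor/reddit_one_ups_seq2seq_2014")

# Keep higher-scored replies and skip unwanted communities.
# The threshold and blocklist below are placeholders - tune them yourself.
blocklist = {"4chan"}
filtered = ds["train"].filter(
    lambda row: row["score"] >= 10 and row["subreddit"] not in blocklist
)

# Build (parent, reply) pairs for seq2seq training.
pairs = [(row["parent_body"], row["body"]) for row in filtered]
```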
georeactor/reddit_one_ups_seq2seq_2014
[ "language:en", "reddit", "not-for-all-eyes", "not-for-all-audiences", "region:us" ]
2022-12-29T08:25:12+00:00
{"language": "en", "tags": ["reddit", "not-for-all-eyes", "not-for-all-audiences"]}
2023-03-28T21:01:50+00:00
[]
[ "en" ]
TAGS #language-English #reddit #not-for-all-eyes #not-for-all-audiences #region-us
# Dataset Card for reddit_one_ups_seq2seq_2014 ## Dataset Description - Homepage: URL ### Dataset Summary Reddit 'one-ups' or 'clapbacks' - replies which scored higher than the original comments. This dataset chose freeform replies, which did not follow repetitive meme replies. The IAmA subreddit was excluded to avoid an issue where their answers frequently score higher than questions. For commentary on predictions with a previous version of the dataset, see URL For meme / text-classification version of this dataset, see URL Replies were selected from PushShift's archive of posts from 2014. ### Supported Tasks seq2seq writing of replies to Reddit comments ### Languages Primarily English - includes some emoticons such as ┬─┬ノ(ಠ_ಠノ) ## Dataset Structure ### Data Instances 19,992 rows ### Data Fields - id: the Reddit alphanumeric ID for the reply - body: the content of the original reply - score: the net vote score of the original reply - parent_id: the Reddit alphanumeric ID for the parent - author: the Reddit username of the reply - subreddit: the Reddit community where the discussion occurred - parent_score: the net vote score of the parent comment - tstamp: the timestamp of the reply - parent_body: the content of the original parent ## Dataset Creation ### Source Data Reddit comments collected through URL archives for 2014. #### Initial Data Collection and Normalization - Removed deleted or empty comments. - Selected only replies which scored 1.5x higher than a parent comment, where both have a positive score. - Found top/repeating phrases common to these one-ups/clapback comments; selected only replies which DID NOT have these phrases. - Selected the top-scored ~1,667 replies from each month in 2014, avoiding /r/IAmA. - Made rows in PostgreSQL and output as CSV. ## Considerations for Using the Data Comments and responses in the Reddit archives and output datasets all include NSFW and otherwise toxic language and links! You can use the subreddit and score columns to filter, and subreddit and timestamps to improve predictions of reply content. Reddit comments are properties of Reddit and comment owners using their Terms of Service.
[ "# Dataset Card for reddit_one_ups_seq2seq_2014", "## Dataset Description\n\n- Homepage: URL", "### Dataset Summary\n\nReddit 'one-ups' or 'clapbacks' - replies which scored higher than the original comments.\n\nThis dataset chose freeform replies, which did not follow repetitive meme replies. The IAmA subreddit was excluded to avoid an issue where their answers frequently score higher than questions.\n\nFor commentary on predictions with a previous version of the dataset, see URL\n\nFor meme / text-classification version of this dataset, see URL\n\nReplies were selected from PushShift's archive of posts from 2014.", "### Supported Tasks\n\nseq2seq writing of replies to Reddit comments", "### Languages\n\nPrimarily English - includes some emoticons such as ┬─┬ノ(ಠ_ಠノ)", "## Dataset Structure", "### Data Instances\n\n19,992 rows", "### Data Fields\n\n- id: the Reddit alphanumeric ID for the reply\n- body: the content of the original reply\n- score: the net vote score of the original reply\n- parent_id: the Reddit alphanumeric ID for the parent\n- author: the Reddit username of the reply\n- subreddit: the Reddit community where the discussion occurred\n- parent_score: the net vote score of the parent comment\n- tstamp: the timestamp of the reply\n- parent_body: the content of the original parent", "## Dataset Creation", "### Source Data\n\nReddit comments collected through URL archives for 2014.", "#### Initial Data Collection and Normalization\n\n- Removed deleted or empty comments.\n- Selected only replies which scored 1.5x higher than a parent comment, where both have a positive score.\n- Found top/repeating phrases common to these one-ups/clapback comments; selected only replies which DID NOT have these phrases.\n- Selected the top-scored ~1,667 replies from each month in 2014, avoiding /r/IAmA.\n- Made rows in PostgreSQL and output as CSV.", "## Considerations for Using the Data\n\nComments and responses in the Reddit archives and output datasets all include NSFW and otherwise toxic language and links!\n\nYou can use the subreddit and score columns to filter, and subreddit and timestamps to improve predictions of reply content.\n\nReddit comments are properties of Reddit and comment owners using their Terms of Service." ]
[ "TAGS\n#language-English #reddit #not-for-all-eyes #not-for-all-audiences #region-us \n", "# Dataset Card for reddit_one_ups_seq2seq_2014", "## Dataset Description\n\n- Homepage: URL", "### Dataset Summary\n\nReddit 'one-ups' or 'clapbacks' - replies which scored higher than the original comments.\n\nThis dataset chose freeform replies, which did not follow repetitive meme replies. The IAmA subreddit was excluded to avoid an issue where their answers frequently score higher than questions.\n\nFor commentary on predictions with a previous version of the dataset, see URL\n\nFor meme / text-classification version of this dataset, see URL\n\nReplies were selected from PushShift's archive of posts from 2014.", "### Supported Tasks\n\nseq2seq writing of replies to Reddit comments", "### Languages\n\nPrimarily English - includes some emoticons such as ┬─┬ノ(ಠ_ಠノ)", "## Dataset Structure", "### Data Instances\n\n19,992 rows", "### Data Fields\n\n- id: the Reddit alphanumeric ID for the reply\n- body: the content of the original reply\n- score: the net vote score of the original reply\n- parent_id: the Reddit alphanumeric ID for the parent\n- author: the Reddit username of the reply\n- subreddit: the Reddit community where the discussion occurred\n- parent_score: the net vote score of the parent comment\n- tstamp: the timestamp of the reply\n- parent_body: the content of the original parent", "## Dataset Creation", "### Source Data\n\nReddit comments collected through URL archives for 2014.", "#### Initial Data Collection and Normalization\n\n- Removed deleted or empty comments.\n- Selected only replies which scored 1.5x higher than a parent comment, where both have a positive score.\n- Found top/repeating phrases common to these one-ups/clapback comments; selected only replies which DID NOT have these phrases.\n- Selected the top-scored ~1,667 replies from each month in 2014, avoiding /r/IAmA.\n- Made rows in PostgreSQL and output as CSV.", "## Considerations for Using the Data\n\nComments and responses in the Reddit archives and output datasets all include NSFW and otherwise toxic language and links!\n\nYou can use the subreddit and score columns to filter, and subreddit and timestamps to improve predictions of reply content.\n\nReddit comments are properties of Reddit and comment owners using their Terms of Service." ]
0f769cdd5e8f1a6ab5e5cc2dfb2819c34bc319a7
COVID-19 Epidemic Weibo Emotion Dataset. The Weibo posts in this dataset were collected during the epidemic by filtering with COVID-19-related keywords, so their content relates to the pandemic.

Each Weibo post is labeled with one of the following six categories: neutral (no emotion), happy (positive), angry, sad, fear, surprise.

The training set contains 8,606 Weibo posts, the validation set contains 2,000, and the test set contains 3,000.
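A minimal loading sketch (an assumption-laden example: it presumes the repository's files are auto-loadable by `datasets`, that the splits follow the train/validation/test layout above, and that the emotion column is named `label` — inspect the loaded object before relying on these names):

```python
from collections import Counter
from datasets import load_dataset

ds = load_dataset("souljoy/COVID-19_weibo_emotion")
print(ds)  # check the actual split and column names first

# Rough class balance across the six emotion categories.
print(Counter(ds["train"]["label"]))
```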
souljoy/COVID-19_weibo_emotion
[ "region:us" ]
2022-12-29T09:05:37+00:00
{}
2022-12-29T09:42:16+00:00
[]
[]
TAGS #region-us
COVID-19 Epidemic Weibo Emotion Dataset. The Weibo posts in this dataset were collected during the epidemic by filtering with COVID-19-related keywords, so their content relates to the pandemic. Each Weibo post is labeled with one of the following six categories: neutral (no emotion), happy (positive), angry, sad, fear, surprise. The training set contains 8,606 Weibo posts, the validation set contains 2,000, and the test set contains 3,000.
[]
[ "TAGS\n#region-us \n" ]
ebfcf2c45f5a0ead919889674965e6eec8c6670a
# SICK_PL - Sentences Involving Compositional Knowledge (Polish)

### Dataset Summary

This dataset is a manually translated version of the popular English natural language inference (NLI) corpus SICK, consisting of 10,000 sentence pairs. NLI is the task of determining whether one statement (premise) semantically entails another statement (hypothesis). Such a relation can be classified as entailment (the first sentence entails the second), neutral (the first statement does not determine the truth value of the second), or contradiction (if the first sentence is true, the second is false). Additionally, the original SICK dataset contains semantic relatedness scores for the sentence pairs as real numbers ranging from 1 to 5. When translating the corpus into Polish, we tried to stay as close as possible to the original meaning. In some cases, however, two different English sentences had an identical translation in Polish. Such instances were slightly modified in order to preserve both the meaning and the syntactic differences within the sentence pair.

### Data Instances

Example instance:

```
{
  "pair_ID": "122",
  "sentence_A": "Pięcioro dzieci stoi blisko siebie , a jedno dziecko ma pistolet",
  "sentence_B": "Pięcioro dzieci stoi blisko siebie i żadne z nich nie ma pistoletu",
  "relatedness_score": 3.7,
  "entailment_judgment": "CONTRADICTION"
}
```

### Data Fields

- pair_ID: sentence pair ID
- sentence_A: sentence A
- sentence_B: sentence B
- entailment_judgment: textual entailment gold label: entailment (0), neutral (1) or contradiction (2)
- relatedness_score: semantic relatedness gold score (on a 1-5 continuous scale)

### Citation Information

```
@inproceedings{dadas-etal-2020-evaluation,
    title = "Evaluation of Sentence Representations in {P}olish",
    author = "Dadas, Slawomir and Pere{\l}kiewicz, Micha{\l} and Po{\'s}wiata, Rafa{\l}",
    booktitle = "Proceedings of the 12th Language Resources and Evaluation Conference",
    month = may,
    year = "2020",
    address = "Marseille, France",
    publisher = "European Language Resources Association",
    url = "https://aclanthology.org/2020.lrec-1.207",
    pages = "1674--1680",
    language = "English",
    ISBN = "979-10-95546-34-4",
}
```
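A short usage sketch, under the assumption that the Hub repository loads with its default configuration and exposes the fields listed above:

```python
from datasets import load_dataset

ds = load_dataset("sdadas/sick_pl")
ex = ds["train"][0]
print(ex["sentence_A"], "|", ex["sentence_B"])
print(ex["entailment_judgment"], ex["relatedness_score"])
```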
sdadas/sick_pl
[ "task_categories:text-classification", "task_ids:natural-language-inference", "task_ids:semantic-similarity-scoring", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:sick", "language:pl", "license:cc-by-nc-sa-3.0", "region:us" ]
2022-12-29T10:04:41+00:00
{"language": ["pl"], "license": ["cc-by-nc-sa-3.0"], "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "source_datasets": ["sick"], "task_categories": ["text-classification"], "task_ids": ["natural-language-inference", "semantic-similarity-scoring"], "pretty_name": "Sentences Involving Compositional Knowledge (Polish)", "dataset_info": {"features": [{"name": "pair_ID", "dtype": "string"}, {"name": "sentence_A", "dtype": "string"}, {"name": "sentence_B", "dtype": "string"}, {"name": "relatedness_score", "dtype": "float32"}, {"name": "entailment_judgment", "dtype": "string"}], "splits": [{"name": "train"}, {"name": "validation"}, {"name": "test"}]}}
2022-12-29T11:01:28+00:00
[]
[ "pl" ]
TAGS #task_categories-text-classification #task_ids-natural-language-inference #task_ids-semantic-similarity-scoring #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-sick #language-Polish #license-cc-by-nc-sa-3.0 #region-us
# SICK_PL - Sentences Involving Compositional Knowledge (Polish) ### Dataset Summary This dataset is a manually translated version of popular English natural language inference (NLI) corpus consisting of 10,000 sentence pairs. NLI is the task of determining whether one statement (premise) semantically entails other statement (hypothesis). Such relation can be classified as entailment (if the first sentence entails second sentence), neutral (the first statement does not determine the truth value of the second statement), or contradiction (if the first sentence is true, the second is false). Additionally, the original SICK dataset contains semantic relatedness scores for the sentence pairs as real numbers ranging from 1 to 5. When translating the corpus to Polish, we tried to be as close as possible to the original meaning. In some cases, however, two different English sentences had an identical translation in Polish. Such instances were slightly modified in order to preserve both the meaning and the syntactic differences in sentence pair. ### Data Instances Example instance: ### Data Fields - pair_ID: sentence pair ID - sentence_A: sentence A - sentence_B: sentence B - entailment_judgment: textual entailment gold label: entailment (0), neutral (1) or contradiction (2) - relatedness_score: semantic relatedness gold score (on a 1-5 continuous scale)
[ "# SICK_PL - Sentences Involving Compositional Knowledge (Polish)", "### Dataset Summary\n\nThis dataset is a manually translated version of popular English natural language inference (NLI) corpus consisting of 10,000 sentence pairs. NLI is the task of determining whether one statement (premise) semantically entails other statement (hypothesis). Such relation can be classified as entailment (if the first sentence entails second sentence), neutral (the first statement does not determine the truth value of the second statement), or contradiction (if the first sentence is true, the second is false). Additionally, the original SICK dataset contains semantic relatedness scores for the sentence pairs as real numbers ranging from 1 to 5. When translating the corpus to Polish, we tried to be as close as possible to the original meaning. In some cases, however, two different English sentences had an identical translation in Polish. Such instances were slightly modified in order to preserve both the meaning and the syntactic differences in sentence pair.", "### Data Instances\n\nExample instance:", "### Data Fields\n\n- pair_ID: sentence pair ID\n- sentence_A: sentence A\n- sentence_B: sentence B\n- entailment_judgment: textual entailment gold label: entailment (0), neutral (1) or contradiction (2)\n- relatedness_score: semantic relatedness gold score (on a 1-5 continuous scale)" ]
[ "TAGS\n#task_categories-text-classification #task_ids-natural-language-inference #task_ids-semantic-similarity-scoring #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-sick #language-Polish #license-cc-by-nc-sa-3.0 #region-us \n", "# SICK_PL - Sentences Involving Compositional Knowledge (Polish)", "### Dataset Summary\n\nThis dataset is a manually translated version of popular English natural language inference (NLI) corpus consisting of 10,000 sentence pairs. NLI is the task of determining whether one statement (premise) semantically entails other statement (hypothesis). Such relation can be classified as entailment (if the first sentence entails second sentence), neutral (the first statement does not determine the truth value of the second statement), or contradiction (if the first sentence is true, the second is false). Additionally, the original SICK dataset contains semantic relatedness scores for the sentence pairs as real numbers ranging from 1 to 5. When translating the corpus to Polish, we tried to be as close as possible to the original meaning. In some cases, however, two different English sentences had an identical translation in Polish. Such instances were slightly modified in order to preserve both the meaning and the syntactic differences in sentence pair.", "### Data Instances\n\nExample instance:", "### Data Fields\n\n- pair_ID: sentence pair ID\n- sentence_A: sentence A\n- sentence_B: sentence B\n- entailment_judgment: textual entailment gold label: entailment (0), neutral (1) or contradiction (2)\n- relatedness_score: semantic relatedness gold score (on a 1-5 continuous scale)" ]
073012dc81efb47b0fd66b0ab48bf06ddb62f528
# PPC - Polish Paraphrase Corpus

### Dataset Summary

Polish Paraphrase Corpus contains 7000 manually labeled sentence pairs. The dataset was divided into training, validation and test splits. The training part includes 5000 examples, while the other parts contain 1000 examples each. The main purpose of creating such a dataset was to verify how machine learning models perform in the challenging problem of paraphrase identification, where most records contain semantically overlapping parts. Technically, this is a three-class classification task, where each record can be assigned to one of the following categories:
- Exact paraphrases - Sentence pairs that convey exactly the same information. We are interested only in the semantic meaning of the sentence, therefore this category also includes sentences that are semantically identical but, for example, have different emotional emphasis.
- Close paraphrases - Sentence pairs with similar semantic meaning. In this category we include all pairs which contain the same information, but in addition to it there may be other semantically non-overlapping parts. This category also contains context-dependent paraphrases - sentence pairs that may have the same meaning in some contexts but are different in others.
- Non-paraphrases - All other cases, including contradictory sentences and semantically unrelated sentences.

The corpus contains 2911, 1297, and 2792 examples for the above three categories, respectively. The process of annotating the dataset was preceded by an automated generation of candidate pairs, which were then manually labeled. We experimented with two popular techniques for generating possible paraphrases: backtranslation with a set of neural machine translation models, and paraphrase mining using a pre-trained multilingual sentence encoder. The extracted sentence pairs are drawn from different data sources: Tatoeba, Polish news articles, Wikipedia, and the Polish version of the SICK dataset. Since most of the sentence pairs obtained in this way fell into the first two categories, in order to balance the dataset, some of the examples were manually modified to convey different information. In this way, even negative examples often have high semantic overlap, making this problem difficult for machine learning models.

### Data Instances

Example instance:

```
{
  "sentence_A": "Libia: lotnisko w w Trypolisie ostrzelane rakietami.",
  "sentence_B": "Jedyne lotnisko w stolicy Libii - Trypolisie zostało w nocy z wtorku na środę ostrzelane rakietami.",
  "label": "2"
}
```

### Data Fields

- sentence_A: first sentence text
- sentence_B: second sentence text
- label: label identifier corresponding to one of three categories

### Citation Information

```
@inproceedings{9945218,
    author={Dadas, S{\l}awomir},
    booktitle={2022 IEEE International Conference on Systems, Man, and Cybernetics (SMC)},
    title={Training Effective Neural Sentence Encoders from Automatically Mined Paraphrases},
    year={2022},
    volume={},
    number={},
    pages={371-378},
    doi={10.1109/SMC53654.2022.9945218}
}
```
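A loading sketch (assumptions: the default configuration loads from the Hub and `label` is a `ClassLabel` feature, so its human-readable names can be recovered from the schema):

```python
from datasets import load_dataset

ds = load_dataset("sdadas/ppc")
names = ds["train"].features["label"].names  # assumes a ClassLabel feature
ex = ds["train"][0]
print(ex["sentence_A"])
print(ex["sentence_B"])
print("category:", names[ex["label"]])
```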
sdadas/ppc
[ "task_categories:text-classification", "task_ids:semantic-similarity-classification", "multilinguality:monolingual", "size_categories:1K<n<10K", "language:pl", "license:cc-by-nc-sa-4.0", "region:us" ]
2022-12-29T10:11:25+00:00
{"language": ["pl"], "license": ["cc-by-nc-sa-4.0"], "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "task_categories": ["text-classification"], "task_ids": ["semantic-similarity-classification"], "pretty_name": "Polish Paraphrase Corpus", "dataset_info": {"features": [{"name": "sentence_A", "dtype": "string"}, {"name": "sentence_B", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "not used", "1": "exact paraphrases", "2": "similar sentences", "3": "non-paraphrases"}}}}], "splits": [{"name": "train", "num_bytes": 539121, "num_examples": 5000}, {"name": "validation", "num_bytes": 107010, "num_examples": 1000}, {"name": "test", "num_bytes": 106515, "num_examples": 1000}]}}
2024-01-19T06:11:43+00:00
[]
[ "pl" ]
TAGS #task_categories-text-classification #task_ids-semantic-similarity-classification #multilinguality-monolingual #size_categories-1K<n<10K #language-Polish #license-cc-by-nc-sa-4.0 #region-us
# PPC - Polish Paraphrase Corpus ### Dataset Summary Polish Paraphrase Corpus contains 7000 manually labeled sentence pairs. The dataset was divided into training, validation and test splits. The training part includes 5000 examples, while the other parts contain 1000 examples each. The main purpose of creating such a dataset was to verify how machine learning models perform in the challenging problem of paraphrase identification, where most records contain semantically overlapping parts. Technically, this is a three-class classification task, where each record can be assigned to one of the following categories: - Exact paraphrases - Sentence pairs that convey exactly the same information. We are interested only in the semantic meaning of the sentence, therefore this category also includes sentences that are semantically identical but, for example, have different emotional emphasis. - Close paraphrases - Sentence pairs with similar semantic meaning. In this category we include all pairs which contain the same information, but in addition to it there may be other semantically non-overlapping parts. This category also contains context-dependent paraphrases - sentence pairs that may have the same meaning in some contexts but are different in others. - Non-paraphrases - All other cases, including contradictory sentences and semantically unrelated sentences. The corpus contains 2911, 1297, and 2792 examples for the above three categories, respectively. The process of annotating the dataset was preceded by an automated generation of candidate pairs, which were then manually labeled. We experimented with two popular techniques of generating possible paraphrases: backtranslation with a set of neural machine translation models and paraphrase mining using a pre-trained multilingual sentence encoder. The extracted sentence pairs are drawn from different data sources: Taboeba, Polish news articles, Wikipedia and Polish version of SICK dataset. Since most of the sentence pairs obtained in this way fell into the first two categories, in order to balance the dataset, some of the examples were manually modified to convey different information. In this way, even negative examples often have high semantic overlap, making this problem difficult for machine learning models. ### Data Instances Example instance: ### Data Fields - sentence_A: first sentence text - sentence_B: second sentence text - label: label identifier corresponding to one of three categories
[ "# PPC - Polish Paraphrase Corpus", "### Dataset Summary\n\nPolish Paraphrase Corpus contains 7000 manually labeled sentence pairs. The dataset was divided into training, validation and test splits. The training part includes 5000 examples, while the other parts contain 1000 examples each. The main purpose of creating such a dataset was to verify how machine learning models perform in the challenging problem of paraphrase identification, where most records contain semantically overlapping parts. Technically, this is a three-class classification task, where each record can be assigned to one of the following categories:\n- Exact paraphrases - Sentence pairs that convey exactly the same information. We are interested only in the semantic meaning of the sentence, therefore this category also includes sentences that are semantically identical but, for example, have different emotional emphasis.\n- Close paraphrases - Sentence pairs with similar semantic meaning. In this category we include all pairs which contain the same information, but in addition to it there may be other semantically non-overlapping parts. This category also contains context-dependent paraphrases - sentence pairs that may have the same meaning in some contexts but are different in others.\n- Non-paraphrases - All other cases, including contradictory sentences and semantically unrelated sentences.\n\nThe corpus contains 2911, 1297, and 2792 examples for the above three categories, respectively. The process of annotating the dataset was preceded by an automated generation of candidate pairs, which were then manually labeled. We experimented with two popular techniques of generating possible paraphrases: backtranslation with a set of neural machine translation models and paraphrase mining using a pre-trained multilingual sentence encoder. The extracted sentence pairs are drawn from different data sources: Taboeba, Polish news articles, Wikipedia and Polish version of SICK dataset. Since most of the sentence pairs obtained in this way fell into the first two categories, in order to balance the dataset, some of the examples were manually modified to convey different information. In this way, even negative examples often have high semantic overlap, making this problem difficult for machine learning models.", "### Data Instances\n\nExample instance:", "### Data Fields\n\n- sentence_A: first sentence text\n- sentence_B: second sentence text\n- label: label identifier corresponding to one of three categories" ]
[ "TAGS\n#task_categories-text-classification #task_ids-semantic-similarity-classification #multilinguality-monolingual #size_categories-1K<n<10K #language-Polish #license-cc-by-nc-sa-4.0 #region-us \n", "# PPC - Polish Paraphrase Corpus", "### Dataset Summary\n\nPolish Paraphrase Corpus contains 7000 manually labeled sentence pairs. The dataset was divided into training, validation and test splits. The training part includes 5000 examples, while the other parts contain 1000 examples each. The main purpose of creating such a dataset was to verify how machine learning models perform in the challenging problem of paraphrase identification, where most records contain semantically overlapping parts. Technically, this is a three-class classification task, where each record can be assigned to one of the following categories:\n- Exact paraphrases - Sentence pairs that convey exactly the same information. We are interested only in the semantic meaning of the sentence, therefore this category also includes sentences that are semantically identical but, for example, have different emotional emphasis.\n- Close paraphrases - Sentence pairs with similar semantic meaning. In this category we include all pairs which contain the same information, but in addition to it there may be other semantically non-overlapping parts. This category also contains context-dependent paraphrases - sentence pairs that may have the same meaning in some contexts but are different in others.\n- Non-paraphrases - All other cases, including contradictory sentences and semantically unrelated sentences.\n\nThe corpus contains 2911, 1297, and 2792 examples for the above three categories, respectively. The process of annotating the dataset was preceded by an automated generation of candidate pairs, which were then manually labeled. We experimented with two popular techniques of generating possible paraphrases: backtranslation with a set of neural machine translation models and paraphrase mining using a pre-trained multilingual sentence encoder. The extracted sentence pairs are drawn from different data sources: Taboeba, Polish news articles, Wikipedia and Polish version of SICK dataset. Since most of the sentence pairs obtained in this way fell into the first two categories, in order to balance the dataset, some of the examples were manually modified to convey different information. In this way, even negative examples often have high semantic overlap, making this problem difficult for machine learning models.", "### Data Instances\n\nExample instance:", "### Data Fields\n\n- sentence_A: first sentence text\n- sentence_B: second sentence text\n- label: label identifier corresponding to one of three categories" ]
82ddf9c0d06ffc982aeccf2473b7ce31f2167adf
# 8TAGS

### Dataset Summary

A Polish topic classification dataset consisting of headlines from social media posts. It contains about 50,000 sentences annotated with 8 topic labels: film, history, food, medicine, motorization, work, sport and technology. This dataset was created automatically by extracting sentences from headlines and short descriptions of articles posted on the Polish social networking site **wykop.pl**. The service allows users to annotate articles with one or more tags (categories). The dataset represents a selection of article sentences from 8 popular categories. The resulting corpus contains cleaned, tokenized sentences that are unambiguous (tagged with only one of the selected categories) and longer than 30 characters.

### Data Instances

Example instance:

```
{
  "sentence": "Kierowca był nieco zdziwiony że podróżując sporo ponad 200 km / h zatrzymali go policjanci.",
  "label": "4"
}
```

### Data Fields

- sentence: sentence text
- label: label identifier corresponding to one of 8 topics

### Citation Information

```
@inproceedings{dadas-etal-2020-evaluation,
    title = "Evaluation of Sentence Representations in {P}olish",
    author = "Dadas, Slawomir and Pere{\l}kiewicz, Micha{\l} and Po{\'s}wiata, Rafa{\l}",
    booktitle = "Proceedings of the 12th Language Resources and Evaluation Conference",
    month = may,
    year = "2020",
    address = "Marseille, France",
    publisher = "European Language Resources Association",
    url = "https://aclanthology.org/2020.lrec-1.207",
    pages = "1674--1680",
    language = "English",
    ISBN = "979-10-95546-34-4",
}
```
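A sketch for checking the topic distribution (same hedged assumptions as above: the default configuration loads from the Hub and `label` is a `ClassLabel` feature):

```python
from collections import Counter
from datasets import load_dataset

ds = load_dataset("sdadas/8tags")
names = ds["train"].features["label"].names  # film, history, food, ...
print(Counter(names[label] for label in ds["train"]["label"]))
```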
sdadas/8tags
[ "task_categories:text-classification", "task_ids:topic-classification", "task_ids:multi-class-classification", "multilinguality:monolingual", "size_categories:10K<n<100K", "language:pl", "license:cc-by-nc-sa-4.0", "region:us" ]
2022-12-29T10:19:38+00:00
{"language": ["pl"], "license": ["cc-by-nc-sa-4.0"], "multilinguality": ["monolingual"], "size_categories": ["10K<n<100K"], "task_categories": ["text-classification"], "task_ids": ["topic-classification", "multi-class-classification"], "pretty_name": "8TAGS", "dataset_info": {"features": [{"name": "sentence", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "film", "1": "history", "2": "food", "3": "medicine", "4": "motorization", "5": "work", "6": "sport", "7": "technology"}}}}], "splits": [{"name": "train", "num_bytes": 3765325, "num_examples": 40001}, {"name": "validation", "num_bytes": 467676, "num_examples": 5000}, {"name": "test", "num_bytes": 416311, "num_examples": 4372}]}}
2024-01-19T06:10:23+00:00
[]
[ "pl" ]
TAGS #task_categories-text-classification #task_ids-topic-classification #task_ids-multi-class-classification #multilinguality-monolingual #size_categories-10K<n<100K #language-Polish #license-cc-by-nc-sa-4.0 #region-us
# 8TAGS ### Dataset Summary A Polish topic classification dataset consisting of headlines from social media posts. It contains about 50,000 sentences annotated with 8 topic labels: film, history, food, medicine, motorization, work, sport and technology. This dataset was created automatically by extracting sentences from headlines and short descriptions of articles posted on Polish social networking site URL. The service allows users to annotate articles with one or more tags (categories). Dataset represents a selection of article sentences from 8 popular categories. The resulting corpus contains cleaned and tokenized, unambiguous sentences (tagged with only one of the selected categories), and longer than 30 characters. ### Data Instances Example instance: ### Data Fields - sentence: sentence text - label: label identifier corresponding to one of 8 topics
[ "# 8TAGS", "### Dataset Summary\n\nA Polish topic classification dataset consisting of headlines from social media posts. It contains about 50,000 sentences annotated with 8 topic labels: film, history, food, medicine, motorization, work, sport and technology. This dataset was created automatically by extracting sentences from headlines and short descriptions of articles posted on Polish social networking site URL. The service allows users to annotate articles with one or more tags (categories). Dataset represents a selection of article sentences from 8 popular categories. The resulting corpus contains cleaned and tokenized, unambiguous sentences (tagged with only one of the selected categories), and longer than 30 characters.", "### Data Instances\n\nExample instance:", "### Data Fields\n\n- sentence: sentence text\n- label: label identifier corresponding to one of 8 topics" ]
[ "TAGS\n#task_categories-text-classification #task_ids-topic-classification #task_ids-multi-class-classification #multilinguality-monolingual #size_categories-10K<n<100K #language-Polish #license-cc-by-nc-sa-4.0 #region-us \n", "# 8TAGS", "### Dataset Summary\n\nA Polish topic classification dataset consisting of headlines from social media posts. It contains about 50,000 sentences annotated with 8 topic labels: film, history, food, medicine, motorization, work, sport and technology. This dataset was created automatically by extracting sentences from headlines and short descriptions of articles posted on Polish social networking site URL. The service allows users to annotate articles with one or more tags (categories). Dataset represents a selection of article sentences from 8 popular categories. The resulting corpus contains cleaned and tokenized, unambiguous sentences (tagged with only one of the selected categories), and longer than 30 characters.", "### Data Instances\n\nExample instance:", "### Data Fields\n\n- sentence: sentence text\n- label: label identifier corresponding to one of 8 topics" ]
add7cab1932a3b0e6d9347285218dd0fe98ef1aa
# Dataset Card for "speech2text" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
qbaro/speech2text
[ "region:us" ]
2022-12-29T10:54:15+00:00
{"dataset_info": {"features": [{"name": "sentence", "dtype": "string"}, {"name": "audio", "struct": [{"name": "array", "sequence": "float32"}, {"name": "path", "dtype": "string"}, {"name": "sampling_rate", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 1357744185, "num_examples": 1057}, {"name": "test", "num_bytes": 589556544, "num_examples": 464}], "download_size": 1949997840, "dataset_size": 1947300729}}
2022-12-30T08:47:33+00:00
[]
[]
TAGS #region-us
# Dataset Card for "speech2text" More Information needed
[ "# Dataset Card for \"speech2text\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"speech2text\"\n\nMore Information needed" ]
e7d9481ad419f20c4b87c05859bc0be17c923e74
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Prgckwb/jiro-style-ramen
[ "region:us" ]
2022-12-29T10:58:35+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 978393.0, "num_examples": 31}], "download_size": 978665, "dataset_size": 978393.0}}
2022-12-29T13:36:40+00:00
[]
[]
TAGS #region-us
More Information needed
[]
[ "TAGS\n#region-us \n" ]
9a3809d0d5e8b975b2ed489dc7be07a97a09de05
# Dataset Card for "results_valid_20rows_2022-12-29" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
joddy/results_valid_20rows_2022-12-29
[ "region:us" ]
2022-12-29T12:49:17+00:00
{"dataset_info": {"features": [{"name": "pixel_values", "dtype": "image"}, {"name": "resolution", "dtype": "int64"}, {"name": "attributes_loc", "dtype": {"class_label": {"names": {"0": "upper left", "1": "upper right", "2": "lower left", "3": "lower right"}}}}, {"name": "NL_text", "dtype": "string"}, {"name": "bbox_text", "dtype": "string"}, {"name": "center_text", "dtype": "string"}, {"name": "normed_object_bbox", "sequence": "int64"}, {"name": "without_pos_stable-diffusion-v1-5", "dtype": "image"}, {"name": "NL_stable-diffusion-v1-5", "dtype": "image"}, {"name": "bbox_stable-diffusion-v1-5", "dtype": "image"}, {"name": "center_stable-diffusion-v1-5", "dtype": "image"}, {"name": "without_pos_NL_text_TextENC_off", "dtype": "image"}, {"name": "NL_text_TextENC_off", "dtype": "image"}, {"name": "without_pos_bbox_text_TextENC_off", "dtype": "image"}, {"name": "bbox_text_TextENC_off", "dtype": "image"}, {"name": "without_pos_center_text_TextENC_off", "dtype": "image"}, {"name": "center_text_TextENC_off", "dtype": "image"}, {"name": "without_pos_NL_text_TextENC_on", "dtype": "image"}, {"name": "NL_text_TextENC_on", "dtype": "image"}, {"name": "without_pos_bbox_text_TextENC_on", "dtype": "image"}, {"name": "bbox_text_TextENC_on", "dtype": "image"}, {"name": "without_pos_center_text_TextENC_on", "dtype": "image"}, {"name": "center_text_TextENC_on", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 160413036.0, "num_examples": 20}], "download_size": 160434518, "dataset_size": 160413036.0}}
2022-12-29T13:04:15+00:00
[]
[]
TAGS #region-us
# Dataset Card for "results_valid_20rows_2022-12-29" More Information needed
[ "# Dataset Card for \"results_valid_20rows_2022-12-29\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"results_valid_20rows_2022-12-29\"\n\nMore Information needed" ]
c6beef35c66617554fb99eeb88bef3bf9141dfe6
# Dataset Card for Pochita Dataset

## Dataset created for fine-tuning a Stable Diffusion model at the HuggingFace Diffusion Hackathon
Consists of 19 photos of a Pochita plushie. (ye, he's cute)
Arch4ngel/pochita
[ "task_categories:summarization", "task_ids:news-articles-summarization", "annotations_creators:no-annotation", "language_creators:found", "size_categories:100K<n<1M", "source_datasets:original", "language:en", "license:cc", "region:us" ]
2022-12-29T13:42:14+00:00
{"annotations_creators": ["no-annotation"], "language_creators": ["found"], "language": ["en"], "license": "cc", "size_categories": ["100K<n<1M"], "source_datasets": ["original"], "task_categories": ["summarization"], "task_ids": ["news-articles-summarization"], "dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 73272140.0, "num_examples": 19}], "download_size": 73099117, "dataset_size": 73272140.0}}
2022-12-29T14:00:15+00:00
[]
[ "en" ]
TAGS #task_categories-summarization #task_ids-news-articles-summarization #annotations_creators-no-annotation #language_creators-found #size_categories-100K<n<1M #source_datasets-original #language-English #license-cc #region-us
# Dataset Card for Pochita Dataset ## Dataset created for fine-tuning Stable Diffusion model on HuggingFace Diffusion Hackaton Consists of 19 photos of Pochita plushie. (ye, he's cute)
[ "# Dataset Card for Pochita Dataset", "## Dataset created for fine-tuning Stable Diffusion model on HuggingFace Diffusion Hackaton\nConsists of 19 photos of Pochita plushie. (ye, he's cute)" ]
[ "TAGS\n#task_categories-summarization #task_ids-news-articles-summarization #annotations_creators-no-annotation #language_creators-found #size_categories-100K<n<1M #source_datasets-original #language-English #license-cc #region-us \n", "# Dataset Card for Pochita Dataset", "## Dataset created for fine-tuning Stable Diffusion model on HuggingFace Diffusion Hackaton\nConsists of 19 photos of Pochita plushie. (ye, he's cute)" ]
ec5e31de25f64ea06c378eaa6b982a28301e60f9
The original dataset is in French (https://www.kaggle.com/datasets/fedi1996/insurance-reviews-france)
---
The dataset was translated into Dutch using the Python library googletrans==3.1.0a0
---
The sentiment labels are 1 (POS) and -1 (NEG)
---
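A sketch for remapping the -1/1 labels to the 0/1 convention most classifiers expect (hedged: it assumes the repository's files load automatically via `datasets` and that the sentiment column is named `label` — verify both before use):

```python
from datasets import load_dataset

ds = load_dataset("ebrigham/NL_insurance_reviews_sentiment")

# -1 (NEG) -> 0, 1 (POS) -> 1
ds = ds.map(lambda row: {"label": 0 if row["label"] == -1 else 1})
```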
ebrigham/NL_insurance_reviews_sentiment
[ "task_categories:text-classification", "size_categories:1K<n<10K", "language:nl", "region:us" ]
2022-12-29T13:59:07+00:00
{"language": ["nl"], "size_categories": ["1K<n<10K"], "task_categories": ["text-classification"]}
2023-10-23T08:18:16+00:00
[]
[ "nl" ]
TAGS #task_categories-text-classification #size_categories-1K<n<10K #language-Dutch #region-us
The original dataset is in French (URL --- Dataset was translated to the Dutch language using the Google translate python library googletrans==3.1.0a0 --- The sentiment labels are 1 (POS) and -1 (NEG) ---
[]
[ "TAGS\n#task_categories-text-classification #size_categories-1K<n<10K #language-Dutch #region-us \n" ]
7bccfdcabf8dc14fb27bab2a508818fec0128769
# Dataset Card for "bankingapp_sentiment" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
dvilasuero/bankingapp_sentiment
[ "region:us" ]
2022-12-29T15:40:21+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "inputs", "struct": [{"name": "text", "dtype": "string"}]}, {"name": "prediction", "dtype": "null"}, {"name": "prediction_agent", "dtype": "null"}, {"name": "annotation", "dtype": "string"}, {"name": "annotation_agent", "dtype": "string"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "dtype": "null"}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "null"}, {"name": "metrics", "struct": [{"name": "text_length", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 163514, "num_examples": 1000}], "download_size": 79893, "dataset_size": 163514}}
2022-12-29T15:40:35+00:00
[]
[]
TAGS #region-us
# Dataset Card for "bankingapp_sentiment" More Information needed
[ "# Dataset Card for \"bankingapp_sentiment\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"bankingapp_sentiment\"\n\nMore Information needed" ]
788f9b660eb2ff16caf65f01f22df66cf6b85880
--- annotations_creators: - other language: [] language_creators: - other license: [] multilinguality: [] pretty_name: A dataset containing 17 images of a plush reindeer size_categories: [] source_datasets: [] tags: [] task_categories: - other task_ids: [] ---
Likalto4/Rena_dataset
[ "region:us" ]
2022-12-29T15:49:54+00:00
{}
2022-12-29T16:04:31+00:00
[]
[]
TAGS #region-us
--- annotations_creators: - other language: [] language_creators: - other license: [] multilinguality: [] pretty_name: A dataset containing 17 images of a plush reindeer size_categories: [] source_datasets: [] tags: [] task_categories: - other task_ids: [] ---
[]
[ "TAGS\n#region-us \n" ]
412bd606998b3be099f39922211b270040ae2e30
![](https://huggingface.co/datasets/TrpFrog/trpfrog-icons/resolve/main/logo.jpg)

# trpfrog-icons Dataset

This is a dataset of [TrpFrog](https://trpfrog.net)'s icons. By the way, what do you use this for? 🤔

## How to use

```py
from datasets import load_dataset

dataset = load_dataset("TrpFrog/trpfrog-icons")
```

```py
# print all data
for data in dataset["train"]:
    print(data)

# keep only the green icons (label == 0), dropping the rest
dataset = dataset.filter(lambda x: x["label"] == 0)
```

## License

MIT License
TrpFrog/trpfrog-icons
[ "license:mit", "region:us" ]
2022-12-29T17:00:46+00:00
{"license": "mit", "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "green", "1": "others"}}}}], "splits": [{"name": "train", "num_bytes": 3106612.0, "num_examples": 50}], "download_size": 2598455, "dataset_size": 3106612.0}}
2022-12-30T04:37:09+00:00
[]
[]
TAGS #license-mit #region-us
![](URL # trpfrog-icons Dataset This is a dataset of TrpFrog's icons. By the way, what do you use this for? ## How to use ## License MIT License
[ "# trpfrog-icons Dataset\n\nThis is a dataset of TrpFrog's icons. By the way, what do you use this for?", "## How to use", "## License\n\nMIT License" ]
[ "TAGS\n#license-mit #region-us \n", "# trpfrog-icons Dataset\n\nThis is a dataset of TrpFrog's icons. By the way, what do you use this for?", "## How to use", "## License\n\nMIT License" ]
81a394970aa0aab17c354b4ffbcbf5539dc00397
All images of all ratings from e621.net, as of the date this dump was generated, at sample resolution where possible. This includes the following additional metadata:

- post ID
- created at
- updated at
- tags (stored as IDs you can cross-reference from an e621 tags dump)
- rating (0 = safe, 1 = questionable, 2 = explicit)
- favorite count
- comment count
- up score
- down score

Note that this dataset excludes images that are, at the time of scraping:

- pending
- tagged with tags indicating that it is illegal to possess in most jurisdictions

Some files in this dataset may be corrupted. Make sure you're able to handle invalid images in your processing code or you're going to have a bad time!
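For the corrupted-file caveat above, a defensive decoding helper along these lines is one way to cope (a sketch, not part of the dataset tooling; it assumes you are handling raw image bytes with Pillow):

```python
from io import BytesIO
from PIL import Image

def safe_decode(raw: bytes):
    """Return a fully decoded RGB image, or None if the file is corrupted."""
    try:
        img = Image.open(BytesIO(raw))
        img.load()  # force a full decode so truncated files fail here, not later
        return img.convert("RGB")
    except Exception:
        return None
```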
thruway/e621_samples_2022-12-28
[ "region:us" ]
2022-12-29T18:29:17+00:00
{"extra_gated_heading": "Terms of use", "extra_gated_button_content": "Acknowledge", "extra_gated_fields": {"I will use this dataset in a way that does not hinder the ability of artists to make a living from their work": "checkbox", "I acknowledge that the content contained within this dataset is the intellectual property of the artists who created it": "checkbox", "If I should wish to use this dataset for any commercial purposes, it is my responsibility to obtain the appropriate permissions from the copyright holders": "checkbox"}, "dataset_info": {"features": [{"name": "id", "dtype": "uint32"}, {"name": "created_at", "dtype": "timestamp[us]"}, {"name": "updated_at", "dtype": "timestamp[us]"}, {"name": "image", "dtype": "image"}, {"name": "tags", "sequence": "uint32"}, {"name": "rating", "dtype": "uint8"}, {"name": "fav_count", "dtype": "uint32"}, {"name": "comment_count", "dtype": "uint32"}, {"name": "up_score", "dtype": "int32"}, {"name": "down_score", "dtype": "int32"}], "splits": [{"name": "train", "num_bytes": 384353755927.75, "num_examples": 3065570}], "download_size": 382556768725, "dataset_size": 384353755927.75}, "viewer": false}
2022-12-30T22:02:16+00:00
[]
[]
TAGS #region-us
All images of all ratings from URL from the date it was generated, at sample resolution where possible. This includes the following additional metadata: - post ID - created at - updated at - tags (stored as IDs you can cross-reference from an e621 tags dump) - rating (0 = safe, 1 = questionable, 2 = explicit) - favorite count - comment count - up score - down score Note that this dataset excludes images that are, at the time of scraping: - pending - tagged with tags indicating that it is illegal to possess in most jurisdictions Some files in this dataset may be corrupted. Make sure you're able to handle invalid images in your processing code or you're going to have bad time!
[]
[ "TAGS\n#region-us \n" ]
aa052d3aff7a0398d227d7f5a3ac1699007bb1df
# Dataset Card for "dreambooth-hackathon-images-nendoroid" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
milyiyo/dreambooth-hackathon-images-nendoroid
[ "region:us" ]
2022-12-29T18:53:19+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 795179.0, "num_examples": 28}], "download_size": 795969, "dataset_size": 795179.0}}
2022-12-29T18:53:23+00:00
[]
[]
TAGS #region-us
# Dataset Card for "dreambooth-hackathon-images-nendoroid" More Information needed
[ "# Dataset Card for \"dreambooth-hackathon-images-nendoroid\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"dreambooth-hackathon-images-nendoroid\"\n\nMore Information needed" ]
a19eace121442bce60da9f5036dc16bf9f2f6fa6
<div align="center">
  <img width="640" alt="keremberke/construction-safety-object-detection" src="https://huggingface.co/datasets/keremberke/construction-safety-object-detection/resolve/main/thumbnail.jpg">
</div>

### Dataset Labels

```
['barricade', 'dumpster', 'excavators', 'gloves', 'hardhat', 'mask', 'no-hardhat', 'no-mask', 'no-safety vest', 'person', 'safety net', 'safety shoes', 'safety vest', 'dump truck', 'mini-van', 'truck', 'wheel loader']
```

### Number of Images

```json
{"train": 307, "valid": 57, "test": 34}
```

### How to Use

- Install [datasets](https://pypi.org/project/datasets/):

```bash
pip install datasets
```

- Load the dataset:

```python
from datasets import load_dataset

ds = load_dataset("keremberke/construction-safety-object-detection", name="full")
example = ds['train'][0]
```

### Roboflow Dataset Page
[https://universe.roboflow.com/roboflow-universe-projects/construction-site-safety/dataset/1](https://universe.roboflow.com/roboflow-universe-projects/construction-site-safety/dataset/1?ref=roboflow2huggingface)

### Citation

```
@misc{ construction-site-safety_dataset,
    title = { Construction Site Safety Dataset },
    type = { Open Source Dataset },
    author = { Roboflow Universe Projects },
    howpublished = { \\url{ https://universe.roboflow.com/roboflow-universe-projects/construction-site-safety } },
    url = { https://universe.roboflow.com/roboflow-universe-projects/construction-site-safety },
    journal = { Roboflow Universe },
    publisher = { Roboflow },
    year = { 2023 },
    month = { jan },
    note = { visited on 2023-01-26 },
}
```

### License
CC BY 4.0

### Dataset Summary
This dataset was exported via roboflow.com on December 29, 2022 at 11:22 AM GMT

Roboflow is an end-to-end computer vision platform that helps you
* collaborate with your team on computer vision projects
* collect & organize images
* understand unstructured image data
* annotate, and create datasets
* export, train, and deploy computer vision models
* use active learning to improve your dataset over time

It includes 398 images.
Construction objects are annotated in COCO format.

The following pre-processing was applied to each image:
* Auto-orientation of pixel data (with EXIF-orientation stripping)

No image augmentation techniques were applied.
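Since the boxes follow the COCO convention, it is worth inspecting the feature schema before consuming annotations (continuing from the `ds` loaded in the How to Use section above; the `objects` key below is an assumption, so confirm it against the printed features):

```python
print(ds["train"].features)    # confirm the annotation schema
example = ds["train"][0]
print(example.get("objects"))  # COCO-style boxes/categories, if present
```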
keremberke/construction-safety-object-detection
[ "task_categories:object-detection", "roboflow", "roboflow2huggingface", "Construction", "Logistics", "Utilities", "Damage Risk", "Ppe", "Manufacturing", "Assembly Line", "Warehouse", "Factory", "region:us" ]
2022-12-29T20:12:45+00:00
{"task_categories": ["object-detection"], "tags": ["roboflow", "roboflow2huggingface", "Construction", "Logistics", "Utilities", "Damage Risk", "Ppe", "Construction", "Utilities", "Manufacturing", "Logistics", "Ppe", "Assembly Line", "Warehouse", "Factory"]}
2023-01-27T13:36:19+00:00
[]
[]
TAGS #task_categories-object-detection #roboflow #roboflow2huggingface #Construction #Logistics #Utilities #Damage Risk #Ppe #Manufacturing #Assembly Line #Warehouse #Factory #region-us
<div align="center"> <img width="640" alt="keremberke/construction-safety-object-detection" src="URL </div> ### Dataset Labels ### Number of Images ### How to Use - Install datasets: - Load the dataset: ### Roboflow Dataset Page URL ### License CC BY 4.0 ### Dataset Summary This dataset was exported via URL on December 29, 2022 at 11:22 AM GMT Roboflow is an end-to-end computer vision platform that helps you * collaborate with your team on computer vision projects * collect & organize images * understand unstructured image data * annotate, and create datasets * export, train, and deploy computer vision models * use active learning to improve your dataset over time It includes 398 images. Construction are annotated in COCO format. The following pre-processing was applied to each image: * Auto-orientation of pixel data (with EXIF-orientation stripping) No image augmentation techniques were applied.
[ "### Dataset Labels", "### Number of Images", "### How to Use\n\n- Install datasets:\n\n\n\n- Load the dataset:", "### Roboflow Dataset Page\nURL", "### License\nCC BY 4.0", "### Dataset Summary\nThis dataset was exported via URL on December 29, 2022 at 11:22 AM GMT\n\nRoboflow is an end-to-end computer vision platform that helps you\n* collaborate with your team on computer vision projects\n* collect & organize images\n* understand unstructured image data\n* annotate, and create datasets\n* export, train, and deploy computer vision models\n* use active learning to improve your dataset over time\n\nIt includes 398 images.\nConstruction are annotated in COCO format.\n\nThe following pre-processing was applied to each image:\n* Auto-orientation of pixel data (with EXIF-orientation stripping)\n\nNo image augmentation techniques were applied." ]
[ "TAGS\n#task_categories-object-detection #roboflow #roboflow2huggingface #Construction #Logistics #Utilities #Damage Risk #Ppe #Manufacturing #Assembly Line #Warehouse #Factory #region-us \n", "### Dataset Labels", "### Number of Images", "### How to Use\n\n- Install datasets:\n\n\n\n- Load the dataset:", "### Roboflow Dataset Page\nURL", "### License\nCC BY 4.0", "### Dataset Summary\nThis dataset was exported via URL on December 29, 2022 at 11:22 AM GMT\n\nRoboflow is an end-to-end computer vision platform that helps you\n* collaborate with your team on computer vision projects\n* collect & organize images\n* understand unstructured image data\n* annotate, and create datasets\n* export, train, and deploy computer vision models\n* use active learning to improve your dataset over time\n\nIt includes 398 images.\nConstruction are annotated in COCO format.\n\nThe following pre-processing was applied to each image:\n* Auto-orientation of pixel data (with EXIF-orientation stripping)\n\nNo image augmentation techniques were applied." ]
caef5b500f6ecbf5c6d9f73eb393bdf6966eca7d
# Dataset Card for "diffusion_db_10k_processed" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
bggmyfuture-ai/diffusion_db_10k_processed
[ "region:us" ]
2022-12-29T21:23:33+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "label_txt", "dtype": "string"}, {"name": "topic_keywords", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 2572020, "num_examples": 8481}], "download_size": 570847, "dataset_size": 2572020}}
2022-12-31T02:31:28+00:00
[]
[]
TAGS #region-us
# Dataset Card for "diffusion_db_10k_processed" More Information needed
[ "# Dataset Card for \"diffusion_db_10k_processed\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"diffusion_db_10k_processed\"\n\nMore Information needed" ]
43e04f2c5dbaadb2b98cf678e17b979bf198db2e
LINK TO THE ITEM :: https://www.wakefit.co/coffee-tables/sheesham-wood-coffee-table-jackson/WSCFTJACKSONR1
alpha-proj/wakefit_center_table
[ "region:us" ]
2022-12-29T22:00:46+00:00
{}
2022-12-29T22:12:15+00:00
[]
[]
TAGS #region-us
LINK TO THE ITEM :: URL
[]
[ "TAGS\n#region-us \n" ]
4e9b2f7204e129f8fa45baa0c9bdb1e7216dce96
# Dataset Card for "shoebill-hackathon-images" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
fnavales/shoebill-hackathon-images
[ "region:us" ]
2022-12-29T22:47:44+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 272566.0, "num_examples": 18}], "download_size": 264844, "dataset_size": 272566.0}}
2022-12-29T22:47:48+00:00
[]
[]
TAGS #region-us
# Dataset Card for "shoebill-hackathon-images" More Information needed
[ "# Dataset Card for \"shoebill-hackathon-images\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"shoebill-hackathon-images\"\n\nMore Information needed" ]
a1cb132fc175685acf56daf1cbc41f7f2b9362ae
# Dataset Card for "bookcorpus_compact_512" Num samples: 1,219,333 [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
saibo/bookcorpus_compact_512
[ "region:us" ]
2022-12-29T23:46:15+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2755013516, "num_examples": 1219333}], "download_size": 1625636757, "dataset_size": 2755013516}}
2022-12-30T21:51:11+00:00
[]
[]
TAGS #region-us
# Dataset Card for "bookcorpus_compact_512" Num samples: 1,219,333 More Information needed
[ "# Dataset Card for \"bookcorpus_compact_512\"\n\nNum samples: 1,219,333\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"bookcorpus_compact_512\"\n\nNum samples: 1,219,333\n\nMore Information needed" ]
0eaa262f877d80ddb219819c34fbb8dca4ae4c54
# Dataset Card for "bookcorpus_compact_256" Num samples: 2,389,359 [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
saibo/bookcorpus_compact_256
[ "region:us" ]
2022-12-30T01:43:43+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2758524897, "num_examples": 2389359}], "download_size": 1630356023, "dataset_size": 2758524897}}
2022-12-30T21:52:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for "bookcorpus_compact_256" Num samples: 2,389,359 More Information needed
[ "# Dataset Card for \"bookcorpus_compact_256\"\n\nNum samples: 2,389,359\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"bookcorpus_compact_256\"\n\nNum samples: 2,389,359\n\nMore Information needed" ]
31a9eb9e37c68a3e3558f927415c265671b685c2
## Dataset Description

- **Homepage:** https://mirror.xyz/bitkevin.eth
- **Repository:** https://colab.research.google.com/drive/1EnqpDiKOVYhR0c6f4CgmDg2zqcbYZJpB#scrollTo=c1ef3d21-6e0e-46c9-a459-8a2ab856a5ca
- **Point of Contact:** Kevin Leffew – [email protected]

### Dataset Summary: golf-courses

This dataset (bethecloud/golf-courses) includes 21 unique images of golf courses pulled from Unsplash.

The dataset is a collection of photographs taken at various golf courses around the world. The images depict a variety of scenes, including fairways, greens, bunkers, water hazards, and clubhouse facilities. The images are high resolution and have been carefully selected to provide a diverse range of visual content for fine-tuning a machine learning model.

The dataset is intended to be used in the context of the Hugging Face DreamBooth hackathon, a competition that challenges participants to build innovative applications using the Hugging Face transformers library. The submission is for the landscape category.

Overall, this dataset provides a rich source of visual data for machine learning models looking to understand and classify elements of golf courses. Its diverse range of images and high-quality resolution make it well-suited for fine-tuning models for tasks such as image classification, object detection, and image segmentation.

By using the golf course images as part of their training data, participants can fine-tune their models to recognize and classify specific features and elements commonly found on golf courses. The ultimate goal after the hackathon is to pull this dataset from decentralized cloud storage (like Storj DCS), increasing its accessibility, performance, and resilience by distributing it across an edge network of over 17,000 uncorrelated participants.

## Example Output

![golf-acropolis.jpg](https://link.storjshare.io/juid5vc27dbajh6zyzplf4fah5xq/golf-course-output%2Fgolf-acropolis.png)

# Usage

The golf-courses dataset can be used by setting the instance prompt to "a photo of golf course".

### Languages

The language data in golf-courses is in English (BCP-47 en).

## Dataset Structure

The complete dataset is GBs and consists of 21 objects.

### Parallelized download using Decentralized Object Storage (Storj DCS)

A direct download for the dataset is located at https://link.storjshare.io/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses.

In the future, Storj DCS will be used to download large datasets (exceeding 1 TB) in a highly parallel, highly performant, and highly economical manner (by utilizing a network of over 17,000 diverse and economically incentivized datacenter node endpoints).

### Curation Rationale

This dataset was created as a sample by Kevin Leffew as part of the DreamBooth Hackathon.

### Source Data

The source data for the dataset is pulled from Unsplash.

### Licensing Information

MIT License

## Thanks to John Whitaker and Lewis Tunstall

Thanks to [John Whitaker](https://github.com/johnowhitaker) and [Lewis Tunstall](https://github.com/lewtun) for writing out and describing the initial hackathon parameters at https://huggingface.co/dreambooth-hackathon.
## Example Training Data

![golf-course1.jpg](https://link.storjshare.io/raw/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/andrew-anderson-CtyC2JjLhVg-unsplash.jpg)
![golf-course2.jpg](https://link.storjshare.io/raw/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/dean-SuGEzQkeJno-unsplash.jpg)
![golf-course3.jpg](https://link.storjshare.io/raw/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/amauri-cruz-filho-kBNV9WpCs5k-unsplash.jpg)
![golf-course4.jpg](https://link.storjshare.io/raw/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/minho-yoon-_ZVEio7AkGc-unsplash.jpg)
![golf-course5.jpg](https://link.storjshare.io/raw/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/minho-yoon-_ZVEio7AkGc-unsplash.jpg)
![golf-course6.jpg](https://link.storjshare.io/s/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/jura-FegOaqn_4GQ-unsplash%20%281%29.jpg?wrap=1)
![golf-course7.jpg](https://link.storjshare.io/s/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/sly-dizzle-cE6SpYTfqqg-unsplash.jpg?wrap=1)
![golf-course8.jpg](https://link.storjshare.io/s/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/jura-FegOaqn_4GQ-unsplash.jpg)
![golf-course9.jpg](https://link.storjshare.io/s/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/sly-dizzle-cE6SpYTfqqg-unsplash.jpg?wrap=1)
![golf-course10.jpg](https://link.storjshare.io/s/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/dean-ricciardi-08Ipbe8GpWw-unsplash.jpg)
![golf-course11.jpg](https://link.storjshare.io/s/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/jonas-from-berlin-UgwkaRUt2d0-unsplash.jpg)
![golf-course12.jpg](https://link.storjshare.io/s/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/rob-tol-Ner8kdSXh0M-unsplash.jpg)
![golf-course13.jpg](https://link.storjshare.io/s/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/richard-brutyo-HQXFhq8FNJ8-unsplash.jpg?wrap=1)
![golf-course14.jpg](https://link.storjshare.io/s/juo7ynuvpe5svxj3hh454v6fnhba/golf-courses/edwin-compton-Z8XlmAj65iM-unsplash.jpg?wrap=1)
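A minimal loading sketch for these images (illustrative only: the `image` column name and single `train` split are assumptions about how the Hub exposes this repo):

```python
from datasets import load_dataset

# Pull the 21 golf-course photos from the Hub
ds = load_dataset("bethecloud/golf-courses", split="train")

# DreamBooth-style instance prompt suggested in the Usage section above
instance_prompt = "a photo of golf course"

print(len(ds))              # expect 21
print(ds[0]["image"].size)  # "image" column is an assumption; check ds.features
```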
bethecloud/golf-courses
[ "task_categories:image-classification", "task_ids:multi-label-image-classification", "annotations_creators:machine-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:n<1K", "language:en", "license:mit", "golf-courses", "region:us" ]
2022-12-30T01:44:22+00:00
{"annotations_creators": ["machine-generated"], "language_creators": ["found"], "language": ["en"], "license": ["mit"], "multilinguality": ["monolingual"], "size_categories": ["n<1K"], "source_datasets": [], "task_categories": ["image-classification"], "task_ids": ["multi-label-image-classification"], "pretty_name": "bethecloud/golf-courses", "tags": ["golf-courses"]}
2022-12-30T06:55:56+00:00
[]
[ "en" ]
TAGS #task_categories-image-classification #task_ids-multi-label-image-classification #annotations_creators-machine-generated #language_creators-found #multilinguality-monolingual #size_categories-n<1K #language-English #license-mit #golf-courses #region-us
## Dataset Description - Homepage: URL - Repository: URL - Point of Contact: Kevin Leffew – kleffew94@URL ### Dataset Summary: golf-course This dataset (bethecloud/golf-courses) includes 21 unique images of golf courses pulled from Unsplash. The dataset is a collection of photographs taken at various golf courses around the world. The images depict a variety of scenes, including fairways, greens, bunkers, water hazards, and clubhouse facilities. The images are high resolution and have been carefully selected to provide a diverse range of visual content for fine-tuning a machine learning model. The dataset is intended to be used in the context of the Hugging Face Dream Booth hackathon, a competition that challenges participants to build innovative applications using the Hugging Face transformers library. The submission is for the category of landscape. Overall, this dataset provides a rich source of visual data for machine learning models looking to understand and classify elements of golf courses. Its diverse range of images and high-quality resolution make it well-suited for use in fine-tuning models for tasks such as image classification, object detection, and image segmentation. By using the golf course images as part of their training data, participants can fine-tune their models to recognize and classify specific features and elements commonly found on golf courses. The ultimate goal after the hackathon is to pull this dataset from decentralized cloud storage (like Storj DCS), increasing its accessibility, performance, and resilience by distributing across an edge of over 17,000 uncorrelated participants. ## Example Output ![URL]URL # Usage The golf-courses dataset can be used by modifying the instance_prompt: a photo of golf course ### Languages The language data in golf-courses is in English (BCP-47 en) ## Dataset Structure The complete dataset is GBs and consists of 21 objects. ### Parallelized download using Decentralized Object Storage (Storj DCS) A direct download for the dataset is located at URL In the future, Storj DCS will be used to download large datasets (exceeding 1TB) in a highly parallel, highly performant, and highly economical manner (by utilizing a network of over 17,000 diverse and economically incentivized datacenter node endpoints. ### Curation Rationale This model was created as a sample by Kevin Leffew as part of the DreamBooth Hackathon. ### Source Data The source data for the dataset is simply pulled from Unsplash ### Licensing Information MIT License ## Thanks to John Whitaker and Lewis Tunstall Thanks to John Whitaker and Lewis Tunstallfor writing out and describing the initial hackathon parameters at URL ## Example Training Data !URL !URL !URL !URL !URL !URL !URL !URL !URL !URL !URL !URL !URL !URL
[ "## Dataset Description\n\n- Homepage: URL\n- Repository: URL\n- Point of Contact: Kevin Leffew – kleffew94@URL", "### Dataset Summary: golf-course\n\nThis dataset (bethecloud/golf-courses) includes 21 unique images of golf courses pulled from Unsplash. \n\nThe dataset is a collection of photographs taken at various golf courses around the world. The images depict a variety of scenes, including fairways, greens, bunkers, water hazards, and clubhouse facilities. The images are high resolution and have been carefully selected to provide a diverse range of visual content for fine-tuning a machine learning model.\n\nThe dataset is intended to be used in the context of the Hugging Face Dream Booth hackathon, a competition that challenges participants to build innovative applications using the Hugging Face transformers library. The submission is for the category of landscape. \n\nOverall, this dataset provides a rich source of visual data for machine learning models looking to understand and classify elements of golf courses. Its diverse range of images and high-quality resolution make it well-suited for use in fine-tuning models for tasks such as image classification, object detection, and image segmentation.\n\nBy using the golf course images as part of their training data, participants can fine-tune their models to recognize and classify specific features and elements commonly found on golf courses. The ultimate goal after the hackathon is to pull this dataset from decentralized cloud storage (like Storj DCS), increasing its accessibility, performance, and resilience by distributing across an edge of over 17,000 uncorrelated participants.", "## Example Output\n![URL]URL", "# Usage \nThe golf-courses dataset can be used by modifying the instance_prompt: a photo of golf course", "### Languages\n\nThe language data in golf-courses is in English (BCP-47 en)", "## Dataset Structure\n\nThe complete dataset is GBs and consists of 21 objects.", "### Parallelized download using Decentralized Object Storage (Storj DCS)\n\nA direct download for the dataset is located at URL\n\nIn the future, Storj DCS will be used to download large datasets (exceeding 1TB) in a highly parallel, highly performant, and highly economical manner (by utilizing a network of over 17,000 diverse and economically incentivized datacenter node endpoints.", "### Curation Rationale\n\nThis model was created as a sample by Kevin Leffew as part of the DreamBooth Hackathon.", "### Source Data\n\nThe source data for the dataset is simply pulled from Unsplash", "### Licensing Information\n\nMIT License", "## Thanks to John Whitaker and Lewis Tunstall\n\nThanks to John Whitaker and Lewis Tunstallfor writing out and describing the initial hackathon parameters at URL", "## Example Training Data\n!URL\n!URL\n!URL\n!URL\n!URL\n!URL\n!URL\n!URL\n!URL\n!URL\n!URL\n!URL\n!URL\n!URL" ]
[ "TAGS\n#task_categories-image-classification #task_ids-multi-label-image-classification #annotations_creators-machine-generated #language_creators-found #multilinguality-monolingual #size_categories-n<1K #language-English #license-mit #golf-courses #region-us \n", "## Dataset Description\n\n- Homepage: URL\n- Repository: URL\n- Point of Contact: Kevin Leffew – kleffew94@URL", "### Dataset Summary: golf-course\n\nThis dataset (bethecloud/golf-courses) includes 21 unique images of golf courses pulled from Unsplash. \n\nThe dataset is a collection of photographs taken at various golf courses around the world. The images depict a variety of scenes, including fairways, greens, bunkers, water hazards, and clubhouse facilities. The images are high resolution and have been carefully selected to provide a diverse range of visual content for fine-tuning a machine learning model.\n\nThe dataset is intended to be used in the context of the Hugging Face Dream Booth hackathon, a competition that challenges participants to build innovative applications using the Hugging Face transformers library. The submission is for the category of landscape. \n\nOverall, this dataset provides a rich source of visual data for machine learning models looking to understand and classify elements of golf courses. Its diverse range of images and high-quality resolution make it well-suited for use in fine-tuning models for tasks such as image classification, object detection, and image segmentation.\n\nBy using the golf course images as part of their training data, participants can fine-tune their models to recognize and classify specific features and elements commonly found on golf courses. The ultimate goal after the hackathon is to pull this dataset from decentralized cloud storage (like Storj DCS), increasing its accessibility, performance, and resilience by distributing across an edge of over 17,000 uncorrelated participants.", "## Example Output\n![URL]URL", "# Usage \nThe golf-courses dataset can be used by modifying the instance_prompt: a photo of golf course", "### Languages\n\nThe language data in golf-courses is in English (BCP-47 en)", "## Dataset Structure\n\nThe complete dataset is GBs and consists of 21 objects.", "### Parallelized download using Decentralized Object Storage (Storj DCS)\n\nA direct download for the dataset is located at URL\n\nIn the future, Storj DCS will be used to download large datasets (exceeding 1TB) in a highly parallel, highly performant, and highly economical manner (by utilizing a network of over 17,000 diverse and economically incentivized datacenter node endpoints.", "### Curation Rationale\n\nThis model was created as a sample by Kevin Leffew as part of the DreamBooth Hackathon.", "### Source Data\n\nThe source data for the dataset is simply pulled from Unsplash", "### Licensing Information\n\nMIT License", "## Thanks to John Whitaker and Lewis Tunstall\n\nThanks to John Whitaker and Lewis Tunstallfor writing out and describing the initial hackathon parameters at URL", "## Example Training Data\n!URL\n!URL\n!URL\n!URL\n!URL\n!URL\n!URL\n!URL\n!URL\n!URL\n!URL\n!URL\n!URL\n!URL" ]
98f1a36334b12f38ccf7a4bf76e3f2369c9fbbeb
# Dataset Card for "bookcorpus_compact_1024" Num samples: 616,051 The number of tokens for each sequence is not exactly 1024, but all slightly shorter than 1024. The sequences were built by merging sentences to the maximal length shorter than 1024 tokens. Therefore, padding is necessary for batch processing. ```python import time from typing import List from datasets import load_dataset, Dataset from tqdm import tqdm from transformers import AutoTokenizer def batch_tokenize(texts: List[str], tokenizer, batch_size=1000): start = time.time() """Tokenize the texts in batch""" assert tokenizer.is_fast, "tokenizer must be fast tokenizer" tokenized_texts = [] for i in tqdm(range(0, len(texts), batch_size)): batch = texts[i:i + batch_size] batch_encoding = tokenizer(batch) tokenized_texts.extend(batch_encoding["input_ids"]) print(f"batch_tokenize time with bs={batch_size}: {time.time() - start}") return tokenized_texts class CompactText: def __init__(self, tokenizer="gpt2", split="test", block_size=512): self.block_size = block_size self.tokenizer = AutoTokenizer.from_pretrained(tokenizer) def compact_load(self, dataset_name: str, split: str): dataset = load_dataset(dataset_name)[split] batch_encoding = batch_tokenize(dataset["text"], self.tokenizer, batch_size=10000) compact_texts = [] texts = dataset["text"] total_num_tok = 0 tracker = [] i = 0 for j in tqdm(range(len(batch_encoding))): total_num_tok += len(batch_encoding[j]) if total_num_tok >= self.block_size: batch_sents = texts[i:j] big_sent = " ".join(batch_sents) compact_texts.append(big_sent) tracker.append((i, j)) i = j total_num_tok = 0 print(tracker) # self.examples = compact_texts compact_ds = Dataset.from_dict({"text": compact_texts}) return compact_ds if __name__ == '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument("-b", "--block-size", type=int, default=512) args = parser.parse_args() compactifier = CompactText(block_size=args.block_size) dataset = compactifier.compact_load(dataset_name="saibo/bookcorpus_deduplicated", split="train") dataset.push_to_hub(f"saibo/bookcorpus_compact_{args.block_size}") ``` [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
saibo/bookcorpus_compact_1024
[ "size_categories:100K<n<1M", "region:us" ]
2022-12-30T01:45:52+00:00
{"size_categories": ["100K<n<1M"], "dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 2753205189, "num_examples": 616051}], "download_size": 1603181006, "dataset_size": 2753205189}}
2023-01-10T11:48:52+00:00
[]
[]
TAGS #size_categories-100K<n<1M #region-us
# Dataset Card for "bookcorpus_compact_1024" Num samples: 616,051 The number of tokens for each sequence is not exactly 1024, but all slightly shorter than 1024. The sequences were built by merging sentences to the maximal length shorter than 1024 tokens. Therefore, padding is necessary for batch processing. More Information needed
[ "# Dataset Card for \"bookcorpus_compact_1024\"\n\nNum samples: 616,051\n\nThe number of tokens for each sequence is not exactly 1024, but all slightly shorter than 1024.\nThe sequences were built by merging sentences to the maximal length shorter than 1024 tokens.\nTherefore, padding is necessary for batch processing.\n\n\n\n\nMore Information needed" ]
[ "TAGS\n#size_categories-100K<n<1M #region-us \n", "# Dataset Card for \"bookcorpus_compact_1024\"\n\nNum samples: 616,051\n\nThe number of tokens for each sequence is not exactly 1024, but all slightly shorter than 1024.\nThe sequences were built by merging sentences to the maximal length shorter than 1024 tokens.\nTherefore, padding is necessary for batch processing.\n\n\n\n\nMore Information needed" ]
e3665ecd171e82b1a4b3b4a921c2194150e1daa1
# Dataset Card for "bookcorpus_small_compact_512" Num samples: 3,109 [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
saibo/bookcorpus_small_compact_512
[ "region:us" ]
2022-12-30T02:15:23+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "concept_with_offset", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 19751847, "num_examples": 3109}], "download_size": 9777636, "dataset_size": 19751847}}
2023-01-18T22:18:48+00:00
[]
[]
TAGS #region-us
# Dataset Card for "bookcorpus_small_compact_512" Num samples: 3,109 More Information needed
[ "# Dataset Card for \"bookcorpus_small_compact_512\"\n\nNum samples: 3,109\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"bookcorpus_small_compact_512\"\n\nNum samples: 3,109\n\nMore Information needed" ]
3419587a608a9002eb4bd740b9cf78e20e4c4396
# Dataset Card for "bookcorpus_small_compact_256" Num samples: 6,104 [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
saibo/bookcorpus_small_compact_256
[ "region:us" ]
2022-12-30T02:15:45+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "null"}, {"name": "concept_with_offset", "dtype": "null"}], "splits": [{"name": "train"}], "download_size": 0, "dataset_size": 0}}
2023-03-08T08:34:19+00:00
[]
[]
TAGS #region-us
# Dataset Card for "bookcorpus_small_compact_256" Num samples: 6,104 More Information needed
[ "# Dataset Card for \"bookcorpus_small_compact_256\"\n\nNum samples: 6,104\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"bookcorpus_small_compact_256\"\n\nNum samples: 6,104\n\nMore Information needed" ]
d217b7632eadca4f9a3c67451c374dfbb8d71a4d
# Dataset Card for "bookcorpus_small_compact_1024" Num samples: 1,571 [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
saibo/bookcorpus_small_compact_1024
[ "region:us" ]
2022-12-30T02:16:37+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "concept_with_offset", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 18843209, "num_examples": 1571}], "download_size": 9378154, "dataset_size": 18843209}}
2023-01-19T10:09:58+00:00
[]
[]
TAGS #region-us
# Dataset Card for "bookcorpus_small_compact_1024" Num samples: 1,571 More Information needed
[ "# Dataset Card for \"bookcorpus_small_compact_1024\"\n\nNum samples: 1,571\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"bookcorpus_small_compact_1024\"\n\nNum samples: 1,571\n\nMore Information needed" ]
88c67d93f2665c1158e6506f9b1c16264c1c4bff
<div align="center">
  <img width="640" alt="keremberke/clash-of-clans-object-detection" src="https://huggingface.co/datasets/keremberke/clash-of-clans-object-detection/resolve/main/thumbnail.jpg">
</div>

### Dataset Labels

```
['ad', 'airsweeper', 'bombtower', 'canon', 'clancastle', 'eagle', 'inferno', 'kingpad', 'mortar', 'queenpad', 'rcpad', 'scattershot', 'th13', 'wardenpad', 'wizztower', 'xbow']
```

### Number of Images

```json
{'train': 88, 'test': 13, 'valid': 24}
```

### How to Use

- Install [datasets](https://pypi.org/project/datasets/):

```bash
pip install datasets
```

- Load the dataset:

```python
from datasets import load_dataset

ds = load_dataset("keremberke/clash-of-clans-object-detection", name="full")
example = ds['train'][0]
```

### Roboflow Dataset Page

[https://universe.roboflow.com/find-this-base/clash-of-clans-vop4y/dataset/5](https://universe.roboflow.com/find-this-base/clash-of-clans-vop4y/dataset/5?ref=roboflow2huggingface)

### Citation

```
@misc{ clash-of-clans-vop4y_dataset,
    title = { Clash of Clans Dataset },
    type = { Open Source Dataset },
    author = { Find This Base },
    howpublished = { \\url{ https://universe.roboflow.com/find-this-base/clash-of-clans-vop4y } },
    url = { https://universe.roboflow.com/find-this-base/clash-of-clans-vop4y },
    journal = { Roboflow Universe },
    publisher = { Roboflow },
    year = { 2022 },
    month = { feb },
    note = { visited on 2023-01-18 },
}
```

### License

CC BY 4.0

### Dataset Summary

This dataset was exported via roboflow.ai on March 30, 2022 at 4:31 PM GMT

It includes 125 images.
CoC objects are annotated in COCO format.

The following pre-processing was applied to each image:
* Auto-orientation of pixel data (with EXIF-orientation stripping)
* Resize to 1920x1920 (Fit (black edges))

No image augmentation techniques were applied.
keremberke/clash-of-clans-object-detection
[ "task_categories:object-detection", "roboflow", "roboflow2huggingface", "Gaming", "region:us" ]
2022-12-30T05:14:59+00:00
{"task_categories": ["object-detection"], "tags": ["roboflow", "roboflow2huggingface", "Gaming"]}
2023-01-29T12:38:03+00:00
[]
[]
TAGS #task_categories-object-detection #roboflow #roboflow2huggingface #Gaming #region-us
<div align="center"> <img width="640" alt="keremberke/clash-of-clans-object-detection" src="URL </div> ### Dataset Labels ### Number of Images ### How to Use - Install datasets: - Load the dataset: ### Roboflow Dataset Page URL ### License CC BY 4.0 ### Dataset Summary This dataset was exported via URL on March 30, 2022 at 4:31 PM GMT It includes 125 images. CoC are annotated in COCO format. The following pre-processing was applied to each image: * Auto-orientation of pixel data (with EXIF-orientation stripping) * Resize to 1920x1920 (Fit (black edges)) No image augmentation techniques were applied.
[ "### Dataset Labels", "### Number of Images", "### How to Use\n\n- Install datasets:\n\n\n\n- Load the dataset:", "### Roboflow Dataset Page\nURL", "### License\nCC BY 4.0", "### Dataset Summary\nThis dataset was exported via URL on March 30, 2022 at 4:31 PM GMT\n\nIt includes 125 images.\nCoC are annotated in COCO format.\n\nThe following pre-processing was applied to each image:\n* Auto-orientation of pixel data (with EXIF-orientation stripping)\n* Resize to 1920x1920 (Fit (black edges))\n\nNo image augmentation techniques were applied." ]
[ "TAGS\n#task_categories-object-detection #roboflow #roboflow2huggingface #Gaming #region-us \n", "### Dataset Labels", "### Number of Images", "### How to Use\n\n- Install datasets:\n\n\n\n- Load the dataset:", "### Roboflow Dataset Page\nURL", "### License\nCC BY 4.0", "### Dataset Summary\nThis dataset was exported via URL on March 30, 2022 at 4:31 PM GMT\n\nIt includes 125 images.\nCoC are annotated in COCO format.\n\nThe following pre-processing was applied to each image:\n* Auto-orientation of pixel data (with EXIF-orientation stripping)\n* Resize to 1920x1920 (Fit (black edges))\n\nNo image augmentation techniques were applied." ]
e2d790297be5d87417a993f3b1c733abed31b906
# Dataset Card for "portraits-512" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
conorcl/portraits-512
[ "region:us" ]
2022-12-30T09:03:33+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 83939067.61, "num_examples": 2917}], "download_size": 83808019, "dataset_size": 83939067.61}}
2022-12-30T09:04:11+00:00
[]
[]
TAGS #region-us
# Dataset Card for "portraits-512" More Information needed
[ "# Dataset Card for \"portraits-512\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"portraits-512\"\n\nMore Information needed" ]
e6fbaebf9a321e063345318d5afc305afb6ea187
# Dataset Card for "dataset_nautical" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
alvarochelo/dataset_nautical
[ "region:us" ]
2022-12-30T09:22:55+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 908645884.0, "num_examples": 239}], "download_size": 875628182, "dataset_size": 908645884.0}}
2023-03-06T23:03:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for "dataset_nautical" More Information needed
[ "# Dataset Card for \"dataset_nautical\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"dataset_nautical\"\n\nMore Information needed" ]
1efd45608bff8f2d9a8fffcc4baebc201f499375
# Dataset Card for "results_valid_100rows_2022-12-30" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
joddy/results_valid_100rows_2022-12-30
[ "region:us" ]
2022-12-30T09:45:54+00:00
{"dataset_info": {"features": [{"name": "pixel_values", "dtype": "image"}, {"name": "resolution", "dtype": "int64"}, {"name": "attributes_loc", "dtype": {"class_label": {"names": {"0": "upper left", "1": "upper right", "2": "lower left", "3": "lower right"}}}}, {"name": "NL_text", "dtype": "string"}, {"name": "bbox_text", "dtype": "string"}, {"name": "center_text", "dtype": "string"}, {"name": "normed_object_bbox", "sequence": "int64"}, {"name": "without_pos_stable-diffusion-v1-5", "dtype": "image"}, {"name": "NL_stable-diffusion-v1-5", "dtype": "image"}, {"name": "bbox_stable-diffusion-v1-5", "dtype": "image"}, {"name": "center_stable-diffusion-v1-5", "dtype": "image"}, {"name": "without_pos_NL_text_TextENC_off", "dtype": "image"}, {"name": "NL_text_TextENC_off", "dtype": "image"}, {"name": "without_pos_bbox_text_TextENC_off", "dtype": "image"}, {"name": "bbox_text_TextENC_off", "dtype": "image"}, {"name": "without_pos_center_text_TextENC_off", "dtype": "image"}, {"name": "center_text_TextENC_off", "dtype": "image"}, {"name": "without_pos_NL_text_TextENC_on", "dtype": "image"}, {"name": "NL_text_TextENC_on", "dtype": "image"}, {"name": "without_pos_bbox_text_TextENC_on", "dtype": "image"}, {"name": "bbox_text_TextENC_on", "dtype": "image"}, {"name": "without_pos_center_text_TextENC_on", "dtype": "image"}, {"name": "center_text_TextENC_on", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 792511070.0, "num_examples": 100}], "download_size": 784909101, "dataset_size": 792511070.0}}
2022-12-30T09:55:44+00:00
[]
[]
TAGS #region-us
# Dataset Card for "results_valid_100rows_2022-12-30" More Information needed
[ "# Dataset Card for \"results_valid_100rows_2022-12-30\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"results_valid_100rows_2022-12-30\"\n\nMore Information needed" ]
409e22daa73895d32b243768fc4015cb72a2c476
# Dataset Card for "dreambooth-hackathon-losie" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
misza222/dreambooth-hackathon-losie
[ "region:us" ]
2022-12-30T10:00:39+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 435582.0, "num_examples": 5}], "download_size": 436362, "dataset_size": 435582.0}}
2022-12-30T10:00:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for "dreambooth-hackathon-losie" More Information needed
[ "# Dataset Card for \"dreambooth-hackathon-losie\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"dreambooth-hackathon-losie\"\n\nMore Information needed" ]
4dbc4d772167de2dad8d9e5026808d0dc7933447
<div align="center">
  <img width="640" alt="keremberke/nfl-object-detection" src="https://huggingface.co/datasets/keremberke/nfl-object-detection/resolve/main/thumbnail.jpg">
</div>

### Dataset Labels

```
['helmet', 'helmet-blurred', 'helmet-difficult', 'helmet-partial', 'helmet-sideline']
```

### Number of Images

```json
{'valid': 1989, 'train': 6963, 'test': 995}
```

### How to Use

- Install [datasets](https://pypi.org/project/datasets/):

```bash
pip install datasets
```

- Load the dataset:

```python
from datasets import load_dataset

ds = load_dataset("keremberke/nfl-object-detection", name="full")
example = ds['train'][0]
```

### Roboflow Dataset Page

[https://universe.roboflow.com/home-mxzv1/nfl-competition/dataset/1](https://universe.roboflow.com/home-mxzv1/nfl-competition/dataset/1?ref=roboflow2huggingface)

### Citation

```
@misc{ nfl-competition_dataset,
    title = { NFL-competition Dataset },
    type = { Open Source Dataset },
    author = { home },
    howpublished = { \\url{ https://universe.roboflow.com/home-mxzv1/nfl-competition } },
    url = { https://universe.roboflow.com/home-mxzv1/nfl-competition },
    journal = { Roboflow Universe },
    publisher = { Roboflow },
    year = { 2022 },
    month = { sep },
    note = { visited on 2023-01-18 },
}
```

### License

Public Domain

### Dataset Summary

This dataset was exported via roboflow.com on December 29, 2022 at 8:12 PM GMT

Roboflow is an end-to-end computer vision platform that helps you
* collaborate with your team on computer vision projects
* collect & organize images
* understand unstructured image data
* annotate, and create datasets
* export, train, and deploy computer vision models
* use active learning to improve your dataset over time

It includes 9947 images.
Helmets are annotated in COCO format.

The following pre-processing was applied to each image:
* Auto-orientation of pixel data (with EXIF-orientation stripping)
* Resize to 1280x720 (Stretch)

No image augmentation techniques were applied.
keremberke/nfl-object-detection
[ "task_categories:object-detection", "roboflow", "roboflow2huggingface", "region:us" ]
2022-12-30T10:37:59+00:00
{"task_categories": ["object-detection"], "tags": ["roboflow", "roboflow2huggingface"]}
2023-01-29T12:37:17+00:00
[]
[]
TAGS #task_categories-object-detection #roboflow #roboflow2huggingface #region-us
<div align="center"> <img width="640" alt="keremberke/nfl-object-detection" src="URL </div> ### Dataset Labels ### Number of Images ### How to Use - Install datasets: - Load the dataset: ### Roboflow Dataset Page URL ### License Public Domain ### Dataset Summary This dataset was exported via URL on December 29, 2022 at 8:12 PM GMT Roboflow is an end-to-end computer vision platform that helps you * collaborate with your team on computer vision projects * collect & organize images * understand unstructured image data * annotate, and create datasets * export, train, and deploy computer vision models * use active learning to improve your dataset over time It includes 9947 images. Helmets are annotated in COCO format. The following pre-processing was applied to each image: * Auto-orientation of pixel data (with EXIF-orientation stripping) * Resize to 1280x720 (Stretch) No image augmentation techniques were applied.
[ "### Dataset Labels", "### Number of Images", "### How to Use\n\n- Install datasets:\n\n\n\n- Load the dataset:", "### Roboflow Dataset Page\nURL", "### License\nPublic Domain", "### Dataset Summary\nThis dataset was exported via URL on December 29, 2022 at 8:12 PM GMT\n\nRoboflow is an end-to-end computer vision platform that helps you\n* collaborate with your team on computer vision projects\n* collect & organize images\n* understand unstructured image data\n* annotate, and create datasets\n* export, train, and deploy computer vision models\n* use active learning to improve your dataset over time\n\nIt includes 9947 images.\nHelmets are annotated in COCO format.\n\nThe following pre-processing was applied to each image:\n* Auto-orientation of pixel data (with EXIF-orientation stripping)\n* Resize to 1280x720 (Stretch)\n\nNo image augmentation techniques were applied." ]
[ "TAGS\n#task_categories-object-detection #roboflow #roboflow2huggingface #region-us \n", "### Dataset Labels", "### Number of Images", "### How to Use\n\n- Install datasets:\n\n\n\n- Load the dataset:", "### Roboflow Dataset Page\nURL", "### License\nPublic Domain", "### Dataset Summary\nThis dataset was exported via URL on December 29, 2022 at 8:12 PM GMT\n\nRoboflow is an end-to-end computer vision platform that helps you\n* collaborate with your team on computer vision projects\n* collect & organize images\n* understand unstructured image data\n* annotate, and create datasets\n* export, train, and deploy computer vision models\n* use active learning to improve your dataset over time\n\nIt includes 9947 images.\nHelmets are annotated in COCO format.\n\nThe following pre-processing was applied to each image:\n* Auto-orientation of pixel data (with EXIF-orientation stripping)\n* Resize to 1280x720 (Stretch)\n\nNo image augmentation techniques were applied." ]
89ebf3499f500a90d8820d2296432a6e0595faa1
An annotated "abstract"-style dataset, for use in training DiffSinger. TTS training data is still in the works.
funnymdzz/diffsinger-chuansao258
[ "license:cc-by-sa-4.0", "region:us" ]
2022-12-30T13:53:15+00:00
{"license": "cc-by-sa-4.0"}
2023-04-27T15:55:20+00:00
[]
[]
TAGS #license-cc-by-sa-4.0 #region-us
An annotated "abstract"-style dataset, for use in training DiffSinger. TTS training data is still in the works.
[]
[ "TAGS\n#license-cc-by-sa-4.0 #region-us \n" ]
113bf5b61fbf4685b48fb9392ffdbf911426f5ae
This might be only funk. When I say codes, I mean MuseNet encoding. I make all of my datasets by hand.
breadlicker45/big-midi-codes
[ "license:other", "region:us" ]
2022-12-30T14:09:50+00:00
{"license": "other"}
2023-01-14T22:52:51+00:00
[]
[]
TAGS #license-other #region-us
This might be only funk. When I say codes, I mean MuseNet encoding. I make all of my datasets by hand.
[]
[ "TAGS\n#license-other #region-us \n" ]
ba25faa3e1fbb728f95d23c4015d47728d935839
# Dataset Card for "jurassic-coast" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
harveymannering/jurassic-coast
[ "region:us" ]
2022-12-30T15:57:45+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 32984479.0, "num_examples": 14}], "download_size": 32973035, "dataset_size": 32984479.0}}
2022-12-30T15:57:52+00:00
[]
[]
TAGS #region-us
# Dataset Card for "jurassic-coast" More Information needed
[ "# Dataset Card for \"jurassic-coast\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"jurassic-coast\"\n\nMore Information needed" ]
07abf67aa6f1b3f053327405bbada4ac9d85a3bb
# test
nayanah/os_names
[ "region:us" ]
2022-12-30T16:50:34+00:00
{}
2022-12-30T16:53:00+00:00
[]
[]
TAGS #region-us
# test
[ "# test" ]
[ "TAGS\n#region-us \n", "# test" ]
7e90f0f342569b35213445f809cfaf3b91f9964f
# Dataset Card for EDGAR-CORPUS

## Table of Contents

- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks](#supported-tasks)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Licensing Information](#licensing-information)
  - [References](#references)
  - [Contributions](#contributions)

## Dataset Description

- **Point of Contact:** Lefteris Loukas

### Dataset Summary

This dataset card is based on the paper **EDGAR-CORPUS: Billions of Tokens Make The World Go Round**, authored by _Lefteris Loukas et al._ and published in the _ECONLP 2021_ workshop.

This dataset contains the annual reports of public companies from 1993-2020 from SEC EDGAR filings. There is supported functionality to load a specific year.

Note: since this is a corpus dataset, the different `train/val/test` splits do not have any special meaning. It's the default HF card format to have train/val/test splits.

If you wish to load specific year(s) of specific companies, you probably want to use the open-source software which generated this dataset, EDGAR-CRAWLER: https://github.com/nlpaueb/edgar-crawler.

## Citation

If this work helps or inspires you in any way, please consider citing the relevant paper published at the [3rd Economics and Natural Language Processing (ECONLP) workshop](https://lt3.ugent.be/econlp/) at EMNLP 2021 (Punta Cana, Dominican Republic):

```
@inproceedings{loukas-etal-2021-edgar,
    title = "{EDGAR}-{CORPUS}: Billions of Tokens Make The World Go Round",
    author = "Loukas, Lefteris and
      Fergadiotis, Manos and
      Androutsopoulos, Ion and
      Malakasiotis, Prodromos",
    booktitle = "Proceedings of the Third Workshop on Economics and Natural Language Processing",
    month = nov,
    year = "2021",
    address = "Punta Cana, Dominican Republic",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.econlp-1.2",
    pages = "13--18",
}
```

### Supported Tasks

This is a raw dataset/corpus for financial NLP. As such, there are no annotations or labels.

### Languages

The EDGAR filings are in English.

## Dataset Structure

### Data Instances

Refer to the dataset preview.
### Data Fields

**filename**: Name of file on EDGAR from which the report was extracted.<br>
**cik**: EDGAR identifier for a firm.<br>
**year**: Year of report.<br>
**section_1**: Corresponding section of the Annual Report.<br>
**section_1A**: Corresponding section of the Annual Report.<br>
**section_1B**: Corresponding section of the Annual Report.<br>
**section_2**: Corresponding section of the Annual Report.<br>
**section_3**: Corresponding section of the Annual Report.<br>
**section_4**: Corresponding section of the Annual Report.<br>
**section_5**: Corresponding section of the Annual Report.<br>
**section_6**: Corresponding section of the Annual Report.<br>
**section_7**: Corresponding section of the Annual Report.<br>
**section_7A**: Corresponding section of the Annual Report.<br>
**section_8**: Corresponding section of the Annual Report.<br>
**section_9**: Corresponding section of the Annual Report.<br>
**section_9A**: Corresponding section of the Annual Report.<br>
**section_9B**: Corresponding section of the Annual Report.<br>
**section_10**: Corresponding section of the Annual Report.<br>
**section_11**: Corresponding section of the Annual Report.<br>
**section_12**: Corresponding section of the Annual Report.<br>
**section_13**: Corresponding section of the Annual Report.<br>
**section_14**: Corresponding section of the Annual Report.<br>
**section_15**: Corresponding section of the Annual Report.<br>

```python
import datasets

# Load the entire dataset
raw_dataset = datasets.load_dataset("eloukas/edgar-corpus", "full")

# Load a specific year and split
year_1993_training_dataset = datasets.load_dataset("eloukas/edgar-corpus", "year_1993", split="train")
```

### Data Splits

| Config    | Training | Validation | Test   |
| --------- | -------- | ---------- | ------ |
| full      | 176,289  | 22,050     | 22,036 |
| year_1993 | 1,060    | 133        | 133    |
| year_1994 | 2,083    | 261        | 260    |
| year_1995 | 4,110    | 514        | 514    |
| year_1996 | 7,589    | 949        | 949    |
| year_1997 | 8,084    | 1,011      | 1,011  |
| year_1998 | 8,040    | 1,006      | 1,005  |
| year_1999 | 7,864    | 984        | 983    |
| year_2000 | 7,589    | 949        | 949    |
| year_2001 | 7,181    | 898        | 898    |
| year_2002 | 6,636    | 830        | 829    |
| year_2003 | 6,672    | 834        | 834    |
| year_2004 | 7,111    | 889        | 889    |
| year_2005 | 7,113    | 890        | 889    |
| year_2006 | 7,064    | 883        | 883    |
| year_2007 | 6,683    | 836        | 835    |
| year_2008 | 7,408    | 927        | 926    |
| year_2009 | 7,336    | 917        | 917    |
| year_2010 | 7,013    | 877        | 877    |
| year_2011 | 6,724    | 841        | 840    |
| year_2012 | 6,479    | 810        | 810    |
| year_2013 | 6,372    | 797        | 796    |
| year_2014 | 6,261    | 783        | 783    |
| year_2015 | 6,028    | 754        | 753    |
| year_2016 | 5,812    | 727        | 727    |
| year_2017 | 5,635    | 705        | 704    |
| year_2018 | 5,508    | 689        | 688    |
| year_2019 | 5,354    | 670        | 669    |
| year_2020 | 5,480    | 686        | 685    |

## Dataset Creation

### Source Data

#### Initial Data Collection and Normalization

Initial data was collected and processed by the authors of the research paper **EDGAR-CORPUS: Billions of Tokens Make The World Go Round**.

#### Who are the source language producers?

Public firms filing with the SEC.

### Annotations

#### Annotation process

NA

#### Who are the annotators?

NA

### Personal and Sensitive Information

The dataset contains public filings data from SEC.

## Considerations for Using the Data

### Social Impact of Dataset

Low to none.

### Discussion of Biases

The dataset is about financial information of public companies, and as such the tone and style of the text is in line with financial literature.
### Other Known Limitations

The dataset needs further cleaning for improved performance.

## Additional Information

### Licensing Information

EDGAR data is publicly available.

### Shoutout

Huge shoutout to [@JanosAudran](https://huggingface.co/JanosAudran) for the HF Card setup!

### References

- [Research Paper] Lefteris Loukas, Manos Fergadiotis, Ion Androutsopoulos, and Prodromos Malakasiotis. EDGAR-CORPUS: Billions of Tokens Make The World Go Round. Third Workshop on Economics and Natural Language Processing (ECONLP). https://arxiv.org/abs/2109.14394 - Punta Cana, Dominican Republic, November 2021.
- [Software] Lefteris Loukas, Manos Fergadiotis, Ion Androutsopoulos, and Prodromos Malakasiotis. EDGAR-CRAWLER. https://github.com/nlpaueb/edgar-crawler (2021)
- [EDGAR CORPUS, but in zip files] EDGAR CORPUS: A corpus for financial NLP research, built from SEC's EDGAR. https://zenodo.org/record/5528490 (2021)
- [Word Embeddings] EDGAR-W2V: Word2vec Embeddings trained on EDGAR-CORPUS. https://zenodo.org/record/5524358 (2021)
- [Applied Research paper where EDGAR-CORPUS is used] Lefteris Loukas, Manos Fergadiotis, Ilias Chalkidis, Eirini Spyropoulou, Prodromos Malakasiotis, Ion Androutsopoulos, and George Paliouras. FiNER: Financial Numeric Entity Recognition for XBRL Tagging. Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). https://doi.org/10.18653/v1/2022.acl-long.303 (2022)
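A quick field-access sketch to complement the loading snippet above (illustrative; uses the `year_2020` config from the splits table):

```python
import datasets

# Load one year's worth of annual reports
ds = datasets.load_dataset("eloukas/edgar-corpus", "year_2020", split="train")

example = ds[0]
print(example["cik"], example["year"])
print(example["section_1"][:300])  # first characters of Item 1 (Business)
```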
eloukas/edgar-corpus
[ "task_categories:other", "annotations_creators:no-annotation", "language_creators:other", "multilinguality:monolingual", "size_categories:100K<n<1M", "source_datasets:extended|other", "language:en", "license:apache-2.0", "research papers", "edgar", "sec", "finance", "financial", "filings", "10K", "10-K", "nlp", "research", "econlp", "economics", "business", "arxiv:2109.14394", "region:us" ]
2022-12-30T16:55:08+00:00
{"annotations_creators": ["no-annotation"], "language_creators": ["other"], "language": ["en"], "license": ["apache-2.0"], "multilinguality": ["monolingual"], "size_categories": ["100K<n<1M"], "source_datasets": ["extended|other"], "task_categories": ["other"], "task_ids": [], "pretty_name": "EDGAR-CORPUS (10-K Filings from 1999 to 2020)", "dataset_info": [{"config_name": ".", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 40306320885, "num_examples": 220375}], "download_size": 10734208660, "dataset_size": 40306320885}, {"config_name": "full", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 32237457024, "num_examples": 176289}, {"name": "validation", "num_bytes": 4023129683, "num_examples": 22050}, {"name": "test", "num_bytes": 4045734178, "num_examples": 22036}], "download_size": 40699852536, "dataset_size": 40306320885}, {"config_name": "year_1993", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", 
"dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 112714537, "num_examples": 1060}, {"name": "validation", "num_bytes": 13584432, "num_examples": 133}, {"name": "test", "num_bytes": 14520566, "num_examples": 133}], "download_size": 141862572, "dataset_size": 140819535}, {"config_name": "year_1994", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 198955093, "num_examples": 2083}, {"name": "validation", "num_bytes": 23432307, "num_examples": 261}, {"name": "test", "num_bytes": 26115768, "num_examples": 260}], "download_size": 250411041, "dataset_size": 248503168}, {"config_name": "year_1995", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 356959049, "num_examples": 4110}, {"name": "validation", "num_bytes": 42781161, "num_examples": 514}, {"name": "test", "num_bytes": 45275568, "num_examples": 514}], "download_size": 448617549, "dataset_size": 445015778}, {"config_name": "year_1996", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": 
"section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 738506135, "num_examples": 7589}, {"name": "validation", "num_bytes": 89873905, "num_examples": 949}, {"name": "test", "num_bytes": 91248882, "num_examples": 949}], "download_size": 926536700, "dataset_size": 919628922}, {"config_name": "year_1997", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 854201733, "num_examples": 8084}, {"name": "validation", "num_bytes": 103167272, "num_examples": 1011}, {"name": "test", "num_bytes": 106843950, "num_examples": 1011}], "download_size": 1071898139, "dataset_size": 1064212955}, {"config_name": "year_1998", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 904075497, "num_examples": 8040}, {"name": "validation", "num_bytes": 112630658, "num_examples": 1006}, {"name": "test", "num_bytes": 113308750, "num_examples": 1005}], "download_size": 1137887615, "dataset_size": 1130014905}, {"config_name": "year_1999", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": 
"string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 911374885, "num_examples": 7864}, {"name": "validation", "num_bytes": 118614261, "num_examples": 984}, {"name": "test", "num_bytes": 116706581, "num_examples": 983}], "download_size": 1154736765, "dataset_size": 1146695727}, {"config_name": "year_2000", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 926444625, "num_examples": 7589}, {"name": "validation", "num_bytes": 113264749, "num_examples": 949}, {"name": "test", "num_bytes": 114605470, "num_examples": 949}], "download_size": 1162526814, "dataset_size": 1154314844}, {"config_name": "year_2001", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 964631161, "num_examples": 7181}, {"name": "validation", "num_bytes": 117509010, "num_examples": 898}, {"name": "test", "num_bytes": 116141097, "num_examples": 898}], "download_size": 
1207790205, "dataset_size": 1198281268}, {"config_name": "year_2002", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1049271720, "num_examples": 6636}, {"name": "validation", "num_bytes": 128339491, "num_examples": 830}, {"name": "test", "num_bytes": 128444184, "num_examples": 829}], "download_size": 1317817728, "dataset_size": 1306055395}, {"config_name": "year_2003", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1027557690, "num_examples": 6672}, {"name": "validation", "num_bytes": 126684704, "num_examples": 834}, {"name": "test", "num_bytes": 130672979, "num_examples": 834}], "download_size": 1297227566, "dataset_size": 1284915373}, {"config_name": "year_2004", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": 
"string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1129657843, "num_examples": 7111}, {"name": "validation", "num_bytes": 147499772, "num_examples": 889}, {"name": "test", "num_bytes": 147890092, "num_examples": 889}], "download_size": 1439663100, "dataset_size": 1425047707}, {"config_name": "year_2005", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1200714441, "num_examples": 7113}, {"name": "validation", "num_bytes": 161003977, "num_examples": 890}, {"name": "test", "num_bytes": 160727195, "num_examples": 889}], "download_size": 1538876195, "dataset_size": 1522445613}, {"config_name": "year_2006", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1286566049, "num_examples": 7064}, {"name": "validation", "num_bytes": 160843494, "num_examples": 883}, {"name": "test", "num_bytes": 163270601, "num_examples": 883}], "download_size": 1628452618, "dataset_size": 1610680144}, {"config_name": "year_2007", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": 
"section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1296737173, "num_examples": 6683}, {"name": "validation", "num_bytes": 166735560, "num_examples": 836}, {"name": "test", "num_bytes": 156399535, "num_examples": 835}], "download_size": 1637502176, "dataset_size": 1619872268}, {"config_name": "year_2008", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1525698198, "num_examples": 7408}, {"name": "validation", "num_bytes": 190034435, "num_examples": 927}, {"name": "test", "num_bytes": 187659976, "num_examples": 926}], "download_size": 1924164839, "dataset_size": 1903392609}, {"config_name": "year_2009", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1547816260, "num_examples": 7336}, {"name": "validation", "num_bytes": 188897783, "num_examples": 917}, {"name": "test", "num_bytes": 196463897, "num_examples": 917}], "download_size": 1954076983, "dataset_size": 1933177940}, {"config_name": "year_2010", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": 
"string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1493505900, "num_examples": 7013}, {"name": "validation", "num_bytes": 192695567, "num_examples": 877}, {"name": "test", "num_bytes": 191482640, "num_examples": 877}], "download_size": 1897687327, "dataset_size": 1877684107}, {"config_name": "year_2011", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1481486551, "num_examples": 6724}, {"name": "validation", "num_bytes": 190781558, "num_examples": 841}, {"name": "test", "num_bytes": 185869151, "num_examples": 840}], "download_size": 1877396421, "dataset_size": 1858137260}, {"config_name": "year_2012", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1463496224, "num_examples": 6479}, {"name": "validation", "num_bytes": 186247306, "num_examples": 810}, {"name": "test", "num_bytes": 185923601, "num_examples": 810}], "download_size": 1854377191, "dataset_size": 1835667131}, {"config_name": "year_2013", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, 
{"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1468172419, "num_examples": 6372}, {"name": "validation", "num_bytes": 183570866, "num_examples": 797}, {"name": "test", "num_bytes": 182495750, "num_examples": 796}], "download_size": 1852839009, "dataset_size": 1834239035}, {"config_name": "year_2014", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1499451593, "num_examples": 6261}, {"name": "validation", "num_bytes": 181568907, "num_examples": 783}, {"name": "test", "num_bytes": 181046535, "num_examples": 783}], "download_size": 1880963095, "dataset_size": 1862067035}, {"config_name": "year_2015", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1472346721, "num_examples": 6028}, {"name": "validation", "num_bytes": 
180128910, "num_examples": 754}, {"name": "test", "num_bytes": 189210252, "num_examples": 753}], "download_size": 1860303134, "dataset_size": 1841685883}, {"config_name": "year_2016", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1471605426, "num_examples": 5812}, {"name": "validation", "num_bytes": 178310005, "num_examples": 727}, {"name": "test", "num_bytes": 177481471, "num_examples": 727}], "download_size": 1845967492, "dataset_size": 1827396902}, {"config_name": "year_2017", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1459021126, "num_examples": 5635}, {"name": "validation", "num_bytes": 174360913, "num_examples": 705}, {"name": "test", "num_bytes": 184398250, "num_examples": 704}], "download_size": 1836306408, "dataset_size": 1817780289}, {"config_name": "year_2018", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": 
"section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1433409319, "num_examples": 5508}, {"name": "validation", "num_bytes": 181466460, "num_examples": 689}, {"name": "test", "num_bytes": 182594965, "num_examples": 688}], "download_size": 1815810567, "dataset_size": 1797470744}, {"config_name": "year_2019", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1421232269, "num_examples": 5354}, {"name": "validation", "num_bytes": 175603562, "num_examples": 670}, {"name": "test", "num_bytes": 176336174, "num_examples": 669}], "download_size": 1791237155, "dataset_size": 1773172005}, {"config_name": "year_2020", "features": [{"name": "filename", "dtype": "string"}, {"name": "cik", "dtype": "string"}, {"name": "year", "dtype": "string"}, {"name": "section_1", "dtype": "string"}, {"name": "section_1A", "dtype": "string"}, {"name": "section_1B", "dtype": "string"}, {"name": "section_2", "dtype": "string"}, {"name": "section_3", "dtype": "string"}, {"name": "section_4", "dtype": "string"}, {"name": "section_5", "dtype": "string"}, {"name": "section_6", "dtype": "string"}, {"name": "section_7", "dtype": "string"}, {"name": "section_7A", "dtype": "string"}, {"name": "section_8", "dtype": "string"}, {"name": "section_9", "dtype": "string"}, {"name": "section_9A", "dtype": "string"}, {"name": "section_9B", "dtype": "string"}, {"name": "section_10", "dtype": "string"}, {"name": "section_11", "dtype": "string"}, {"name": "section_12", "dtype": "string"}, {"name": "section_13", "dtype": "string"}, {"name": "section_14", "dtype": "string"}, {"name": "section_15", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1541847387, "num_examples": 5480}, {"name": "validation", "num_bytes": 193498658, "num_examples": 686}, {"name": "test", "num_bytes": 192600298, "num_examples": 685}], "download_size": 1946916132, "dataset_size": 1927946343}], "tags": ["research papers", "edgar", "sec", "finance", "financial", "filings", "10K", "10-K", "nlp", "research", "econlp", "economics", "business"]}
2023-07-14T06:17:12+00:00
[ "2109.14394" ]
[ "en" ]
TAGS #task_categories-other #annotations_creators-no-annotation #language_creators-other #multilinguality-monolingual #size_categories-100K<n<1M #source_datasets-extended|other #language-English #license-apache-2.0 #research papers #edgar #sec #finance #financial #filings #10K #10-K #nlp #research #econlp #economics #business #arxiv-2109.14394 #region-us
Dataset Card for [EDGAR-CORPUS]
===============================


Table of Contents
-----------------


* Table of Contents
* Dataset Description
	+ Dataset Summary
	+ Supported Tasks
	+ Languages
* Dataset Structure
	+ Data Instances
	+ Data Fields
	+ Data Splits
* Dataset Creation
	+ Source Data
	+ Annotations
	+ Personal and Sensitive Information
* Considerations for Using the Data
	+ Social Impact of Dataset
	+ Discussion of Biases
	+ Other Known Limitations
* Additional Information
	+ Licensing Information
	+ References
	+ Contributions


Dataset Description
-------------------


* Point of Contact: Lefteris Loukas


### Dataset Summary


This dataset card is based on the paper EDGAR-CORPUS: Billions of Tokens Make The World Go Round authored by *Lefteris Loukas et al.*, as published in the *ECONLP 2021* workshop.


This dataset contains the annual reports of public companies from 1993-2020 from SEC EDGAR filings.


There is supported functionality to load a specific year.


Care: since this is a corpus dataset, different 'train/val/test' splits do not have any special meaning. It's the default HF card format to have train/val/test splits.


If you wish to load specific year(s) of specific companies, you probably want to use the open-source software which generated this dataset, EDGAR-CRAWLER: URL


If this work helps or inspires you in any way, please consider citing the relevant paper published at the 3rd Economics and Natural Language Processing (ECONLP) workshop at EMNLP 2021 (Punta Cana, Dominican Republic):


### Supported Tasks


This is a raw dataset/corpus for financial NLP.
As such, there are no annotations or labels.


### Languages


The EDGAR Filings are in English.


Dataset Structure
-----------------


### Data Instances


Refer to the dataset preview.


### Data Fields


filename: Name of file on EDGAR from which the report was extracted. 

cik: EDGAR identifier for a firm. 

year: Year of report. 

section\_1: Corresponding section of the Annual Report. 

section\_1A: Corresponding section of the Annual Report. 

section\_1B: Corresponding section of the Annual Report. 

section\_2: Corresponding section of the Annual Report. 

section\_3: Corresponding section of the Annual Report. 

section\_4: Corresponding section of the Annual Report. 

section\_5: Corresponding section of the Annual Report. 

section\_6: Corresponding section of the Annual Report. 

section\_7: Corresponding section of the Annual Report. 

section\_7A: Corresponding section of the Annual Report. 

section\_8: Corresponding section of the Annual Report. 

section\_9: Corresponding section of the Annual Report. 

section\_9A: Corresponding section of the Annual Report. 

section\_9B: Corresponding section of the Annual Report. 

section\_10: Corresponding section of the Annual Report. 

section\_11: Corresponding section of the Annual Report. 

section\_12: Corresponding section of the Annual Report. 

section\_13: Corresponding section of the Annual Report. 

section\_14: Corresponding section of the Annual Report. 

section\_15: Corresponding section of the Annual Report.


### Data Splits


Dataset Creation
----------------


### Source Data


#### Initial Data Collection and Normalization


Initial data was collected and processed by the authors of the research paper EDGAR-CORPUS: Billions of Tokens Make The World Go Round.


#### Who are the source language producers?


Public firms filing with the SEC.


### Annotations


#### Annotation process


NA


#### Who are the annotators?


NA


### Personal and Sensitive Information


The dataset contains public filings data from SEC.


Considerations for Using the Data
---------------------------------


### Social Impact of Dataset


Low to none.


### Discussion of Biases


The dataset is about financial information of public companies, and as such the tone and style of the text are in line with financial literature.


### Other Known Limitations


The dataset needs further cleaning for improved performance.


Additional Information
----------------------


### Licensing Information


EDGAR data is publicly available.


### Shoutout


Huge shoutout to @JanosAudran for the HF Card setup!


### References


* [Research Paper] Lefteris Loukas, Manos Fergadiotis, Ion Androutsopoulos, and Prodromos Malakasiotis. EDGAR-CORPUS: Billions of Tokens Make The World Go Round. Third Workshop on Economics and Natural Language Processing (ECONLP). URL - Punta Cana, Dominican Republic, November 2021.
* [Software] Lefteris Loukas, Manos Fergadiotis, Ion Androutsopoulos, and Prodromos Malakasiotis. EDGAR-CRAWLER. URL (2021)
* [EDGAR CORPUS, but in zip files] EDGAR CORPUS: A corpus for financial NLP research, built from SEC's EDGAR. URL (2021)
* [Word Embeddings] EDGAR-W2V: Word2vec Embeddings trained on EDGAR-CORPUS. URL (2021)
* [Applied Research paper where EDGAR-CORPUS is used] Lefteris Loukas, Manos Fergadiotis, Ilias Chalkidis, Eirini Spyropoulou, Prodromos Malakasiotis, Ion Androutsopoulos, and George Paliouras. FiNER: Financial Numeric Entity Recognition for XBRL Tagging. Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). URL (2022)
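A minimal loading sketch for the per-year configurations listed in this record's metadata (`year_1993` through `year_2020`, plus `full`). The repository id below is an assumption, since this record's id field is not shown here; substitute the actual Hub id if it differs.

```python
# Hedged sketch: load one year of 10-K filings and peek at a section.
# Assumption: the dataset is hosted at "eloukas/edgar-corpus" on the Hub.
from datasets import load_dataset

edgar_2020 = load_dataset("eloukas/edgar-corpus", "year_2020", split="train")

example = edgar_2020[0]
print(example["cik"], example["year"])  # firm identifier and report year
print(example["section_1"][:300])       # start of Item 1 (Business)
```

As the summary above notes, the train/validation/test splits carry no special meaning for this corpus; they are simply the default card layout.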
[ "### Dataset Summary\n\n\nThis dataset card is based on the paper EDGAR-CORPUS: Billions of Tokens Make The World Go Round authored by *Lefteris Loukas et al.*, as published in the *ECONLP 2021* workshop.\n\n\nThis dataset contains the annual reports of public companies from 1993-2020 from SEC EDGAR filings.\n\n\nThere is supported functionality to load a specific year.\n\n\nCare: since this is a corpus dataset, different 'train/val/test' splits do not have any special meaning. It's the default HF card format to have train/val/test splits.\n\n\nIf you wish to load specific year(s) of specific companies, you probably want to use the open-source software which generated this dataset, EDGAR-CRAWLER: URL\n\n\nIf this work helps or inspires you in any way, please consider citing the relevant paper published at the 3rd Economics and Natural Language Processing (ECONLP) workshop at EMNLP 2021 (Punta Cana, Dominican Republic):", "### Supported Tasks\n\n\nThis is a raw dataset/corpus for financial NLP.\nAs such, there are no annotations or labels.", "### Languages\n\n\nThe EDGAR Filings are in English.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nRefer to the dataset preview.", "### Data Fields\n\n\nfilename: Name of file on EDGAR from which the report was extracted. \n\ncik: EDGAR identifier for a firm. \n\nyear: Year of report. \n\nsection\\_1: Corresponding section of the Annual Report. \n\nsection\\_1A: Corresponding section of the Annual Report. \n\nsection\\_1B: Corresponding section of the Annual Report. \n\nsection\\_2: Corresponding section of the Annual Report. \n\nsection\\_3: Corresponding section of the Annual Report. \n\nsection\\_4: Corresponding section of the Annual Report. \n\nsection\\_5: Corresponding section of the Annual Report. \n\nsection\\_6: Corresponding section of the Annual Report. \n\nsection\\_7: Corresponding section of the Annual Report. \n\nsection\\_7A: Corresponding section of the Annual Report. \n\nsection\\_8: Corresponding section of the Annual Report. \n\nsection\\_9: Corresponding section of the Annual Report. \n\nsection\\_9A: Corresponding section of the Annual Report. \n\nsection\\_9B: Corresponding section of the Annual Report. \n\nsection\\_10: Corresponding section of the Annual Report. \n\nsection\\_11: Corresponding section of the Annual Report. \n\nsection\\_12: Corresponding section of the Annual Report. \n\nsection\\_13: Corresponding section of the Annual Report. \n\nsection\\_14: Corresponding section of the Annual Report. \n\nsection\\_15: Corresponding section of the Annual Report.", "### Data Splits\n\n\n\nDataset Creation\n----------------", "### Source Data", "#### Initial Data Collection and Normalization\n\n\nInitial data was collected and processed by the authors of the research paper EDGAR-CORPUS: Billions of Tokens Make The World Go Round.", "#### Who are the source language producers?\n\n\nPublic firms filing with the SEC.", "### Annotations", "#### Annotation process\n\n\nNA", "#### Who are the annotators?\n\n\nNA", "### Personal and Sensitive Information\n\n\nThe dataset contains public filings data from SEC.\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset\n\n\nLow to none.", "### Discussion of Biases\n\n\nThe dataset is about financial information of public companies, and as such the tone and style of the text are in line with financial literature.", "### Other Known Limitations\n\n\nThe dataset needs further cleaning for improved performance.\n\n\nAdditional Information\n----------------------", "### Licensing Information\n\n\nEDGAR data is publicly available.", "### Shoutout\n\n\nHuge shoutout to @JanosAudran for the HF Card setup!", "### References\n\n\n* [Research Paper] Lefteris Loukas, Manos Fergadiotis, Ion Androutsopoulos, and Prodromos Malakasiotis. EDGAR-CORPUS: Billions of Tokens Make The World Go Round. Third Workshop on Economics and Natural Language Processing (ECONLP). URL - Punta Cana, Dominican Republic, November 2021.\n* [Software] Lefteris Loukas, Manos Fergadiotis, Ion Androutsopoulos, and Prodromos Malakasiotis. EDGAR-CRAWLER. URL (2021)\n* [EDGAR CORPUS, but in zip files] EDGAR CORPUS: A corpus for financial NLP research, built from SEC's EDGAR. URL (2021)\n* [Word Embeddings] EDGAR-W2V: Word2vec Embeddings trained on EDGAR-CORPUS. URL (2021)\n* [Applied Research paper where EDGAR-CORPUS is used] Lefteris Loukas, Manos Fergadiotis, Ilias Chalkidis, Eirini Spyropoulou, Prodromos Malakasiotis, Ion Androutsopoulos, and George Paliouras. FiNER: Financial Numeric Entity Recognition for XBRL Tagging. Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). URL (2022)" ]
[ "TAGS\n#task_categories-other #annotations_creators-no-annotation #language_creators-other #multilinguality-monolingual #size_categories-100K<n<1M #source_datasets-extended|other #language-English #license-apache-2.0 #research papers #edgar #sec #finance #financial #filings #10K #10-K #nlp #research #econlp #economics #business #arxiv-2109.14394 #region-us \n", "### Dataset Summary\n\n\nThis dataset card is based on the paper EDGAR-CORPUS: Billions of Tokens Make The World Go Round authored by *Lefteris Loukas et al.*, as published in the *ECONLP 2021* workshop.\n\n\nThis dataset contains the annual reports of public companies from 1993-2020 from SEC EDGAR filings.\n\n\nThere is supported functionality to load a specific year.\n\n\nCare: since this is a corpus dataset, different 'train/val/test' splits do not have any special meaning. It's the default HF card format to have train/val/test splits.\n\n\nIf you wish to load specific year(s) of specific companies, you probably want to use the open-source software which generated this dataset, EDGAR-CRAWLER: URL\n\n\nIf this work helps or inspires you in any way, please consider citing the relevant paper published at the 3rd Economics and Natural Language Processing (ECONLP) workshop at EMNLP 2021 (Punta Cana, Dominican Republic):", "### Supported Tasks\n\n\nThis is a raw dataset/corpus for financial NLP.\nAs such, there are no annotations or labels.", "### Languages\n\n\nThe EDGAR Filings are in English.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nRefer to the dataset preview.", "### Data Fields\n\n\nfilename: Name of file on EDGAR from which the report was extracted. \n\ncik: EDGAR identifier for a firm. \n\nyear: Year of report. \n\nsection\\_1: Corresponding section of the Annual Report. \n\nsection\\_1A: Corresponding section of the Annual Report. \n\nsection\\_1B: Corresponding section of the Annual Report. \n\nsection\\_2: Corresponding section of the Annual Report. \n\nsection\\_3: Corresponding section of the Annual Report. \n\nsection\\_4: Corresponding section of the Annual Report. \n\nsection\\_5: Corresponding section of the Annual Report. \n\nsection\\_6: Corresponding section of the Annual Report. \n\nsection\\_7: Corresponding section of the Annual Report. \n\nsection\\_7A: Corresponding section of the Annual Report. \n\nsection\\_8: Corresponding section of the Annual Report. \n\nsection\\_9: Corresponding section of the Annual Report. \n\nsection\\_9A: Corresponding section of the Annual Report. \n\nsection\\_9B: Corresponding section of the Annual Report. \n\nsection\\_10: Corresponding section of the Annual Report. \n\nsection\\_11: Corresponding section of the Annual Report. \n\nsection\\_12: Corresponding section of the Annual Report. \n\nsection\\_13: Corresponding section of the Annual Report. \n\nsection\\_14: Corresponding section of the Annual Report. \n\nsection\\_15: Corresponding section of the Annual Report.", "### Data Splits\n\n\n\nDataset Creation\n----------------", "### Source Data", "#### Initial Data Collection and Normalization\n\n\nInitial data was collected and processed by the authors of the research paper EDGAR-CORPUS: Billions of Tokens Make The World Go Round.", "#### Who are the source language producers?\n\n\nPublic firms filing with the SEC.", "### Annotations", "#### Annotation process\n\n\nNA", "#### Who are the annotators?\n\n\nNA", "### Personal and Sensitive Information\n\n\nThe dataset contains public filings data from SEC.\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset\n\n\nLow to none.", "### Discussion of Biases\n\n\nThe dataset is about financial information of public companies, and as such the tone and style of the text are in line with financial literature.", "### Other Known Limitations\n\n\nThe dataset needs further cleaning for improved performance.\n\n\nAdditional Information\n----------------------", "### Licensing Information\n\n\nEDGAR data is publicly available.", "### Shoutout\n\n\nHuge shoutout to @JanosAudran for the HF Card setup!", "### References\n\n\n* [Research Paper] Lefteris Loukas, Manos Fergadiotis, Ion Androutsopoulos, and Prodromos Malakasiotis. EDGAR-CORPUS: Billions of Tokens Make The World Go Round. Third Workshop on Economics and Natural Language Processing (ECONLP). URL - Punta Cana, Dominican Republic, November 2021.\n* [Software] Lefteris Loukas, Manos Fergadiotis, Ion Androutsopoulos, and Prodromos Malakasiotis. EDGAR-CRAWLER. URL (2021)\n* [EDGAR CORPUS, but in zip files] EDGAR CORPUS: A corpus for financial NLP research, built from SEC's EDGAR. URL (2021)\n* [Word Embeddings] EDGAR-W2V: Word2vec Embeddings trained on EDGAR-CORPUS. URL (2021)\n* [Applied Research paper where EDGAR-CORPUS is used] Lefteris Loukas, Manos Fergadiotis, Ilias Chalkidis, Eirini Spyropoulou, Prodromos Malakasiotis, Ion Androutsopoulos, and George Paliouras. FiNER: Financial Numeric Entity Recognition for XBRL Tagging. Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). URL (2022)" ]
851e8810714890d615ecd10e0fc225e2db0e127c
# emo
Hosswin/Self
[ "region:us" ]
2022-12-30T17:50:28+00:00
{}
2022-12-30T17:54:39+00:00
[]
[]
TAGS #region-us
# emo
[ "# emo" ]
[ "TAGS\n#region-us \n", "# emo" ]
a6ea0aadd1bb2f929384443c672f45ac16ed7b04
# Dataset Card for Genshin Voice

## Dataset Description

### Dataset Summary

The Genshin Voice dataset is a text-to-voice dataset of different Genshin Impact characters unpacked from the game.

### Languages

The text in the dataset is in Mandarin.

## Dataset Creation

### Source Data

#### Initial Data Collection and Normalization

The data was obtained by unpacking the [Genshin Impact](https://genshin.hoyoverse.com/) game.

#### Who are the source language producers?

The language producers are employees of [Hoyoverse](https://hoyoverse.com/) and contractors from [EchoSky Studio](http://qx.asiacu.com/).

### Annotations

The dataset contains official annotations from the game, including in-game speaker names and transcripts.

## Additional Information

### Dataset Curators

The dataset was created by [w4123](https://github.com/w4123) initially in his [GitHub repository](https://github.com/w4123/GenshinVoice).

### Licensing Information

Copyright © COGNOSPHERE. All Rights Reserved.
hanamizuki-ai/genshin-voice-v3.3-mandarin
[ "task_categories:text-to-speech", "task_categories:automatic-speech-recognition", "multilinguality:monolingual", "source_datasets:original", "language:zh", "region:us" ]
2022-12-30T18:13:13+00:00
{"language": ["zh"], "multilinguality": ["monolingual"], "source_datasets": ["original"], "task_categories": ["text-to-speech", "automatic-speech-recognition"], "pretty_name": "Genshin Voice", "dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "language", "dtype": "string"}, {"name": "npcName", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "type", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 36412736429.25, "num_examples": 75033}], "download_size": 18251937481, "dataset_size": 36412736429.25}}
2022-12-31T05:01:47+00:00
[]
[ "zh" ]
TAGS #task_categories-text-to-speech #task_categories-automatic-speech-recognition #multilinguality-monolingual #source_datasets-original #language-Chinese #region-us
# Dataset Card for Genshin Voice

## Dataset Description

### Dataset Summary

The Genshin Voice dataset is a text-to-voice dataset of different Genshin Impact characters unpacked from the game.

### Languages

The text in the dataset is in Mandarin.

## Dataset Creation

### Source Data

#### Initial Data Collection and Normalization

The data was obtained by unpacking the Genshin Impact game.

#### Who are the source language producers?

The language producers are employees of Hoyoverse and contractors from EchoSky Studio.

### Annotations

The dataset contains official annotations from the game, including in-game speaker names and transcripts.

## Additional Information

### Dataset Curators

The dataset was created by w4123 initially in his GitHub repository.

### Licensing Information

Copyright © COGNOSPHERE. All Rights Reserved.
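The metadata for this record lists five fields per example (`audio`, `language`, `npcName`, `text`, `type`) and a download size of roughly 18 GB, so streaming is a practical way to inspect the data. A minimal sketch, assuming standard `datasets` streaming behavior:

```python
# Hedged sketch: stream one voice line instead of downloading ~18 GB.
from datasets import load_dataset

ds = load_dataset(
    "hanamizuki-ai/genshin-voice-v3.3-mandarin", split="train", streaming=True
)
sample = next(iter(ds))

print(sample["npcName"], "-", sample["type"])  # in-game speaker and line type
print(sample["text"])                          # Mandarin transcript
audio = sample["audio"]                        # decoded dict: array + sampling_rate
print(audio["sampling_rate"], len(audio["array"]))
```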
[ "# Dataset Card for Genshin Voice", "## Dataset Description", "### Dataset Summary\n\nThe Genshin Voice dataset is a text-to-voice dataset of different Genshin Impact characters unpacked from the game.", "### Languages\n\nThe text in the dataset is in Mandarin.", "## Dataset Creation", "### Source Data", "#### Initial Data Collection and Normalization\n\nThe data was obtained by unpacking the Genshin Impact game.", "#### Who are the source language producers?\n\nThe language producers are employees of Hoyoverse and contractors from EchoSky Studio.", "### Annotations\n\nThe dataset contains official annotations from the game, including in-game speaker names and transcripts.", "## Additional Information", "### Dataset Curators\n\nThe dataset was created by w4123 initially in his GitHub repository.", "### Licensing Information\n\nCopyright © COGNOSPHERE. All Rights Reserved." ]
[ "TAGS\n#task_categories-text-to-speech #task_categories-automatic-speech-recognition #multilinguality-monolingual #source_datasets-original #language-Chinese #region-us \n", "# Dataset Card for Genshin Voice", "## Dataset Description", "### Dataset Summary\n\nThe Genshin Voice dataset is a text-to-voice dataset of different Genshin Impact characters unpacked from the game.", "### Languages\n\nThe text in the dataset is in Mandarin.", "## Dataset Creation", "### Source Data", "#### Initial Data Collection and Normalization\n\nThe data was obtained by unpacking the Genshin Impact game.", "#### Who are the source language producers?\n\nThe language producers are employees of Hoyoverse and contractors from EchoSky Studio.", "### Annotations\n\nThe dataset contains official annotations from the game, including in-game speaker names and transcripts.", "## Additional Information", "### Dataset Curators\n\nThe dataset was created by w4123 initially in his GitHub repository.", "### Licensing Information\n\nCopyright © COGNOSPHERE. All Rights Reserved." ]
e4c525ff05693d2b7c9cc3ffd6c25e820bec7b66
a
nayanah/os_cat_2
[ "region:us" ]
2022-12-30T18:39:58+00:00
{}
2022-12-30T18:40:28+00:00
[]
[]
TAGS #region-us
a
[]
[ "TAGS\n#region-us \n" ]
2cb865a138cb7251be9e3841beeee14d296db950
# Dataset Card for "artist-lyrics" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
BhavyaMuni/artist-lyrics
[ "region:us" ]
2022-12-30T19:06:53+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "beyonce", "num_bytes": 593257, "num_examples": 17188}, {"name": "sia", "num_bytes": 290482, "num_examples": 8410}, {"name": "anitta", "num_bytes": 52302, "num_examples": 1536}, {"name": "adele", "num_bytes": 123431, "num_examples": 3719}, {"name": "eminem", "num_bytes": 1884010, "num_examples": 43830}, {"name": "ed_sheeran", "num_bytes": 437717, "num_examples": 11731}, {"name": "coldplay", "num_bytes": 229712, "num_examples": 7037}, {"name": "pink", "num_bytes": 338827, "num_examples": 9922}, {"name": "taylor_swift", "num_bytes": 696163, "num_examples": 19203}, {"name": "imagine_dragons", "num_bytes": 213012, "num_examples": 6208}, {"name": "justin_bieber", "num_bytes": 550768, "num_examples": 15086}, {"name": "ludmilla", "num_bytes": 826, "num_examples": 24}, {"name": "the_beatles", "num_bytes": 298451, "num_examples": 8894}, {"name": "maroon_5", "num_bytes": 296992, "num_examples": 8401}, {"name": "bruno_mars", "num_bytes": 241371, "num_examples": 6831}, {"name": "lady_gaga", "num_bytes": 495013, "num_examples": 14949}, {"name": "lana_del_rey", "num_bytes": 518382, "num_examples": 14768}, {"name": "ariana_grande", "num_bytes": 352469, "num_examples": 10024}, {"name": "christina_perri", "num_bytes": 81053, "num_examples": 2358}, {"name": "phil_collins", "num_bytes": 180491, "num_examples": 4718}, {"name": "rihanna", "num_bytes": 524927, "num_examples": 15505}, {"name": "camila_cabello", "num_bytes": 147677, "num_examples": 4137}, {"name": "bon_jovi", "num_bytes": 550018, "num_examples": 15139}, {"name": "elton_john", "num_bytes": 656548, "num_examples": 17599}, {"name": "john_legend", "num_bytes": 266362, "num_examples": 7744}, {"name": "john_lennon", "num_bytes": 128386, "num_examples": 3685}, {"name": "pink_floyd", "num_bytes": 164745, "num_examples": 4588}, {"name": "scorpions", "num_bytes": 293093, "num_examples": 8990}, {"name": "red_hot_chili_peppers", "num_bytes": 365278, "num_examples": 11353}, {"name": "50_cent", "num_bytes": 1371989, "num_examples": 32353}, {"name": "nirvana", "num_bytes": 103195, "num_examples": 3345}, {"name": "queen", "num_bytes": 271145, "num_examples": 8132}, {"name": "katy_perry", "num_bytes": 348706, "num_examples": 10383}, {"name": "alok", "num_bytes": 67991, "num_examples": 2115}, {"name": "u2", "num_bytes": 402969, "num_examples": 12790}, {"name": "black_eyed_peas", "num_bytes": 445727, "num_examples": 12127}, {"name": "michael_jackson", "num_bytes": 529153, "num_examples": 16749}, {"name": "jason_mraz", "num_bytes": 381834, "num_examples": 10153}, {"name": "guns_n_roses", "num_bytes": 177135, "num_examples": 5120}, {"name": "alicia_keys", "num_bytes": 330863, "num_examples": 9934}, {"name": "rammstein", "num_bytes": 56457, "num_examples": 1973}, {"name": "shawn_mendes", "num_bytes": 156939, "num_examples": 4398}, {"name": "linkin_park", "num_bytes": 331637, "num_examples": 9580}, {"name": "shakira", "num_bytes": 136600, "num_examples": 4227}], "download_size": 7993813, "dataset_size": 16104173}}
2023-01-02T00:29:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for "artist-lyrics" More Information needed
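The split metadata above defines one named split per artist (e.g. `beyonce`, `eminem`, `taylor_swift`) rather than the usual train/test layout, so an individual artist's lyrics can be requested directly by split name. A minimal sketch:

```python
# Hedged sketch: load a single artist's lyrics via its named split.
from datasets import load_dataset

taylor = load_dataset("BhavyaMuni/artist-lyrics", split="taylor_swift")

print(len(taylor))        # 19203 rows per the split metadata
print(taylor[0]["text"])  # one lyric line
```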
[ "# Dataset Card for \"artist-lyrics\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"artist-lyrics\"\n\nMore Information needed" ]
e4f15c21606feeca3c7e74f036973deee48a77f6
# Dataset Card for "bookcorpus_small_compact_512_meta" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
saibo/bookcorpus_small_compact_512_meta
[ "region:us" ]
2022-12-30T20:09:23+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "concept_with_offset", "dtype": "string"}, {"name": "cid_arrangement", "sequence": "int32"}, {"name": "schema_lengths", "sequence": "int64"}, {"name": "topic_entity_mask", "sequence": "int64"}, {"name": "text_lengths", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 208307299, "num_examples": 3109}], "download_size": 0, "dataset_size": 208307299}}
2023-01-21T13:53:58+00:00
[]
[]
TAGS #region-us
# Dataset Card for "bookcorpus_small_compact_512_meta" More Information needed
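The record's metadata documents a schema of six fields (`text`, `concept_with_offset`, `cid_arrangement`, `schema_lengths`, `topic_entity_mask`, `text_lengths`) in a single `train` split. A minimal, hedged loading sketch that only inspects that declared schema:

```python
# Hedged sketch: load the corpus and confirm the feature schema from the card.
from datasets import load_dataset

ds = load_dataset("saibo/bookcorpus_small_compact_512_meta", split="train")

print(ds.features)          # text, concept_with_offset, cid_arrangement, ...
print(ds[0]["text"][:120])  # start of one compacted passage
```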
[ "# Dataset Card for \"bookcorpus_small_compact_512_meta\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"bookcorpus_small_compact_512_meta\"\n\nMore Information needed" ]
33d0001c61dcf56673e4aeaa7a871de5d4032123
# Dataset Card for "test-squad" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
susnato/test-squad
[ "region:us" ]
2022-12-30T20:09:59+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answers", "sequence": [{"name": "text", "dtype": "string"}, {"name": "answer_start", "dtype": "int32"}]}], "splits": [{"name": "train", "num_bytes": 79346108, "num_examples": 87599}], "download_size": 0, "dataset_size": 79346108}}
2022-12-30T20:13:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for "test-squad" More Information needed
[ "# Dataset Card for \"test-squad\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"test-squad\"\n\nMore Information needed" ]
176a3f11b7ef453947b486c1de843068d108acef
# Dataset Card for LegalCaseDocumentSummarization ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [GitHub](https://github.com/Law-AI/summarization) - **Repository:** [Zenodo](https://zenodo.org/record/7152317#.Y69PkeKZODW) - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary [More Information Needed] ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@JoelNiklaus](https://github.com/JoelNiklaus) for adding this dataset.
joelniklaus/legal_case_document_summarization
[ "region:us" ]
2022-12-30T20:54:10+00:00
{}
2023-02-02T23:52:54+00:00
[]
[]
TAGS #region-us
# Dataset Card for LegalCaseDocumentSummarization ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: GitHub - Repository: Zenodo - Paper: - Leaderboard: - Point of Contact: ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions Thanks to @JoelNiklaus for adding this dataset.
[ "# Dataset Card for LegalCaseDocumentSummarization", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: GitHub\n- Repository: Zenodo\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @JoelNiklaus for adding this dataset." ]
[ "TAGS\n#region-us \n", "# Dataset Card for LegalCaseDocumentSummarization", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: GitHub\n- Repository: Zenodo\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @JoelNiklaus for adding this dataset." ]
7c48587c8ed03edde3184cf7e8dc55b271bf1a90
# Dataset Card for PlainEnglishContractsSummarization ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [GitHub](https://github.com/lauramanor/legal_summarization) - **Repository:** - **Paper:** [ACL Anthology](https://aclanthology.org/W19-2201/) - **Leaderboard:** - **Point of Contact:** ### Dataset Summary [More Information Needed] ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@JoelNiklaus](https://github.com/JoelNiklaus) for adding this dataset.
joelniklaus/plain_english_contracts_summarization
[ "region:us" ]
2022-12-30T22:17:07+00:00
{}
2022-12-30T22:18:13+00:00
[]
[]
TAGS #region-us
# Dataset Card for PlainEnglishContractsSummarization ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: GitHub - Repository: - Paper: ACL Anthology - Leaderboard: - Point of Contact: ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions Thanks to @JoelNiklaus for adding this dataset.
[ "# Dataset Card for PlainEnglishContractsSummarization", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: GitHub\n- Repository:\n- Paper: ACL Anthology\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @JoelNiklaus for adding this dataset." ]
[ "TAGS\n#region-us \n", "# Dataset Card for PlainEnglishContractsSummarization", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: GitHub\n- Repository:\n- Paper: ACL Anthology\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @JoelNiklaus for adding this dataset." ]
1d8cf8814f8fdb9b08470405566ba5b3ae34ee28
# Dataset Card for "OxfordPets_embeddings" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Multimodal-Fatima/OxfordPets_embeddings
[ "region:us" ]
2022-12-30T22:23:44+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "image", "dtype": "image"}, {"name": "vision_embeddings", "sequence": "float32"}], "splits": [{"name": "test", "num_bytes": 420471647.375, "num_examples": 3669}], "download_size": 0, "dataset_size": 420471647.375}}
2022-12-30T22:27:08+00:00
[]
[]
TAGS #region-us
# Dataset Card for "OxfordPets_embeddings" More Information needed
[ "# Dataset Card for \"OxfordPets_embeddings\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"OxfordPets_embeddings\"\n\nMore Information needed" ]
1abc2bdfb7483ae1ae130b3ad770855f8b558621
annotations_creators: - machine-generated language: - en - ar language_creators: - machine-generated license: [] multilinguality: - translation pretty_name: Arabic_English Corpus size_categories: - 1M<n<10M source_datasets: [] tags: - translation task_categories: - translation task_ids: []
NadiaHassan/ar-en
[ "region:us" ]
2022-12-31T11:33:04+00:00
{}
2022-12-31T11:38:47+00:00
[]
[]
TAGS #region-us
annotations_creators: - machine-generated language: - en - ar language_creators: - machine-generated license: [] multilinguality: - translation pretty_name: Arabic_English Corpus size_categories: - 1M<n<10M source_datasets: [] tags: - translation task_categories: - translation task_ids: []
[]
[ "TAGS\n#region-us \n" ]
751c312fb6d7ab22cdca047299e43ffafe1d8f80
# Dataset Card for "pochita_v2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Arch4ngel/pochita_v2
[ "region:us" ]
2022-12-31T14:35:38+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 67970413.0, "num_examples": 15}], "download_size": 67840616, "dataset_size": 67970413.0}}
2022-12-31T14:35:44+00:00
[]
[]
TAGS #region-us
# Dataset Card for "pochita_v2" More Information needed
[ "# Dataset Card for \"pochita_v2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"pochita_v2\"\n\nMore Information needed" ]
f897da373d352da035f31042189338fc6ff36538
# Dataset Card for "bookcorpus_small_compact_256_meta" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
saibo/bookcorpus_small_compact_256_meta
[ "region:us" ]
2022-12-31T15:07:16+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "concept_with_offset", "dtype": "string"}, {"name": "cid_arrangement", "sequence": "int32"}, {"name": "schema_lengths", "sequence": "int64"}, {"name": "topic_entity_mask", "sequence": "int64"}, {"name": "text_lengths", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 213919213, "num_examples": 6104}], "download_size": 45654115, "dataset_size": 213919213}}
2023-01-19T09:05:29+00:00
[]
[]
TAGS #region-us
# Dataset Card for "bookcorpus_small_compact_256_meta" More Information needed
[ "# Dataset Card for \"bookcorpus_small_compact_256_meta\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"bookcorpus_small_compact_256_meta\"\n\nMore Information needed" ]
85d076ab1b8c03532c344c35243833ea8181899c
# Hyperpartisan news detection This dataset contains the hyperpartisan news dataset, processed and split exactly as it was for the [longformer](https://arxiv.org/abs/2004.05150) experiments. The processing code can be found [here](https://github.com/allenai/longformer/blob/master/scripts/hp_preprocess.py).
jonathanli/hyperpartisan-longformer-split
[ "arxiv:2004.05150", "region:us" ]
2022-12-31T15:56:50+00:00
{}
2022-12-31T16:08:16+00:00
[ "2004.05150" ]
[]
TAGS #arxiv-2004.05150 #region-us
# Hyperpartisan news detection This dataset contains the hyperpartisan news dataset, processed and split exactly as it was for the longformer experiments. The processing code can be found here.
[ "# Hyperpartisan news detection\n\nThis dataset contains the hyperpartisan news dataset, processed and split exactly as it was for the longformer experiments.\nThe processing code can be found here." ]
[ "TAGS\n#arxiv-2004.05150 #region-us \n", "# Hyperpartisan news detection\n\nThis dataset contains the hyperpartisan news dataset, processed and split exactly as it was for the longformer experiments.\nThe processing code can be found here." ]
2642a7288f0d213452e8398ded8a975900c29d91
# Dataset Card for "bookcorpus_small_compact_1024_meta" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
saibo/bookcorpus_small_compact_1024_meta
[ "region:us" ]
2022-12-31T17:19:28+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "concept_with_offset", "dtype": "string"}, {"name": "cid_arrangement", "sequence": "int32"}, {"name": "schema_lengths", "sequence": "int64"}, {"name": "topic_entity_mask", "sequence": "int64"}, {"name": "text_lengths", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 192026469, "num_examples": 1571}], "download_size": 0, "dataset_size": 192026469}}
2023-01-25T17:23:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for "bookcorpus_small_compact_1024_meta" More Information needed
[ "# Dataset Card for \"bookcorpus_small_compact_1024_meta\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"bookcorpus_small_compact_1024_meta\"\n\nMore Information needed" ]
c40df9780a6b5864435f4294b0790024ff610a60
annotations_creators: - no-annotation language: [] language_creators: - other license: - afl-3.0 multilinguality: [] pretty_name: pishi size_categories: - n<1K source_datasets: - original tags: - '''cat''' task_categories: - text-to-image task_ids: []
Atallahw/pishi
[ "region:us" ]
2022-12-31T18:53:11+00:00
{}
2022-12-31T19:05:45+00:00
[]
[]
TAGS #region-us
annotations_creators: - no-annotation language: [] language_creators: - other license: - afl-3.0 multilinguality: [] pretty_name: pishi size_categories: - n<1K source_datasets: - original tags: - '''cat''' task_categories: - text-to-image task_ids: []
[]
[ "TAGS\n#region-us \n" ]
6b231df0229cd49fd2005f0244b55cfb1e7f76e7
THIS DATASET IS BASED ON THIS SOURCE: [winvoker/turkish-sentiment-analysis-dataset](https://huggingface.co/datasets/winvoker/turkish-sentiment-analysis-dataset)
W4nkel/turkish-sentiment-dataset
[ "license:cc-by-sa-4.0", "region:us" ]
2022-12-31T22:37:06+00:00
{"license": "cc-by-sa-4.0"}
2023-01-01T18:07:08+00:00
[]
[]
TAGS #license-cc-by-sa-4.0 #region-us
THIS DATASET IS BASED ON THIS SOURCE: winvoker/turkish-sentiment-analysis-dataset
[]
[ "TAGS\n#license-cc-by-sa-4.0 #region-us \n" ]
d165b1842aea1598b21c0e19fa2a05a8bb418ace
<div align="center"> <img width="640" alt="keremberke/blood-cell-object-detection" src="https://huggingface.co/datasets/keremberke/blood-cell-object-detection/resolve/main/thumbnail.jpg"> </div> ### Dataset Labels ``` ['platelets', 'rbc', 'wbc'] ``` ### Number of Images ```json {'train': 255, 'test': 36, 'valid': 73} ``` ### How to Use - Install [datasets](https://pypi.org/project/datasets/): ```bash pip install datasets ``` - Load the dataset: ```python from datasets import load_dataset ds = load_dataset("keremberke/blood-cell-object-detection", name="full") example = ds['train'][0] ``` ### Roboflow Dataset Page [https://universe.roboflow.com/team-roboflow/blood-cell-detection-1ekwu/dataset/3](https://universe.roboflow.com/team-roboflow/blood-cell-detection-1ekwu/dataset/3?ref=roboflow2huggingface) ### Citation ``` @misc{ blood-cell-detection-1ekwu_dataset, title = { Blood Cell Detection Dataset }, type = { Open Source Dataset }, author = { Team Roboflow }, howpublished = { \\url{ https://universe.roboflow.com/team-roboflow/blood-cell-detection-1ekwu } }, url = { https://universe.roboflow.com/team-roboflow/blood-cell-detection-1ekwu }, journal = { Roboflow Universe }, publisher = { Roboflow }, year = { 2022 }, month = { nov }, note = { visited on 2023-01-18 }, } ``` ### License Public Domain ### Dataset Summary This dataset was exported via roboflow.com on November 4, 2022 at 7:46 PM GMT Roboflow is an end-to-end computer vision platform that helps you * collaborate with your team on computer vision projects * collect & organize images * understand unstructured image data * annotate, and create datasets * export, train, and deploy computer vision models * use active learning to improve your dataset over time It includes 364 images. Cells are annotated in COCO format. The following pre-processing was applied to each image: * Auto-orientation of pixel data (with EXIF-orientation stripping) * Resize to 416x416 (Stretch) No image augmentation techniques were applied.
keremberke/blood-cell-object-detection
[ "task_categories:object-detection", "roboflow", "roboflow2huggingface", "Biology", "region:us" ]
2022-12-31T22:57:22+00:00
{"task_categories": ["object-detection"], "tags": ["roboflow", "roboflow2huggingface", "Biology"]}
2023-01-18T20:37:18+00:00
[]
[]
TAGS #task_categories-object-detection #roboflow #roboflow2huggingface #Biology #region-us
<div align="center"> <img width="640" alt="keremberke/blood-cell-object-detection" src="URL </div> ### Dataset Labels ### Number of Images ### How to Use - Install datasets: - Load the dataset: ### Roboflow Dataset Page URL ### License Public Domain ### Dataset Summary This dataset was exported via URL on November 4, 2022 at 7:46 PM GMT Roboflow is an end-to-end computer vision platform that helps you * collaborate with your team on computer vision projects * collect & organize images * understand unstructured image data * annotate, and create datasets * export, train, and deploy computer vision models * use active learning to improve your dataset over time It includes 364 images. Cells are annotated in COCO format. The following pre-processing was applied to each image: * Auto-orientation of pixel data (with EXIF-orientation stripping) * Resize to 416x416 (Stretch) No image augmentation techniques were applied.
[ "### Dataset Labels", "### Number of Images", "### How to Use\n\n- Install datasets:\n\n\n\n- Load the dataset:", "### Roboflow Dataset Page\nURL", "### License\nPublic Domain", "### Dataset Summary\nThis dataset was exported via URL on November 4, 2022 at 7:46 PM GMT\n\nRoboflow is an end-to-end computer vision platform that helps you\n* collaborate with your team on computer vision projects\n* collect & organize images\n* understand unstructured image data\n* annotate, and create datasets\n* export, train, and deploy computer vision models\n* use active learning to improve your dataset over time\n\nIt includes 364 images.\nCells are annotated in COCO format.\n\nThe following pre-processing was applied to each image:\n* Auto-orientation of pixel data (with EXIF-orientation stripping)\n* Resize to 416x416 (Stretch)\n\nNo image augmentation techniques were applied." ]
[ "TAGS\n#task_categories-object-detection #roboflow #roboflow2huggingface #Biology #region-us \n", "### Dataset Labels", "### Number of Images", "### How to Use\n\n- Install datasets:\n\n\n\n- Load the dataset:", "### Roboflow Dataset Page\nURL", "### License\nPublic Domain", "### Dataset Summary\nThis dataset was exported via URL on November 4, 2022 at 7:46 PM GMT\n\nRoboflow is an end-to-end computer vision platform that helps you\n* collaborate with your team on computer vision projects\n* collect & organize images\n* understand unstructured image data\n* annotate, and create datasets\n* export, train, and deploy computer vision models\n* use active learning to improve your dataset over time\n\nIt includes 364 images.\nCells are annotated in COCO format.\n\nThe following pre-processing was applied to each image:\n* Auto-orientation of pixel data (with EXIF-orientation stripping)\n* Resize to 416x416 (Stretch)\n\nNo image augmentation techniques were applied." ]
a51194c739991abb50ac8afe14704aa99a66cf51
<div align="center"> <img width="640" alt="keremberke/license-plate-object-detection" src="https://huggingface.co/datasets/keremberke/license-plate-object-detection/resolve/main/thumbnail.jpg"> </div> ### Dataset Labels ``` ['license_plate'] ``` ### Number of Images ```json {'train': 6176, 'valid': 1765, 'test': 882} ``` ### How to Use - Install [datasets](https://pypi.org/project/datasets/): ```bash pip install datasets ``` - Load the dataset: ```python from datasets import load_dataset ds = load_dataset("keremberke/license-plate-object-detection", name="full") example = ds['train'][0] ``` ### Roboflow Dataset Page [https://universe.roboflow.com/augmented-startups/vehicle-registration-plates-trudk/dataset/1](https://universe.roboflow.com/augmented-startups/vehicle-registration-plates-trudk/dataset/1?ref=roboflow2huggingface) ### Citation ``` @misc{ vehicle-registration-plates-trudk_dataset, title = { Vehicle Registration Plates Dataset }, type = { Open Source Dataset }, author = { Augmented Startups }, howpublished = { \\url{ https://universe.roboflow.com/augmented-startups/vehicle-registration-plates-trudk } }, url = { https://universe.roboflow.com/augmented-startups/vehicle-registration-plates-trudk }, journal = { Roboflow Universe }, publisher = { Roboflow }, year = { 2022 }, month = { jun }, note = { visited on 2023-01-18 }, } ``` ### License CC BY 4.0 ### Dataset Summary This dataset was exported via roboflow.ai on January 13, 2022 at 5:20 PM GMT It includes 8823 images. VRP are annotated in COCO format. The following pre-processing was applied to each image: * Auto-orientation of pixel data (with EXIF-orientation stripping) No image augmentation techniques were applied.
keremberke/license-plate-object-detection
[ "task_categories:object-detection", "roboflow", "roboflow2huggingface", "Self Driving", "Anpr", "region:us" ]
2023-01-01T02:32:07+00:00
{"task_categories": ["object-detection"], "tags": ["roboflow", "roboflow2huggingface", "Self Driving", "Anpr"]}
2023-01-18T20:37:51+00:00
[]
[]
TAGS #task_categories-object-detection #roboflow #roboflow2huggingface #Self Driving #Anpr #region-us
<div align="center"> <img width="640" alt="keremberke/license-plate-object-detection" src="URL </div> ### Dataset Labels ### Number of Images ### How to Use - Install datasets: - Load the dataset: ### Roboflow Dataset Page URL ### License CC BY 4.0 ### Dataset Summary This dataset was exported via URL on January 13, 2022 at 5:20 PM GMT It includes 8823 images. VRP are annotated in COCO format. The following pre-processing was applied to each image: * Auto-orientation of pixel data (with EXIF-orientation stripping) No image augmentation techniques were applied.
[ "### Dataset Labels", "### Number of Images", "### How to Use\n\n- Install datasets:\n\n\n\n- Load the dataset:", "### Roboflow Dataset Page\nURL", "### License\nCC BY 4.0", "### Dataset Summary\nThis dataset was exported via URL on January 13, 2022 at 5:20 PM GMT\n\nIt includes 8823 images.\nVRP are annotated in COCO format.\n\nThe following pre-processing was applied to each image:\n* Auto-orientation of pixel data (with EXIF-orientation stripping)\n\nNo image augmentation techniques were applied." ]
[ "TAGS\n#task_categories-object-detection #roboflow #roboflow2huggingface #Self Driving #Anpr #region-us \n", "### Dataset Labels", "### Number of Images", "### How to Use\n\n- Install datasets:\n\n\n\n- Load the dataset:", "### Roboflow Dataset Page\nURL", "### License\nCC BY 4.0", "### Dataset Summary\nThis dataset was exported via URL on January 13, 2022 at 5:20 PM GMT\n\nIt includes 8823 images.\nVRP are annotated in COCO format.\n\nThe following pre-processing was applied to each image:\n* Auto-orientation of pixel data (with EXIF-orientation stripping)\n\nNo image augmentation techniques were applied." ]
35f65b603b004e57d5eea1c5a4b90bff1f34e290
# Dataset Card for "whisper-small-hindi" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
shields/whisper-small-hindi
[ "region:us" ]
2023-01-01T03:28:10+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 48000}}}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 172188480.0, "num_examples": 6540}, {"name": "test", "num_bytes": 90338189.0, "num_examples": 2894}], "download_size": 0, "dataset_size": 262526669.0}}
2023-01-01T04:00:50+00:00
[]
[]
TAGS #region-us
# Dataset Card for "whisper-small-hindi" More Information needed
[ "# Dataset Card for \"whisper-small-hindi\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"whisper-small-hindi\"\n\nMore Information needed" ]
e42a0b8b3344d73d1ecf9165b90abcdeb9b87b62
# Dataset Card for "catalan_commonvoice" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
shields/catalan_commonvoice
[ "region:us" ]
2023-01-01T04:47:06+00:00
{"dataset_info": {"features": [{"name": "client_id", "dtype": "string"}, {"name": "path", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 48000}}}, {"name": "sentence", "dtype": "string"}, {"name": "up_votes", "dtype": "int64"}, {"name": "down_votes", "dtype": "int64"}, {"name": "age", "dtype": "string"}, {"name": "gender", "dtype": "string"}, {"name": "accent", "dtype": "string"}, {"name": "locale", "dtype": "string"}, {"name": "segment", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 34635950777.0, "num_examples": 905243}, {"name": "validation", "num_bytes": 652519005.0, "num_examples": 16340}, {"name": "test", "num_bytes": 625225219.0, "num_examples": 16340}], "download_size": 34496947979, "dataset_size": 35913695001.0}}
2023-01-01T05:12:56+00:00
[]
[]
TAGS #region-us
# Dataset Card for "catalan_commonvoice" More Information needed
[ "# Dataset Card for \"catalan_commonvoice\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"catalan_commonvoice\"\n\nMore Information needed" ]
2f760f5a232b842b788e58adfb533dbf205a8b31
# Dataset Card for "microsoft-fluentui-emoji-512-whitebg" [svg and their file names were converted to images and text from Microsoft's fluentui-emoji repo](https://github.com/microsoft/fluentui-emoji)
Norod78/microsoft-fluentui-emoji-512-whitebg
[ "task_categories:unconditional-image-generation", "task_categories:text-to-image", "size_categories:n<10K", "language:en", "license:mit", "emoji", "fluentui", "region:us" ]
2023-01-01T09:03:35+00:00
{"language": "en", "license": "mit", "size_categories": ["n<10K"], "task_categories": ["unconditional-image-generation", "text-to-image"], "pretty_name": "Microsoft FluentUI Emoji 512x512 White Background", "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 329173985.708, "num_examples": 7564}], "download_size": 338676474, "dataset_size": 329173985.708}, "tags": ["emoji", "fluentui"]}
2023-07-16T11:12:01+00:00
[]
[ "en" ]
TAGS #task_categories-unconditional-image-generation #task_categories-text-to-image #size_categories-n<10K #language-English #license-mit #emoji #fluentui #region-us
# Dataset Card for "microsoft-fluentui-emoji-512-whitebg" svg and their file names were converted to images and text from Microsoft's fluentui-emoji repo
[ "# Dataset Card for \"microsoft-fluentui-emoji-512-whitebg\"\n\nsvg and their file names were converted to images and text from Microsoft's fluentui-emoji repo" ]
[ "TAGS\n#task_categories-unconditional-image-generation #task_categories-text-to-image #size_categories-n<10K #language-English #license-mit #emoji #fluentui #region-us \n", "# Dataset Card for \"microsoft-fluentui-emoji-512-whitebg\"\n\nsvg and their file names were converted to images and text from Microsoft's fluentui-emoji repo" ]
8e8c373f3a2601b4ed440389466a7c230c5fabca
# Dataset Card for "microsoft-fluentui-emoji-768" [svg and their file names were converted to images and text from Microsoft's fluentui-emoji repo](https://github.com/microsoft/fluentui-emoji)
Norod78/microsoft-fluentui-emoji-768
[ "task_categories:text-to-image", "size_categories:n<10K", "language:en", "license:mit", "emoji", "fluentui", "region:us" ]
2023-01-01T09:35:07+00:00
{"language": "en", "license": "mit", "size_categories": ["n<10K"], "task_categories": ["text-to-image"], "pretty_name": "Microsoft FluentUI Emoji 768x768", "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 679617796.94, "num_examples": 7564}], "download_size": 704564297, "dataset_size": 679617796.94}, "tags": ["emoji", "fluentui"]}
2023-07-16T11:13:07+00:00
[]
[ "en" ]
TAGS #task_categories-text-to-image #size_categories-n<10K #language-English #license-mit #emoji #fluentui #region-us
# Dataset Card for "microsoft-fluentui-emoji-768" svg and their file names were converted to images and text from Microsoft's fluentui-emoji repo
[ "# Dataset Card for \"microsoft-fluentui-emoji-768\"\n\nsvg and their file names were converted to images and text from Microsoft's fluentui-emoji repo" ]
[ "TAGS\n#task_categories-text-to-image #size_categories-n<10K #language-English #license-mit #emoji #fluentui #region-us \n", "# Dataset Card for \"microsoft-fluentui-emoji-768\"\n\nsvg and their file names were converted to images and text from Microsoft's fluentui-emoji repo" ]
d8ad1490da2686d7af5c6a7f3d8844f0f9542b0f
### Roboflow Dataset Page [https://universe.roboflow.com/material-identification/garbage-classification-3/dataset/2](https://universe.roboflow.com/material-identification/garbage-classification-3/dataset/2?ref=roboflow2huggingface) ### Dataset Labels ``` ['biodegradable', 'cardboard', 'glass', 'metal', 'paper', 'plastic'] ``` ### Citation ``` @misc{ garbage-classification-3_dataset, title = { GARBAGE CLASSIFICATION 3 Dataset }, type = { Open Source Dataset }, author = { Material Identification }, howpublished = { \\url{ https://universe.roboflow.com/material-identification/garbage-classification-3 } }, url = { https://universe.roboflow.com/material-identification/garbage-classification-3 }, journal = { Roboflow Universe }, publisher = { Roboflow }, year = { 2022 }, month = { mar }, note = { visited on 2023-01-02 }, } ``` ### License CC BY 4.0 ### Dataset Summary This dataset was exported via roboflow.com on July 27, 2022 at 5:44 AM GMT Roboflow is an end-to-end computer vision platform that helps you * collaborate with your team on computer vision projects * collect & organize images * understand unstructured image data * annotate, and create datasets * export, train, and deploy computer vision models * use active learning to improve your dataset over time It includes 10464 images. GARBAGE-GARBAGE-CLASSIFICATION are annotated in COCO format. The following pre-processing was applied to each image: * Auto-orientation of pixel data (with EXIF-orientation stripping) * Resize to 416x416 (Stretch) The following augmentation was applied to create 1 versions of each source image: * 50% probability of horizontal flip * 50% probability of vertical flip * Equal probability of one of the following 90-degree rotations: none, clockwise, counter-clockwise, upside-down
keremberke/garbage-object-detection
[ "task_categories:object-detection", "roboflow", "region:us" ]
2023-01-01T09:38:12+00:00
{"task_categories": ["object-detection"], "tags": ["roboflow"]}
2023-01-05T11:30:08+00:00
[]
[]
TAGS #task_categories-object-detection #roboflow #region-us
### Roboflow Dataset Page URL ### Dataset Labels ### License CC BY 4.0 ### Dataset Summary This dataset was exported via URL on July 27, 2022 at 5:44 AM GMT Roboflow is an end-to-end computer vision platform that helps you * collaborate with your team on computer vision projects * collect & organize images * understand unstructured image data * annotate, and create datasets * export, train, and deploy computer vision models * use active learning to improve your dataset over time It includes 10464 images. GARBAGE-GARBAGE-CLASSIFICATION are annotated in COCO format. The following pre-processing was applied to each image: * Auto-orientation of pixel data (with EXIF-orientation stripping) * Resize to 416x416 (Stretch) The following augmentation was applied to create 1 versions of each source image: * 50% probability of horizontal flip * 50% probability of vertical flip * Equal probability of one of the following 90-degree rotations: none, clockwise, counter-clockwise, upside-down
[ "### Roboflow Dataset Page\nURL", "### Dataset Labels", "### License\nCC BY 4.0", "### Dataset Summary\nThis dataset was exported via URL on July 27, 2022 at 5:44 AM GMT\n\nRoboflow is an end-to-end computer vision platform that helps you\n* collaborate with your team on computer vision projects\n* collect & organize images\n* understand unstructured image data\n* annotate, and create datasets\n* export, train, and deploy computer vision models\n* use active learning to improve your dataset over time\n\nIt includes 10464 images.\nGARBAGE-GARBAGE-CLASSIFICATION are annotated in COCO format.\n\nThe following pre-processing was applied to each image:\n* Auto-orientation of pixel data (with EXIF-orientation stripping)\n* Resize to 416x416 (Stretch)\n\nThe following augmentation was applied to create 1 versions of each source image:\n* 50% probability of horizontal flip\n* 50% probability of vertical flip\n* Equal probability of one of the following 90-degree rotations: none, clockwise, counter-clockwise, upside-down" ]
[ "TAGS\n#task_categories-object-detection #roboflow #region-us \n", "### Roboflow Dataset Page\nURL", "### Dataset Labels", "### License\nCC BY 4.0", "### Dataset Summary\nThis dataset was exported via URL on July 27, 2022 at 5:44 AM GMT\n\nRoboflow is an end-to-end computer vision platform that helps you\n* collaborate with your team on computer vision projects\n* collect & organize images\n* understand unstructured image data\n* annotate, and create datasets\n* export, train, and deploy computer vision models\n* use active learning to improve your dataset over time\n\nIt includes 10464 images.\nGARBAGE-GARBAGE-CLASSIFICATION are annotated in COCO format.\n\nThe following pre-processing was applied to each image:\n* Auto-orientation of pixel data (with EXIF-orientation stripping)\n* Resize to 416x416 (Stretch)\n\nThe following augmentation was applied to create 1 versions of each source image:\n* 50% probability of horizontal flip\n* 50% probability of vertical flip\n* Equal probability of one of the following 90-degree rotations: none, clockwise, counter-clockwise, upside-down" ]
2c0f4e8e9085f3079b80137e780aab0e54936cfc
<div align="center"> <img width="640" alt="keremberke/forklift-object-detection" src="https://huggingface.co/datasets/keremberke/forklift-object-detection/resolve/main/thumbnail.jpg"> </div> ### Dataset Labels ``` ['forklift', 'person'] ``` ### Number of Images ```json {'test': 42, 'valid': 84, 'train': 295} ``` ### How to Use - Install [datasets](https://pypi.org/project/datasets/): ```bash pip install datasets ``` - Load the dataset: ```python from datasets import load_dataset ds = load_dataset("keremberke/forklift-object-detection", name="full") example = ds['train'][0] ``` ### Roboflow Dataset Page [https://universe.roboflow.com/mohamed-traore-2ekkp/forklift-dsitv/dataset/1](https://universe.roboflow.com/mohamed-traore-2ekkp/forklift-dsitv/dataset/1?ref=roboflow2huggingface) ### Citation ``` @misc{ forklift-dsitv_dataset, title = { Forklift Dataset }, type = { Open Source Dataset }, author = { Mohamed Traore }, howpublished = { \\url{ https://universe.roboflow.com/mohamed-traore-2ekkp/forklift-dsitv } }, url = { https://universe.roboflow.com/mohamed-traore-2ekkp/forklift-dsitv }, journal = { Roboflow Universe }, publisher = { Roboflow }, year = { 2022 }, month = { mar }, note = { visited on 2023-01-15 }, } ``` ### License CC BY 4.0 ### Dataset Summary This dataset was exported via roboflow.ai on April 3, 2022 at 9:01 PM GMT It includes 421 images. Forklift are annotated in COCO format. The following pre-processing was applied to each image: * Auto-orientation of pixel data (with EXIF-orientation stripping) No image augmentation techniques were applied.
keremberke/forklift-object-detection
[ "task_categories:object-detection", "roboflow", "roboflow2huggingface", "Manufacturing", "region:us" ]
2023-01-01T09:57:34+00:00
{"task_categories": ["object-detection"], "tags": ["roboflow", "roboflow2huggingface", "Manufacturing"]}
2023-01-15T14:32:47+00:00
[]
[]
TAGS #task_categories-object-detection #roboflow #roboflow2huggingface #Manufacturing #region-us
<div align="center"> <img width="640" alt="keremberke/forklift-object-detection" src="URL </div> ### Dataset Labels ### Number of Images ### How to Use - Install datasets: - Load the dataset: ### Roboflow Dataset Page URL ### License CC BY 4.0 ### Dataset Summary This dataset was exported via URL on April 3, 2022 at 9:01 PM GMT It includes 421 images. Forklift are annotated in COCO format. The following pre-processing was applied to each image: * Auto-orientation of pixel data (with EXIF-orientation stripping) No image augmentation techniques were applied.
[ "### Dataset Labels", "### Number of Images", "### How to Use\n\n- Install datasets:\n\n\n\n- Load the dataset:", "### Roboflow Dataset Page\nURL", "### License\nCC BY 4.0", "### Dataset Summary\nThis dataset was exported via URL on April 3, 2022 at 9:01 PM GMT\n\nIt includes 421 images.\nForklift are annotated in COCO format.\n\nThe following pre-processing was applied to each image:\n* Auto-orientation of pixel data (with EXIF-orientation stripping)\n\nNo image augmentation techniques were applied." ]
[ "TAGS\n#task_categories-object-detection #roboflow #roboflow2huggingface #Manufacturing #region-us \n", "### Dataset Labels", "### Number of Images", "### How to Use\n\n- Install datasets:\n\n\n\n- Load the dataset:", "### Roboflow Dataset Page\nURL", "### License\nCC BY 4.0", "### Dataset Summary\nThis dataset was exported via URL on April 3, 2022 at 9:01 PM GMT\n\nIt includes 421 images.\nForklift are annotated in COCO format.\n\nThe following pre-processing was applied to each image:\n* Auto-orientation of pixel data (with EXIF-orientation stripping)\n\nNo image augmentation techniques were applied." ]
d73f8f05a66e1c7fcaf2b0e8b806d85aefcc642d
# Dataset Card for "phone-recognition" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Nithiwat/phone-recognition
[ "region:us" ]
2023-01-01T11:50:02+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "ipa", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 762470383.96, "num_examples": 3860}], "download_size": 902056545, "dataset_size": 762470383.96}}
2023-01-07T09:48:54+00:00
[]
[]
TAGS #region-us
# Dataset Card for "phone-recognition" More Information needed
[ "# Dataset Card for \"phone-recognition\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"phone-recognition\"\n\nMore Information needed" ]
eaf94e802d241351cf3f12bc2d221d108ff5d8f1
# Dataset Card for "rlhf-reward-datasets" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yitingxie/rlhf-reward-datasets
[ "region:us" ]
2023-01-01T12:22:09+00:00
{"dataset_info": {"features": [{"name": "prompt", "dtype": "string"}, {"name": "chosen", "dtype": "string"}, {"name": "rejected", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 6093563, "num_examples": 5103}, {"name": "train", "num_bytes": 90528217, "num_examples": 76256}], "download_size": 57138483, "dataset_size": 96621780}}
2023-01-01T12:23:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rlhf-reward-datasets" More Information needed
[ "# Dataset Card for \"rlhf-reward-datasets\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rlhf-reward-datasets\"\n\nMore Information needed" ]
f7ec397979411ad5e08e1771ea62e978dfec2cfe
# bAbi_nli bAbI tasks recast as natural language inference. https://github.com/facebookarchive/bAbI-tasks tasksource recasting code: https://colab.research.google.com/drive/1J_RqDSw9iPxJSBvCJu-VRbjXnrEjKVvr?usp=sharing ```bibtex @article{weston2015towards, title={Towards ai-complete question answering: A set of prerequisite toy tasks}, author={Weston, Jason and Bordes, Antoine and Chopra, Sumit and Rush, Alexander M and Van Merri{\"e}nboer, Bart and Joulin, Armand and Mikolov, Tomas}, journal={arXiv preprint arXiv:1502.05698}, year={2015} } ```
tasksource/babi_nli
[ "task_categories:text-classification", "task_ids:natural-language-inference", "annotations_creators:expert-generated", "language_creators:crowdsourced", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:en", "license:bsd", "logical reasoning", "nli", "natural-language-inference", "reasoning", "logic", "region:us" ]
2023-01-01T14:39:33+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["crowdsourced"], "language": ["en"], "license": "bsd", "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "source_datasets": ["original"], "task_categories": ["text-classification"], "task_ids": ["natural-language-inference"], "pretty_name": "babi_nli", "tags": ["logical reasoning", "nli", "natural-language-inference", "reasoning", "logic"]}
2023-06-05T08:05:59+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #task_ids-natural-language-inference #annotations_creators-expert-generated #language_creators-crowdsourced #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-English #license-bsd #logical reasoning #nli #natural-language-inference #reasoning #logic #region-us
# bAbi_nli bAbI tasks recast as natural language inference. URL tasksource recasting code: URL
[ "# bAbi_nli\n\nbAbI tasks recast as natural language inference.\nURL\n\ntasksource recasting code:\nURL" ]
[ "TAGS\n#task_categories-text-classification #task_ids-natural-language-inference #annotations_creators-expert-generated #language_creators-crowdsourced #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-English #license-bsd #logical reasoning #nli #natural-language-inference #reasoning #logic #region-us \n", "# bAbi_nli\n\nbAbI tasks recast as natural language inference.\nURL\n\ntasksource recasting code:\nURL" ]
16d21e47237942fe49cf1ee7a35f0ef1d35c3176
# Dataset Card for "porkypig" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
matteopilotto/porkypig
[ "region:us" ]
2023-01-01T15:55:29+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 2107384.0, "num_examples": 11}], "download_size": 2108606, "dataset_size": 2107384.0}}
2023-01-01T15:55:35+00:00
[]
[]
TAGS #region-us
# Dataset Card for "porkypig" More Information needed
[ "# Dataset Card for \"porkypig\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"porkypig\"\n\nMore Information needed" ]
33e00bf20c361ee26f360c4c603f826f9d055e1c
This dataset contains just under half of the training data used to train [Paint Journey](https://huggingface.co/FredZhang7/Paint-Journey). All 768x768 images were generated with one of Disco Diffusion v3.1, v4.1, or v5.x, and were later upscaled and then downscaled twice (super-resolution) with R-ESRGAN General WDN 4x V3 just before training.
FredZhang7/disco-diffusion
[ "license:mit", "stable-diffusion", "paint-journey", "region:us" ]
2023-01-01T18:57:14+00:00
{"license": "mit", "tags": ["stable-diffusion", "paint-journey"]}
2023-01-02T06:25:07+00:00
[]
[]
TAGS #license-mit #stable-diffusion #paint-journey #region-us
This dataset contains just under half of the training data used to train Paint Journey. All 768x768 images were generated with one of Disco Diffusion v3.1, v4.1, or v5.x, and were later upscaled and then downscaled twice (super-resolution) with R-ESRGAN General WDN 4x V3 just before training.
[]
[ "TAGS\n#license-mit #stable-diffusion #paint-journey #region-us \n" ]
83bd696d6f1611a7ede49f8fe1c68727cf3ce7ae
# Dataset Card for "Terrier-images" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
bobber/Terrier-images
[ "region:us" ]
2023-01-01T19:49:56+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 1623322.0, "num_examples": 18}], "download_size": 1624818, "dataset_size": 1623322.0}}
2023-01-01T19:50:00+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Terrier-images" More Information needed
[ "# Dataset Card for \"Terrier-images\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Terrier-images\"\n\nMore Information needed" ]
616e9c7b2570cd1cfeacb459d862a2e64e8b0e98
# Dataset Card for "twitter_de_ru" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
carexl8/twitter_de_ru
[ "region:us" ]
2023-01-01T21:02:34+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}, {"name": "created_at", "dtype": "timestamp[ns, tz=UTC]"}, {"name": "tokens", "sequence": "string"}, {"name": "language tags", "sequence": "int64"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 563616, "num_examples": 688}], "download_size": 0, "dataset_size": 563616}}
2023-04-21T18:41:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for "twitter_de_ru" More Information needed
[ "# Dataset Card for \"twitter_de_ru\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"twitter_de_ru\"\n\nMore Information needed" ]
37fa73c9eb0c081e4a9faf15df7022b0b14d6b79
# Dataset Card for "CV_Eng_train_specialCharsRemoved" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
kabir5297/CV_Eng_train_specialCharsRemoved
[ "region:us" ]
2023-01-01T22:54:57+00:00
{"dataset_info": {"features": [{"name": "filename", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 822619, "num_examples": 11281}], "download_size": 409770, "dataset_size": 822619}}
2023-01-01T22:55:01+00:00
[]
[]
TAGS #region-us
# Dataset Card for "CV_Eng_train_specialCharsRemoved" More Information needed
[ "# Dataset Card for \"CV_Eng_train_specialCharsRemoved\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"CV_Eng_train_specialCharsRemoved\"\n\nMore Information needed" ]
5e352bce8912d68ee9bd4ea045542123fdb4cded
# Dataset Card for "w2v2_0" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mp1704/w2v2_0
[ "region:us" ]
2023-01-02T03:08:30+00:00
{"dataset_info": {"features": [{"name": "file", "dtype": "string"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 163223, "num_examples": 1048}, {"name": "test", "num_bytes": 18107, "num_examples": 117}], "download_size": 107863, "dataset_size": 181330}}
2023-01-02T03:08:39+00:00
[]
[]
TAGS #region-us
# Dataset Card for "w2v2_0" More Information needed
[ "# Dataset Card for \"w2v2_0\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"w2v2_0\"\n\nMore Information needed" ]
831307f930f53d188e137ccd5d935e4162dd0929
# Dataset Card for "mediumroast-press-releases" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
jgoodie/mediumroast-press-releases
[ "region:us" ]
2023-01-02T06:11:17+00:00
{"dataset_info": {"features": [{"name": "Id", "dtype": "string"}, {"name": "Title", "dtype": "string"}, {"name": "Published", "dtype": "string"}, {"name": "Link", "dtype": "string"}, {"name": "Text", "dtype": "string"}, {"name": "Abouts", "struct": [{"name": "About TransVoyant", "dtype": "string"}, {"name": "About Merck Global Health Innovation Fund", "dtype": "string"}, {"name": "About P74 Ventures", "dtype": "string"}, {"name": "About Historic Hotels of America", "dtype": "string"}, {"name": "About First Internet Bancorp", "dtype": "string"}, {"name": "About Mary Kay", "dtype": "string"}, {"name": "About United Nations Development Programme (UNDP)", "dtype": "string"}, {"name": "About China International Center for Economic and Technical Exchanges (CICETE)", "dtype": "string"}, {"name": "About China Women\u2019s Development Foundation (CWDF)", "dtype": "string"}, {"name": "About Ivanti", "dtype": "string"}, {"name": "About Brandon Hall Group", "dtype": "string"}, {"name": "About UBS", "dtype": "string"}, {"name": "About CDP", "dtype": "string"}, {"name": "About The SEAL Awards", "dtype": "string"}, {"name": "About CyrusOne", "dtype": "string"}, {"name": "About Vizient", "dtype": "string"}, {"name": "About KARL STORZ", "dtype": "string"}, {"name": "About Rupert Resources", "dtype": "string"}, {"name": "About Grain Sustainability", "dtype": "string"}, {"name": "About Garmin International, Inc.", "dtype": "string"}, {"name": "About CARFAX Canada", "dtype": "string"}, {"name": "About Edgecore", "dtype": "string"}, {"name": "About Cyware", "dtype": "string"}, {"name": "About CSW", "dtype": "string"}, {"name": "About Euromonitor International", "dtype": "string"}, {"name": "About FICO", "dtype": "string"}, {"name": "About Veritone", "dtype": "string"}, {"name": "About Seagate Technology", "dtype": "string"}, {"name": "About MedVector", "dtype": "string"}, {"name": "About Gain\u00ae", "dtype": "string"}, {"name": "About Procter & Gamble", "dtype": "string"}, {"name": "About SITE Centers Corp.", "dtype": "string"}, {"name": "About Ford Motor Company", "dtype": "string"}, {"name": "About CDK Global, Inc.", "dtype": "string"}, {"name": "About William Blair Investment Banking", "dtype": "string"}, {"name": "About William Blair", "dtype": "string"}, {"name": "About Postmedia Network Inc.", "dtype": "string"}, {"name": "About Sports Venture Holdings and BET99", "dtype": "string"}, {"name": "About Great Western Bank", "dtype": "string"}, {"name": "About Ambient Photonics", "dtype": "string"}, {"name": "About Origis Energy", "dtype": "string"}, {"name": "About Mitsubishi Power Americas, Inc.", "dtype": "string"}, {"name": "About Insider Connected", "dtype": "string"}, {"name": "About Stern Pinball, Inc.", "dtype": "string"}, {"name": "About Garmin:", "dtype": "string"}, {"name": "About Navy Federal Credit Union:", "dtype": "string"}, {"name": "About Carbon Robotics", "dtype": "string"}, {"name": "About Purpose Investments Inc.", "dtype": "string"}, {"name": "About Purpose Financial", "dtype": "string"}, {"name": "About Second Harvest", "dtype": "string"}, {"name": "About Kerrigan Advisors", "dtype": "string"}, {"name": "About InstaSafe", "dtype": "string"}, {"name": "About ZNet Technologies", "dtype": "string"}, {"name": "About RPtech", "dtype": "string"}, {"name": "About Xylem", "dtype": "string"}, {"name": "About Bobbie", "dtype": "string"}, {"name": "About Uber Eats", "dtype": "string"}, {"name": "About ConocoPhillips", "dtype": "string"}, {"name": "About Genius Sports", "dtype": "string"}, {"name": 
"About Walmart", "dtype": "string"}, {"name": "About", "dtype": "string"}, {"name": "About Historic Hotels Worldwide", "dtype": "string"}, {"name": "About Symetra", "dtype": "string"}, {"name": "About MCR", "dtype": "string"}, {"name": "About Bynder", "dtype": "string"}, {"name": "About Thomas H. Lee Partners, L.P.", "dtype": "string"}, {"name": "About Nickel 28", "dtype": "string"}, {"name": "About Critical Metals Corp.", "dtype": "string"}, {"name": "About European Lithium Ltd", "dtype": "string"}, {"name": "About Historic Hotels Worldwide\u00ae", "dtype": "string"}, {"name": "About KBRA", "dtype": "string"}, {"name": "About Knowles", "dtype": "string"}, {"name": "About Trillbit", "dtype": "string"}, {"name": "About UKG", "dtype": "string"}, {"name": "About Unravel Data", "dtype": "string"}, {"name": "About OmniVision", "dtype": "string"}, {"name": "About Seagate", "dtype": "string"}, {"name": "About Sharp NEC Display Solutions of America, Inc.", "dtype": "string"}, {"name": "About Avery Dennison", "dtype": "string"}, {"name": "About three-quarters plan to use locationing technologies", "dtype": "string"}, {"name": "About 63", "dtype": "string"}, {"name": "About Ocean Prime", "dtype": "string"}, {"name": "About Williams", "dtype": "string"}, {"name": "About Global Learning Systems", "dtype": "string"}, {"name": "About Orthofix", "dtype": "string"}, {"name": "About Laura Wilkinson", "dtype": "string"}, {"name": "About Fiduciary Trust International", "dtype": "string"}, {"name": "About Franklin Templeton", "dtype": "string"}, {"name": "About Momentus", "dtype": "string"}, {"name": "About Jaunt", "dtype": "string"}, {"name": "About the NEO Exchange", "dtype": "string"}, {"name": "About Purpose Investments", "dtype": "string"}, {"name": "About Prasad Corp:", "dtype": "string"}, {"name": "About Cube-Tec International:", "dtype": "string"}, {"name": "About Bowman Consulting Group Ltd. 
(Bowman):", "dtype": "string"}, {"name": "About Dutch Bros:", "dtype": "string"}, {"name": "About Cascade", "dtype": "string"}, {"name": "About BEF and Change the Course", "dtype": "string"}, {"name": "About GPM", "dtype": "string"}, {"name": "About Tmunity Therapeutics", "dtype": "string"}, {"name": "About Kite", "dtype": "string"}, {"name": "About Gilead Sciences", "dtype": "string"}, {"name": "About Pathway", "dtype": "string"}, {"name": "About Globality \u2013 Option 1", "dtype": "string"}, {"name": "About The AI Journal Global Excellence Awards", "dtype": "string"}, {"name": "About National Geographic Content", "dtype": "string"}, {"name": "About ABC Signature", "dtype": "string"}, {"name": "About Keshet Studios", "dtype": "string"}, {"name": "About Promega Corporation", "dtype": "string"}, {"name": "About Bristol Myers Squibb", "dtype": "string"}, {"name": "About Montgomery County", "dtype": "string"}, {"name": "About Rave Mobile Safety", "dtype": "string"}, {"name": "About Motorola Solutions (NYSE: MSI)", "dtype": "string"}, {"name": "About ARR", "dtype": "string"}, {"name": "About Everygame Poker:", "dtype": "string"}, {"name": "About Graceland and Elvis Presley Enterprises, Inc", "dtype": "string"}, {"name": "About ELVIS", "dtype": "string"}, {"name": "About Abacus", "dtype": "string"}, {"name": "About East Resources Acquisition Company", "dtype": "string"}, {"name": "About INNIO", "dtype": "string"}, {"name": "About Chilmark", "dtype": "string"}, {"name": "About LeanTaaS", "dtype": "string"}, {"name": "About Adobe", "dtype": "string"}, {"name": "About Webhelp", "dtype": "string"}, {"name": "About MassNAELA", "dtype": "string"}, {"name": "About Multiple Sclerosis", "dtype": "string"}, {"name": "About the American Music Therapy Association", "dtype": "string"}, {"name": "About Five9", "dtype": "string"}, {"name": "About ClearSale", "dtype": "string"}, {"name": "About Savills", "dtype": "string"}, {"name": "About Lucy Cavendish College", "dtype": "string"}, {"name": "About Queens College of the City University of New York", "dtype": "string"}, {"name": "About ReferralPoint", "dtype": "string"}, {"name": "About athenahealth Marketplace", "dtype": "string"}, {"name": "About Luxfer Holdings PLC", "dtype": "string"}, {"name": "About CAIRE Inc.", "dtype": "string"}, {"name": "About Know Labs, Inc.", "dtype": "string"}, {"name": "About Business Wire", "dtype": "string"}, {"name": "About ForgeRock", "dtype": "string"}, {"name": "About Ameresco, Inc.", "dtype": "string"}, {"name": "About Gunze", "dtype": "string"}, {"name": "About Motorola Solutions", "dtype": "string"}, {"name": "About NTT Research", "dtype": "string"}, {"name": "About Factspan:", "dtype": "string"}, {"name": "About the Children\u2019s Eye Foundation of AAPOS", "dtype": "string"}, {"name": "About Magik Eye Inc", "dtype": "string"}, {"name": "About Garmin Connect", "dtype": "string"}, {"name": "About Bladder and Urothelial Cancer", "dtype": "string"}, {"name": "About the EV-103/KEYNOTE-869 Trial", "dtype": "string"}, {"name": "About PADCEV", "dtype": "string"}, {"name": "About KEYTRUDA", "dtype": "string"}, {"name": "About Seagen", "dtype": "string"}, {"name": "About Astellas", "dtype": "string"}, {"name": "About Merck", "dtype": "string"}, {"name": "About the Seagen, Astellas and Merck Collaboration", "dtype": "string"}, {"name": "About bluebird bio, Inc.", "dtype": "string"}, {"name": "About Great Western Bancorp, Inc.", "dtype": "string"}, {"name": "About Pebblebrook Hotel Trust", "dtype": "string"}, {"name": "About The 
Harris Poll", "dtype": "string"}, {"name": "About MITRE", "dtype": "string"}, {"name": "About Nomi Health", "dtype": "string"}, {"name": "About Ganymede", "dtype": "string"}, {"name": "About Apprentice", "dtype": "string"}, {"name": "About Columbia Sussex:", "dtype": "string"}, {"name": "About Marriott Hotels", "dtype": "string"}, {"name": "About Carallel", "dtype": "string"}, {"name": "About Jon Peddie Research", "dtype": "string"}, {"name": "About Semperis", "dtype": "string"}, {"name": "About InspereX", "dtype": "string"}, {"name": "About Allison+Partners", "dtype": "string"}, {"name": "About American Efficient", "dtype": "string"}, {"name": "About See it. Feel it. Seal it.", "dtype": "string"}, {"name": "About NBC News:", "dtype": "string"}, {"name": "About Tyler Technologies, Inc.", "dtype": "string"}, {"name": "About Duo Health:", "dtype": "string"}, {"name": "About Desert Kidney Associates:", "dtype": "string"}, {"name": "About Peachtree Group", "dtype": "string"}, {"name": "About Bona", "dtype": "string"}, {"name": "About UserTesting", "dtype": "string"}, {"name": "About ICR", "dtype": "string"}, {"name": "About ActivTrak", "dtype": "string"}, {"name": "About Historic Hotels of America\u00ae", "dtype": "string"}, {"name": "About NextDecade Corporation", "dtype": "string"}, {"name": "About ElevateBio BaseCamp", "dtype": "string"}, {"name": "About ElevateBio", "dtype": "string"}, {"name": "About WPP", "dtype": "string"}, {"name": "About ExtensisHR", "dtype": "string"}, {"name": "About Pfizer: Breakthroughs That Change Patients\u2019 Lives", "dtype": "string"}, {"name": "About CareWell Health Medical Center", "dtype": "string"}, {"name": "About Upper Crust Food Service:", "dtype": "string"}, {"name": "About College Chefs:", "dtype": "string"}, {"name": "About Lineage Logistics", "dtype": "string"}, {"name": "About Bay Grove", "dtype": "string"}, {"name": "About Bird", "dtype": "string"}, {"name": "About Wish", "dtype": "string"}, {"name": "About Eurora Solutions", "dtype": "string"}, {"name": "About Chatham Lodging Trust", "dtype": "string"}, {"name": "About DESTINY-Breast03", "dtype": "string"}, {"name": "About HER2 Positive Breast Cancer", "dtype": "string"}, {"name": "About ENHERTU", "dtype": "string"}, {"name": "About the ENHERTU Clinical Development Program", "dtype": "string"}, {"name": "About the Daiichi Sankyo and AstraZeneca Collaboration", "dtype": "string"}, {"name": "About Daiichi Sankyo", "dtype": "string"}, {"name": "About ADS-TEC Energy", "dtype": "string"}, {"name": "About TSG Consumer Partners", "dtype": "string"}, {"name": "About what3words", "dtype": "string"}, {"name": "About BeyondNetZero", "dtype": "string"}, {"name": "About General Atlantic", "dtype": "string"}, {"name": "About Angel Oak Mortgage, Inc.", "dtype": "string"}, {"name": "About The Beachbody Company, Inc.", "dtype": "string"}, {"name": "About Quantum-Si Incorporated", "dtype": "string"}, {"name": "About StorONE", "dtype": "string"}, {"name": "About Health Fidelity", "dtype": "string"}, {"name": "About Werner Enterprises", "dtype": "string"}, {"name": "About Endoluxe", "dtype": "string"}, {"name": "About Granite", "dtype": "string"}, {"name": "About KBI Biopharma, Inc.", "dtype": "string"}, {"name": "About Waters Corporation", "dtype": "string"}, {"name": "About Dawn", "dtype": "string"}, {"name": "About Alarm.com", "dtype": "string"}, {"name": "About American Pacific Group", "dtype": "string"}, {"name": "About BioTalent Canada", "dtype": "string"}, {"name": "About FashWire:", "dtype": "string"}, 
{"name": "About Credit Karma", "dtype": "string"}, {"name": "About Intuit", "dtype": "string"}, {"name": "About SheerID", "dtype": "string"}, {"name": "About Sessions", "dtype": "string"}, {"name": "About RGI-2001", "dtype": "string"}, {"name": "About REGiMMUNE Limited", "dtype": "string"}, {"name": "About Audax Group", "dtype": "string"}, {"name": "About Skillz Inc.", "dtype": "string"}, {"name": "About Talon Cyber Security", "dtype": "string"}, {"name": "About Battery Ventures", "dtype": "string"}, {"name": "About Qognify", "dtype": "string"}, {"name": "About WestRock", "dtype": "string"}, {"name": "About Teucrium Trading LLC", "dtype": "string"}, {"name": "About Turo", "dtype": "string"}, {"name": "About IDT", "dtype": "string"}, {"name": "About PIND", "dtype": "string"}, {"name": "About NDPI", "dtype": "string"}, {"name": "About the Principal Financial Well-Being Index", "dtype": "string"}, {"name": "About Principal Financial Group", "dtype": "string"}, {"name": "About Parallel Bio", "dtype": "string"}, {"name": "About Granite Point Mortgage Trust Inc.", "dtype": "string"}, {"name": "About SambaNova Systems", "dtype": "string"}, {"name": "About the Tech Ascension Awards", "dtype": "string"}, {"name": "About UNCF", "dtype": "string"}, {"name": "About DeVry University", "dtype": "string"}, {"name": "About ISG Provider Lens\u2122 Research", "dtype": "string"}, {"name": "About ISG", "dtype": "string"}, {"name": "About Lp(a)", "dtype": "string"}, {"name": "About Arrowhead Pharmaceuticals", "dtype": "string"}, {"name": "About Cantaloupe, Inc.:", "dtype": "string"}, {"name": "About Amneal", "dtype": "string"}, {"name": "About Smartling", "dtype": "string"}, {"name": "About Oragenics, Inc.", "dtype": "string"}, {"name": "About TD SYNNEX", "dtype": "string"}, {"name": "About AtriCure", "dtype": "string"}, {"name": "About Scale Computing", "dtype": "string"}, {"name": "About Planar", "dtype": "string"}, {"name": "About Pactum", "dtype": "string"}, {"name": "About Millennium Solutions", "dtype": "string"}, {"name": "About FINEOS Corporation", "dtype": "string"}, {"name": "About CrowdStrike", "dtype": "string"}, {"name": "About BitNile Holdings, Inc.", "dtype": "string"}, {"name": "About EIG", "dtype": "string"}, {"name": "About ILOS Projects", "dtype": "string"}, {"name": "About Omnes", "dtype": "string"}, {"name": "About Achievers", "dtype": "string"}, {"name": "About Dasera", "dtype": "string"}, {"name": "About Rapid Dose Therapeutics Corp.", "dtype": "string"}, {"name": "About Deucravacitinib", "dtype": "string"}, {"name": "About the Phase 3 POETYK PSO-1 and POETYK PSO-2 Studies", "dtype": "string"}, {"name": "About Psoriasis", "dtype": "string"}, {"name": "About Enerpac Tool Group", "dtype": "string"}, {"name": "About doxo INSIGHTS", "dtype": "string"}, {"name": "About doxo", "dtype": "string"}, {"name": "About Uncovering TNBC", "dtype": "string"}, {"name": "About Yvonne Orji", "dtype": "string"}, {"name": "About American Water", "dtype": "string"}, {"name": "About The Water Research Foundation", "dtype": "string"}, {"name": "About Stratascale", "dtype": "string"}, {"name": "About the FinOps Foundation", "dtype": "string"}, {"name": "About Native Voice", "dtype": "string"}, {"name": "About iHeartMedia", "dtype": "string"}, {"name": "About Consilio", "dtype": "string"}, {"name": "About Comparably", "dtype": "string"}, {"name": "About Comparably Awards", "dtype": "string"}, {"name": "About Runway Group", "dtype": "string"}, {"name": "About UP.Partners", "dtype": "string"}, {"name": "About 
Century Housing Corporation", "dtype": "string"}, {"name": "About U.S. Bancorp Community Development Corporation:", "dtype": "string"}, {"name": "About FCPT", "dtype": "string"}, {"name": "About Micronoma", "dtype": "string"}, {"name": "About Samsung Electronics Co., Ltd.", "dtype": "string"}, {"name": "About OppFi", "dtype": "string"}, {"name": "About Searchlight Cyber", "dtype": "string"}, {"name": "About Mighty Buildings", "dtype": "string"}, {"name": "About Jack in the Box", "dtype": "string"}, {"name": "About Western Union", "dtype": "string"}, {"name": "About ADM", "dtype": "string"}, {"name": "About Bona US", "dtype": "string"}, {"name": "About NW Natural", "dtype": "string"}, {"name": "About P&G", "dtype": "string"}, {"name": "About P&G\u2019s 2,021 Acts of Good in 2021", "dtype": "string"}, {"name": "About Grove Collaborative Holdings, Inc.", "dtype": "string"}, {"name": "About PhishFirewall", "dtype": "string"}, {"name": "About Fluree", "dtype": "string"}, {"name": "About Juniper Networks", "dtype": "string"}, {"name": "About Lomiko Metals Inc.", "dtype": "string"}, {"name": "About Quotient", "dtype": "string"}, {"name": "About Supermicro", "dtype": "string"}, {"name": "About Susan G. Komen\u00ae", "dtype": "string"}, {"name": "About CSET", "dtype": "string"}, {"name": "About Snorkel AI", "dtype": "string"}, {"name": "About Inversion6", "dtype": "string"}, {"name": "About Make-A-Wish", "dtype": "string"}, {"name": "About Klick Health", "dtype": "string"}, {"name": "About Klick Group", "dtype": "string"}, {"name": "About Upflex:", "dtype": "string"}, {"name": "About QuantumScape Corporation", "dtype": "string"}, {"name": "About RevBio, Inc.", "dtype": "string"}, {"name": "About Navy Federal Credit Union", "dtype": "string"}, {"name": "About Operation Homefront:", "dtype": "string"}, {"name": "About Jackpot.com", "dtype": "string"}, {"name": "About Vanson Bourne", "dtype": "string"}, {"name": "About Code42", "dtype": "string"}, {"name": "About Decorative Films:", "dtype": "string"}, {"name": "About Appvion:", "dtype": "string"}, {"name": "About Nekoosa:", "dtype": "string"}, {"name": "About Wynnchurch Capital:", "dtype": "string"}, {"name": "About NanOlogy", "dtype": "string"}, {"name": "About Komatsu", "dtype": "string"}, {"name": "About PPM America", "dtype": "string"}, {"name": "About Applied UV", "dtype": "string"}, {"name": "About LED Supply Co.", "dtype": "string"}, {"name": "About PURO UV Disinfection Lighting", "dtype": "string"}, {"name": "About Algolia", "dtype": "string"}, {"name": "About Apple Hospitality REIT, Inc.", "dtype": "string"}, {"name": "About DS Smith", "dtype": "string"}, {"name": "About HC3", "dtype": "string"}, {"name": "About the Potential Home Sales Model", "dtype": "string"}, {"name": "About First American", "dtype": "string"}, {"name": "About Sappi North America, Inc.", "dtype": "string"}, {"name": "About KlariVis", "dtype": "string"}, {"name": "About Nexgrill", "dtype": "string"}, {"name": "About Edgewise Therapeutics", "dtype": "string"}, {"name": "About the new board members", "dtype": "string"}, {"name": "About Grant Thornton LLP", "dtype": "string"}, {"name": "About Geographic Atrophy", "dtype": "string"}, {"name": "About Avacincaptad Pegol", "dtype": "string"}, {"name": "About the GATHER Clinical Trials", "dtype": "string"}, {"name": "About Breakthrough Therapy Designation", "dtype": "string"}, {"name": "About Iveric Bio", "dtype": "string"}, {"name": "About Imbrium Therapeutics L.P.", "dtype": "string"}, {"name": "About the Alice L. 
Walton Foundation", "dtype": "string"}, {"name": "About Washington Regional Medical System", "dtype": "string"}, {"name": "About Cleveland Clinic", "dtype": "string"}, {"name": "About ShopOne", "dtype": "string"}, {"name": "About Pantheon", "dtype": "string"}, {"name": "About Gamepires", "dtype": "string"}, {"name": "About Jagex", "dtype": "string"}, {"name": "About Everbridge", "dtype": "string"}, {"name": "About Riskalyze", "dtype": "string"}, {"name": "About InvestorCOM Inc.", "dtype": "string"}, {"name": "About AngioDynamics, Inc.", "dtype": "string"}, {"name": "About Intelitek", "dtype": "string"}, {"name": "About First Farmers and Merchants Corporation and First Farmers and Merchants Bank", "dtype": "string"}, {"name": "About UP.Summit", "dtype": "string"}, {"name": "About the Food Network & Cooking Channel South Beach Wine & Food Festival", "dtype": "string"}, {"name": "About Strategic Storage Trust VI, Inc. (SST VI):", "dtype": "string"}, {"name": "About SmartStop Self Storage REIT, Inc. (SmartStop):", "dtype": "string"}, {"name": "About DTEX Systems", "dtype": "string"}, {"name": "About the Call of Duty Endowment", "dtype": "string"}, {"name": "About Experian", "dtype": "string"}, {"name": "About Operation HOPE", "dtype": "string"}, {"name": "About Dermavant\u2019s Phase 3 Program for Tapinarof in Psoriasis", "dtype": "string"}, {"name": "About Dermavant", "dtype": "string"}, {"name": "About Microvast", "dtype": "string"}, {"name": "About the Archer Awards", "dtype": "string"}, {"name": "About TechTarget", "dtype": "string"}, {"name": "About L3Harris Technologies", "dtype": "string"}, {"name": "About Klara", "dtype": "string"}, {"name": "About ModMed", "dtype": "string"}, {"name": "About Business Intelligence Group", "dtype": "string"}, {"name": "About Wisk", "dtype": "string"}, {"name": "About Illinois American Water", "dtype": "string"}, {"name": "About SSG", "dtype": "string"}, {"name": "About FPT Software", "dtype": "string"}, {"name": "About Circle Pharma, Inc.", "dtype": "string"}, {"name": "About NETGEAR, Inc.", "dtype": "string"}, {"name": "About Zurn Elkay Water Solutions", "dtype": "string"}, {"name": "About IDC Trackers", "dtype": "string"}, {"name": "About IDC", "dtype": "string"}, {"name": "About Monroe Capital", "dtype": "string"}, {"name": "About NICE Actimize", "dtype": "string"}, {"name": "About NICE", "dtype": "string"}, {"name": "About AuriNovo\u2122", "dtype": "string"}, {"name": "About the Microtia-Congenital Ear Deformity Institute", "dtype": "string"}, {"name": "About 3DBio Therapeutics", "dtype": "string"}, {"name": "About Oshkosh Corporation", "dtype": "string"}, {"name": "About Bob Harper", "dtype": "string"}, {"name": "About AstraZeneca", "dtype": "string"}, {"name": "About SafePath\u00ae", "dtype": "string"}, {"name": "About Smith Micro Software, Inc.", "dtype": "string"}, {"name": "About Non-GAAP Financial Measures", "dtype": "string"}, {"name": "About Cognyte Software Ltd.", "dtype": "string"}, {"name": "About BJ's Wholesale Club Holdings, Inc.", "dtype": "string"}, {"name": "About Sama", "dtype": "string"}, {"name": "About Evans Transportation Services Inc.", "dtype": "string"}, {"name": "About PowerSchool", "dtype": "string"}, {"name": "About MatSing", "dtype": "string"}, {"name": "About Transaction Network Services", "dtype": "string"}, {"name": "About Cataracts", "dtype": "string"}, {"name": "About Presbyopia", "dtype": "string"}, {"name": "About the AcrySof\u00ae IQ Vivity", "dtype": "string"}, {"name": "About the Outstanding Pole Award", 
"dtype": "string"}, {"name": "About Pure Wafer", "dtype": "string"}, {"name": "About Braverman Greenspun P.C.", "dtype": "string"}, {"name": "About XPeng Inc.", "dtype": "string"}, {"name": "About Wallarm", "dtype": "string"}, {"name": "About Transaction Network Services (TNS)", "dtype": "string"}, {"name": "About KKR", "dtype": "string"}, {"name": "About IMV", "dtype": "string"}, {"name": "About Cepton", "dtype": "string"}, {"name": "About Coty Inc.", "dtype": "string"}, {"name": "About HUGO BOSS", "dtype": "string"}, {"name": "About Juicy Stakes Casino:", "dtype": "string"}, {"name": "About Susan G. Komen", "dtype": "string"}, {"name": "About Sonio", "dtype": "string"}, {"name": "About Terreal", "dtype": "string"}, {"name": "About BabyQuip", "dtype": "string"}, {"name": "About Sense", "dtype": "string"}, {"name": "About OCC", "dtype": "string"}, {"name": "About International Bird Rescue", "dtype": "string"}, {"name": "About The Marine Mammal Center", "dtype": "string"}, {"name": "About Japan National Tourism Organization", "dtype": "string"}, {"name": "About Wan Bridge", "dtype": "string"}, {"name": "About The Gabelli Dividend & Income Trust", "dtype": "string"}, {"name": "About Hurricane Electric", "dtype": "string"}, {"name": "About NIKE, Inc.", "dtype": "string"}, {"name": "About Emburse", "dtype": "string"}, {"name": "About the IDC MarketScape", "dtype": "string"}, {"name": "About Skechers USA Ltd. and Skechers USA, Inc.", "dtype": "string"}, {"name": "About N-able", "dtype": "string"}, {"name": "About ViewSonic", "dtype": "string"}, {"name": "About NortonLifeLock Inc.", "dtype": "string"}, {"name": "About Airiam", "dtype": "string"}, {"name": "About the Principal Financial Well-Being Index\u2120", "dtype": "string"}, {"name": "About Gail Devers", "dtype": "string"}, {"name": "About Graves\u2019 Disease", "dtype": "string"}, {"name": "About Thyroid Eye Disease", "dtype": "string"}, {"name": "About The Graves\u2019 Disease and Thyroid Foundation", "dtype": "string"}, {"name": "About Prevent Blindness", "dtype": "string"}, {"name": "About Horizon", "dtype": "string"}, {"name": "About Conceal", "dtype": "string"}, {"name": "About Cybin", "dtype": "string"}, {"name": "About Synergis Software", "dtype": "string"}, {"name": "About Aruba, a Hewlett Packard Enterprise company", "dtype": "string"}, {"name": "About Stewart", "dtype": "string"}, {"name": "About Tumble", "dtype": "string"}, {"name": "About GRAIL", "dtype": "string"}, {"name": "About Wheels", "dtype": "string"}, {"name": "About Helbiz", "dtype": "string"}, {"name": "About Regions Financial Corporation", "dtype": "string"}, {"name": "About Match Marketing Group", "dtype": "string"}, {"name": "About Public Label", "dtype": "string"}, {"name": "About Match Retail", "dtype": "string"}, {"name": "About Bushu Pharmaceuticals Ltd.", "dtype": "string"}, {"name": "About The 81 Collection", "dtype": "string"}, {"name": "About Columbia Sussex", "dtype": "string"}, {"name": "About Renaissance Hotels", "dtype": "string"}, {"name": "About Hims & Hers", "dtype": "string"}, {"name": "About eternalHealth:", "dtype": "string"}, {"name": "About Angeles Equity Partners, LLC", "dtype": "string"}, {"name": "About R\u014dBEX", "dtype": "string"}, {"name": "About Vince Tizzio", "dtype": "string"}, {"name": "About Albert Benchimol", "dtype": "string"}, {"name": "About AXIS Capital", "dtype": "string"}, {"name": "About Regional Management Corp.", "dtype": "string"}, {"name": "About Black & Veatch", "dtype": "string"}, {"name": "About NextGen Healthcare, 
Inc.", "dtype": "string"}, {"name": "About Bridges Health Partners", "dtype": "string"}, {"name": "About Keller Williams", "dtype": "string"}, {"name": "About Board", "dtype": "string"}, {"name": "About Velodyne Lidar", "dtype": "string"}, {"name": "About Clarity AI", "dtype": "string"}, {"name": "About Refinitiv, an LSEG business", "dtype": "string"}, {"name": "About LSEG", "dtype": "string"}, {"name": "About Sterling", "dtype": "string"}, {"name": "About Orelabrutinib", "dtype": "string"}, {"name": "About Tafasitamab", "dtype": "string"}, {"name": "About InnoCare", "dtype": "string"}, {"name": "About ExtraHop", "dtype": "string"}, {"name": "About Mitek Systems, Inc.", "dtype": "string"}, {"name": "About Hamilton Capital Partners Inc. (Hamilton ETFs)", "dtype": "string"}, {"name": "About Benson Hill", "dtype": "string"}, {"name": "About Star Peak Corp II", "dtype": "string"}, {"name": "About Commonwealth Financial Network", "dtype": "string"}, {"name": "About Skyhigh Security:", "dtype": "string"}, {"name": "About Heartland Summit", "dtype": "string"}, {"name": "About Neuromyelitis Optica Spectrum Disorder (NMOSD)", "dtype": "string"}, {"name": "About UPLIZNA (inebilizumab-cdon)", "dtype": "string"}, {"name": "About Eptura\u2122", "dtype": "string"}, {"name": "About Space Perspective", "dtype": "string"}, {"name": "About David Grutman", "dtype": "string"}, {"name": "About Harrods", "dtype": "string"}, {"name": "About ISG Provider Lens\u2122", "dtype": "string"}, {"name": "About Slate Office REIT (TSX: SOT.UN)", "dtype": "string"}, {"name": "About Slate Asset Management", "dtype": "string"}, {"name": "About Gatos Silver", "dtype": "string"}, {"name": "About Outset Medical, Inc.", "dtype": "string"}, {"name": "About I/ITSEC", "dtype": "string"}, {"name": "About RAVE Computer", "dtype": "string"}, {"name": "About Sun-Maid Growers of California", "dtype": "string"}, {"name": "About CIBC Innovation Banking", "dtype": "string"}, {"name": "About Azalea Health", "dtype": "string"}, {"name": "About Great American\u2019s Fidelity / Crime Division", "dtype": "string"}, {"name": "About Great American Insurance Group", "dtype": "string"}, {"name": "About Infobip", "dtype": "string"}, {"name": "About Cantaloupe, Inc.", "dtype": "string"}, {"name": "About Express, Inc.:", "dtype": "string"}, {"name": "About Cooper Tire & Rubber Company", "dtype": "string"}, {"name": "About Alice Cooper", "dtype": "string"}, {"name": "About Evanescence", "dtype": "string"}, {"name": "About THIO", "dtype": "string"}, {"name": "About MAIA Biotechnology, Inc.", "dtype": "string"}, {"name": "About The Mission Continues:", "dtype": "string"}, {"name": "About SmartBear", "dtype": "string"}, {"name": "About DemandScience", "dtype": "string"}, {"name": "About Guild Mortgage", "dtype": "string"}, {"name": "About Generational Equity", "dtype": "string"}, {"name": "About Lisbon Heritage Hotels", "dtype": "string"}, {"name": "About Bojangles, Inc.", "dtype": "string"}, {"name": "About Ozark Fiber:", "dtype": "string"}, {"name": "About Duravant", "dtype": "string"}, {"name": "About Multiscan Technologies", "dtype": "string"}, {"name": "About Acorda Therapeutics", "dtype": "string"}, {"name": "About HealthCare Royalty", "dtype": "string"}, {"name": "About Atara Biotherapeutics, Inc.", "dtype": "string"}, {"name": "About the Principal Super Savers Study", "dtype": "string"}, {"name": "About Target Date Funds:", "dtype": "string"}, {"name": "About AmTrust Financial Services, Inc.", "dtype": "string"}, {"name": "About Sovereign Wealth 
Fund Institute", "dtype": "string"}, {"name": "About AG Mortgage Investment Trust, Inc.", "dtype": "string"}, {"name": "About Angelo, Gordon & Co., L.P.", "dtype": "string"}, {"name": "About Lob", "dtype": "string"}, {"name": "About Climate Impact Partners", "dtype": "string"}, {"name": "About CarbonNeutral\u00ae certification", "dtype": "string"}, {"name": "About Edgewater Wireless", "dtype": "string"}, {"name": "About Cincoze", "dtype": "string"}, {"name": "About TransPerfect", "dtype": "string"}, {"name": "About Seso:", "dtype": "string"}, {"name": "About Vyond", "dtype": "string"}, {"name": "About Pliant", "dtype": "string"}, {"name": "About Entegris", "dtype": "string"}, {"name": "About FlexTrade Systems", "dtype": "string"}, {"name": "About UBS Asset Management:", "dtype": "string"}, {"name": "About Immersion", "dtype": "string"}, {"name": "About Faurecia", "dtype": "string"}, {"name": "About BankUnited, Inc.", "dtype": "string"}, {"name": "About Archer", "dtype": "string"}, {"name": "About Northspyre", "dtype": "string"}, {"name": "About Gastric Cancer", "dtype": "string"}, {"name": "About DESTINY-Gastric01", "dtype": "string"}, {"name": "About the Collaboration between Daiichi Sankyo and AstraZeneca", "dtype": "string"}, {"name": "About Lakeview Community Partners Limited", "dtype": "string"}, {"name": "About SBA Communications Corporation", "dtype": "string"}, {"name": "About Basis Theory", "dtype": "string"}, {"name": "About Dassault Syst\u00e8mes", "dtype": "string"}, {"name": "About McPhy", "dtype": "string"}, {"name": "About Visiativ", "dtype": "string"}, {"name": "About Getty", "dtype": "string"}, {"name": "About automatic world generation acceleration", "dtype": "string"}, {"name": "About the publication of the beta version", "dtype": "string"}, {"name": "About MATRIX Inc.", "dtype": "string"}, {"name": "About MATRIX GENESIS LABS (MGL)", "dtype": "string"}, {"name": "About MetaReal Co., Ltd.", "dtype": "string"}, {"name": "About OWC", "dtype": "string"}, {"name": "About Elior Group", "dtype": "string"}, {"name": "About FarEye", "dtype": "string"}, {"name": "About Dole plc", "dtype": "string"}, {"name": "About Forbright Bank:", "dtype": "string"}, {"name": "About Trez Capital", "dtype": "string"}, {"name": "About Sharp/NEC", "dtype": "string"}]}], "splits": [{"name": "train", "num_bytes": 5620857.333333333, "num_examples": 578}, {"name": "test", "num_bytes": 709900.6666666666, "num_examples": 73}, {"name": "valid", "num_bytes": 700176.0, "num_examples": 72}], "download_size": 5767270, "dataset_size": 7030934.0}}
2023-01-02T06:11:37+00:00
[]
[]
TAGS #region-us
# Dataset Card for "mediumroast-press-releases" More Information needed
[ "# Dataset Card for \"mediumroast-press-releases\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"mediumroast-press-releases\"\n\nMore Information needed" ]
6cd515d1fa7d4d480f1cb06223f4abb9e3e765f0
# Dataset Card for "CUB-SD" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) The CUB dataset re-created using Stable Diffusion 2.0 <table> <tr> <td><img alt="Showcase" src="https://huggingface.co/datasets/taesiri/CUB-SD/resolve/main/sample_images/4__Blue%20Jay.png"/></td> <td><img alt="Showcase" src="https://huggingface.co/datasets/taesiri/CUB-SD/resolve/main/sample_images/11__Kentucky%20Warbler.png"/></td> </tr> <tr> <td><img alt="Showcase" src="https://huggingface.co/datasets/taesiri/CUB-SD/resolve/main/sample_images/9__House%20Sparrow.png"/></td> <td><img alt="Showcase" src="https://huggingface.co/datasets/taesiri/CUB-SD/resolve/main/sample_images/23__Scarlet%20Tanager.png"/></td> </tr> </table>
taesiri/CUB-SD
[ "region:us" ]
2023-01-02T06:29:12+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "001.Black_footed_Albatross", "1": "002.Laysan_Albatross", "2": "003.Sooty_Albatross", "3": "004.Groove_billed_Ani", "4": "005.Crested_Auklet", "5": "006.Least_Auklet", "6": "007.Parakeet_Auklet", "7": "008.Rhinoceros_Auklet", "8": "009.Brewer_Blackbird", "9": "010.Red_winged_Blackbird", "10": "011.Rusty_Blackbird", "11": "012.Yellow_headed_Blackbird", "12": "013.Bobolink", "13": "014.Indigo_Bunting", "14": "015.Lazuli_Bunting", "15": "016.Painted_Bunting", "16": "017.Cardinal", "17": "018.Spotted_Catbird", "18": "019.Gray_Catbird", "19": "020.Yellow_breasted_Chat", "20": "021.Eastern_Towhee", "21": "022.Chuck_will_Widow", "22": "023.Brandt_Cormorant", "23": "024.Red_faced_Cormorant", "24": "025.Pelagic_Cormorant", "25": "026.Bronzed_Cowbird", "26": "027.Shiny_Cowbird", "27": "028.Brown_Creeper", "28": "029.American_Crow", "29": "030.Fish_Crow", "30": "031.Black_billed_Cuckoo", "31": "032.Mangrove_Cuckoo", "32": "033.Yellow_billed_Cuckoo", "33": "034.Gray_crowned_Rosy_Finch", "34": "035.Purple_Finch", "35": "036.Northern_Flicker", "36": "037.Acadian_Flycatcher", "37": "038.Great_Crested_Flycatcher", "38": "039.Least_Flycatcher", "39": "040.Olive_sided_Flycatcher", "40": "041.Scissor_tailed_Flycatcher", "41": "042.Vermilion_Flycatcher", "42": "043.Yellow_bellied_Flycatcher", "43": "044.Frigatebird", "44": "045.Northern_Fulmar", "45": "046.Gadwall", "46": "047.American_Goldfinch", "47": "048.European_Goldfinch", "48": "049.Boat_tailed_Grackle", "49": "050.Eared_Grebe", "50": "051.Horned_Grebe", "51": "052.Pied_billed_Grebe", "52": "053.Western_Grebe", "53": "054.Blue_Grosbeak", "54": "055.Evening_Grosbeak", "55": "056.Pine_Grosbeak", "56": "057.Rose_breasted_Grosbeak", "57": "058.Pigeon_Guillemot", "58": "059.California_Gull", "59": "060.Glaucous_winged_Gull", "60": "061.Heermann_Gull", "61": "062.Herring_Gull", "62": "063.Ivory_Gull", "63": "064.Ring_billed_Gull", "64": "065.Slaty_backed_Gull", "65": "066.Western_Gull", "66": "067.Anna_Hummingbird", "67": "068.Ruby_throated_Hummingbird", "68": "069.Rufous_Hummingbird", "69": "070.Green_Violetear", "70": "071.Long_tailed_Jaeger", "71": "072.Pomarine_Jaeger", "72": "073.Blue_Jay", "73": "074.Florida_Jay", "74": "075.Green_Jay", "75": "076.Dark_eyed_Junco", "76": "077.Tropical_Kingbird", "77": "078.Gray_Kingbird", "78": "079.Belted_Kingfisher", "79": "080.Green_Kingfisher", "80": "081.Pied_Kingfisher", "81": "082.Ringed_Kingfisher", "82": "083.White_breasted_Kingfisher", "83": "084.Red_legged_Kittiwake", "84": "085.Horned_Lark", "85": "086.Pacific_Loon", "86": "087.Mallard", "87": "088.Western_Meadowlark", "88": "089.Hooded_Merganser", "89": "090.Red_breasted_Merganser", "90": "091.Mockingbird", "91": "092.Nighthawk", "92": "093.Clark_Nutcracker", "93": "094.White_breasted_Nuthatch", "94": "095.Baltimore_Oriole", "95": "096.Hooded_Oriole", "96": "097.Orchard_Oriole", "97": "098.Scott_Oriole", "98": "099.Ovenbird", "99": "100.Brown_Pelican", "100": "101.White_Pelican", "101": "102.Western_Wood_Pewee", "102": "103.Sayornis", "103": "104.American_Pipit", "104": "105.Whip_poor_Will", "105": "106.Horned_Puffin", "106": "107.Common_Raven", "107": "108.White_necked_Raven", "108": "109.American_Redstart", "109": "110.Geococcyx", "110": "111.Loggerhead_Shrike", "111": "112.Great_Grey_Shrike", "112": "113.Baird_Sparrow", "113": "114.Black_throated_Sparrow", "114": "115.Brewer_Sparrow", "115": "116.Chipping_Sparrow", "116": 
"117.Clay_colored_Sparrow", "117": "118.House_Sparrow", "118": "119.Field_Sparrow", "119": "120.Fox_Sparrow", "120": "121.Grasshopper_Sparrow", "121": "122.Harris_Sparrow", "122": "123.Henslow_Sparrow", "123": "124.Le_Conte_Sparrow", "124": "125.Lincoln_Sparrow", "125": "126.Nelson_Sharp_tailed_Sparrow", "126": "127.Savannah_Sparrow", "127": "128.Seaside_Sparrow", "128": "129.Song_Sparrow", "129": "130.Tree_Sparrow", "130": "131.Vesper_Sparrow", "131": "132.White_crowned_Sparrow", "132": "133.White_throated_Sparrow", "133": "134.Cape_Glossy_Starling", "134": "135.Bank_Swallow", "135": "136.Barn_Swallow", "136": "137.Cliff_Swallow", "137": "138.Tree_Swallow", "138": "139.Scarlet_Tanager", "139": "140.Summer_Tanager", "140": "141.Artic_Tern", "141": "142.Black_Tern", "142": "143.Caspian_Tern", "143": "144.Common_Tern", "144": "145.Elegant_Tern", "145": "146.Forsters_Tern", "146": "147.Least_Tern", "147": "148.Green_tailed_Towhee", "148": "149.Brown_Thrasher", "149": "150.Sage_Thrasher", "150": "151.Black_capped_Vireo", "151": "152.Blue_headed_Vireo", "152": "153.Philadelphia_Vireo", "153": "154.Red_eyed_Vireo", "154": "155.Warbling_Vireo", "155": "156.White_eyed_Vireo", "156": "157.Yellow_throated_Vireo", "157": "158.Bay_breasted_Warbler", "158": "159.Black_and_white_Warbler", "159": "160.Black_throated_Blue_Warbler", "160": "161.Blue_winged_Warbler", "161": "162.Canada_Warbler", "162": "163.Cape_May_Warbler", "163": "164.Cerulean_Warbler", "164": "165.Chestnut_sided_Warbler", "165": "166.Golden_winged_Warbler", "166": "167.Hooded_Warbler", "167": "168.Kentucky_Warbler", "168": "169.Magnolia_Warbler", "169": "170.Mourning_Warbler", "170": "171.Myrtle_Warbler", "171": "172.Nashville_Warbler", "172": "173.Orange_crowned_Warbler", "173": "174.Palm_Warbler", "174": "175.Pine_Warbler", "175": "176.Prairie_Warbler", "176": "177.Prothonotary_Warbler", "177": "178.Swainson_Warbler", "178": "179.Tennessee_Warbler", "179": "180.Wilson_Warbler", "180": "181.Worm_eating_Warbler", "181": "182.Yellow_Warbler", "182": "183.Northern_Waterthrush", "183": "184.Louisiana_Waterthrush", "184": "185.Bohemian_Waxwing", "185": "186.Cedar_Waxwing", "186": "187.American_Three_toed_Woodpecker", "187": "188.Pileated_Woodpecker", "188": "189.Red_bellied_Woodpecker", "189": "190.Red_cockaded_Woodpecker", "190": "191.Red_headed_Woodpecker", "191": "192.Downy_Woodpecker", "192": "193.Bewick_Wren", "193": "194.Cactus_Wren", "194": "195.Carolina_Wren", "195": "196.House_Wren", "196": "197.Marsh_Wren", "197": "198.Rock_Wren", "198": "199.Winter_Wren", "199": "200.Common_Yellowthroat"}}}}], "splits": [{"name": "test", "num_bytes": 5095337196.0, "num_examples": 6000}], "download_size": 5009478428, "dataset_size": 5095337196.0}}
2023-01-02T06:48:12+00:00
[]
[]
TAGS #region-us
Dataset Card for "CUB-SD" ========================= More Information needed The CUB dataset re-created using Stable Diffusion 2.0
[]
[ "TAGS\n#region-us \n" ]