| Column | Stat | Min | Max |
|---|---|---|---|
| sha | stringlengths | 40 | 40 |
| text | stringlengths | 1 | 13.4M |
| id | stringlengths | 2 | 117 |
| tags | sequencelengths | 1 | 7.91k |
| created_at | stringlengths | 25 | 25 |
| metadata | stringlengths | 2 | 875k |
| last_modified | stringlengths | 25 | 25 |
| arxiv | sequencelengths | 0 | 25 |
| languages | sequencelengths | 0 | 7.91k |
| tags_str | stringlengths | 17 | 159k |
| text_str | stringlengths | 1 | 447k |
| text_lists | sequencelengths | 0 | 352 |
| processed_texts | sequencelengths | 1 | 353 |
1e368cd93d5614ee6a7687060cbe7b598491b4ff
Please note this dataset is private. ### Using the data You can stream the data using the data loader: ```python import os from datasets import load_dataset myaudio = load_dataset( "evageon/myaudio", use_auth_token=os.environ["HG_USER_TOKEN"], # replace this with your access token streaming=True) ``` Then you can iterate over the dataset: ```python # replace test with validation or train depending on split you need print(next(iter(myaudio["test"]))) ``` outputs: ``` {'path': 'CD93A8FF-C3ED-4AD4-95A6-8363CCB93B90_spk-0001_seg-0024467:0025150.wav', 'audio': {'path': 'dataset/test/wav/CD93A8FF-C3ED-4AD4-95A6-8363CCB93B90_spk-0001_seg-0024467:0025150.wav', 'array': array([0.00662231, 0.00497437, 0.00518799, ..., 0.01150513, 0.00708008, 0.00296021]), 'sampling_rate': 16000}, 'text': 'خطرا على دول الخليج لماذا اعتبرت أن إيران اليوم والخطر الذي تشكله إيران مختلف'} ```
evageon/myaudio
[ "region:us" ]
2022-12-07T20:56:35+00:00
{"pretty_name": "MGB2", "alt_glob": [], "alt_sep": "", "dataset_link": "https://huggingface.co/datasets/malmarz/test_mgb2/resolve/main/mgb2.test.tar.gz", "dataset_name": "mgb2_speech", "datasets_path": "datasets", "file_type": "wav", "hf_path": "", "label_column_name": "", "lines": false, "local_dir": false, "new_columns": "", "pal": false, "skiprows": 0, "squad": false, "xml_columns": "", "dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "transcription", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 384372257.17, "num_examples": 3890}, {"name": "train", "num_bytes": 1488884786.673, "num_examples": 15559}], "download_size": 0, "dataset_size": 1873257043.8430002}}
2022-12-08T15:28:56+00:00
[]
[]
TAGS #region-us
Please note this dataset is private. ### Using the data You can stream the data using the data loader: Then you can iterate over the dataset: outputs:
[ "### Using the data\n\nYou can stream the data data loader:\n\n\n\nThen you can iterate over the dataset\n\n\noutputs:" ]
[ "TAGS\n#region-us \n", "### Using the data\n\nYou can stream the data data loader:\n\n\n\nThen you can iterate over the dataset\n\n\noutputs:" ]
81fa681e6a216879457e50b1821e25731e0ec6b7
# Dataset Card for "cantonese_processed_guangzhou" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tilos/cantonese_processed_guangzhou
[ "region:us" ]
2022-12-07T21:32:19+00:00
{"dataset_info": {"features": [{"name": "input_features", "sequence": {"sequence": "float32"}}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 4652712704, "num_examples": 4844}], "download_size": 659265457, "dataset_size": 4652712704}}
2022-12-08T16:19:05+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cantonese_processed_guangzhou" More Information needed
[ "# Dataset Card for \"cantonese_processed_guangzhou\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cantonese_processed_guangzhou\"\n\nMore Information needed" ]
95adf35dffd9afd51b326b417c8aae8ba281f7d6
# Dataset Card for "dlforcv" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
kyu-kang/dlforcv
[ "region:us" ]
2022-12-07T21:38:38+00:00
{"dataset_info": {"features": [{"name": "pixel_values", "dtype": "image"}, {"name": "label", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 67733111.0, "num_examples": 107}], "download_size": 67131614, "dataset_size": 67733111.0}}
2022-12-07T22:10:14+00:00
[]
[]
TAGS #region-us
# Dataset Card for "dlforcv" More Information needed
[ "# Dataset Card for \"dlforcv\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"dlforcv\"\n\nMore Information needed" ]
1e2c092fa44a9779a5405c7cdc85a7454393bff1
# Dataset Card for "id2223_whisper_swedish_augmented" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
jsra2/id2223_whisper_swedish_augmented
[ "region:us" ]
2022-12-07T21:45:15+00:00
{"dataset_info": {"features": [{"name": "input_features", "sequence": {"sequence": "float32"}}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 11871603408, "num_examples": 12360}, {"name": "test", "num_bytes": 4868697560, "num_examples": 5069}], "download_size": 2532495364, "dataset_size": 16740300968}}
2022-12-07T21:50:16+00:00
[]
[]
TAGS #region-us
# Dataset Card for "id2223_whisper_swedish_augmented" More Information needed
[ "# Dataset Card for \"id2223_whisper_swedish_augmented\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"id2223_whisper_swedish_augmented\"\n\nMore Information needed" ]
de69ac023094ff0488283b63bf0d3b10a09a8b23
﷽ # Dataset Card for Tarteel AI's EveryAyah Dataset ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [Tarteel AI](https://www.tarteel.ai/) - **Repository:** [Needs More Information] - **Point of Contact:** [Mohamed Saad Ibn Seddik](mailto:[email protected]) ### Dataset Summary This dataset is a collection of Quranic verses and their transcriptions, with diacritization, by different reciters. ### Supported Tasks and Leaderboards [Needs More Information] ### Languages The audio is in Arabic. ## Dataset Structure ### Data Instances A typical data point comprises the audio file `audio`, and its transcription called `text`. The `duration` is in seconds, and the author is `reciter`. An example from the dataset is: ``` { 'audio': { 'path': None, 'array': array([ 0. , 0. , 0. , ..., -0.00057983, -0.00085449, -0.00061035]), 'sampling_rate': 16000 }, 'duration': 6.478375, 'text': 'بِسْمِ اللَّهِ الرَّحْمَنِ الرَّحِيمِ', 'reciter': 'abdulsamad' } ``` ### Data Fields - audio: A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate. Note that when accessing the audio column: `dataset[0]["audio"]` the audio file is automatically decoded and resampled to `dataset.features["audio"].sampling_rate`. Decoding and resampling of a large number of audio files might take a significant amount of time. Thus it is important to first query the sample index before the `"audio"` column, *i.e.* `dataset[0]["audio"]` should **always** be preferred over `dataset["audio"][0]`. - text: The transcription of the audio file. - duration: The duration of the audio file. - reciter: The reciter of the verses. ### Data Splits | | Train | Test | Validation | | ----- | ----- | ---- | ---------- | | dataset | 187785 | 23473 | 23474 | ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [Needs More Information] ## Additional Information ### Dataset Curators ### Licensing Information [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/) ### Citation Information ``` ``` ### Contributions This dataset was created by:
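A minimal sketch of the Data Fields guidance above, assuming only that the `datasets` library is installed and using the repository id `tarteel-ai/everyayah` from this card; the full corpus is very large, so this is illustrative rather than something to run casually:

```python
from datasets import load_dataset

# Load one split; pass streaming=True instead to avoid downloading everything.
ds = load_dataset("tarteel-ai/everyayah", split="validation")

# Query the row index first, then the "audio" column: only this one file is
# decoded and resampled to ds.features["audio"].sampling_rate.
sample = ds[0]
print(sample["text"], sample["reciter"], sample["duration"])
print(sample["audio"]["sampling_rate"], sample["audio"]["array"][:5])

# By contrast, ds["audio"][0] would decode every audio file in the split first.
```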
tarteel-ai/everyayah
[ "task_categories:automatic-speech-recognition", "annotations_creators:expert-generated", "language_creators:crowdsourced", "multilinguality:monolingual", "size_categories:100K<n<1M", "source_datasets:original", "language:ar", "license:mit", "region:us" ]
2022-12-07T21:53:59+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["crowdsourced"], "language": ["ar"], "license": ["mit"], "multilinguality": ["monolingual"], "size_categories": ["100K<n<1M"], "source_datasets": ["original"], "task_categories": ["automatic-speech-recognition"], "task_ids": [], "paperswithcode_id": "tarteel-everyayah", "pretty_name": "Tarteel AI - EveryAyah Dataset", "dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "duration", "dtype": "float64"}, {"name": "text", "dtype": "string"}, {"name": "reciter", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 262627688145.3, "num_examples": 187785}, {"name": "test", "num_bytes": 25156009734.72, "num_examples": 23473}, {"name": "validation", "num_bytes": 23426886730.218, "num_examples": 23474}], "download_size": 117190597305, "dataset_size": 311210584610.23804}, "train-eval-index": [{"config": "clean", "task": "automatic-speech-recognition", "task_id": "speech_recognition", "splits": {"train_split": "train", "eval_split": "test", "validation_split": "validation"}, "col_mapping": {"audio": "audio", "text": "text", "reciter": "text"}, "metrics": [{"type": "wer", "name": "WER"}, {"type": "cer", "name": "CER"}]}]}
2022-12-09T19:33:08+00:00
[]
[ "ar" ]
TAGS #task_categories-automatic-speech-recognition #annotations_creators-expert-generated #language_creators-crowdsourced #multilinguality-monolingual #size_categories-100K<n<1M #source_datasets-original #language-Arabic #license-mit #region-us
﷽ Dataset Card for Tarteel AI's EveryAyah Dataset =============================================== Table of Contents ----------------- * Dataset Description + Dataset Summary + Supported Tasks and Leaderboards + Languages * Dataset Structure + Data Instances + Data Fields + Data Splits * Dataset Creation + Curation Rationale + Source Data + Annotations + Personal and Sensitive Information * Considerations for Using the Data + Social Impact of Dataset + Discussion of Biases + Other Known Limitations * Additional Information + Dataset Curators + Licensing Information + Citation Information + Contributions Dataset Description ------------------- * Homepage: Tarteel AI * Repository: * Point of Contact: Mohamed Saad Ibn Seddik ### Dataset Summary This dataset is a collection of Quranic verses and their transcriptions, with diacritization, by different reciters. ### Supported Tasks and Leaderboards ### Languages The audio is in Arabic. Dataset Structure ----------------- ### Data Instances A typical data point comprises the audio file 'audio', and its transcription called 'text'. The 'duration' is in seconds, and the author is 'reciter'. An example from the dataset is: ### Data Fields * audio: A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate. Note that when accessing the audio column: 'dataset[0]["audio"]' the audio file is automatically decoded and resampled to 'dataset.features["audio"].sampling\_rate'. Decoding and resampling of a large number of audio files might take a significant amount of time. Thus it is important to first query the sample index before the '"audio"' column, *i.e.* 'dataset[0]["audio"]' should always be preferred over 'dataset["audio"][0]'. * text: The transcription of the audio file. * duration: The duration of the audio file. * reciter: The reciter of the verses. ### Data Splits Dataset Creation ---------------- ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information Considerations for Using the Data --------------------------------- ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations Additional Information ---------------------- ### Dataset Curators ### Licensing Information CC BY 4.0 ### Contributions This dataset was created by:
[ "### Dataset Summary\n\n\nThis dataset is a collection of Quranic verses and their transcriptions, with diacritization, by different reciters.", "### Supported Tasks and Leaderboards", "### Languages\n\n\nThe audio is in Arabic.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA typical data point comprises the audio file 'audio', and its transcription called 'text'.\nThe 'duration' is in seconds, and the author is 'reciter'.\n\n\nAn example from the dataset is:", "### Data Fields\n\n\n* audio: A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate. Note that when accessing the audio column: 'dataset[0][\"audio\"]' the audio file is automatically decoded and resampled to 'dataset.features[\"audio\"].sampling\\_rate'. Decoding and resampling of a large number of audio files might take a significant amount of time. Thus it is important to first query the sample index before the '\"audio\"' column, *i.e.* 'dataset[0][\"audio\"]' should always be preferred over 'dataset[\"audio\"][0]'.\n* text: The transcription of the audio file.\n* duration: The duration of the audio file.\n* reciter: The reciter of the verses.", "### Data Splits\n\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information\n\n\nCC BY 4.0", "### Contributions\n\n\nThis dataset was created by:" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #annotations_creators-expert-generated #language_creators-crowdsourced #multilinguality-monolingual #size_categories-100K<n<1M #source_datasets-original #language-Arabic #license-mit #region-us \n", "### Dataset Summary\n\n\nThis dataset is a collection of Quranic verses and their transcriptions, with diacritization, by different reciters.", "### Supported Tasks and Leaderboards", "### Languages\n\n\nThe audio is in Arabic.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA typical data point comprises the audio file 'audio', and its transcription called 'text'.\nThe 'duration' is in seconds, and the author is 'reciter'.\n\n\nAn example from the dataset is:", "### Data Fields\n\n\n* audio: A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate. Note that when accessing the audio column: 'dataset[0][\"audio\"]' the audio file is automatically decoded and resampled to 'dataset.features[\"audio\"].sampling\\_rate'. Decoding and resampling of a large number of audio files might take a significant amount of time. Thus it is important to first query the sample index before the '\"audio\"' column, *i.e.* 'dataset[0][\"audio\"]' should always be preferred over 'dataset[\"audio\"][0]'.\n* text: The transcription of the audio file.\n* duration: The duration of the audio file.\n* reciter: The reciter of the verses.", "### Data Splits\n\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information\n\n\nCC BY 4.0", "### Contributions\n\n\nThis dataset was created by:" ]
d1a894fc30563abd922a21c5baa99a4c17b49d92
# Dataset Card for "olm-october-2022-with-bookcorpus-tokenized-1024" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Tristan/olm-october-2022-with-bookcorpus-tokenized-1024
[ "region:us" ]
2022-12-07T22:14:29+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "special_tokens_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 88003461204, "num_examples": 14295559}], "download_size": 23317060365, "dataset_size": 88003461204}}
2022-12-07T22:55:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for "olm-october-2022-with-bookcorpus-tokenized-1024" More Information needed
[ "# Dataset Card for \"olm-october-2022-with-bookcorpus-tokenized-1024\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"olm-october-2022-with-bookcorpus-tokenized-1024\"\n\nMore Information needed" ]
d798e79ecaf1390d68bb3e543356e62ca4cf2039
# Dataset Card for "banking_sentiment_zs_gpt3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
argilla/banking_sentiment_zs_gpt3
[ "region:us" ]
2022-12-07T22:16:36+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "null"}, {"name": "inputs", "struct": [{"name": "explanation", "dtype": "string"}, {"name": "text", "dtype": "string"}]}, {"name": "prediction", "list": [{"name": "label", "dtype": "string"}, {"name": "score", "dtype": "float64"}]}, {"name": "prediction_agent", "dtype": "string"}, {"name": "annotation", "dtype": "string"}, {"name": "annotation_agent", "dtype": "string"}, {"name": "multi_label", "dtype": "bool"}, {"name": "explanation", "dtype": "null"}, {"name": "id", "dtype": "string"}, {"name": "metadata", "dtype": "null"}, {"name": "status", "dtype": "string"}, {"name": "event_timestamp", "dtype": "null"}, {"name": "metrics", "struct": [{"name": "text_length", "dtype": "int64"}]}], "splits": [{"name": "train", "num_bytes": 11113, "num_examples": 36}], "download_size": 10768, "dataset_size": 11113}}
2022-12-07T22:16:43+00:00
[]
[]
TAGS #region-us
# Dataset Card for "banking_sentiment_zs_gpt3" More Information needed
[ "# Dataset Card for \"banking_sentiment_zs_gpt3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"banking_sentiment_zs_gpt3\"\n\nMore Information needed" ]
75d7288c6e598ddb3c740f64ec8fe633bade016f
## Dataset Description Dataset containing 1453 images of pixel-perfect vector Pokémon. ## Source Data Files taken from [Pokémon icons](https://theartificial.github.io/pokemon-icons/#download).
matteopilotto/pokemon_cute
[ "region:us" ]
2022-12-07T22:30:07+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 6031856.525, "num_examples": 1453}], "download_size": 6196888, "dataset_size": 6031856.525}}
2022-12-07T22:45:43+00:00
[]
[]
TAGS #region-us
## Dataset Description Dataset containing 1453 images of pixel-perfect vector Pokémon. ## Source Data Files taken from Pokémon icons.
[ "## Dataset Description\nDataset containing 1453 images of pixel-perfect vector Pokémon.", "## Source Data\nFiles taken from Pokémon icons." ]
[ "TAGS\n#region-us \n", "## Dataset Description\nDataset containing 1453 images of pixel-perfect vector Pokémon.", "## Source Data\nFiles taken from Pokémon icons." ]
425686a02827b3de1f30c72a510baebf3f4f7453
# Dataset Card for "aop-dataset-2022-11-10-interview-lines-by-youtube" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
ThingsThatDoStuff/aop-dataset-2022-11-10-interview-lines-by-youtube
[ "region:us" ]
2022-12-08T01:31:02+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "start", "dtype": "int64"}, {"name": "duration", "dtype": "int64"}, {"name": "name", "dtype": "string"}, {"name": "chars", "dtype": "int64"}, {"name": "text", "dtype": "string"}, {"name": "speaker_id", "dtype": "string"}, {"name": "type", "dtype": "string"}, {"name": "mfS", "dtype": "string"}, {"name": "mfDe", "dtype": "string"}, {"name": "S1", "dtype": "string"}, {"name": "S2", "dtype": "string"}, {"name": "A1", "dtype": "string"}, {"name": "A2", "dtype": "string"}, {"name": "A3", "dtype": "string"}, {"name": "A4", "dtype": "string"}, {"name": "ST-S", "dtype": "int64"}, {"name": "SF-S", "dtype": "int64"}, {"name": "NT-S", "dtype": "int64"}, {"name": "NF-S", "dtype": "int64"}, {"name": "Jumper", "dtype": "int64"}, {"name": "non-Jumper", "dtype": "int64"}, {"name": "PB", "dtype": "int64"}, {"name": "SC", "dtype": "int64"}, {"name": "SB", "dtype": "int64"}, {"name": "PC", "dtype": "int64"}, {"name": "type-128", "dtype": "string"}, {"name": "type-32", "dtype": "string"}, {"name": "stack", "dtype": "string"}, {"name": "S quadra", "dtype": "string"}, {"name": "O/D", "dtype": "string"}, {"name": "Oi/Oe", "dtype": "string"}, {"name": "Di/De", "dtype": "string"}, {"name": "S/P", "dtype": "string"}, {"name": "C/B", "dtype": "string"}, {"name": "dom", "dtype": "string"}, {"name": "temp", "dtype": "string"}, {"name": "O/DD", "dtype": "int64"}, {"name": "D/OO", "dtype": "int64"}, {"name": "Aee", "dtype": "int64"}, {"name": "Aii", "dtype": "int64"}, {"name": "mN", "dtype": "int64"}, {"name": "mS", "dtype": "int64"}, {"name": "mDi", "dtype": "int64"}, {"name": "mDe", "dtype": "int64"}, {"name": "fS", "dtype": "int64"}, {"name": "fN", "dtype": "int64"}, {"name": "fDe", "dtype": "int64"}, {"name": "fDi", "dtype": "int64"}, {"name": "sav S", "dtype": "int64"}, {"name": "sav N", "dtype": "int64"}, {"name": "sav T", "dtype": "int64"}, {"name": "sav F", "dtype": "int64"}, {"name": "dem S", "dtype": "int64"}, {"name": "dem N", "dtype": "int64"}, {"name": "dem T", "dtype": "int64"}, {"name": "dem F", "dtype": "int64"}, {"name": "has Si", "dtype": "int64"}, {"name": "has Ne", "dtype": "int64"}, {"name": "has Ni", "dtype": "int64"}, {"name": "has Se", "dtype": "int64"}, {"name": "has Ti", "dtype": "int64"}, {"name": "has Fe", "dtype": "int64"}, {"name": "has Fi", "dtype": "int64"}, {"name": "has Te", "dtype": "int64"}, {"name": "sav Oi", "dtype": "int64"}, {"name": "sav Di", "dtype": "int64"}, {"name": "sav Oe", "dtype": "int64"}, {"name": "sav De", "dtype": "int64"}, {"name": "dem Oe", "dtype": "int64"}, {"name": "dem De", "dtype": "int64"}, {"name": "dem Oi", "dtype": "int64"}, {"name": "dem Di", "dtype": "int64"}, {"name": "sav Play", "dtype": "int64"}, {"name": "sav Blast", "dtype": "int64"}, {"name": "sav Consume", "dtype": "int64"}, {"name": "sav Sleep", "dtype": "int64"}, {"name": "dem Play", "dtype": "int64"}, {"name": "dem Blast", "dtype": "int64"}, {"name": "dem Consume", "dtype": "int64"}, {"name": "dem Sleep", "dtype": "int64"}, {"name": "has Play", "dtype": "int64"}, {"name": "has Blast", "dtype": "int64"}, {"name": "has Consume", "dtype": "int64"}, {"name": "has Sleep", "dtype": "int64"}, {"name": "last Play", "dtype": "int64"}, {"name": "last Blast", "dtype": "int64"}, {"name": "last Consume", "dtype": "int64"}, {"name": "last Sleep", "dtype": "int64"}, {"name": "dom in", "dtype": "int64"}, {"name": "dom en", "dtype": "int64"}, {"name": "sav ST", "dtype": "int64"}, {"name": "sav SF", "dtype": "int64"}, {"name": "sav NT", 
"dtype": "int64"}, {"name": "sav NF", "dtype": "int64"}, {"name": "line_id", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 86092404, "num_examples": 82290}, {"name": "test", "num_bytes": 28744341, "num_examples": 27430}, {"name": "valid", "num_bytes": 28700636, "num_examples": 27430}], "download_size": 32536937, "dataset_size": 143537381}}
2022-12-08T01:51:52+00:00
[]
[]
TAGS #region-us
# Dataset Card for "aop-dataset-2022-11-10-interview-lines-by-youtube" More Information needed
[ "# Dataset Card for \"aop-dataset-2022-11-10-interview-lines-by-youtube\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"aop-dataset-2022-11-10-interview-lines-by-youtube\"\n\nMore Information needed" ]
191e21adb653e244098280ff5c201bd22daf10cb
# Dataset Card for "image3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Steven0633/image3
[ "region:us" ]
2022-12-08T02:00:02+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "arrange chairs", "1": "arrange flowers", "2": "bake potato", "3": "beat eggs", "4": "bend knee", "5": "bend tree", "6": "bind hair", "7": "bite apple", "8": "block door", "9": "block window", "10": "boil egg", "11": "boil potato", "12": "break bowl", "13": "break cup", "14": "break door", "15": "break egg", "16": "break glass", "17": "break window", "18": "burn book", "19": "burn paper", "20": "burn tree", "21": "burn wood", "22": "burst balloon", "23": "burst door", "24": "carry bag", "25": "carry book", "26": "carry umbrella", "27": "chop carrot", "28": "chop meat", "29": "chop onion", "30": "chop tree", "31": "chop wood", "32": "close book", "33": "close cabinet", "34": "close door", "35": "close drawer", "36": "close window", "37": "coil rope", "38": "cook egg", "39": "cook meat", "40": "cook onion", "41": "cook potato", "42": "crack bottle", "43": "crack egg", "44": "crack glass", "45": "crack window", "46": "crash car", "47": "crop hair", "48": "cut apple", "49": "cut meat", "50": "cut onion", "51": "cut potato", "52": "cut tree", "53": "cut wood", "54": "fasten door", "55": "fasten window", "56": "fold paper", "57": "fry egg", "58": "fry meat", "59": "fry potato", "60": "grate carrot", "61": "grate potato", "62": "grind meat", "63": "hang bag", "64": "hang shirt", "65": "ignite paper", "66": "ignite wood", "67": "insert key", "68": "kick door", "69": "kick football", "70": "knot rope", "71": "label bottle", "72": "label box", "73": "lock cabinet", "74": "lock door", "75": "lock drawer", "76": "lock window", "77": "mash potato", "78": "mix eggs", "79": "open bottle", "80": "open box", "81": "open cabinet", "82": "open door", "83": "open drawer", "84": "open umbrella", "85": "open window", "86": "park car", "87": "peel apple", "88": "peel banana", "89": "peel carrot", "90": "peel orange", "91": "peel potato", "92": "pile books", "93": "pile boxes", "94": "pile wood", "95": "pitch baseball", "96": "ride bicycle", "97": "rip paper", "98": "roll paper", "99": "roll umbrella", "100": "saw tree", "101": "saw wood", "102": "scratch car", "103": "scratch knee", "104": "shave hair", "105": "shut door", "106": "shut window", "107": "skin knee", "108": "slice apple", "109": "slice meat", "110": "slice onion", "111": "slice potato", "112": "smash door", "113": "smash window", "114": "soak hair", "115": "soak shirt", "116": "spill coffee", "117": "split tree", "118": "split wood", "119": "squeeze bottle", "120": "squeeze orange", "121": "stain paper", "122": "stain shirt", "123": "stir coffee", "124": "stir soup", "125": "strip tree", "126": "tear book", "127": "tear paper", "128": "tear shirt", "129": "throw apple", "130": "throw baseball", "131": "throw football", "132": "throw frisbee", "133": "tie shoe", "134": "trim hair", "135": "trim tree", "136": "twist hair", "137": "twist rope", "138": "wrap book", "139": "wrap box"}}}}], "splits": [{"name": "train", "num_bytes": 14000672.0, "num_examples": 420}], "download_size": 13171812, "dataset_size": 14000672.0}}
2022-12-08T02:20:52+00:00
[]
[]
TAGS #region-us
# Dataset Card for "image3" More Information needed
[ "# Dataset Card for \"image3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"image3\"\n\nMore Information needed" ]
78fafd897851c9d4e0879a8572a920fd90604829
# Dataset Card for "final" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
kyu-kang/final
[ "region:us" ]
2022-12-08T03:38:12+00:00
{"dataset_info": {"features": [{"name": "pixel_values", "dtype": "image"}, {"name": "label", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 68391793.0, "num_examples": 107}], "download_size": 0, "dataset_size": 68391793.0}}
2022-12-08T03:40:00+00:00
[]
[]
TAGS #region-us
# Dataset Card for "final" More Information needed
[ "# Dataset Card for \"final\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"final\"\n\nMore Information needed" ]
9c82f7903d64fef28ff36a409f2f6a99c2083b65
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: pszemraj/long-t5-tglobal-xl-16384-book-summary * Dataset: billsum * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@pszemraj](https://huggingface.co/pszemraj) for evaluating this model.
autoevaluate/autoeval-eval-billsum-default-30a180-2375174515
[ "autotrain", "evaluation", "region:us" ]
2022-12-08T06:06:55+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["billsum"], "eval_info": {"task": "summarization", "model": "pszemraj/long-t5-tglobal-xl-16384-book-summary", "metrics": [], "dataset_name": "billsum", "dataset_config": "default", "dataset_split": "test", "col_mapping": {"text": "text", "target": "summary"}}}
2022-12-08T17:09:09+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: pszemraj/long-t5-tglobal-xl-16384-book-summary * Dataset: billsum * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @pszemraj for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: pszemraj/long-t5-tglobal-xl-16384-book-summary\n* Dataset: billsum\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @pszemraj for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: pszemraj/long-t5-tglobal-xl-16384-book-summary\n* Dataset: billsum\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @pszemraj for evaluating this model." ]
8ec6ef8b2770574f99269e82cd442b10186db4d3
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: pszemraj/long-t5-tglobal-xl-16384-book-summary * Dataset: ccdv/arxiv-summarization * Config: document * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@pszemraj](https://huggingface.co/pszemraj) for evaluating this model.
autoevaluate/autoeval-eval-ccdv__arxiv-summarization-document-dcd037-2375274516
[ "autotrain", "evaluation", "region:us" ]
2022-12-08T06:06:59+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["ccdv/arxiv-summarization"], "eval_info": {"task": "summarization", "model": "pszemraj/long-t5-tglobal-xl-16384-book-summary", "metrics": ["bertscore", "perplexity"], "dataset_name": "ccdv/arxiv-summarization", "dataset_config": "document", "dataset_split": "test", "col_mapping": {"text": "article", "target": "abstract"}}}
2022-12-09T01:24:32+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: pszemraj/long-t5-tglobal-xl-16384-book-summary * Dataset: ccdv/arxiv-summarization * Config: document * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @pszemraj for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: pszemraj/long-t5-tglobal-xl-16384-book-summary\n* Dataset: ccdv/arxiv-summarization\n* Config: document\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @pszemraj for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: pszemraj/long-t5-tglobal-xl-16384-book-summary\n* Dataset: ccdv/arxiv-summarization\n* Config: document\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @pszemraj for evaluating this model." ]
a971f52a130383fcf25dd5c6e5d57f18c273eece
# SMC Malayalam Speech Corpus Malayalam Speech Corpus (MSC) is a repository of curated speech samples collected using the MSC web application, released by Swathanthra Malayalam Computing. The official blog post and source data can be found at [https://blog.smc.org.in/malayalam-speech-corpus/](https://blog.smc.org.in/malayalam-speech-corpus/). ## Dataset Description - **Homepage:** [https://blog.smc.org.in/malayalam-speech-corpus/](https://blog.smc.org.in/malayalam-speech-corpus/) ### Dataset Summary The first version of the Malayalam Speech Corpus contains 1541 speech samples from 75 contributors, amounting to 1:38:16 hours of speech. It has 482 unique sentences, 1400 unique words, 553 unique syllables and 48 unique phonemes.
thennal/msc
[ "task_categories:automatic-speech-recognition", "annotations_creators:crowdsourced", "language_creators:crowdsourced", "multilinguality:monolingual", "size_categories:1K<n<10K", "language:ml", "license:cc-by-sa-4.0", "region:us" ]
2022-12-08T06:19:56+00:00
{"annotations_creators": ["crowdsourced"], "language_creators": ["crowdsourced"], "language": ["ml"], "license": ["cc-by-sa-4.0"], "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "source_datasets": [], "task_categories": ["automatic-speech-recognition"], "task_ids": [], "pretty_name": "Swathanthra Malayalam Computing Malayalam Speech Corpus", "tags": [], "dataset_info": {"features": [{"name": "speechid", "dtype": "string"}, {"name": "speaker_id", "dtype": "string"}, {"name": "review_score", "dtype": "int64"}, {"name": "transcript", "dtype": "string"}, {"name": "category", "dtype": "string"}, {"name": "speaker_gender", "dtype": "string"}, {"name": "speaker_age", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 48000}}}], "splits": [{"name": "train", "num_bytes": 581998721.306, "num_examples": 1541}], "download_size": 422643542, "dataset_size": 581998721.306}}
2022-12-08T06:49:31+00:00
[]
[ "ml" ]
TAGS #task_categories-automatic-speech-recognition #annotations_creators-crowdsourced #language_creators-crowdsourced #multilinguality-monolingual #size_categories-1K<n<10K #language-Malayalam #license-cc-by-sa-4.0 #region-us
# SMC Malayalam Speech Corpus Malayalam Speech Corpus (MSC) is a repository of curated speech samples collected using MSC web application, released by Swathanthra Malayalam Computing. The official blog post and source data can be found at URL ## Dataset Description - Homepage: URL ### Dataset Summary The first version of Malayalam Speech Corpus contains 1541 speech samples from 75 contributors amounting to 1:38:16 hours of speech. It has 482 unique sentences, 1400 unique words, 553 unique syllables and 48 unique phonemes.
[ "# SMC Malayalam Speech Corpus\n\nMalayalam Speech Corpus (MSC) is a repository of curated speech samples collected using MSC web application, released by Swathanthra Malayalam Computing. \nThe official blog post and source data can be found at URL", "## Dataset Description\n\n- Homepage: URL", "### Dataset Summary\n\nThe first version of Malayalam Speech Corpus contains 1541 speech samples from 75 contributors amounting to 1:38:16 hours of speech. It has 482 unique sentences, 1400 unique words, 553 unique syllables and 48 unique phonemes." ]
[ "TAGS\n#task_categories-automatic-speech-recognition #annotations_creators-crowdsourced #language_creators-crowdsourced #multilinguality-monolingual #size_categories-1K<n<10K #language-Malayalam #license-cc-by-sa-4.0 #region-us \n", "# SMC Malayalam Speech Corpus\n\nMalayalam Speech Corpus (MSC) is a repository of curated speech samples collected using MSC web application, released by Swathanthra Malayalam Computing. \nThe official blog post and source data can be found at URL", "## Dataset Description\n\n- Homepage: URL", "### Dataset Summary\n\nThe first version of Malayalam Speech Corpus contains 1541 speech samples from 75 contributors amounting to 1:38:16 hours of speech. It has 482 unique sentences, 1400 unique words, 553 unique syllables and 48 unique phonemes." ]
0b6233b2eb00c57dfb4cee8acedfce128e0133b8
# Dataset Card for "conv_intent_w_inference" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
rungalileo/conv_intent_w_inference
[ "region:us" ]
2022-12-08T06:20:11+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int32"}, {"name": "text", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "PlayMusic", "1": "BookRestaurant", "2": "GetWeather", "3": "SearchCreativeWork", "4": "AddToPlaylist", "5": "RateBook", "6": "SearchScreeningEvent"}}}}, {"name": "str_label", "dtype": "string"}], "splits": [{"name": "inference_example", "num_bytes": 230847, "num_examples": 2723}, {"name": "train", "num_bytes": 856479, "num_examples": 11061}, {"name": "validation", "num_bytes": 56192, "num_examples": 700}], "download_size": 491008, "dataset_size": 1143518}}
2022-12-08T06:20:21+00:00
[]
[]
TAGS #region-us
# Dataset Card for "conv_intent_w_inference" More Information needed
[ "# Dataset Card for \"conv_intent_w_inference\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"conv_intent_w_inference\"\n\nMore Information needed" ]
1345f29321e8020aa4a73e39be9d5bbd27b75b46
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: google/bigbird-pegasus-large-arxiv * Dataset: kmfoda/booksum * Config: kmfoda--booksum * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@pszemraj](https://huggingface.co/pszemraj) for evaluating this model.
autoevaluate/autoeval-eval-kmfoda__booksum-kmfoda__booksum-342ce6-2376174529
[ "autotrain", "evaluation", "region:us" ]
2022-12-08T06:51:15+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["kmfoda/booksum"], "eval_info": {"task": "summarization", "model": "google/bigbird-pegasus-large-arxiv", "metrics": [], "dataset_name": "kmfoda/booksum", "dataset_config": "kmfoda--booksum", "dataset_split": "test", "col_mapping": {"text": "chapter", "target": "summary_text"}}}
2022-12-08T07:39:24+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: google/bigbird-pegasus-large-arxiv * Dataset: kmfoda/booksum * Config: kmfoda--booksum * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @pszemraj for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: google/bigbird-pegasus-large-arxiv\n* Dataset: kmfoda/booksum\n* Config: kmfoda--booksum\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @pszemraj for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: google/bigbird-pegasus-large-arxiv\n* Dataset: kmfoda/booksum\n* Config: kmfoda--booksum\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @pszemraj for evaluating this model." ]
d3d7410306f754c7a581423d057ff04a38ba1648
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: google/bigbird-pegasus-large-pubmed * Dataset: kmfoda/booksum * Config: kmfoda--booksum * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@pszemraj](https://huggingface.co/pszemraj) for evaluating this model.
autoevaluate/autoeval-eval-kmfoda__booksum-kmfoda__booksum-29029e-2376274530
[ "autotrain", "evaluation", "region:us" ]
2022-12-08T06:51:18+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["kmfoda/booksum"], "eval_info": {"task": "summarization", "model": "google/bigbird-pegasus-large-pubmed", "metrics": [], "dataset_name": "kmfoda/booksum", "dataset_config": "kmfoda--booksum", "dataset_split": "test", "col_mapping": {"text": "chapter", "target": "summary_text"}}}
2022-12-08T07:39:56+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: google/bigbird-pegasus-large-pubmed * Dataset: kmfoda/booksum * Config: kmfoda--booksum * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @pszemraj for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: google/bigbird-pegasus-large-pubmed\n* Dataset: kmfoda/booksum\n* Config: kmfoda--booksum\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @pszemraj for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: google/bigbird-pegasus-large-pubmed\n* Dataset: kmfoda/booksum\n* Config: kmfoda--booksum\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @pszemraj for evaluating this model." ]
cee2f2d4c0f18879213e5029b3c35576fb9422c8
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: google/bigbird-pegasus-large-bigpatent * Dataset: kmfoda/booksum * Config: kmfoda--booksum * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@pszemraj](https://huggingface.co/pszemraj) for evaluating this model.
autoevaluate/autoeval-eval-kmfoda__booksum-kmfoda__booksum-29029e-2376274531
[ "autotrain", "evaluation", "region:us" ]
2022-12-08T06:51:25+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["kmfoda/booksum"], "eval_info": {"task": "summarization", "model": "google/bigbird-pegasus-large-bigpatent", "metrics": [], "dataset_name": "kmfoda/booksum", "dataset_config": "kmfoda--booksum", "dataset_split": "test", "col_mapping": {"text": "chapter", "target": "summary_text"}}}
2022-12-08T07:40:11+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: google/bigbird-pegasus-large-bigpatent * Dataset: kmfoda/booksum * Config: kmfoda--booksum * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @pszemraj for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: google/bigbird-pegasus-large-bigpatent\n* Dataset: kmfoda/booksum\n* Config: kmfoda--booksum\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @pszemraj for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: google/bigbird-pegasus-large-bigpatent\n* Dataset: kmfoda/booksum\n* Config: kmfoda--booksum\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @pszemraj for evaluating this model." ]
502e746a9d9640db3f706b4416209404ada6a5b1
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: google/bigbird-pegasus-large-bigpatent * Dataset: billsum * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@sam-mosaic](https://huggingface.co/sam-mosaic) for evaluating this model.
autoevaluate/autoeval-eval-billsum-default-e23aac-2376574533
[ "autotrain", "evaluation", "region:us" ]
2022-12-08T07:06:13+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["billsum"], "eval_info": {"task": "summarization", "model": "google/bigbird-pegasus-large-bigpatent", "metrics": [], "dataset_name": "billsum", "dataset_config": "default", "dataset_split": "test", "col_mapping": {"text": "text", "target": "summary"}}}
2022-12-08T08:51:32+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: google/bigbird-pegasus-large-bigpatent * Dataset: billsum * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @sam-mosaic for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: google/bigbird-pegasus-large-bigpatent\n* Dataset: billsum\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @sam-mosaic for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: google/bigbird-pegasus-large-bigpatent\n* Dataset: billsum\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @sam-mosaic for evaluating this model." ]
ea72d5e0f10537b11919e379e4b8e88b179a62ad
# Indic TTS Malayalam Speech Corpus The Malayalam subset of [Indic TTS Corpus](https://www.iitm.ac.in/donlab/tts/index.php), taken from [this Kaggle database.](https://www.kaggle.com/datasets/kavyamanohar/indic-tts-malayalam-speech-corpus) The corpus contains one male and one female speaker, with a 2:1 ratio of samples due to missing files for the female speaker. The license is given in the repository.
thennal/indic_tts_ml
[ "task_categories:text-to-speech", "task_categories:automatic-speech-recognition", "multilinguality:monolingual", "size_categories:1K<n<10K", "language:ml", "license:other", "region:us" ]
2022-12-08T07:18:47+00:00
{"annotations_creators": [], "language_creators": [], "language": ["ml"], "license": ["other"], "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "source_datasets": [], "task_categories": ["text-to-speech", "automatic-speech-recognition"], "task_ids": [], "pretty_name": "Indic TTS Malayalam Speech Corpus", "dataset_info": {"features": [{"name": "audio", "dtype": "audio"}, {"name": "text", "dtype": "string"}, {"name": "gender", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4830182115.4, "num_examples": 8600}], "download_size": 3966895730, "dataset_size": 4830182115.4}, "tags": []}
2022-12-08T20:23:33+00:00
[]
[ "ml" ]
TAGS #task_categories-text-to-speech #task_categories-automatic-speech-recognition #multilinguality-monolingual #size_categories-1K<n<10K #language-Malayalam #license-other #region-us
# Indic TTS Malayalam Speech Corpus The Malayalam subset of Indic TTS Corpus, taken from this Kaggle database. The corpus contains one male and one female speaker, with a 2:1 ratio of samples due to missing files for the female speaker. The license is given in the repository.
[ "# Indic TTS Malayalam Speech Corpus\nThe Malayalam subset of Indic TTS Corpus, taken from\nthis Kaggle database. The corpus contains\none male and one female speaker, with a 2:1 ratio of samples due to missing files for the female speaker. The license is given\nin the repository." ]
[ "TAGS\n#task_categories-text-to-speech #task_categories-automatic-speech-recognition #multilinguality-monolingual #size_categories-1K<n<10K #language-Malayalam #license-other #region-us \n", "# Indic TTS Malayalam Speech Corpus\nThe Malayalam subset of Indic TTS Corpus, taken from\nthis Kaggle database. The corpus contains\none male and one female speaker, with a 2:1 ratio of samples due to missing files for the female speaker. The license is given\nin the repository." ]
cb310ffbe09bccb23393b94bad25267a03d5766a
# Dataset Card for "competency_games" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
joshr/competency_games
[ "region:us" ]
2022-12-08T08:46:34+00:00
{"dataset_info": {"features": [{"name": "description", "dtype": "string"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "sg_train", "num_bytes": 51120000, "num_examples": 90000}, {"name": "sg_validation", "num_bytes": 2726400, "num_examples": 4800}, {"name": "sg_test", "num_bytes": 2726400, "num_examples": 4800}, {"name": "sg_llm_eval", "num_bytes": 272640, "num_examples": 480}, {"name": "sg_exemplar", "num_bytes": 6816, "num_examples": 12}, {"name": "cc_train", "num_bytes": 119621976, "num_examples": 90000}, {"name": "cc_validation", "num_bytes": 6379974, "num_examples": 4800}, {"name": "cc_test", "num_bytes": 6380186, "num_examples": 4800}, {"name": "cc_llm_eval", "num_bytes": 637971, "num_examples": 480}, {"name": "cc_exemplar", "num_bytes": 15965, "num_examples": 12}, {"name": "hn_train", "num_bytes": 183955709, "num_examples": 90000}, {"name": "hn_validation", "num_bytes": 9810256, "num_examples": 4800}, {"name": "hn_test", "num_bytes": 9809441, "num_examples": 4800}, {"name": "hn_llm_eval", "num_bytes": 981379, "num_examples": 480}, {"name": "hn_exemplar", "num_bytes": 24478, "num_examples": 12}, {"name": "ring_train", "num_bytes": 79429571, "num_examples": 90000}, {"name": "ring_validation", "num_bytes": 4236560, "num_examples": 4800}, {"name": "ring_test", "num_bytes": 4236287, "num_examples": 4800}, {"name": "ring_llm_eval", "num_bytes": 423614, "num_examples": 480}, {"name": "ring_exemplar", "num_bytes": 10597, "num_examples": 12}, {"name": "voting_train", "num_bytes": 48250182, "num_examples": 90000}, {"name": "voting_validation", "num_bytes": 2573367, "num_examples": 4800}, {"name": "voting_test", "num_bytes": 2573448, "num_examples": 4800}, {"name": "voting_llm_eval", "num_bytes": 257391, "num_examples": 480}, {"name": "voting_exemplar", "num_bytes": 6430, "num_examples": 12}], "download_size": 93750310, "dataset_size": 536467038}}
2022-12-08T08:53:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "competency_games" More Information needed
[ "# Dataset Card for \"competency_games\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"competency_games\"\n\nMore Information needed" ]
c496e013c584ceab9e834b2a93e1cd9766b9eb1b
# Dataset Card for Deezer ego nets ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [External Use](#external-use) - [PyGeometric](#pygeometric) - [Dataset Structure](#dataset-structure) - [Data Properties](#data-properties) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Additional Information](#additional-information) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **[Homepage](https://snap.stanford.edu/data/deezer_ego_nets.html)** - **Paper:** (see citation) ### Dataset Summary The Deezer ego nets dataset contains ego-nets of Eastern European users collected from the music streaming service Deezer in February 2020. Nodes are users and edges are mutual follower relationships. ### Supported Tasks and Leaderboards The related task is the binary classification to predict gender for the ego node in the graph. ## External Use ### PyGeometric To load in PyGeometric, do the following: ```python import torch from datasets import load_dataset from torch_geometric.data import Data from torch_geometric.loader import DataLoader dataset_hf = load_dataset("graphs-datasets/<mydataset>") # For the train set (replace by valid or test as needed) dataset_pg_list = [Data(edge_index=torch.tensor(graph["edge_index"]), y=torch.tensor(graph["y"]), num_nodes=graph["num_nodes"]) for graph in dataset_hf["train"]] dataset_pg = DataLoader(dataset_pg_list) ``` ## Dataset Structure ### Data Fields Each row of a given file is a graph, with: - `edge_index` (list: 2 x #edges): pairs of nodes constituting edges - `y` (list: #labels): contains the number of labels available to predict - `num_nodes` (int): number of nodes of the graph ### Data Splits This data is not split, and should be used with cross validation. It comes from the PyGeometric version of the dataset. ## Additional Information ### Licensing Information The dataset has been released under GPL-3.0 license. ### Citation Information See also [github](https://github.com/benedekrozemberczki/karateclub). ``` @inproceedings{karateclub, title = {{Karate Club: An API Oriented Open-source Python Framework for Unsupervised Learning on Graphs}}, author = {Benedek Rozemberczki and Oliver Kiss and Rik Sarkar}, year = {2020}, pages = {3125–3132}, booktitle = {Proceedings of the 29th ACM International Conference on Information and Knowledge Management (CIKM '20)}, organization = {ACM}, } ```
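As a concrete illustration of the cross-validation note in the Data Splits section, here is a small sketch assuming `scikit-learn` and the `datasets` library are installed; the fold count and seed are illustrative, not prescribed by the dataset:

```python
import numpy as np
from datasets import load_dataset
from sklearn.model_selection import KFold

# The dataset ships as a single "train" split, so folds are built manually.
graphs = load_dataset("graphs-datasets/deezer_ego_nets", split="train")
kfold = KFold(n_splits=10, shuffle=True, random_state=0)

for fold, (train_idx, test_idx) in enumerate(kfold.split(np.arange(len(graphs)))):
    train_graphs = graphs.select(train_idx)
    test_graphs = graphs.select(test_idx)
    # ... convert rows to PyG Data objects as shown above, train, and evaluate ...
```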
graphs-datasets/deezer_ego_nets
[ "task_categories:graph-ml", "license:gpl-3.0", "region:us" ]
2022-12-08T09:38:31+00:00
{"license": "gpl-3.0", "task_categories": ["graph-ml"], "licence": "unknown"}
2023-02-07T16:36:48+00:00
[]
[]
TAGS #task_categories-graph-ml #license-gpl-3.0 #region-us
# Dataset Card for Deezer ego nets ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - External Use - PyGeometric - Dataset Structure - Data Properties - Data Fields - Data Splits - Additional Information - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage - Paper:: (see citation) ### Dataset Summary The Deezer ego nets dataset contains ego-nets of Eastern European users collected from the music streaming service Deezer in February 2020. Nodes are users and edges are mutual follower relationships. ### Supported Tasks and Leaderboards The related task is the binary classification to predict gender for the ego node in the graph. ## External Use ### PyGeometric To load in PyGeometric, do the following: ## Dataset Structure ### Data Fields Each row of a given file is a graph, with: - 'edge_index' (list: 2 x #edges): pairs of nodes constituting edges - 'y' (list: #labels): contains the number of labels available to predict - 'num_nodes' (int): number of nodes of the graph ### Data Splits This data is not split, and should be used with cross validation. It comes from the PyGeometric version of the dataset. ## Additional Information ### Licensing Information The dataset has been released under GPL-3.0 license. See also github.
[ "# Dataset Card for Deezer ego nets", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n- External Use\n - PyGeometric\n- Dataset Structure\n - Data Properties\n - Data Fields\n - Data Splits\n- Additional Information\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n- Homepage\n- Paper:: (see citation)", "### Dataset Summary\nThe Deezer ego nets dataset contains ego-nets of Eastern European users collected from the music streaming service Deezer in February 2020. Nodes are users and edges are mutual follower relationships.", "### Supported Tasks and Leaderboards\nThe related task is the binary classification to predict gender for the ego node in the graph.", "## External Use", "### PyGeometric\nTo load in PyGeometric, do the following:", "## Dataset Structure", "### Data Fields\n\nEach row of a given file is a graph, with: \n- 'edge_index' (list: 2 x #edges): pairs of nodes constituting edges\n- 'y' (list: #labels): contains the number of labels available to predict\n- 'num_nodes' (int): number of nodes of the graph", "### Data Splits\n\nThis data is not split, and should be used with cross validation. It comes from the PyGeometric version of the dataset.", "## Additional Information", "### Licensing Information\nThe dataset has been released under GPL-3.0 license.\n\n\nSee also github." ]
[ "TAGS\n#task_categories-graph-ml #license-gpl-3.0 #region-us \n", "# Dataset Card for Deezer ego nets", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n- External Use\n - PyGeometric\n- Dataset Structure\n - Data Properties\n - Data Fields\n - Data Splits\n- Additional Information\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n- Homepage\n- Paper:: (see citation)", "### Dataset Summary\nThe Deezer ego nets dataset contains ego-nets of Eastern European users collected from the music streaming service Deezer in February 2020. Nodes are users and edges are mutual follower relationships.", "### Supported Tasks and Leaderboards\nThe related task is the binary classification to predict gender for the ego node in the graph.", "## External Use", "### PyGeometric\nTo load in PyGeometric, do the following:", "## Dataset Structure", "### Data Fields\n\nEach row of a given file is a graph, with: \n- 'edge_index' (list: 2 x #edges): pairs of nodes constituting edges\n- 'y' (list: #labels): contains the number of labels available to predict\n- 'num_nodes' (int): number of nodes of the graph", "### Data Splits\n\nThis data is not split, and should be used with cross validation. It comes from the PyGeometric version of the dataset.", "## Additional Information", "### Licensing Information\nThe dataset has been released under GPL-3.0 license.\n\n\nSee also github." ]
8c636656950ac009470d4af3829e4db538bdd507
# Dataset Card for Reddit threads ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [External Use](#external-use) - [PyGeometric](#pygeometric) - [Dataset Structure](#dataset-structure) - [Data Properties](#data-properties) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Additional Information](#additional-information) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **[Homepage](https://snap.stanford.edu/data/reddit_threads.html)** - **Paper:**: (see citation) ### Dataset Summary The `Reddit threads` dataset contains 'discussion and non-discussion based threads from Reddit which we collected in May 2018. Nodes are Reddit users who participate in a discussion and links are replies between them' (doc). ### Supported Tasks and Leaderboards The related task is the binary classification to predict whether a thread is discussion based or not. ## External Use ### PyGeometric To load in PyGeometric, do the following: ```python from datasets import load_dataset from torch_geometric.data import Data from torch_geometric.loader import DataLoader dataset_hf = load_dataset("graphs-datasets/<mydataset>") # For the train set (replace by valid or test as needed) dataset_pg_list = [Data(graph) for graph in dataset_hf["train"]] dataset_pg = DataLoader(dataset_pg_list) ``` ## Dataset Structure ### Dataset information - 203,088 graphs ### Data Fields Each row of a given file is a graph, with: - `edge_index` (list: 2 x #edges): pairs of nodes constituting edges - `y` (list: #labels): contains the number of labels available to predict - `num_nodes` (int): number of nodes of the graph ### Data Splits This data is not split, and should be used with cross validation. It comes from the PyGeometric version of the dataset. ## Additional Information ### Licensing Information The dataset has been released under GPL-3.0 license. ### Citation Information See also [github](https://github.com/benedekrozemberczki/karateclub). ``` @inproceedings{karateclub, title = {{Karate Club: An API Oriented Open-source Python Framework for Unsupervised Learning on Graphs}}, author = {Benedek Rozemberczki and Oliver Kiss and Rik Sarkar}, year = {2020}, pages = {3125–3132}, booktitle = {Proceedings of the 29th ACM International Conference on Information and Knowledge Management (CIKM '20)}, organization = {ACM}, } ```
graphs-datasets/reddit_threads
[ "task_categories:graph-ml", "license:gpl-3.0", "region:us" ]
2022-12-08T09:40:25+00:00
{"license": "gpl-3.0", "task_categories": ["graph-ml"]}
2023-02-07T16:36:39+00:00
[]
[]
TAGS #task_categories-graph-ml #license-gpl-3.0 #region-us
# Dataset Card for Reddit threads ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - External Use - PyGeometric - Dataset Structure - Data Properties - Data Fields - Data Splits - Additional Information - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage - Paper:: (see citation) ### Dataset Summary The 'Reddit threads' dataset contains 'discussion and non-discussion based threads from Reddit which we collected in May 2018. Nodes are Reddit users who participate in a discussion and links are replies between them' (doc). ### Supported Tasks and Leaderboards The related task is the binary classification to predict whether a thread is discussion based or not. ## External Use ### PyGeometric To load in PyGeometric, do the following: ## Dataset Structure ### Dataset information - 203,088 graphs ### Data Fields Each row of a given file is a graph, with: - 'edge_index' (list: 2 x #edges): pairs of nodes constituting edges - 'y' (list: #labels): contains the number of labels available to predict - 'num_nodes' (int): number of nodes of the graph ### Data Splits This data is not split, and should be used with cross validation. It comes from the PyGeometric version of the dataset. ## Additional Information ### Licensing Information The dataset has been released under GPL-3.0 license. See also github.
[ "# Dataset Card for Reddit threads", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n- External Use\n - PyGeometric\n- Dataset Structure\n - Data Properties\n - Data Fields\n - Data Splits\n- Additional Information\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n- Homepage\n- Paper:: (see citation)", "### Dataset Summary\nThe 'Reddit threads' dataset contains 'discussion and non-discussion based threads from Reddit which we collected in May 2018. Nodes are Reddit users who participate in a discussion and links are replies between them' (doc).", "### Supported Tasks and Leaderboards\nThe related task is the binary classification to predict whether a thread is discussion based or not.", "## External Use", "### PyGeometric\nTo load in PyGeometric, do the following:", "## Dataset Structure", "### Dataset information\n- 203,088 graphs", "### Data Fields\n\nEach row of a given file is a graph, with: \n- 'edge_index' (list: 2 x #edges): pairs of nodes constituting edges\n- 'y' (list: #labels): contains the number of labels available to predict\n- 'num_nodes' (int): number of nodes of the graph", "### Data Splits\n\nThis data is not split, and should be used with cross validation. It comes from the PyGeometric version of the dataset.", "## Additional Information", "### Licensing Information\nThe dataset has been released under GPL-3.0 license.\n\n\nSee also github." ]
[ "TAGS\n#task_categories-graph-ml #license-gpl-3.0 #region-us \n", "# Dataset Card for Reddit threads", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n- External Use\n - PyGeometric\n- Dataset Structure\n - Data Properties\n - Data Fields\n - Data Splits\n- Additional Information\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n- Homepage\n- Paper:: (see citation)", "### Dataset Summary\nThe 'Reddit threads' dataset contains 'discussion and non-discussion based threads from Reddit which we collected in May 2018. Nodes are Reddit users who participate in a discussion and links are replies between them' (doc).", "### Supported Tasks and Leaderboards\nThe related task is the binary classification to predict whether a thread is discussion based or not.", "## External Use", "### PyGeometric\nTo load in PyGeometric, do the following:", "## Dataset Structure", "### Dataset information\n- 203,088 graphs", "### Data Fields\n\nEach row of a given file is a graph, with: \n- 'edge_index' (list: 2 x #edges): pairs of nodes constituting edges\n- 'y' (list: #labels): contains the number of labels available to predict\n- 'num_nodes' (int): number of nodes of the graph", "### Data Splits\n\nThis data is not split, and should be used with cross validation. It comes from the PyGeometric version of the dataset.", "## Additional Information", "### Licensing Information\nThe dataset has been released under GPL-3.0 license.\n\n\nSee also github." ]
c01d97cec814c6b18e476178ea10792b7b88ab14
# Dataset Card for Twitch ego nets ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [External Use](#external-use) - [PyGeometric](#pygeometric) - [Dataset Structure](#dataset-structure) - [Data Properties](#data-properties) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Additional Information](#additional-information) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **[Homepage](https://snap.stanford.edu/data/twitch_ego_nets.html)** - **Paper:**: (see citation) ### Dataset Summary The `Twitch ego nets` dataset contains ' ego-nets of Twitch users who participated in the partnership program in April 2018. Nodes are users and links are friendships.' (doc). ### Supported Tasks and Leaderboards The related task is the binary classification to predict whether a user plays a single or multple games. ## External Use ### PyGeometric To load in PyGeometric, do the following: ```python from datasets import load_dataset from torch_geometric.data import Data from torch_geometric.loader import DataLoader dataset_hf = load_dataset("graphs-datasets/<mydataset>") # For the train set (replace by valid or test as needed) dataset_pg_list = [Data(graph) for graph in dataset_hf["train"]] dataset_pg = DataLoader(dataset_pg_list) ``` ## Dataset Structure ### Dataset information - 127,094 graphs ### Data Fields Each row of a given file is a graph, with: - `edge_index` (list: 2 x #edges): pairs of nodes constituting edges - `y` (list: #labels): contains the number of labels available to predict - `num_nodes` (int): number of nodes of the graph ### Data Splits This data is not split, and should be used with cross validation. It comes from the PyGeometric version of the dataset. ## Additional Information ### Licensing Information The dataset has been released under GPL-3.0 license. ### Citation Information See also [github](https://github.com/benedekrozemberczki/karateclub). ``` @inproceedings{karateclub, title = {{Karate Club: An API Oriented Open-source Python Framework for Unsupervised Learning on Graphs}}, author = {Benedek Rozemberczki and Oliver Kiss and Rik Sarkar}, year = {2020}, pages = {3125–3132}, booktitle = {Proceedings of the 29th ACM International Conference on Information and Knowledge Management (CIKM '20)}, organization = {ACM}, } ```
graphs-datasets/twitch_egos
[ "task_categories:graph-ml", "license:gpl-3.0", "region:us" ]
2022-12-08T09:42:51+00:00
{"license": "gpl-3.0", "task_categories": ["graph-ml"]}
2023-02-07T16:36:30+00:00
[]
[]
TAGS #task_categories-graph-ml #license-gpl-3.0 #region-us
# Dataset Card for Twitch ego nets ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - External Use - PyGeometric - Dataset Structure - Data Properties - Data Fields - Data Splits - Additional Information - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage - Paper:: (see citation) ### Dataset Summary The 'Twitch ego nets' dataset contains ' ego-nets of Twitch users who participated in the partnership program in April 2018. Nodes are users and links are friendships.' (doc). ### Supported Tasks and Leaderboards The related task is the binary classification to predict whether a user plays a single or multple games. ## External Use ### PyGeometric To load in PyGeometric, do the following: ## Dataset Structure ### Dataset information - 127,094 graphs ### Data Fields Each row of a given file is a graph, with: - 'edge_index' (list: 2 x #edges): pairs of nodes constituting edges - 'y' (list: #labels): contains the number of labels available to predict - 'num_nodes' (int): number of nodes of the graph ### Data Splits This data is not split, and should be used with cross validation. It comes from the PyGeometric version of the dataset. ## Additional Information ### Licensing Information The dataset has been released under GPL-3.0 license. See also github.
[ "# Dataset Card for Twitch ego nets", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n- External Use\n - PyGeometric\n- Dataset Structure\n - Data Properties\n - Data Fields\n - Data Splits\n- Additional Information\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n- Homepage\n- Paper:: (see citation)", "### Dataset Summary\nThe 'Twitch ego nets' dataset contains ' ego-nets of Twitch users who participated in the partnership program in April 2018. Nodes are users and links are friendships.' (doc).", "### Supported Tasks and Leaderboards\nThe related task is the binary classification to predict whether a user plays a single or multple games.", "## External Use", "### PyGeometric\nTo load in PyGeometric, do the following:", "## Dataset Structure", "### Dataset information\n- 127,094 graphs", "### Data Fields\n\nEach row of a given file is a graph, with: \n- 'edge_index' (list: 2 x #edges): pairs of nodes constituting edges\n- 'y' (list: #labels): contains the number of labels available to predict\n- 'num_nodes' (int): number of nodes of the graph", "### Data Splits\n\nThis data is not split, and should be used with cross validation. It comes from the PyGeometric version of the dataset.", "## Additional Information", "### Licensing Information\nThe dataset has been released under GPL-3.0 license.\n\n\nSee also github." ]
[ "TAGS\n#task_categories-graph-ml #license-gpl-3.0 #region-us \n", "# Dataset Card for Twitch ego nets", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n- External Use\n - PyGeometric\n- Dataset Structure\n - Data Properties\n - Data Fields\n - Data Splits\n- Additional Information\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n- Homepage\n- Paper:: (see citation)", "### Dataset Summary\nThe 'Twitch ego nets' dataset contains ' ego-nets of Twitch users who participated in the partnership program in April 2018. Nodes are users and links are friendships.' (doc).", "### Supported Tasks and Leaderboards\nThe related task is the binary classification to predict whether a user plays a single or multple games.", "## External Use", "### PyGeometric\nTo load in PyGeometric, do the following:", "## Dataset Structure", "### Dataset information\n- 127,094 graphs", "### Data Fields\n\nEach row of a given file is a graph, with: \n- 'edge_index' (list: 2 x #edges): pairs of nodes constituting edges\n- 'y' (list: #labels): contains the number of labels available to predict\n- 'num_nodes' (int): number of nodes of the graph", "### Data Splits\n\nThis data is not split, and should be used with cross validation. It comes from the PyGeometric version of the dataset.", "## Additional Information", "### Licensing Information\nThe dataset has been released under GPL-3.0 license.\n\n\nSee also github." ]
8aa98e62b05c881c9e22005fa662d72f0426ec7f
# Dataset Card for MNIST ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [External Use](#external-use) - [PyGeometric](#pygeometric) - [Dataset Structure](#dataset-structure) - [Data Properties](#data-properties) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Additional Information](#additional-information) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **[Homepage](https://github.com/graphdeeplearning/benchmarking-gnns)** - **Paper:**: (see citation) ### Dataset Summary The `MNIST` dataset consists of 55000 images in 10 classes, represented as graphs. It comes from a computer vision dataset. ### Supported Tasks and Leaderboards `MNIST` should be used for multiclass graph classification. ## External Use ### PyGeometric To load in PyGeometric, do the following: ```python from datasets import load_dataset from torch_geometric.data import Data from torch_geometric.loader import DataLoader dataset_hf = load_dataset("graphs-datasets/<mydataset>") # For the train set (replace by valid or test as needed) dataset_pg_list = [Data(graph) for graph in dataset_hf["train"]] dataset_pg = DataLoader(dataset_pg_list) ``` ## Dataset Structure ### Data Properties | property | value | |---|---| | #graphs | 55,000 | | average #nodes | 70.6 | | average #edges | 564.5 | ### Data Fields Each row of a given file is a graph, with: - `node_feat` (list: #nodes x #node-features): nodes - `edge_index` (list: 2 x #edges): pairs of nodes constituting edges - `edge_attr` (list: #edges x #edge-features): for the aforementioned edges, contains their features - `y` (list: #labels): contains the number of labels available to predict - `num_nodes` (int): number of nodes of the graph - `pos` (list: 2 x #node): positional information of each node ### Data Splits This data is split. It comes from the PyGeometric version of the dataset. ## Additional Information ### Licensing Information The dataset has been released under MIT license. ### Citation Information ``` @article{DBLP:journals/corr/abs-2003-00982, author = {Vijay Prakash Dwivedi and Chaitanya K. Joshi and Thomas Laurent and Yoshua Bengio and Xavier Bresson}, title = {Benchmarking Graph Neural Networks}, journal = {CoRR}, volume = {abs/2003.00982}, year = {2020}, url = {https://arxiv.org/abs/2003.00982}, eprinttype = {arXiv}, eprint = {2003.00982}, timestamp = {Sat, 23 Jan 2021 01:14:30 +0100}, biburl = {https://dblp.org/rec/journals/corr/abs-2003-00982.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ```
graphs-datasets/MNIST
[ "task_categories:graph-ml", "license:mit", "arxiv:2003.00982", "region:us" ]
2022-12-08T09:52:08+00:00
{"license": "mit", "task_categories": ["graph-ml"]}
2023-02-07T16:37:15+00:00
[ "2003.00982" ]
[]
TAGS #task_categories-graph-ml #license-mit #arxiv-2003.00982 #region-us
Dataset Card for MNIST ====================== Table of Contents ----------------- * Table of Contents * Dataset Description + Dataset Summary + Supported Tasks and Leaderboards * External Use + PyGeometric * Dataset Structure + Data Properties + Data Fields + Data Splits * Additional Information + Licensing Information + Citation Information + Contributions Dataset Description ------------------- * Homepage * Paper:: (see citation) ### Dataset Summary The 'MNIST' dataset consists of 55000 images in 10 classes, represented as graphs. It comes from a computer vision dataset. ### Supported Tasks and Leaderboards 'MNIST' should be used for multiclass graph classification. External Use ------------ ### PyGeometric To load in PyGeometric, do the following: Dataset Structure ----------------- ### Data Properties ### Data Fields Each row of a given file is a graph, with: * 'node\_feat' (list: #nodes x #node-features): nodes * 'edge\_index' (list: 2 x #edges): pairs of nodes constituting edges * 'edge\_attr' (list: #edges x #edge-features): for the aforementioned edges, contains their features * 'y' (list: #labels): contains the number of labels available to predict * 'num\_nodes' (int): number of nodes of the graph * 'pos' (list: 2 x #node): positional information of each node ### Data Splits This data is split. It comes from the PyGeometric version of the dataset. Additional Information ---------------------- ### Licensing Information The dataset has been released under MIT license.
[ "### Dataset Summary\n\n\nThe 'MNIST' dataset consists of 55000 images in 10 classes, represented as graphs. It comes from a computer vision dataset.", "### Supported Tasks and Leaderboards\n\n\n'MNIST' should be used for multiclass graph classification.\n\n\nExternal Use\n------------", "### PyGeometric\n\n\nTo load in PyGeometric, do the following:\n\n\nDataset Structure\n-----------------", "### Data Properties", "### Data Fields\n\n\nEach row of a given file is a graph, with:\n\n\n* 'node\\_feat' (list: #nodes x #node-features): nodes\n* 'edge\\_index' (list: 2 x #edges): pairs of nodes constituting edges\n* 'edge\\_attr' (list: #edges x #edge-features): for the aforementioned edges, contains their features\n* 'y' (list: #labels): contains the number of labels available to predict\n* 'num\\_nodes' (int): number of nodes of the graph\n* 'pos' (list: 2 x #node): positional information of each node", "### Data Splits\n\n\nThis data is split. It comes from the PyGeometric version of the dataset.\n\n\nAdditional Information\n----------------------", "### Licensing Information\n\n\nThe dataset has been released under MIT license." ]
[ "TAGS\n#task_categories-graph-ml #license-mit #arxiv-2003.00982 #region-us \n", "### Dataset Summary\n\n\nThe 'MNIST' dataset consists of 55000 images in 10 classes, represented as graphs. It comes from a computer vision dataset.", "### Supported Tasks and Leaderboards\n\n\n'MNIST' should be used for multiclass graph classification.\n\n\nExternal Use\n------------", "### PyGeometric\n\n\nTo load in PyGeometric, do the following:\n\n\nDataset Structure\n-----------------", "### Data Properties", "### Data Fields\n\n\nEach row of a given file is a graph, with:\n\n\n* 'node\\_feat' (list: #nodes x #node-features): nodes\n* 'edge\\_index' (list: 2 x #edges): pairs of nodes constituting edges\n* 'edge\\_attr' (list: #edges x #edge-features): for the aforementioned edges, contains their features\n* 'y' (list: #labels): contains the number of labels available to predict\n* 'num\\_nodes' (int): number of nodes of the graph\n* 'pos' (list: 2 x #node): positional information of each node", "### Data Splits\n\n\nThis data is split. It comes from the PyGeometric version of the dataset.\n\n\nAdditional Information\n----------------------", "### Licensing Information\n\n\nThe dataset has been released under MIT license." ]
c3d5c7aa3321c405b2818dc5f89125a84e28c7e4
# Dataset Card for CIFAR10 ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [External Use](#external-use) - [PyGeometric](#pygeometric) - [Dataset Structure](#dataset-structure) - [Data Properties](#data-properties) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Additional Information](#additional-information) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **[Homepage](https://github.com/graphdeeplearning/benchmarking-gnns)** - **Paper:**: (see citation) ### Dataset Summary The `CIFAR10` dataset consists of 45000 images in 10 classes, represented as graphs. ### Supported Tasks and Leaderboards `CIFAR10` should be used for multiclass graph classification. ## External Use ### PyGeometric To load in PyGeometric, do the following: ```python from datasets import load_dataset from torch_geometric.data import Data from torch_geometric.loader import DataLoader dataset_hf = load_dataset("graphs-datasets/<mydataset>") # For the train set (replace by valid or test as needed) dataset_pg_list = [Data(graph) for graph in dataset_hf["train"]] dataset_pg = DataLoader(dataset_pg_list) ``` ## Dataset Structure ### Data Properties | property | value | |---|---| | #graphs | 45,000 | | average #nodes | 117.6 | | average #edges | 941.2 | ### Data Fields Each row of a given file is a graph, with: - `node_feat` (list: #nodes x #node-features): nodes - `edge_index` (list: 2 x #edges): pairs of nodes constituting edges - `edge_attr` (list: #edges x #edge-features): for the aforementioned edges, contains their features - `y` (list: #labels): contains the number of labels available to predict - `num_nodes` (int): number of nodes of the graph - `pos` (list: 2 x #node): positional information of each node ### Data Splits This data is split. It comes from the PyGeometric version of the dataset. ## Additional Information ### Licensing Information The dataset has been released under MIT license. ### Citation Information ``` @article{DBLP:journals/corr/abs-2003-00982, author = {Vijay Prakash Dwivedi and Chaitanya K. Joshi and Thomas Laurent and Yoshua Bengio and Xavier Bresson}, title = {Benchmarking Graph Neural Networks}, journal = {CoRR}, volume = {abs/2003.00982}, year = {2020}, url = {https://arxiv.org/abs/2003.00982}, eprinttype = {arXiv}, eprint = {2003.00982}, timestamp = {Sat, 23 Jan 2021 01:14:30 +0100}, biburl = {https://dblp.org/rec/journals/corr/abs-2003-00982.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ```
graphs-datasets/CIFAR10
[ "task_categories:graph-ml", "license:mit", "arxiv:2003.00982", "region:us" ]
2022-12-08T09:59:00+00:00
{"license": "mit", "task_categories": ["graph-ml"], "licence": "unknown"}
2023-02-07T16:37:24+00:00
[ "2003.00982" ]
[]
TAGS #task_categories-graph-ml #license-mit #arxiv-2003.00982 #region-us
Dataset Card for CIFAR10 ======================== Table of Contents ----------------- * Table of Contents * Dataset Description + Dataset Summary + Supported Tasks and Leaderboards * External Use + PyGeometric * Dataset Structure + Data Properties + Data Fields + Data Splits * Additional Information + Licensing Information + Citation Information + Contributions Dataset Description ------------------- * Homepage * Paper:: (see citation) ### Dataset Summary The 'CIFAR10' dataset consists of 45000 images in 10 classes, represented as graphs. ### Supported Tasks and Leaderboards 'CIFAR10' should be used for multiclass graph classification. External Use ------------ ### PyGeometric To load in PyGeometric, do the following: Dataset Structure ----------------- ### Data Properties ### Data Fields Each row of a given file is a graph, with: * 'node\_feat' (list: #nodes x #node-features): nodes * 'edge\_index' (list: 2 x #edges): pairs of nodes constituting edges * 'edge\_attr' (list: #edges x #edge-features): for the aforementioned edges, contains their features * 'y' (list: #labels): contains the number of labels available to predict * 'num\_nodes' (int): number of nodes of the graph * 'pos' (list: 2 x #node): positional information of each node ### Data Splits This data is split. It comes from the PyGeometric version of the dataset. Additional Information ---------------------- ### Licensing Information The dataset has been released under MIT license.
[ "### Dataset Summary\n\n\nThe 'CIFAR10' dataset consists of 45000 images in 10 classes, represented as graphs.", "### Supported Tasks and Leaderboards\n\n\n'CIFAR10' should be used for multiclass graph classification.\n\n\nExternal Use\n------------", "### PyGeometric\n\n\nTo load in PyGeometric, do the following:\n\n\nDataset Structure\n-----------------", "### Data Properties", "### Data Fields\n\n\nEach row of a given file is a graph, with:\n\n\n* 'node\\_feat' (list: #nodes x #node-features): nodes\n* 'edge\\_index' (list: 2 x #edges): pairs of nodes constituting edges\n* 'edge\\_attr' (list: #edges x #edge-features): for the aforementioned edges, contains their features\n* 'y' (list: #labels): contains the number of labels available to predict\n* 'num\\_nodes' (int): number of nodes of the graph\n* 'pos' (list: 2 x #node): positional information of each node", "### Data Splits\n\n\nThis data is split. It comes from the PyGeometric version of the dataset.\n\n\nAdditional Information\n----------------------", "### Licensing Information\n\n\nThe dataset has been released under MIT license." ]
[ "TAGS\n#task_categories-graph-ml #license-mit #arxiv-2003.00982 #region-us \n", "### Dataset Summary\n\n\nThe 'CIFAR10' dataset consists of 45000 images in 10 classes, represented as graphs.", "### Supported Tasks and Leaderboards\n\n\n'CIFAR10' should be used for multiclass graph classification.\n\n\nExternal Use\n------------", "### PyGeometric\n\n\nTo load in PyGeometric, do the following:\n\n\nDataset Structure\n-----------------", "### Data Properties", "### Data Fields\n\n\nEach row of a given file is a graph, with:\n\n\n* 'node\\_feat' (list: #nodes x #node-features): nodes\n* 'edge\\_index' (list: 2 x #edges): pairs of nodes constituting edges\n* 'edge\\_attr' (list: #edges x #edge-features): for the aforementioned edges, contains their features\n* 'y' (list: #labels): contains the number of labels available to predict\n* 'num\\_nodes' (int): number of nodes of the graph\n* 'pos' (list: 2 x #node): positional information of each node", "### Data Splits\n\n\nThis data is split. It comes from the PyGeometric version of the dataset.\n\n\nAdditional Information\n----------------------", "### Licensing Information\n\n\nThe dataset has been released under MIT license." ]
5c57b14ae6f8d48ecdf72a15dda64597bfa7d3ad
# Dataset Card for CSK ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [External Use](#external-use) - [PyGeometric](#pygeometric) - [Dataset Structure](#dataset-structure) - [Data Properties](#data-properties) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Additional Information](#additional-information) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **[Homepage](https://github.com/graphdeeplearning/benchmarking-gnns)** - **Paper:**: (see citation) ### Dataset Summary The CSL dataset is a synthetic dataset, to test GNN expressivity. ### Supported Tasks and Leaderboards `CSL` should be used for binary graph classification, on isomoprhism or not. ## External Use ### PyGeometric To load in PyGeometric, do the following: ```python from datasets import load_dataset from torch_geometric.data import Data from torch_geometric.loader import DataLoader dataset_hf = load_dataset("graphs-datasets/<mydataset>") # For the train set (replace by valid or test as needed) dataset_pg_list = [Data(graph) for graph in dataset_hf["train"]] dataset_pg = DataLoader(dataset_pg_list) ``` ## Dataset Structure ### Data Properties | property | value | |---|---| | #graphs | 150 | | average #nodes | 41.0 | | average #edges | 164.0 | ### Data Fields Each row of a given file is a graph, with: - `node_feat` (list: #nodes x #node-features): nodes - `edge_index` (list: 2 x #edges): pairs of nodes constituting edges - `edge_attr` (list: #edges x #edge-features): for the aforementioned edges, contains their features - `y` (list: #labels): contains the number of labels available to predict - `num_nodes` (int): number of nodes of the graph ### Data Splits This data is split. It comes from the PyGeometric version of the dataset. ## Additional Information ### Licensing Information The dataset has been released under MIT license. ### Citation Information ``` @article{DBLP:journals/corr/abs-2003-00982, author = {Vijay Prakash Dwivedi and Chaitanya K. Joshi and Thomas Laurent and Yoshua Bengio and Xavier Bresson}, title = {Benchmarking Graph Neural Networks}, journal = {CoRR}, volume = {abs/2003.00982}, year = {2020}, url = {https://arxiv.org/abs/2003.00982}, eprinttype = {arXiv}, eprint = {2003.00982}, timestamp = {Sat, 23 Jan 2021 01:14:30 +0100}, biburl = {https://dblp.org/rec/journals/corr/abs-2003-00982.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ```
graphs-datasets/CSL
[ "task_categories:graph-ml", "license:mit", "arxiv:2003.00982", "region:us" ]
2022-12-08T10:03:06+00:00
{"license": "mit", "task_categories": ["graph-ml"]}
2023-02-07T16:37:07+00:00
[ "2003.00982" ]
[]
TAGS #task_categories-graph-ml #license-mit #arxiv-2003.00982 #region-us
Dataset Card for CSK ==================== Table of Contents ----------------- * Table of Contents * Dataset Description + Dataset Summary + Supported Tasks and Leaderboards * External Use + PyGeometric * Dataset Structure + Data Properties + Data Fields + Data Splits * Additional Information + Licensing Information + Citation Information + Contributions Dataset Description ------------------- * Homepage * Paper:: (see citation) ### Dataset Summary The CSL dataset is a synthetic dataset, to test GNN expressivity. ### Supported Tasks and Leaderboards 'CSL' should be used for binary graph classification, on isomoprhism or not. External Use ------------ ### PyGeometric To load in PyGeometric, do the following: Dataset Structure ----------------- ### Data Properties ### Data Fields Each row of a given file is a graph, with: * 'node\_feat' (list: #nodes x #node-features): nodes * 'edge\_index' (list: 2 x #edges): pairs of nodes constituting edges * 'edge\_attr' (list: #edges x #edge-features): for the aforementioned edges, contains their features * 'y' (list: #labels): contains the number of labels available to predict * 'num\_nodes' (int): number of nodes of the graph ### Data Splits This data is split. It comes from the PyGeometric version of the dataset. Additional Information ---------------------- ### Licensing Information The dataset has been released under MIT license.
[ "### Dataset Summary\n\n\nThe CSL dataset is a synthetic dataset, to test GNN expressivity.", "### Supported Tasks and Leaderboards\n\n\n'CSL' should be used for binary graph classification, on isomoprhism or not.\n\n\nExternal Use\n------------", "### PyGeometric\n\n\nTo load in PyGeometric, do the following:\n\n\nDataset Structure\n-----------------", "### Data Properties", "### Data Fields\n\n\nEach row of a given file is a graph, with:\n\n\n* 'node\\_feat' (list: #nodes x #node-features): nodes\n* 'edge\\_index' (list: 2 x #edges): pairs of nodes constituting edges\n* 'edge\\_attr' (list: #edges x #edge-features): for the aforementioned edges, contains their features\n* 'y' (list: #labels): contains the number of labels available to predict\n* 'num\\_nodes' (int): number of nodes of the graph", "### Data Splits\n\n\nThis data is split. It comes from the PyGeometric version of the dataset.\n\n\nAdditional Information\n----------------------", "### Licensing Information\n\n\nThe dataset has been released under MIT license." ]
[ "TAGS\n#task_categories-graph-ml #license-mit #arxiv-2003.00982 #region-us \n", "### Dataset Summary\n\n\nThe CSL dataset is a synthetic dataset, to test GNN expressivity.", "### Supported Tasks and Leaderboards\n\n\n'CSL' should be used for binary graph classification, on isomoprhism or not.\n\n\nExternal Use\n------------", "### PyGeometric\n\n\nTo load in PyGeometric, do the following:\n\n\nDataset Structure\n-----------------", "### Data Properties", "### Data Fields\n\n\nEach row of a given file is a graph, with:\n\n\n* 'node\\_feat' (list: #nodes x #node-features): nodes\n* 'edge\\_index' (list: 2 x #edges): pairs of nodes constituting edges\n* 'edge\\_attr' (list: #edges x #edge-features): for the aforementioned edges, contains their features\n* 'y' (list: #labels): contains the number of labels available to predict\n* 'num\\_nodes' (int): number of nodes of the graph", "### Data Splits\n\n\nThis data is split. It comes from the PyGeometric version of the dataset.\n\n\nAdditional Information\n----------------------", "### Licensing Information\n\n\nThe dataset has been released under MIT license." ]
df19b007b610be45bbb910678e83fd87db9c78c2
# Dataset Card for "mnist_images" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Ansa00/mnist_images
[ "region:us" ]
2022-12-08T11:07:40+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "0", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6", "7": "7", "8": "8", "9": "9"}}}}], "splits": [{"name": "train", "num_bytes": 594553015.3377689, "num_examples": 51021}, {"name": "test", "num_bytes": 104426905.66123116, "num_examples": 9004}], "download_size": 523900419, "dataset_size": 698979920.9990001}}
2022-12-08T11:08:36+00:00
[]
[]
TAGS #region-us
# Dataset Card for "mnist_images" More Information needed
[ "# Dataset Card for \"mnist_images\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"mnist_images\"\n\nMore Information needed" ]
c59a3a11d2b192ec4fcc667f6c000c8436f15fd2
# Dataset Card for OrangeSum ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Repository:** [OrangeSum repository](https://github.com/Tixierae/OrangeSum) - **Paper:** [BARThez: a Skilled Pretrained French Sequence-to-Sequence Model](https://arxiv.org/abs/2010.12321) - **Point of Contact:** [Antoine J.-P. Tixier]([email protected]) ### Dataset Summary The OrangeSum dataset was inspired by the XSum dataset. It was created by scraping the "Orange Actu" website: https://actu.orange.fr/. Orange S.A. is a large French multinational telecommunications corporation, with 266M customers worldwide. Scraped pages cover almost a decade from Feb 2011 to Sep 2020. They belong to five main categories: France, world, politics, automotive, and society. The society category is itself divided into 8 subcategories: health, environment, people, culture, media, high-tech, unsual ("insolite" in French), and miscellaneous. Each article featured a single-sentence title as well as a very brief abstract, both professionally written by the author of the article. These two fields were extracted from each page, thus creating two summarization tasks: OrangeSum Title and OrangeSum Abstract. ### Supported Tasks and Leaderboards **Tasks:** OrangeSum Title and OrangeSum Abstract. To this day, there is no Leaderboard for this dataset. ### Languages The text in the dataset is in French. ## Dataset Structure ### Data Instances A data instance consists of a news article and a summary. The summary can be a short abstract or a title depending on the configuration. Example: **Document:** Le temps sera pluvieux sur huit départements de la France ces prochaines heures : outre les trois départements bretons placés en vigilance orange jeudi matin, cinq autres départements du sud du Massif Central ont été à leur tour placés en alerte orange pluie et inondation. Il s'agit de l'Aveyron, du Cantal, du Gard, de la Lozère, et de la Haute-Loire. Sur l'ensemble de l'épisode, les cumuls de pluies attendus en Bretagne sont compris entre 40 et 60 mm en 24 heures et peuvent atteindre localement les 70 mm en 24 heures.Par la suite, la dégradation qui va se mettre en place cette nuit sur le Languedoc et le sud du Massif Central va donner sur l'Aveyron une première salve intense de pluie. Des cumuls entre 70 et 100 mm voir 120 mm localement sont attendus sur une durée de 24 heures. Sur le relief des Cévennes on attend de 150 à 200 mm, voire 250 mm très ponctuellement sur l'ouest du Gard et l'est de la Lozère. 
Cet épisode va s'estomper dans la soirée avec le décalage des orages vers les régions plus au nord. Un aspect orageux se mêlera à ces précipitations, avec de la grêle possible, des rafales de vent et une forte activité électrique. **Abstract:** Outre les trois départements bretons, cinq autres départements du centre de la France ont été placés en vigilance orange pluie-inondation. **Title:** Pluie-inondations : 8 départements en alerte orange. ### Data Fields `text`: the document to be summarized. \ `summary`: the summary of the source document. ### Data Splits The data is split into a training, validation and test in both configuration. | | train | validation | test | |----------|------:|-----------:|-----:| | Abstract | 21400 | 1500 | 1500 | | Title | 30658 | 1500 | 1500 | ## Dataset Creation ### Curation Rationale The goal here was to create a French equivalent of the recently introduced [XSum](https://github.com/EdinburghNLP/XSum/tree/master/XSum-Dataset) dataset. Unlike the historical summarization datasets, CNN, DailyMail, and NY Times, which favor extractive strategies, XSum, as well as OrangeSum require the models to display a high degree of abstractivity to perform well. The summaries in OrangeSum are not catchy headlines, but rather capture the gist of the articles. ### Source Data #### Initial Data Collection and Normalization Each article features a single-sentence title as well as a very brief abstract. Extracting these two fields from each news article page, creates two summarization tasks: OrangeSum Title and OrangeSum Abstract. As a post-processing step, all empty articles and those whose summaries were shorter than 5 words were removed. For OrangeSum Abstract, the top 10% articles in terms of proportion of novel unigrams in the abstracts were removed, as it was observed that such abstracts tend to be introductions rather than real abstracts. This corresponded to a threshold of 57% novel unigrams. For both OrangeSum Title and OrangeSum Abstract, 1500 pairs for testing and 1500 for validation are set aside, and all the remaining ones are used for training. #### Who are the source language producers? The authors of the artiles. ### Annotations #### Annotation process The smmaries are professionally written by the author of the articles. #### Who are the annotators? The authors of the artiles. ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators The dataset was initially created by Antoine J.-P. Tixier. ### Licensing Information [More Information Needed] ### Citation Information ``` @article{eddine2020barthez, title={BARThez: a Skilled Pretrained French Sequence-to-Sequence Model}, author={Eddine, Moussa Kamal and Tixier, Antoine J-P and Vazirgiannis, Michalis}, journal={arXiv preprint arXiv:2010.12321}, year={2020} } ``` ### Contributions Thanks to [@moussaKam](https://github.com/moussaKam) for adding this dataset.
rlasseri/test-OrangeSum-small
[ "task_categories:summarization", "task_ids:news-articles-headline-generation", "task_ids:news-articles-summarization", "annotations_creators:found", "language_creators:found", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:original", "language:fr", "license:unknown", "arxiv:2010.12321", "region:us" ]
2022-12-08T11:33:24+00:00
{"annotations_creators": ["found"], "language_creators": ["found"], "language": ["fr"], "license": ["unknown"], "multilinguality": ["monolingual"], "size_categories": ["10K<n<100K"], "source_datasets": ["original"], "task_categories": ["summarization"], "task_ids": ["news-articles-headline-generation", "news-articles-summarization"], "paperswithcode_id": "orangesum", "pretty_name": "OrangeSum", "dataset_info": [{"config_name": "abstract", "features": [{"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 53531651, "num_examples": 21401}, {"name": "test", "num_bytes": 3785207, "num_examples": 1500}, {"name": "validation", "num_bytes": 3698650, "num_examples": 1500}], "download_size": 23058350, "dataset_size": 61015508}, {"config_name": "title", "features": [{"name": "text", "dtype": "string"}, {"name": "summary", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 65225136, "num_examples": 30659}, {"name": "test", "num_bytes": 3176690, "num_examples": 1500}, {"name": "validation", "num_bytes": 3276713, "num_examples": 1500}], "download_size": 27321627, "dataset_size": 71678539}]}
2022-12-08T13:15:18+00:00
[ "2010.12321" ]
[ "fr" ]
TAGS #task_categories-summarization #task_ids-news-articles-headline-generation #task_ids-news-articles-summarization #annotations_creators-found #language_creators-found #multilinguality-monolingual #size_categories-10K<n<100K #source_datasets-original #language-French #license-unknown #arxiv-2010.12321 #region-us
Dataset Card for OrangeSum ========================== Table of Contents ----------------- * Dataset Description + Dataset Summary + Supported Tasks and Leaderboards + Languages * Dataset Structure + Data Instances + Data Fields + Data Splits * Dataset Creation + Curation Rationale + Source Data + Annotations + Personal and Sensitive Information * Considerations for Using the Data + Social Impact of Dataset + Discussion of Biases + Other Known Limitations * Additional Information + Dataset Curators + Licensing Information + Citation Information + Contributions Dataset Description ------------------- * Repository: OrangeSum repository * Paper: BARThez: a Skilled Pretrained French Sequence-to-Sequence Model * Point of Contact: Antoine J.-P. Tixier ### Dataset Summary The OrangeSum dataset was inspired by the XSum dataset. It was created by scraping the "Orange Actu" website: URL Orange S.A. is a large French multinational telecommunications corporation, with 266M customers worldwide. Scraped pages cover almost a decade from Feb 2011 to Sep 2020. They belong to five main categories: France, world, politics, automotive, and society. The society category is itself divided into 8 subcategories: health, environment, people, culture, media, high-tech, unsual ("insolite" in French), and miscellaneous. Each article featured a single-sentence title as well as a very brief abstract, both professionally written by the author of the article. These two fields were extracted from each page, thus creating two summarization tasks: OrangeSum Title and OrangeSum Abstract. ### Supported Tasks and Leaderboards Tasks: OrangeSum Title and OrangeSum Abstract. To this day, there is no Leaderboard for this dataset. ### Languages The text in the dataset is in French. Dataset Structure ----------------- ### Data Instances A data instance consists of a news article and a summary. The summary can be a short abstract or a title depending on the configuration. Example: Document: Le temps sera pluvieux sur huit départements de la France ces prochaines heures : outre les trois départements bretons placés en vigilance orange jeudi matin, cinq autres départements du sud du Massif Central ont été à leur tour placés en alerte orange pluie et inondation. Il s'agit de l'Aveyron, du Cantal, du Gard, de la Lozère, et de la Haute-Loire. Sur l'ensemble de l'épisode, les cumuls de pluies attendus en Bretagne sont compris entre 40 et 60 mm en 24 heures et peuvent atteindre localement les 70 mm en 24 heures.Par la suite, la dégradation qui va se mettre en place cette nuit sur le Languedoc et le sud du Massif Central va donner sur l'Aveyron une première salve intense de pluie. Des cumuls entre 70 et 100 mm voir 120 mm localement sont attendus sur une durée de 24 heures. Sur le relief des Cévennes on attend de 150 à 200 mm, voire 250 mm très ponctuellement sur l'ouest du Gard et l'est de la Lozère. Cet épisode va s'estomper dans la soirée avec le décalage des orages vers les régions plus au nord. Un aspect orageux se mêlera à ces précipitations, avec de la grêle possible, des rafales de vent et une forte activité électrique. Abstract: Outre les trois départements bretons, cinq autres départements du centre de la France ont été placés en vigilance orange pluie-inondation. Title: Pluie-inondations : 8 départements en alerte orange. ### Data Fields 'text': the document to be summarized. 'summary': the summary of the source document. ### Data Splits The data is split into a training, validation and test in both configuration. 
Dataset Creation ---------------- ### Curation Rationale The goal here was to create a French equivalent of the recently introduced XSum dataset. Unlike the historical summarization datasets, CNN, DailyMail, and NY Times, which favor extractive strategies, XSum, as well as OrangeSum require the models to display a high degree of abstractivity to perform well. The summaries in OrangeSum are not catchy headlines, but rather capture the gist of the articles. ### Source Data #### Initial Data Collection and Normalization Each article features a single-sentence title as well as a very brief abstract. Extracting these two fields from each news article page, creates two summarization tasks: OrangeSum Title and OrangeSum Abstract. As a post-processing step, all empty articles and those whose summaries were shorter than 5 words were removed. For OrangeSum Abstract, the top 10% articles in terms of proportion of novel unigrams in the abstracts were removed, as it was observed that such abstracts tend to be introductions rather than real abstracts. This corresponded to a threshold of 57% novel unigrams. For both OrangeSum Title and OrangeSum Abstract, 1500 pairs for testing and 1500 for validation are set aside, and all the remaining ones are used for training. #### Who are the source language producers? The authors of the artiles. ### Annotations #### Annotation process The smmaries are professionally written by the author of the articles. #### Who are the annotators? The authors of the artiles. ### Personal and Sensitive Information Considerations for Using the Data --------------------------------- ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations Additional Information ---------------------- ### Dataset Curators The dataset was initially created by Antoine J.-P. Tixier. ### Licensing Information ### Contributions Thanks to @moussaKam for adding this dataset.
[ "### Dataset Summary\n\n\nThe OrangeSum dataset was inspired by the XSum dataset. It was created by scraping the \"Orange Actu\" website: URL Orange S.A. is a large French multinational telecommunications corporation, with 266M customers worldwide. Scraped pages cover almost a decade from Feb 2011 to Sep 2020. They belong to five main categories: France, world, politics, automotive, and society. The society category is itself divided into 8 subcategories: health, environment, people, culture, media, high-tech, unsual (\"insolite\" in French), and miscellaneous.\nEach article featured a single-sentence title as well as a very brief abstract, both professionally written by the author of the article. These two fields were extracted from each page, thus creating two summarization tasks: OrangeSum Title and OrangeSum Abstract.", "### Supported Tasks and Leaderboards\n\n\nTasks: OrangeSum Title and OrangeSum Abstract.\nTo this day, there is no Leaderboard for this dataset.", "### Languages\n\n\nThe text in the dataset is in French.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA data instance consists of a news article and a summary. The summary can be a short abstract or a title depending on the configuration.\nExample:\nDocument: Le temps sera pluvieux sur huit départements de la France ces prochaines heures : outre les trois départements bretons placés en vigilance orange jeudi matin, cinq autres départements du sud du Massif Central ont été à leur tour placés en alerte orange pluie et inondation. Il s'agit de l'Aveyron, du Cantal, du Gard, de la Lozère, et de la Haute-Loire. Sur l'ensemble de l'épisode, les cumuls de pluies attendus en Bretagne sont compris entre 40 et 60 mm en 24 heures et peuvent atteindre localement les 70 mm en 24 heures.Par la suite, la dégradation qui va se mettre en place cette nuit sur le Languedoc et le sud du Massif Central va donner sur l'Aveyron une première salve intense de pluie. Des cumuls entre 70 et 100 mm voir 120 mm localement sont attendus sur une durée de 24 heures. Sur le relief des Cévennes on attend de 150 à 200 mm, voire 250 mm très ponctuellement sur l'ouest du Gard et l'est de la Lozère. Cet épisode va s'estomper dans la soirée avec le décalage des orages vers les régions plus au nord. Un aspect orageux se mêlera à ces précipitations, avec de la grêle possible, des rafales de vent et une forte activité électrique.\nAbstract: Outre les trois départements bretons, cinq autres départements du centre de la France ont été placés en vigilance orange pluie-inondation.\nTitle: Pluie-inondations : 8 départements en alerte orange.", "### Data Fields\n\n\n'text': the document to be summarized. \n\n'summary': the summary of the source document.", "### Data Splits\n\n\nThe data is split into a training, validation and test in both configuration.\n\n\n\nDataset Creation\n----------------", "### Curation Rationale\n\n\nThe goal here was to create a French equivalent of the recently introduced XSum dataset. Unlike the historical summarization datasets, CNN, DailyMail, and NY Times, which favor extractive strategies, XSum, as well as OrangeSum require the models to display a high degree of abstractivity to perform well. The summaries in OrangeSum are not catchy headlines, but rather capture the gist of the articles.", "### Source Data", "#### Initial Data Collection and Normalization\n\n\nEach article features a single-sentence title as well as a very brief abstract. 
Extracting these two fields from each news article page, creates two summarization tasks: OrangeSum Title and OrangeSum Abstract. As a post-processing step, all empty articles and those whose summaries were shorter than 5 words were removed. For OrangeSum Abstract, the top 10% articles in terms of proportion of novel unigrams in the abstracts were removed, as it was observed that such abstracts tend to be introductions rather than real abstracts. This corresponded to a threshold of 57% novel unigrams. For both OrangeSum Title and OrangeSum Abstract, 1500 pairs for testing and 1500 for validation are set aside, and all the remaining ones are used for training.", "#### Who are the source language producers?\n\n\nThe authors of the artiles.", "### Annotations", "#### Annotation process\n\n\nThe smmaries are professionally written by the author of the articles.", "#### Who are the annotators?\n\n\nThe authors of the artiles.", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators\n\n\nThe dataset was initially created by Antoine J.-P. Tixier.", "### Licensing Information", "### Contributions\n\n\nThanks to @moussaKam for adding this dataset." ]
[ "TAGS\n#task_categories-summarization #task_ids-news-articles-headline-generation #task_ids-news-articles-summarization #annotations_creators-found #language_creators-found #multilinguality-monolingual #size_categories-10K<n<100K #source_datasets-original #language-French #license-unknown #arxiv-2010.12321 #region-us \n", "### Dataset Summary\n\n\nThe OrangeSum dataset was inspired by the XSum dataset. It was created by scraping the \"Orange Actu\" website: URL Orange S.A. is a large French multinational telecommunications corporation, with 266M customers worldwide. Scraped pages cover almost a decade from Feb 2011 to Sep 2020. They belong to five main categories: France, world, politics, automotive, and society. The society category is itself divided into 8 subcategories: health, environment, people, culture, media, high-tech, unsual (\"insolite\" in French), and miscellaneous.\nEach article featured a single-sentence title as well as a very brief abstract, both professionally written by the author of the article. These two fields were extracted from each page, thus creating two summarization tasks: OrangeSum Title and OrangeSum Abstract.", "### Supported Tasks and Leaderboards\n\n\nTasks: OrangeSum Title and OrangeSum Abstract.\nTo this day, there is no Leaderboard for this dataset.", "### Languages\n\n\nThe text in the dataset is in French.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA data instance consists of a news article and a summary. The summary can be a short abstract or a title depending on the configuration.\nExample:\nDocument: Le temps sera pluvieux sur huit départements de la France ces prochaines heures : outre les trois départements bretons placés en vigilance orange jeudi matin, cinq autres départements du sud du Massif Central ont été à leur tour placés en alerte orange pluie et inondation. Il s'agit de l'Aveyron, du Cantal, du Gard, de la Lozère, et de la Haute-Loire. Sur l'ensemble de l'épisode, les cumuls de pluies attendus en Bretagne sont compris entre 40 et 60 mm en 24 heures et peuvent atteindre localement les 70 mm en 24 heures.Par la suite, la dégradation qui va se mettre en place cette nuit sur le Languedoc et le sud du Massif Central va donner sur l'Aveyron une première salve intense de pluie. Des cumuls entre 70 et 100 mm voir 120 mm localement sont attendus sur une durée de 24 heures. Sur le relief des Cévennes on attend de 150 à 200 mm, voire 250 mm très ponctuellement sur l'ouest du Gard et l'est de la Lozère. Cet épisode va s'estomper dans la soirée avec le décalage des orages vers les régions plus au nord. Un aspect orageux se mêlera à ces précipitations, avec de la grêle possible, des rafales de vent et une forte activité électrique.\nAbstract: Outre les trois départements bretons, cinq autres départements du centre de la France ont été placés en vigilance orange pluie-inondation.\nTitle: Pluie-inondations : 8 départements en alerte orange.", "### Data Fields\n\n\n'text': the document to be summarized. \n\n'summary': the summary of the source document.", "### Data Splits\n\n\nThe data is split into a training, validation and test in both configuration.\n\n\n\nDataset Creation\n----------------", "### Curation Rationale\n\n\nThe goal here was to create a French equivalent of the recently introduced XSum dataset. Unlike the historical summarization datasets, CNN, DailyMail, and NY Times, which favor extractive strategies, XSum, as well as OrangeSum require the models to display a high degree of abstractivity to perform well. 
The summaries in OrangeSum are not catchy headlines, but rather capture the gist of the articles.", "### Source Data", "#### Initial Data Collection and Normalization\n\n\nEach article features a single-sentence title as well as a very brief abstract. Extracting these two fields from each news article page, creates two summarization tasks: OrangeSum Title and OrangeSum Abstract. As a post-processing step, all empty articles and those whose summaries were shorter than 5 words were removed. For OrangeSum Abstract, the top 10% articles in terms of proportion of novel unigrams in the abstracts were removed, as it was observed that such abstracts tend to be introductions rather than real abstracts. This corresponded to a threshold of 57% novel unigrams. For both OrangeSum Title and OrangeSum Abstract, 1500 pairs for testing and 1500 for validation are set aside, and all the remaining ones are used for training.", "#### Who are the source language producers?\n\n\nThe authors of the artiles.", "### Annotations", "#### Annotation process\n\n\nThe smmaries are professionally written by the author of the articles.", "#### Who are the annotators?\n\n\nThe authors of the artiles.", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators\n\n\nThe dataset was initially created by Antoine J.-P. Tixier.", "### Licensing Information", "### Contributions\n\n\nThanks to @moussaKam for adding this dataset." ]
212fdac8fad8ea663ddc3f110fcfc3734e2b5983
# Dataset Card for AQSOL ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [External Use](#external-use) - [PyGeometric](#pygeometric) - [Dataset Structure](#dataset-structure) - [Data Properties](#data-properties) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Additional Information](#additional-information) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **[Homepage](https://github.com/graphdeeplearning/benchmarking-gnns)** - **Paper:**: (see citation) ### Dataset Summary The AQSOL dataset comes "from the Benchmarking Graph Neural Networks paper based on AqSolDB, a standardized database of 9,982 molecular graphs with their aqueous solubility values, collected from 9 different data sources" (PyGeometric doc). ### Supported Tasks and Leaderboards `AQSOL` should be used for graph regression, on aqueous solubility. ## External Use ### PyGeometric To load in PyGeometric, do the following: ```python from datasets import load_dataset from torch_geometric.data import Data from torch_geometric.loader import DataLoader dataset_hf = load_dataset("graphs-datasets/<mydataset>") # For the train set (replace by valid or test as needed) dataset_pg_list = [Data(graph) for graph in dataset_hf["train"]] dataset_pg = DataLoader(dataset_pg_list) ``` ## Dataset Structure ### Data Properties | property | value | |---|---| | #graphs | 9,833 | | average #nodes | 17.6 | | average #edges | 35.8 | ### Data Fields Each row of a given file is a graph, with: - `node_feat` (list: #nodes x #node-features): nodes - `edge_index` (list: 2 x #edges): pairs of nodes constituting edges - `edge_attr` (list: #edges x #edge-features): for the aforementioned edges, contains their features - `y` (list: #labels): contains the number of labels available to predict - `num_nodes` (int): number of nodes of the graph ### Data Splits This data is split. It comes from the PyGeometric version of the dataset. ## Additional Information ### Licensing Information The dataset has been released under MIT license. ### Citation Information ``` @article{DBLP:journals/corr/abs-2003-00982, author = {Vijay Prakash Dwivedi and Chaitanya K. Joshi and Thomas Laurent and Yoshua Bengio and Xavier Bresson}, title = {Benchmarking Graph Neural Networks}, journal = {CoRR}, volume = {abs/2003.00982}, year = {2020}, url = {https://arxiv.org/abs/2003.00982}, eprinttype = {arXiv}, eprint = {2003.00982}, timestamp = {Sat, 23 Jan 2021 01:14:30 +0100}, biburl = {https://dblp.org/rec/journals/corr/abs-2003-00982.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ```
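Note on the PyGeometric snippet above: it wraps each raw row (a plain Python dict of lists) directly in `Data`. Depending on the PyTorch Geometric version, the fields may first need to be converted to tensors. The sketch below is one way to do that, assuming the `graphs-datasets/AQSOL` Hub identifier and the field names listed under "Data Fields"; the dtype choices are illustrative assumptions.

```python
import torch
from datasets import load_dataset
from torch_geometric.data import Data
from torch_geometric.loader import DataLoader

dataset_hf = load_dataset("graphs-datasets/AQSOL")

def to_pyg(graph):
    # Convert the raw lists of each row into the tensors that Data expects.
    return Data(
        x=torch.tensor(graph["node_feat"], dtype=torch.float),
        edge_index=torch.tensor(graph["edge_index"], dtype=torch.long),
        edge_attr=torch.tensor(graph["edge_attr"], dtype=torch.float),
        y=torch.tensor(graph["y"], dtype=torch.float),
    )

# For the train split (replace by valid or test as needed, following the card above).
train_list = [to_pyg(g) for g in dataset_hf["train"]]
train_loader = DataLoader(train_list, batch_size=32, shuffle=True)
```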
graphs-datasets/AQSOL
[ "task_categories:graph-ml", "license:mit", "arxiv:2003.00982", "region:us" ]
2022-12-08T11:54:55+00:00
{"license": "mit", "task_categories": ["graph-ml"]}
2023-02-07T16:36:58+00:00
[ "2003.00982" ]
[]
TAGS #task_categories-graph-ml #license-mit #arxiv-2003.00982 #region-us
Dataset Card for AQSOL ====================== Table of Contents ----------------- * Table of Contents * Dataset Description + Dataset Summary + Supported Tasks and Leaderboards * External Use + PyGeometric * Dataset Structure + Data Properties + Data Fields + Data Splits * Additional Information + Licensing Information + Citation Information + Contributions Dataset Description ------------------- * Homepage * Paper:: (see citation) ### Dataset Summary The AQSOL dataset comes "from the Benchmarking Graph Neural Networks paper based on AqSolDB, a standardized database of 9,982 molecular graphs with their aqueous solubility values, collected from 9 different data sources" (PyGeometric doc). ### Supported Tasks and Leaderboards 'AQSOL' should be used for graph regression, on aqueous solubility. External Use ------------ ### PyGeometric To load in PyGeometric, do the following: Dataset Structure ----------------- ### Data Properties ### Data Fields Each row of a given file is a graph, with: * 'node\_feat' (list: #nodes x #node-features): nodes * 'edge\_index' (list: 2 x #edges): pairs of nodes constituting edges * 'edge\_attr' (list: #edges x #edge-features): for the aforementioned edges, contains their features * 'y' (list: #labels): contains the number of labels available to predict * 'num\_nodes' (int): number of nodes of the graph ### Data Splits This data is split. It comes from the PyGeometric version of the dataset. Additional Information ---------------------- ### Licensing Information The dataset has been released under MIT license.
[ "### Dataset Summary\n\n\nThe AQSOL dataset comes \"from the Benchmarking Graph Neural Networks paper based on AqSolDB, a standardized database of 9,982 molecular graphs with their aqueous solubility values, collected from 9 different data sources\" (PyGeometric doc).", "### Supported Tasks and Leaderboards\n\n\n'AQSOL' should be used for graph regression, on aqueous solubility.\n\n\nExternal Use\n------------", "### PyGeometric\n\n\nTo load in PyGeometric, do the following:\n\n\nDataset Structure\n-----------------", "### Data Properties", "### Data Fields\n\n\nEach row of a given file is a graph, with:\n\n\n* 'node\\_feat' (list: #nodes x #node-features): nodes\n* 'edge\\_index' (list: 2 x #edges): pairs of nodes constituting edges\n* 'edge\\_attr' (list: #edges x #edge-features): for the aforementioned edges, contains their features\n* 'y' (list: #labels): contains the number of labels available to predict\n* 'num\\_nodes' (int): number of nodes of the graph", "### Data Splits\n\n\nThis data is split. It comes from the PyGeometric version of the dataset.\n\n\nAdditional Information\n----------------------", "### Licensing Information\n\n\nThe dataset has been released under MIT license." ]
[ "TAGS\n#task_categories-graph-ml #license-mit #arxiv-2003.00982 #region-us \n", "### Dataset Summary\n\n\nThe AQSOL dataset comes \"from the Benchmarking Graph Neural Networks paper based on AqSolDB, a standardized database of 9,982 molecular graphs with their aqueous solubility values, collected from 9 different data sources\" (PyGeometric doc).", "### Supported Tasks and Leaderboards\n\n\n'AQSOL' should be used for graph regression, on aqueous solubility.\n\n\nExternal Use\n------------", "### PyGeometric\n\n\nTo load in PyGeometric, do the following:\n\n\nDataset Structure\n-----------------", "### Data Properties", "### Data Fields\n\n\nEach row of a given file is a graph, with:\n\n\n* 'node\\_feat' (list: #nodes x #node-features): nodes\n* 'edge\\_index' (list: 2 x #edges): pairs of nodes constituting edges\n* 'edge\\_attr' (list: #edges x #edge-features): for the aforementioned edges, contains their features\n* 'y' (list: #labels): contains the number of labels available to predict\n* 'num\\_nodes' (int): number of nodes of the graph", "### Data Splits\n\n\nThis data is split. It comes from the PyGeometric version of the dataset.\n\n\nAdditional Information\n----------------------", "### Licensing Information\n\n\nThe dataset has been released under MIT license." ]
c9d5f8777f487ccd1e1fdddb73c2b325ccdc1844
# wikipedia persons masked: A filtered version of the wikipedia dataset, with only pages of people ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary Contains ~70k pages from wikipedia, each describing a person. For each page, the person described in the text is masked with a <mask> token. The ground truth for every mask is provided. ### Supported Tasks and Leaderboards The dataset supports the tasks of fill-mask, but can also be used for other tasks such as question answering, e.g. "Who is <mask>?" ### Languages *english only* ## Dataset Structure There is one large dataset file (dataset.jsonl.xz), containing all data. Use the dataset like this: ```python from datasets import load_dataset dataset = load_dataset('rcds/wikipedia-persons-masked') ``` ### Data Fields Columns are: - id: the id in the original dataset - url: the link to the wikipedia page - title: the title of the wikipedia page - text: the original wikipedia text - sentences: text split to sentences - paraphrased_sentences: text split to sentences, with each sentence paraphrased (e.g. mutated a bit) - masked_text_original: original text with entity masked in every occurrence - masked_entities_original: array of entities masked in masked_text_original - masked_text_paraphrased: paraphrased text with entity masked in every occurrence - masked_entities_paraphrased: array of entities masked in masked_text_paraphrased ### Data Splits There are no splits. ## Dataset Creation This dataset was created by using the wikipedia dataset from huggingface and processing it from there. People were queried via wikidata. The texts were split with nltk punkt, paraphrased with tuner007's pegasus. The entity recognition was performed with bert-base-NER by dslim and recognized entities replaced with a mask token. ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators?
[More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information ``` TODO add citation ``` ### Contributions Thanks to [@skatinger](https://github.com/skatinger) for adding this dataset.
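For illustration, a hedged usage sketch building on the loading snippet above: the column names follow the "Data Fields" list, while the `train` split name and streaming mode are assumptions (the card states there are no explicit splits).

```python
from datasets import load_dataset

# Streaming avoids downloading the full dump just to inspect a few rows.
dataset = load_dataset("rcds/wikipedia-persons-masked", split="train", streaming=True)

row = next(iter(dataset))

# Each <mask> occurrence in the masked text lines up with an entry in the entity list,
# which serves as the ground truth for fill-mask or "Who is <mask>?" style evaluation.
masked = row["masked_text_original"]
entities = row["masked_entities_original"]
print(masked[:300])
print("ground truth for the first masks:", entities[:5])
```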
rcds/wikipedia-persons-masked
[ "task_categories:fill-mask", "annotations_creators:other", "language_creators:found", "multilinguality:multilingual", "size_categories:10M<n<100M", "source_datasets:original", "language:en", "license:cc-by-4.0", "region:us" ]
2022-12-08T13:51:30+00:00
{"annotations_creators": ["other"], "language_creators": ["found"], "language": ["en"], "license": ["cc-by-4.0"], "multilinguality": ["multilingual"], "size_categories": ["10M<n<100M"], "source_datasets": ["original"], "task_categories": ["fill-mask"], "pretty_name": "wikipedia persons masked: A filtered version of the wikipedia dataset, with only pages of people."}
2022-12-14T08:19:17+00:00
[]
[ "en" ]
TAGS #task_categories-fill-mask #annotations_creators-other #language_creators-found #multilinguality-multilingual #size_categories-10M<n<100M #source_datasets-original #language-English #license-cc-by-4.0 #region-us
# wikipedia persons masked: A filtered version of the wikipedia dataset, with only pages of people ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: - Repository: - Paper: - Leaderboard: - Point of Contact: ### Dataset Summary Contains ~70k pages from wikipedia, each describing a person. For each page, the person described in the text is masked with a <mask> token. The ground truth for every mask is provided. ### Supported Tasks and Leaderboards The dataset supports the tasks of fill-mask, but can also be used for other tasks such as question answering, e.g. "Who is <mask>?" ### Languages *english only* ## Dataset Structure There is one large dataset file (URL), containing all data. Use the dataset like this: ### Data Fields Columns are: - id: the id in the original dataset - url: the link to the wikipedia page - title: the title of the wikipedia page - text: the original wikipedia text - sentences: text split to sentences - paraphrased_sentences: text split to sentences, with each sentence paraphrased (e.g. mutated a bit) - masked_text_original: original text with entity masked in every occurence ( - masked_entities_original: array of entities masked in masked_text_original - masked_text_paraphrased: paraphrased text with entity masked in every occurence - masked_entities_paraphrased: array of entities msked in masked_text_paraphrased ### Data Splits There are no splits. ## Dataset Creation This dataset was created by using the wikipedia dataset from huggingface and processing it from there. People were queried via wikidata. The texts were split with nltk punkt, paraphrased with tuner007's pegasus. The entity recognition was performed with bert-base-NER by dslim and recognized entities replaced with a mask token. ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions Thanks to @skatinger for adding this dataset.
[ "# wikipedia persons masked: A filtered version of the wikipedia dataset, with only pages of people", "## Table of Contents\n\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- Repository: \n- Paper: \n- Leaderboard:\n- Point of Contact:", "### Dataset Summary\n\nContains ~70k pages from wikipedia, each describing a person. For each page, the person described in the text\nis masked with a <mask> token. The ground truth for every mask is provided.", "### Supported Tasks and Leaderboards\n\nThe dataset supports the tasks of fill-mask, but can also be used for other tasks such as question answering,\ne.g. \"Who is <mask>?\"", "### Languages\n\n*english only*", "## Dataset Structure\n\nThere is one large dataset file (URL), containing all data.\n\nUse the dataset like this:", "### Data Fields\n\nColumns are:\n- id: the id in the original dataset\n- url: the link to the wikipedia page\n- title: the title of the wikipedia page\n- text: the original wikipedia text\n- sentences: text split to sentences\n- paraphrased_sentences: text split to sentences, with each sentence paraphrased (e.g. mutated a bit)\n- masked_text_original: original text with entity masked in every occurence (\n- masked_entities_original: array of entities masked in masked_text_original\n- masked_text_paraphrased: paraphrased text with entity masked in every occurence\n- masked_entities_paraphrased: array of entities msked in masked_text_paraphrased", "### Data Splits\n\nThere are no splits.", "## Dataset Creation\n\nThis dataset was created by using the wikipedia dataset from huggingface and processing it from there.\nPeople were queried via wikidata. The texts were split with nltk punkt, paraphrased with tuner007's pegasus.\nThe entity recognition was performed with bert-base-NER by dslim and recognized entities replaced with a mask token.", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @skatinger for adding this dataset." ]
[ "TAGS\n#task_categories-fill-mask #annotations_creators-other #language_creators-found #multilinguality-multilingual #size_categories-10M<n<100M #source_datasets-original #language-English #license-cc-by-4.0 #region-us \n", "# wikipedia persons masked: A filtered version of the wikipedia dataset, with only pages of people", "## Table of Contents\n\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- Repository: \n- Paper: \n- Leaderboard:\n- Point of Contact:", "### Dataset Summary\n\nContains ~70k pages from wikipedia, each describing a person. For each page, the person described in the text\nis masked with a <mask> token. The ground truth for every mask is provided.", "### Supported Tasks and Leaderboards\n\nThe dataset supports the tasks of fill-mask, but can also be used for other tasks such as question answering,\ne.g. \"Who is <mask>?\"", "### Languages\n\n*english only*", "## Dataset Structure\n\nThere is one large dataset file (URL), containing all data.\n\nUse the dataset like this:", "### Data Fields\n\nColumns are:\n- id: the id in the original dataset\n- url: the link to the wikipedia page\n- title: the title of the wikipedia page\n- text: the original wikipedia text\n- sentences: text split to sentences\n- paraphrased_sentences: text split to sentences, with each sentence paraphrased (e.g. mutated a bit)\n- masked_text_original: original text with entity masked in every occurence (\n- masked_entities_original: array of entities masked in masked_text_original\n- masked_text_paraphrased: paraphrased text with entity masked in every occurence\n- masked_entities_paraphrased: array of entities msked in masked_text_paraphrased", "### Data Splits\n\nThere are no splits.", "## Dataset Creation\n\nThis dataset was created by using the wikipedia dataset from huggingface and processing it from there.\nPeople were queried via wikidata. The texts were split with nltk punkt, paraphrased with tuner007's pegasus.\nThe entity recognition was performed with bert-base-NER by dslim and recognized entities replaced with a mask token.", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @skatinger for adding this dataset." ]
f5ada01a737a001acfd5fb2e993e05499e1b0426
Filterings applied on top of near-dedup + line filtering: * Comments filtering (at least 1% of the lines should be comments/docstrings) * Stars filtering (minimum of 5 stars) Resulting sizes per language: |Language |Before filtering | Stars | Comments ratio| More near-dedup| Tokenizer fertility| |-------|--------|-------|--------|--------|--------| |Python | 75.61 GB| 26.56 GB |65.64 GB | 61.97 GB | 72.52 GB | |Java | 110 GB |35.83 GB |92.7 GB | 88.42 GB | 105.47 GB | |Javascript | 82.7 GB |20.76 GB |57.5 GB| 65.09 GB | 76.37 GB |
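As an illustration of the comments criterion above, the sketch below approximates a per-file comment/docstring line ratio for Python source and keeps files at or above the 1% threshold. The exact heuristics behind the table are not specified here, so this is an assumed approximation, not the original pipeline.

```python
import ast
import tokenize
from io import StringIO

def comment_ratio(source: str) -> float:
    """Approximate fraction of lines that are comments or docstrings."""
    lines = source.splitlines() or [""]
    commented = set()
    # '#' comments, found via the tokenizer.
    try:
        for tok in tokenize.generate_tokens(StringIO(source).readline):
            if tok.type == tokenize.COMMENT:
                commented.add(tok.start[0])
    except (tokenize.TokenError, IndentationError):
        pass
    # Docstrings, found via the AST (module, class and function bodies).
    try:
        for node in ast.walk(ast.parse(source)):
            if isinstance(node, (ast.Module, ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)):
                if ast.get_docstring(node, clean=False) is not None:
                    first = node.body[0]
                    commented.update(range(first.lineno, first.end_lineno + 1))
    except SyntaxError:
        pass
    return len(commented) / len(lines)

demo = '"""Module docstring."""\nx = 1  # inline comment\ny = 2\n'
print(comment_ratio(demo) >= 0.01)  # True -> this file would be kept
```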
loubnabnl/data-filtering-statistics
[ "region:us" ]
2022-12-08T15:01:09+00:00
{}
2022-12-14T13:32:36+00:00
[]
[]
TAGS #region-us
Filterings on top of near-dedup + line filtering: * Comments filtering (at least 1% of the number of lines should be comments/docstrings) * Stars filtering (minimum of 5 stars) (on top of near-dedup + line filtering)
[]
[ "TAGS\n#region-us \n" ]
911fc6def91ac58a3c3e402fae1e2fd36c4dba13
# Dataset Card for "olm-CC-MAIN-2022-40-sampling-ratio-0.15894621295-perplexity-filters" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Tristan/olm-CC-MAIN-2022-40-sampling-ratio-0.15894621295-perplexity-filters
[ "region:us" ]
2022-12-08T16:05:42+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "crawl_timestamp", "dtype": "float64"}, {"name": "kenlm_ppl", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 33197245533.0, "num_examples": 14558171}], "download_size": 20748879886, "dataset_size": 33197245533.0}}
2022-12-08T22:11:48+00:00
[]
[]
TAGS #region-us
# Dataset Card for "olm-CC-MAIN-2022-40-sampling-ratio-0.15894621295-perplexity-filters" More Information needed
[ "# Dataset Card for \"olm-CC-MAIN-2022-40-sampling-ratio-0.15894621295-perplexity-filters\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"olm-CC-MAIN-2022-40-sampling-ratio-0.15894621295-perplexity-filters\"\n\nMore Information needed" ]
fa86c72a1a9dd4dd334d9893dcd23dba444990ef
# ULCA ASR Dataset Malayalam Speech Corpus The labelled Malayalam speech subcorpus from the larger [ULCA ASR Corpus](https://github.com/Open-Speech-EkStep/ULCA-asr-dataset-corpus). The speech is taken from news broadcasts, and is largely composed of short soundbites with some longer outliers. ## Dataset Description - **Repository:** [https://github.com/Open-Speech-EkStep/ULCA-asr-dataset-corpus](https://github.com/Open-Speech-EkStep/ULCA-asr-dataset-corpus)
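Below is a minimal, hedged loading sketch; it assumes the `thennal/ulca_ml` Hub identifier of this repository and the `audio`/`text` columns listed in the dataset info, and casts the audio to 16 kHz, as many ASR pipelines expect.

```python
from datasets import Audio, load_dataset

ds = load_dataset("thennal/ulca_ml", split="train")

# Resample on the fly if the source rate differs from what the ASR model expects.
ds = ds.cast_column("audio", Audio(sampling_rate=16_000))

sample = ds[0]
print(sample["text"])
print(sample["audio"]["sampling_rate"], len(sample["audio"]["array"]))
```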
thennal/ulca_ml
[ "task_categories:automatic-speech-recognition", "annotations_creators:expert-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:1K<n<10K", "language:ml", "license:cc-by-4.0", "region:us" ]
2022-12-08T16:51:02+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["found"], "language": ["ml"], "license": ["cc-by-4.0"], "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "source_datasets": [], "task_categories": ["automatic-speech-recognition"], "task_ids": [], "pretty_name": "ULCA ASR Dataset Malayalam Speech Corpus", "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "audio", "dtype": "audio"}], "splits": [{"name": "train", "num_bytes": 1117843608.106, "num_examples": 8614}], "download_size": 321100264, "dataset_size": 1117843608.106}, "tags": []}
2022-12-08T17:15:07+00:00
[]
[ "ml" ]
TAGS #task_categories-automatic-speech-recognition #annotations_creators-expert-generated #language_creators-found #multilinguality-monolingual #size_categories-1K<n<10K #language-Malayalam #license-cc-by-4.0 #region-us
# ULCA ASR Dataset Malayalam Speech Corpus The labelled Malayalam speech subcorpus from the larger ULCA ASR Corpus. The speech is taken from news broadcasts, and is largely composed of short soundbites with some longer outliers. ## Dataset Description - Repository: URL
[ "# ULCA ASR Dataset Malayalam Speech Corpus\nThe labelled Malayalam speech subcorpus from the larger ULCA ASR Corpus.\nThe speech is taken from news broadcasts, and is largely composed of short soundbites with some longer outliers.", "## Dataset Description\n- Repository: URL" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #annotations_creators-expert-generated #language_creators-found #multilinguality-monolingual #size_categories-1K<n<10K #language-Malayalam #license-cc-by-4.0 #region-us \n", "# ULCA ASR Dataset Malayalam Speech Corpus\nThe labelled Malayalam speech subcorpus from the larger ULCA ASR Corpus.\nThe speech is taken from news broadcasts, and is largely composed of short soundbites with some longer outliers.", "## Dataset Description\n- Repository: URL" ]
61210a16d010f73fd531393567f4bb467c10f4d0
# Dataset Card for NoCaps ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [https://nocaps.org/](https://nocaps.org/) - **Paper:** [nocaps: novel object captioning at scale](https://openaccess.thecvf.com/content_ICCV_2019/papers/Agrawal_nocaps_novel_object_captioning_at_scale_ICCV_2019_paper.pdf) - **Leaderboard:** - **Point of Contact:**: [email protected] ### Dataset Summary Dubbed NoCaps for novel object captioning at scale, NoCaps consists of 166,100 human-generated captions describing 15,100 images from the Open Images validation and test sets. The associated training data consists of COCO image-caption pairs, plus Open Images image-level labels and object bounding boxes. Since Open Images contains many more classes than COCO, nearly 400 object classes seen in test images have no or very few associated training captions (hence, nocaps). ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances Each instance has the following structure: ``` { 'image': <PIL.JpegImagePlugin.JpegImageFile image mode=L size=732x1024 at 0x7F574A3A9B50>, 'image_coco_url': 'https://s3.amazonaws.com/nocaps/val/0013ea2087020901.jpg', 'image_date_captured': '2018-11-06 11:04:33', 'image_file_name': '0013ea2087020901.jpg', 'image_height': 1024, 'image_width': 732, 'image_id': 0, 'image_license': 0, 'image_open_images_id': '0013ea2087020901', 'annotations_ids': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 'annotations_captions': [ 'A baby is standing in front of a house.', 'A little girl in a white jacket and sandals.', 'A young child stands in front of a house.', 'A child is wearing a white shirt and standing on a side walk. ', 'A little boy is standing in his diaper with a white shirt on.', 'A child wearing a diaper and shoes stands on the sidewalk.', 'A child is wearing a light-colored shirt during the daytime.', 'A little kid standing on the pavement in a shirt. 
', 'Black and white photo of a little girl smiling.', 'a cute baby is standing alone with white shirt' ] } ``` ### Data Fields - `image`: The image - `image_coco_url`: URL for the image - `image_date_captured`: Date at which the image was captured - `image_file_name`: The file name for the image - `image_height`: Height of the image - `image_width`: Width of the image - `image_id`: Id of the image - `image_license`: Not sure what this is, it is always at 0 - `image_open_images_id`: Open image id - `annotations_ids`: Unique ids for the captions (to use in conjunction with `annotations_captions`) - `annotations_captions`: Captions for the image (to use in conjunction with `annotations_ids`) ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@VictorSanh](https://github.com/VictorSanh) for adding this dataset.
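A hedged loading sketch for orientation: the field names are taken from the instance structure above, while the `HuggingFaceM4/NoCaps` identifier, the `validation` split name, and streaming mode are assumptions to be checked against the actual repository layout.

```python
from datasets import load_dataset

dataset = load_dataset("HuggingFaceM4/NoCaps", split="validation", streaming=True)

example = next(iter(dataset))

# Pair each caption with its annotation id, as described under "Data Fields".
for ann_id, caption in zip(example["annotations_ids"], example["annotations_captions"]):
    print(ann_id, caption)
```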
HuggingFaceM4/NoCaps
[ "license:cc-by-2.0", "region:us" ]
2022-12-08T17:11:21+00:00
{"license": "cc-by-2.0"}
2022-12-14T04:08:38+00:00
[]
[]
TAGS #license-cc-by-2.0 #region-us
# Dataset Card for NoCaps ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: URL - Paper: nocaps: novel object captioning at scale - Leaderboard: - Point of Contact:: contact@URL ### Dataset Summary Dubbed NoCaps for novel object captioning at scale, NoCaps consists of 166,100 human-generated captions describing 15,100 images from the Open Images validation and test sets. The associated training data consists of COCO image-caption pairs, plus Open Images image-level labels and object bounding boxes. Since Open Images contains many more classes than COCO, nearly 400 object classes seen in test images have no or very few associated training captions (hence, nocaps). ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances Each instance has the following structure: ### Data Fields - 'image': The image - 'image_coco_url': URL for the image - 'image_date_captured': Date at which the image was captured - 'image_file_name': The file name for the image - 'image_height': Height of the image - 'image_width': Width of the image - 'image_id': Id of the image - 'image_license': Not sure what this is, it is always at 0 - 'image_open_images_id': Open image id - 'annotations_ids': Unique ids for the captions (to use in conjunction with 'annotations_captions') - 'annotations_captions': Captions for the image (to use in conjunction with 'annotations_ids') ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions Thanks to @VictorSanh for adding this dataset.
[ "# Dataset Card for NoCaps", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: URL\n- Paper: nocaps: novel object captioning at scale\n- Leaderboard:\n- Point of Contact:: contact@URL", "### Dataset Summary\n\nDubbed NoCaps for novel object captioning at scale, NoCaps consists of 166,100 human-generated captions describing 15,100 images from the Open Images validation and test sets.\nThe associated training data consists of COCO image-caption pairs, plus Open Images image-level labels and object bounding boxes.\nSince Open Images contains many more classes than COCO, nearly 400 object classes seen in test images have no or very few associated training captions (hence, nocaps).", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances\n\nEach instance has the following structure:", "### Data Fields\n\n- 'image': The image\n- 'image_coco_url': URL for the image\n- 'image_date_captured': Date at which the image was captured\n- 'image_file_name': The file name for the image\n- 'image_height': Height of the image\n- 'image_width': Width of the image\n- 'image_id': Id of the image\n- 'image_license': Not sure what this is, it is always at 0\n- 'image_open_images_id': Open image id\n- 'annotations_ids': Unique ids for the captions (to use in conjunction with 'annotations_captions')\n- 'annotations_captions': Captions for the image (to use in conjunction with 'annotations_ids')", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @VictorSanh for adding this dataset." ]
[ "TAGS\n#license-cc-by-2.0 #region-us \n", "# Dataset Card for NoCaps", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: URL\n- Paper: nocaps: novel object captioning at scale\n- Leaderboard:\n- Point of Contact:: contact@URL", "### Dataset Summary\n\nDubbed NoCaps for novel object captioning at scale, NoCaps consists of 166,100 human-generated captions describing 15,100 images from the Open Images validation and test sets.\nThe associated training data consists of COCO image-caption pairs, plus Open Images image-level labels and object bounding boxes.\nSince Open Images contains many more classes than COCO, nearly 400 object classes seen in test images have no or very few associated training captions (hence, nocaps).", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances\n\nEach instance has the following structure:", "### Data Fields\n\n- 'image': The image\n- 'image_coco_url': URL for the image\n- 'image_date_captured': Date at which the image was captured\n- 'image_file_name': The file name for the image\n- 'image_height': Height of the image\n- 'image_width': Width of the image\n- 'image_id': Id of the image\n- 'image_license': Not sure what this is, it is always at 0\n- 'image_open_images_id': Open image id\n- 'annotations_ids': Unique ids for the captions (to use in conjunction with 'annotations_captions')\n- 'annotations_captions': Captions for the image (to use in conjunction with 'annotations_ids')", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @VictorSanh for adding this dataset." ]
eac3151081115101714032dbb9ef4d577b2d84b2
# AutoTrain Dataset for project: test_row2 ## Dataset Description This dataset has been automatically processed by AutoTrain for project test_row2. ### Languages The BCP-47 code for the dataset's language is unk. ## Dataset Structure ### Data Instances A sample from this dataset looks as follows: ```json [ { "image": "<316x316 RGB PIL image>", "target": 1 }, { "image": "<316x316 RGB PIL image>", "target": 3 } ] ``` ### Dataset Fields The dataset has the following fields (also called "features"): ```json { "image": "Image(decode=True, id=None)", "target": "ClassLabel(num_classes=5, names=['animals', 'dance', 'food', 'sport', 'tech'], id=None)" } ``` ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follow: | Split name | Num samples | | ------------ | ------------------- | | train | 392 | | valid | 101 |
Efimov6886/autotrain-data-test_row2
[ "task_categories:image-classification", "region:us" ]
2022-12-08T18:38:32+00:00
{"task_categories": ["image-classification"]}
2022-12-08T18:39:45+00:00
[]
[]
TAGS #task_categories-image-classification #region-us
AutoTrain Dataset for project: test\_row2 ========================================= Dataset Description ------------------- This dataset has been automatically processed by AutoTrain for project test\_row2. ### Languages The BCP-47 code for the dataset's language is unk. Dataset Structure ----------------- ### Data Instances A sample from this dataset looks as follows: ### Dataset Fields The dataset has the following fields (also called "features"): ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follow:
[ "### Languages\n\n\nThe BCP-47 code for the dataset's language is unk.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
[ "TAGS\n#task_categories-image-classification #region-us \n", "### Languages\n\n\nThe BCP-47 code for the dataset's language is unk.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
09be8c5bbc57cb3887f3a9732ad6aa7ec602a1fa
# Dataset Card for HH-RLHF ## Dataset Summary This repository provides access to two different kinds of data: 1. Human preference data about helpfulness and harmlessness from [Training a Helpful and Harmless Assistant with Reinforcement Learning from Human Feedback](https://arxiv.org/abs/2204.05862). These data are meant to train preference (or reward) models for subsequent RLHF training. These data are *not* meant for supervised training of dialogue agents. Training dialogue agents on these data is likely to lead to harmful models and this should be avoided. 2. Human-generated and annotated red teaming dialogues from [Red Teaming Language Models to Reduce Harms: Methods, Scaling Behaviors, and Lessons Learned](https://www.anthropic.com/red_teaming.pdf). These data are meant to understand how crowdworkers red team models and what types of red team attacks are successful or not. The data are *not* meant for fine-tuning or preference modeling (use the data above for preference modeling). These data are entire transcripts of conversations that are derived from the harmlessness preference modeling data described above, where only the chosen response is incorporated into the overall transcript. Furthermore, the transcripts are annotated with human and automated measurements of how harmful the overall dialogues are. **Disclaimer**: The data (especially the harmlessness preference data and the red team data) contain content that may be offensive or upsetting. Topics include, but are not limited to, discriminatory language and discussions of abuse, violence, self-harm, exploitation, and other potentially upsetting subject matter. Please only engage with the data in accordance with your own personal risk tolerance. The data are intended for research purposes, especially research that can make models *less* harmful. The views expressed in the data do not reflect the views of Anthropic or any of its employees. As mentioned above, these data are *not* intended for training dialogue agents as this will likely lead to harmful model behavior. Each of these datasets is described further below. ## Human preference data about helpfulness and harmlessness (PM Data) The data are described in the paper: [Training a Helpful and Harmless Assistant with Reinforcement Learning from Human Feedback](https://arxiv.org/abs/2204.05862). If you find the data useful, please cite the paper. The data format is very simple -- each line of the jsonl files contains a pair of texts, one "chosen" and one "rejected". For **helpfulness**, the data are grouped into train/test splits in three tranches: from our base models (context-distilled 52B language models), via rejection sampling (mostly with best-of-16 sampling) against an early preference model, and a dataset sampled during our iterated "online" process. For **harmlessness**, the data are only collected for our base models, but otherwise formatted in the same way. Details about the data collection process and crowdworker population can be found in the paper, specifically in section 2 and appendix D. ## Red teaming data (not PM Data) The data are described in the paper: [Red Teaming Language Models to Reduce Harms: Methods, Scaling Behaviors, and Lessons Learned](https://www.anthropic.com/red_teaming.pdf). If you find the data useful, please cite the paper. At a high level these data can be examined to understand what successful red team attempts look like. Details about the data and data collection procedures can be found in the Datasheet in the appendix of the paper.
Each line of the jsonl file contains a dictionary with the following fields: - `transcript` a text transcript of a conversation between a human adversary (red team member) and an AI assistant - `min_harmlessness_score_transcript` a real value score of the harmlessness of the AI assistant (lower is more harmful) as obtained from a preference model - `num_params` number of parameters in the language model powering the AI assistant - `model_type` type of model powering the AI assistant - `rating` the red team member's rating of how successful they were at breaking the AI assistant (Likert scale, higher is more successful) - `task_description` a short text description written by the red team member about how they tried to red team the AI assistant - `task_description_harmlessness_score` a real value score of the harmlessness of the task description (lower is more harmful) as obtained from a preference model - `red_team_member_id` an arbitrary identifier of the red team member. one red team member can generate multiple red team attacks - `is_upworker` a binary indicator that is true if the red team member was from the crowd platform Upwork or false if they were from MTurk - `tags` a list of up to 6 tags per transcript. tags are short descriptions of the red team attempts generated by crowdworkers who reviewed red team data post-hoc. tags were only provided for a random sample of 1000 red team attempts for two of four model types. ## Usage Each of the above datasets is located in a separate sub-directory. To load an individual subset, use the `data_dir` argument of the `load_dataset()` function as follows: ```python from datasets import load_dataset # Load all helpfulness/harmless subsets (share the same schema) dataset = load_dataset("Anthropic/hh-rlhf") # Load one of the harmless subsets dataset = load_dataset("Anthropic/hh-rlhf", data_dir="harmless-base") # Load the red teaming subset dataset = load_dataset("Anthropic/hh-rlhf", data_dir="red-team-attempts") ``` ## Contact The original authors host this dataset on GitHub here: https://github.com/anthropics/hh-rlhf You can submit inquiries to: [email protected]
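As a small illustration of the two formats described above, the sketch below loads one preference pair and filters the red-team transcripts by the worker's `rating`; the `chosen`/`rejected` column names follow the "pair of texts" description, the `train` split name is assumed, and the rating cutoff is purely illustrative.

```python
from datasets import load_dataset

# Preference-modeling data: each row is a "chosen"/"rejected" pair of dialogue transcripts.
harmless = load_dataset("Anthropic/hh-rlhf", data_dir="harmless-base", split="train")
pair = harmless[0]
print(pair["chosen"][:200])
print(pair["rejected"][:200])

# Red-team transcripts: keep only attempts the worker rated as most successful (illustrative cutoff).
red_team = load_dataset("Anthropic/hh-rlhf", data_dir="red-team-attempts", split="train")
successful = red_team.filter(lambda row: row["rating"] >= 4.0)
print(len(successful), "of", len(red_team), "attempts with rating >= 4")
```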
Anthropic/hh-rlhf
[ "license:mit", "human-feedback", "arxiv:2204.05862", "region:us" ]
2022-12-08T20:11:33+00:00
{"license": "mit", "tags": ["human-feedback"]}
2023-05-26T17:47:34+00:00
[ "2204.05862" ]
[]
TAGS #license-mit #human-feedback #arxiv-2204.05862 #region-us
# Dataset Card for HH-RLHF ## Dataset Summary This repository provides access to two different kinds of data: 1. Human preference data about helpfulness and harmlessness from Training a Helpful and Harmless Assistant with Reinforcement Learning from Human Feedback. These data are meant to train preference (or reward) models for subsequent RLHF training. These data are *not* meant for supervised training of dialogue agents. Training dialogue agents on these data is likely to lead to harmful models and this shold be avoided. 2. Human-generated and annotated red teaming dialogues from Red Teaming Language Models to Reduce Harms: Methods, Scaling Behaviors, and Lessons Learned. These data are meant to understand how crowdworkers red team models and what types of red team attacks are succesful or not. The data are *not* meant for fine-tuning or preference modeling (use the data above for preference modeling). These data are entire transcripts of conversations that are derived from the harmlessness preference modeling data described above, where only the chosen response is incorporated into the overall transcript. Furthermore, the transcripts are annotated with human and automated measurements of how harmful the overall dialogues are. Disclaimer: The data (especially the harmlessness preference data and the red team data) contain content that may be offensive or upsetting. Topics include, but are not limited to, discriminatory language and discussions of abuse, violence, self-harm, exploitation, and other potentially upsetting subject matter. Please only engage with the data in accordance with your own personal risk tolerance. The data are intended for research purposes, especially research that can make models *less* harmful. The views expressed in the data do not reflect the views of Anthropic or any of its employees. As mentioned above, these data are *not* intended for training dialogue agents as this will likely lead to harmful model behavior. Each of these datasets are described further below. ## Human preference data about helpfulness and harmlessness (PM Data) The data are described in the paper: Training a Helpful and Harmless Assistant with Reinforcement Learning from Human Feedback. If you find the data useful, please cite the paper. The data format is very simple -- each line of the jsonl files contains a pair of texts, one "chosen" and one "rejected". For helpfulness, the data are grouped into train/test splits in three tranches: from our base models (context-distilled 52B language models), via rejection sampling (mostly with best-of-16 sampling) against an early preference model, and a dataset sampled during our iterated "online" process. For harmlessness, the data are only collected for our base models, but otherwise formatted in the same way. Details about the data collection process and crowdworker population can be found in the paper, specifically in section 2 and appendix D. ## Red teaming data (not PM Data) The data are described in the paper: Red Teaming Language Models to Reduce Harms: Methods, Scaling Behaviors, and Lessons Learned. If you find the data useful, please cite the paper. At a high level these data can be examined to understand what succesful red team attempts look like. Details about the data and data collection procedures can be found in the Datasheet in the appendix of the paper. 
Each line of the jsonl file contains a dictionary with the following fields: - 'transcript' a text transcript of a conversation between a human adversary (red team member) and an AI assistant - 'min_harmlessness_score_transcript' a real value score of the harmlessness of the AI assistant (lower is more harmful) as obtained from a preference model - 'num_params' number of parameters in the language model powering the AI assistant - 'model_type' type of model powering the AI assistant - 'rating' the red team member's rating of how successful they were at breaking the AI assistant (Likert scale, higher is more successful) - 'task_description' a short text description written by the red team member about how they tried to red team the AI assistant - 'task_description_harmlessness_score' a real value score of the harmlessness of the task description (lower is more harmful) as obtained from a preference model - 'red_team_member_id' an arbitrary identifier of the red team member. one red team member can generate multiple red team attacks - 'is_upworker' a binary indicator that is true if the red team member was from the crowd platform Upwork or false if they were from MTurk - 'tags' a list of up to 6 tags per transcript. tags are short descriptions of the red team attempts generated by crowdworkers who reviewed red team data post-hoc. tags were only provided for a random sample of 1000 red team attempts for two of four model types. ## Usage Each of the above datasets is located in a separate sub-directory. To load an individual subset, use the 'data_dir' argument of the 'load_dataset()' function as follows: ## Contact The original authors host this dataset on GitHub here: URL You can submit inquiries to: redteam@URL
[ "# Dataset Card for HH-RLHF", "## Dataset Summary\n\n\nThis repository provides access to two different kinds of data:\n1. Human preference data about helpfulness and harmlessness from Training a Helpful and Harmless Assistant with Reinforcement Learning from Human Feedback. These data are meant to train preference (or reward) models for subsequent RLHF training. These data are *not* meant for supervised training of dialogue agents. Training dialogue agents on these data is likely to lead to harmful models and this shold be avoided.\n2. Human-generated and annotated red teaming dialogues from Red Teaming Language Models to Reduce Harms: Methods, Scaling Behaviors, and Lessons Learned. These data are meant to understand how crowdworkers red team models and what types of red team attacks are succesful or not. The data are *not* meant for fine-tuning or preference modeling (use the data above for preference modeling). These data are entire transcripts of conversations that are derived from the harmlessness preference modeling data described above, where only the chosen response is incorporated into the overall transcript. Furthermore, the transcripts are annotated with human and automated measurements of how harmful the overall dialogues are.\n\nDisclaimer: The data (especially the harmlessness preference data and the red team data) contain content that may be offensive or upsetting. Topics include, but are not limited to, discriminatory language and discussions of abuse, violence, self-harm, exploitation, and other potentially upsetting subject matter. Please only engage with the data in accordance with your own personal risk tolerance. The data are intended for research purposes, especially research that can make models *less* harmful. The views expressed in the data do not reflect the views of Anthropic or any of its employees. As mentioned above, these data are *not* intended for training dialogue agents as this will likely lead to harmful model behavior. \n\nEach of these datasets are described further below.", "## Human preference data about helpfulness and harmlessness (PM Data)\n\nThe data are described in the paper: Training a Helpful and Harmless Assistant with Reinforcement Learning from Human Feedback. If you find the data useful, please cite the paper. The data format is very simple -- each line of the jsonl files contains a pair of texts, one \"chosen\" and one \"rejected\".\n\nFor helpfulness, the data are grouped into train/test splits in three tranches: from our base models (context-distilled 52B language models), via rejection sampling (mostly with best-of-16 sampling) against an early preference model, and a dataset sampled during our iterated \"online\" process. \n\nFor harmlessness, the data are only collected for our base models, but otherwise formatted in the same way.\n\nDetails about the data collection process and crowdworker population can be found in the paper, specifically in section 2 and appendix D.", "## Red teaming data (not PM Data)\n\nThe data are described in the paper: Red Teaming Language Models to Reduce Harms: Methods, Scaling Behaviors, and Lessons Learned. If you find the data useful, please cite the paper. At a high level these data can be examined to understand what succesful red team attempts look like. \n\nDetails about the data and data collection procedures can be found in the Datasheet in the appendix of the paper. 
\n\nEach line of the jsonl file contains a dictionary with the following fields:\n- 'transcript' a text transcript of a conversation between a human adversary (red team member) and an AI assistant\n- 'min_harmlessness_score_transcript' a real value score of the harmlessness of the AI assistant (lower is more harmful) as obtained from a preference model\n- 'num_params' number of parameters in the language model powering the AI assistant\n- 'model_type' type of model powering the AI assistant\n- 'rating' the red team member's rating of how successful they were at breaking the AI assistant (Likert scale, higher is more successful) \n- 'task_description' a short text description written by the red team member about how they tried to red team the AI assistant\n- 'task_description_harmlessness_score' a real value score of the harmlessness of the task description (lower is more harmful) as obtained from a preference model\n- 'red_team_member_id' an arbitrary identifier of the red team member. one red team member can generate multiple red team attacks\n- 'is_upworker' a binary indicator that is true if the red team member was from the crowd platform Upwork or false if they were from MTurk\n- 'tags' a list of up to 6 tags per transcript. tags are short descriptions of the red team attempts generated by crowdworkers who reviewed red team data post-hoc. tags were only provided for a random sample of 1000 red team attempts for two of four model types.", "## Usage\n\nEach of the above datasets is located in a separate sub-directory. To load an individual subset, use the 'data_dir' argument of the 'load_dataset()' function as follows:", "## Contact\n\nThe original authors host this dataset on GitHub here: URL\nYou can submit inquiries to: redteam@URL" ]
[ "TAGS\n#license-mit #human-feedback #arxiv-2204.05862 #region-us \n", "# Dataset Card for HH-RLHF", "## Dataset Summary\n\n\nThis repository provides access to two different kinds of data:\n1. Human preference data about helpfulness and harmlessness from Training a Helpful and Harmless Assistant with Reinforcement Learning from Human Feedback. These data are meant to train preference (or reward) models for subsequent RLHF training. These data are *not* meant for supervised training of dialogue agents. Training dialogue agents on these data is likely to lead to harmful models and this shold be avoided.\n2. Human-generated and annotated red teaming dialogues from Red Teaming Language Models to Reduce Harms: Methods, Scaling Behaviors, and Lessons Learned. These data are meant to understand how crowdworkers red team models and what types of red team attacks are succesful or not. The data are *not* meant for fine-tuning or preference modeling (use the data above for preference modeling). These data are entire transcripts of conversations that are derived from the harmlessness preference modeling data described above, where only the chosen response is incorporated into the overall transcript. Furthermore, the transcripts are annotated with human and automated measurements of how harmful the overall dialogues are.\n\nDisclaimer: The data (especially the harmlessness preference data and the red team data) contain content that may be offensive or upsetting. Topics include, but are not limited to, discriminatory language and discussions of abuse, violence, self-harm, exploitation, and other potentially upsetting subject matter. Please only engage with the data in accordance with your own personal risk tolerance. The data are intended for research purposes, especially research that can make models *less* harmful. The views expressed in the data do not reflect the views of Anthropic or any of its employees. As mentioned above, these data are *not* intended for training dialogue agents as this will likely lead to harmful model behavior. \n\nEach of these datasets are described further below.", "## Human preference data about helpfulness and harmlessness (PM Data)\n\nThe data are described in the paper: Training a Helpful and Harmless Assistant with Reinforcement Learning from Human Feedback. If you find the data useful, please cite the paper. The data format is very simple -- each line of the jsonl files contains a pair of texts, one \"chosen\" and one \"rejected\".\n\nFor helpfulness, the data are grouped into train/test splits in three tranches: from our base models (context-distilled 52B language models), via rejection sampling (mostly with best-of-16 sampling) against an early preference model, and a dataset sampled during our iterated \"online\" process. \n\nFor harmlessness, the data are only collected for our base models, but otherwise formatted in the same way.\n\nDetails about the data collection process and crowdworker population can be found in the paper, specifically in section 2 and appendix D.", "## Red teaming data (not PM Data)\n\nThe data are described in the paper: Red Teaming Language Models to Reduce Harms: Methods, Scaling Behaviors, and Lessons Learned. If you find the data useful, please cite the paper. At a high level these data can be examined to understand what succesful red team attempts look like. \n\nDetails about the data and data collection procedures can be found in the Datasheet in the appendix of the paper. 
\n\nEach line of the jsonl file contains a dictionary with the following fields:\n- 'transcript' a text transcript of a conversation between a human adversary (red team member) and an AI assistant\n- 'min_harmlessness_score_transcript' a real value score of the harmlessness of the AI assistant (lower is more harmful) as obtained from a preference model\n- 'num_params' number of parameters in the language model powering the AI assistant\n- 'model_type' type of model powering the AI assistant\n- 'rating' the red team member's rating of how successful they were at breaking the AI assistant (Likert scale, higher is more successful) \n- 'task_description' a short text description written by the red team member about how they tried to red team the AI assistant\n- 'task_description_harmlessness_score' a real value score of the harmlessness of the task description (lower is more harmful) as obtained from a preference model\n- 'red_team_member_id' an arbitrary identifier of the red team member. one red team member can generate multiple red team attacks\n- 'is_upworker' a binary indicator that is true if the red team member was from the crowd platform Upwork or false if they were from MTurk\n- 'tags' a list of up to 6 tags per transcript. tags are short descriptions of the red team attempts generated by crowdworkers who reviewed red team data post-hoc. tags were only provided for a random sample of 1000 red team attempts for two of four model types.", "## Usage\n\nEach of the above datasets is located in a separate sub-directory. To load an individual subset, use the 'data_dir' argument of the 'load_dataset()' function as follows:", "## Contact\n\nThe original authors host this dataset on GitHub here: URL\nYou can submit inquiries to: redteam@URL" ]
5f0b123d95c1f7ec984f37c735cfbe6fb3b3ed32
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: ARTeLab/it5-summarization-ilpost * Dataset: samsum * Config: samsum * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate/autoeval-eval-samsum-samsum-417ba9-2386774734
[ "autotrain", "evaluation", "region:us" ]
2022-12-08T21:36:12+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["samsum"], "eval_info": {"task": "summarization", "model": "ARTeLab/it5-summarization-ilpost", "metrics": [], "dataset_name": "samsum", "dataset_config": "samsum", "dataset_split": "test", "col_mapping": {"text": "dialogue", "target": "summary"}}}
2022-12-08T21:37:23+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: ARTeLab/it5-summarization-ilpost * Dataset: samsum * Config: samsum * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @lewtun for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: ARTeLab/it5-summarization-ilpost\n* Dataset: samsum\n* Config: samsum\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: ARTeLab/it5-summarization-ilpost\n* Dataset: samsum\n* Config: samsum\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
f97781f2633909095e80f70c3915863503cfb29c
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: ARTeLab/it5-summarization-mlsum * Dataset: samsum * Config: samsum * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate/autoeval-eval-samsum-samsum-417ba9-2386774735
[ "autotrain", "evaluation", "region:us" ]
2022-12-08T21:36:17+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["samsum"], "eval_info": {"task": "summarization", "model": "ARTeLab/it5-summarization-mlsum", "metrics": [], "dataset_name": "samsum", "dataset_config": "samsum", "dataset_split": "test", "col_mapping": {"text": "dialogue", "target": "summary"}}}
2022-12-08T21:37:29+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: ARTeLab/it5-summarization-mlsum * Dataset: samsum * Config: samsum * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @lewtun for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: ARTeLab/it5-summarization-mlsum\n* Dataset: samsum\n* Config: samsum\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: ARTeLab/it5-summarization-mlsum\n* Dataset: samsum\n* Config: samsum\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
2237fe0b5a22dc59cc2fab497c2e23c5974d6f90
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: IlyaGusev/rut5_base_headline_gen_telegram * Dataset: samsum * Config: samsum * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate/autoeval-eval-samsum-samsum-417ba9-2386774736
[ "autotrain", "evaluation", "region:us" ]
2022-12-08T21:36:22+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["samsum"], "eval_info": {"task": "summarization", "model": "IlyaGusev/rut5_base_headline_gen_telegram", "metrics": [], "dataset_name": "samsum", "dataset_config": "samsum", "dataset_split": "test", "col_mapping": {"text": "dialogue", "target": "summary"}}}
2022-12-08T21:39:06+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: IlyaGusev/rut5_base_headline_gen_telegram * Dataset: samsum * Config: samsum * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @lewtun for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: IlyaGusev/rut5_base_headline_gen_telegram\n* Dataset: samsum\n* Config: samsum\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: IlyaGusev/rut5_base_headline_gen_telegram\n* Dataset: samsum\n* Config: samsum\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
3fc3fbb59d389efdd0cdba1d857723827bb714ee
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: NYTK/summarization-hi-bart-base-1024-hungarian * Dataset: samsum * Config: samsum * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate/autoeval-eval-samsum-samsum-417ba9-2386774737
[ "autotrain", "evaluation", "region:us" ]
2022-12-08T21:36:28+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["samsum"], "eval_info": {"task": "summarization", "model": "NYTK/summarization-hi-bart-base-1024-hungarian", "metrics": [], "dataset_name": "samsum", "dataset_config": "samsum", "dataset_split": "test", "col_mapping": {"text": "dialogue", "target": "summary"}}}
2022-12-08T21:37:11+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: NYTK/summarization-hi-bart-base-1024-hungarian * Dataset: samsum * Config: samsum * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @lewtun for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: NYTK/summarization-hi-bart-base-1024-hungarian\n* Dataset: samsum\n* Config: samsum\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: NYTK/summarization-hi-bart-base-1024-hungarian\n* Dataset: samsum\n* Config: samsum\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
3a983a44766c2e8f54fad6e0d26ef56dcc1a2145
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: RUCAIBox/mtl-summarization * Dataset: samsum * Config: samsum * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate/autoeval-eval-samsum-samsum-417ba9-2386774738
[ "autotrain", "evaluation", "region:us" ]
2022-12-08T21:36:33+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["samsum"], "eval_info": {"task": "summarization", "model": "RUCAIBox/mtl-summarization", "metrics": [], "dataset_name": "samsum", "dataset_config": "samsum", "dataset_split": "test", "col_mapping": {"text": "dialogue", "target": "summary"}}}
2022-12-08T21:38:04+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: RUCAIBox/mtl-summarization * Dataset: samsum * Config: samsum * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @lewtun for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: RUCAIBox/mtl-summarization\n* Dataset: samsum\n* Config: samsum\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: RUCAIBox/mtl-summarization\n* Dataset: samsum\n* Config: samsum\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
eb75c19b4ad982e530ec9d5ab748c22ff723a9a0
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: ahmeddbahaa/xlmroberta-finetune-en-cnn * Dataset: samsum * Config: samsum * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate/autoeval-eval-samsum-samsum-417ba9-2386774739
[ "autotrain", "evaluation", "region:us" ]
2022-12-08T21:36:39+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["samsum"], "eval_info": {"task": "summarization", "model": "ahmeddbahaa/xlmroberta-finetune-en-cnn", "metrics": [], "dataset_name": "samsum", "dataset_config": "samsum", "dataset_split": "test", "col_mapping": {"text": "dialogue", "target": "summary"}}}
2022-12-08T21:38:43+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: ahmeddbahaa/xlmroberta-finetune-en-cnn * Dataset: samsum * Config: samsum * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @lewtun for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: ahmeddbahaa/xlmroberta-finetune-en-cnn\n* Dataset: samsum\n* Config: samsum\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: ahmeddbahaa/xlmroberta-finetune-en-cnn\n* Dataset: samsum\n* Config: samsum\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
709728f1e88a0b4ed44744d7a1713738d69de8a9
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: YtBig/subtitle-v2 * Dataset: samsum * Config: samsum * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate/autoeval-eval-samsum-samsum-417ba9-2386774740
[ "autotrain", "evaluation", "region:us" ]
2022-12-08T21:36:45+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["samsum"], "eval_info": {"task": "summarization", "model": "YtBig/subtitle-v2", "metrics": [], "dataset_name": "samsum", "dataset_config": "samsum", "dataset_split": "test", "col_mapping": {"text": "dialogue", "target": "summary"}}}
2022-12-08T21:38:22+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: YtBig/subtitle-v2 * Dataset: samsum * Config: samsum * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @lewtun for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: YtBig/subtitle-v2\n* Dataset: samsum\n* Config: samsum\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: YtBig/subtitle-v2\n* Dataset: samsum\n* Config: samsum\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @lewtun for evaluating this model." ]
97c5aa6f8d88245a473868f8d9b7740fd88b1e6c
# Dataset Card for "dataset-dalle" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
SDbiaseval/jobs-dalle-2
[ "region:us" ]
2022-12-08T21:42:37+00:00
{"dataset_info": {"features": [{"name": "adjective", "dtype": "string"}, {"name": "profession", "dtype": "string"}, {"name": "no", "dtype": "int32"}, {"name": "image_path", "dtype": "string"}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 24806949244.5, "num_examples": 31500}], "download_size": 18481427451, "dataset_size": 24806949244.5}}
2022-12-09T07:49:38+00:00
[]
[]
TAGS #region-us
# Dataset Card for "dataset-dalle" More Information needed
[ "# Dataset Card for \"dataset-dalle\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"dataset-dalle\"\n\nMore Information needed" ]
d5517d7703c33f70e9e621e571a50899e5c0d0fe
This dataset was created from Mozilla's Common Voice dataset for the purposes of Multilingual ASR on Kinyarwanda and English. The dataset contains 3000 hours of multilingual training samples, 300 hours of validation samples and 200 hours of testing samples.
molamin/Kinyarwanda_Engligh_Multilingual_ASR
[ "size_categories:700K<n<800K", "size_categories:~3120 hours", "language:rw", "language:en", "license:cc-by-4.0", "region:us" ]
2022-12-08T21:59:16+00:00
{"language": ["rw", "en"], "license": ["cc-by-4.0"], "size_categories": ["700K<n<800K", "~3120 hours"]}
2022-12-09T07:39:29+00:00
[]
[ "rw", "en" ]
TAGS #size_categories-700K<n<800K #size_categories-~3120 hours #language-Kinyarwanda #language-English #license-cc-by-4.0 #region-us
This dataset was created from Mozilla's Common Voice dataset for the purposes of Multilingual ASR on Kinyarwanda and English. The dataset contains 3000 hours of multilingual training samples, 300 hours of validation samples and 200 hours of testing samples.
[]
[ "TAGS\n#size_categories-700K<n<800K #size_categories-~3120 hours #language-Kinyarwanda #language-English #license-cc-by-4.0 #region-us \n" ]
36a0ef1274ca64b80ab38b59f93608037b4c6390
# Dataset Card for "cantonese_processed_daily" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tilos/cantonese_processed_daily
[ "region:us" ]
2022-12-08T22:40:55+00:00
{"dataset_info": {"features": [{"name": "input_features", "sequence": {"sequence": "float32"}}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 3899650216, "num_examples": 4060}], "download_size": 623179139, "dataset_size": 3899650216}}
2022-12-08T22:41:45+00:00
[]
[]
TAGS #region-us
# Dataset Card for "cantonese_processed_daily" More Information needed
[ "# Dataset Card for \"cantonese_processed_daily\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"cantonese_processed_daily\"\n\nMore Information needed" ]
480186ac8cde3997a74b6395590420b7ec4ee267
# Dataset Card for [Dataset Name] ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [https://github.com/joojs/fairface](https://github.com/joojs/fairface) - **Repository:** [https://github.com/joojs/fairface](https://github.com/joojs/fairface) - **Paper:** [https://openaccess.thecvf.com/content/WACV2021/papers/Karkkainen_FairFace_Face_Attribute_Dataset_for_Balanced_Race_Gender_and_Age_WACV_2021_paper.pdf](https://openaccess.thecvf.com/content/WACV2021/papers/Karkkainen_FairFace_Face_Attribute_Dataset_for_Balanced_Race_Gender_and_Age_WACV_2021_paper.pdf) - **Leaderboard:** - **Point of Contact:** ### Dataset Summary FairFace is a face image dataset which is race balanced. It contains 108,501 images from 7 different race groups: White, Black, Indian, East Asian, Southeast Asian, Middle Eastern, and Latino. Images were collected from the YFCC-100M Flickr dataset and labeled with race, gender, and age groups. ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances Each instance has the following structure: ``` { 'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=448x448 at 0x7FCABA221FA0>, 'age': 6, 'gender': 0, 'race': 0, 'service_test': True } ``` ### Data Fields - `image`: The image - `age`: Age class among `["0-2", "3-9", "10-19", "20-29", "30-39", "40-49", "50-59", "60-69", "more than 70"]` - `gender`: Gender class among `["Male", "Female"]` - `race`: Race class among `["East Asian", "Indian", "Black", "White", "Middle Eastern", "Latino_Hispanic", "Southeast Asian"]` - `service_test`: Not sure what this is. See [issue](https://github.com/joojs/fairface/issues/9). ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? 
[More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@VictorSanh](https://github.com/VictorSanh) for adding this dataset.
HuggingFaceM4/FairFace
[ "license:cc-by-4.0", "region:us" ]
2022-12-08T23:00:45+00:00
{"license": "cc-by-4.0"}
2022-12-09T00:14:46+00:00
[]
[]
TAGS #license-cc-by-4.0 #region-us
# Dataset Card for [Dataset Name] ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: URL - Repository: URL - Paper: URL - Leaderboard: - Point of Contact: ### Dataset Summary FairFace is a face image dataset which is race balanced. It contains 108,501 images from 7 different race groups: White, Black, Indian, East Asian, Southeast Asian, Middle Eastern, and Latino. Images were collected from the YFCC-100M Flickr dataset and labeled with race, gender, and age groups. ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances Each instance has the following structure: ### Data Fields - 'image': The image - 'age': Age class among '["0-2", "3-9", "10-19", "20-29", "30-39", "40-49", "50-59", "60-69", "more than 70"]' - 'gender': Gender class among '["Male", "Female"]' - 'race': Race class among '["East Asian", "Indian", "Black", "White", "Middle Eastern", "Latino_Hispanic", "Southeast Asian"]' - 'service_test': Not sure what this is. See issue. ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions Thanks to @VictorSanh for adding this dataset.
[ "# Dataset Card for [Dataset Name]", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: URL\n- Repository: URL\n- Paper: URL\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary\n\nFairFace is a face image dataset which is race balanced. It contains 108,501 images from 7 different race groups: White, Black, Indian, East Asian, Southeast Asian, Middle Eastern, and Latino.\nImages were collected from the YFCC-100M Flickr dataset and labeled with race, gender, and age groups.", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances\n\nEach instance has the following structure:", "### Data Fields\n\n- 'image': The image\n- 'age': Age class among '[\"0-2\", \"3-9\", \"10-19\", \"20-29\", \"30-39\", \"40-49\", \"50-59\", \"60-69\", \"more than 70\"]'\n- 'gender': Gender class among '[\"Male\", \"Female\"]'\n- 'race': Race class among '[\"East Asian\", \"Indian\", \"Black\", \"White\", \"Middle Eastern\", \"Latino_Hispanic\", \"Southeast Asian\"]'\n- 'service_test': Not sure what this is. See issue.", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @VictorSanh for adding this dataset." ]
[ "TAGS\n#license-cc-by-4.0 #region-us \n", "# Dataset Card for [Dataset Name]", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: URL\n- Repository: URL\n- Paper: URL\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary\n\nFairFace is a face image dataset which is race balanced. It contains 108,501 images from 7 different race groups: White, Black, Indian, East Asian, Southeast Asian, Middle Eastern, and Latino.\nImages were collected from the YFCC-100M Flickr dataset and labeled with race, gender, and age groups.", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances\n\nEach instance has the following structure:", "### Data Fields\n\n- 'image': The image\n- 'age': Age class among '[\"0-2\", \"3-9\", \"10-19\", \"20-29\", \"30-39\", \"40-49\", \"50-59\", \"60-69\", \"more than 70\"]'\n- 'gender': Gender class among '[\"Male\", \"Female\"]'\n- 'race': Race class among '[\"East Asian\", \"Indian\", \"Black\", \"White\", \"Middle Eastern\", \"Latino_Hispanic\", \"Southeast Asian\"]'\n- 'service_test': Not sure what this is. See issue.", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @VictorSanh for adding this dataset." ]
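The FairFace card above gives an example instance with integer codes for `age`, `gender`, and `race`, together with the class lists those codes refer to. The sketch below decodes such an instance back to strings; it assumes the integers index the class lists in the order the card gives them and that a `train` split exists, neither of which is stated explicitly in the record.

```python
from datasets import load_dataset

# Class lists copied from the Data Fields section of the FairFace card;
# the assumption is that the integer labels index these lists in order.
AGE = ["0-2", "3-9", "10-19", "20-29", "30-39", "40-49", "50-59", "60-69", "more than 70"]
GENDER = ["Male", "Female"]
RACE = ["East Asian", "Indian", "Black", "White", "Middle Eastern",
        "Latino_Hispanic", "Southeast Asian"]

ds = load_dataset("HuggingFaceM4/FairFace", split="train")  # split name is an assumption
ex = ds[0]
# An instance like the one in the card (age=6, gender=0, race=0) decodes to:
# 50-59 Male East Asian (448, 448)
print(AGE[ex["age"]], GENDER[ex["gender"]], RACE[ex["race"]], ex["image"].size)
```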
d2a0cc5509d56204238d34969b0b3657f8d3bf32
Checks with https://visualqa.org/download.html: - Num train questions: 443,757 - Num val questions: 214,354 - Num test questions: 447,793 - Num train answers: 4,437,570 - Num val answers: 2,143,540 - Num train images: 82,783 - Num val images: 40,504 - Num test images: 81,434 testdev is not mentioned: - Num questions: 107,394 - Num images: 36,807
HuggingFaceM4/clevr
[ "region:us" ]
2022-12-09T00:26:22+00:00
{}
2022-12-10T19:56:30+00:00
[]
[]
TAGS #region-us
Checks with URL - Num train questions: 443,757 - Num val questions: 214,354 - Num test questions: 447,793 - Num train answers: 4,437,570 - Num val answers: 2,143,540 - Num train images: 82,783 - Num val images: 40,504 - Num test images: 81,434 testdev is not mentioned: - Num questions: 107,394 - Num images: 36,807
[]
[ "TAGS\n#region-us \n" ]
f4872d73a965adbb833f4330e8c20ea5a3588d24
# Dataset Card for "tattoo_v1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Drozdik/tattoo_v1
[ "region:us" ]
2022-12-09T00:36:10+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 101532798.169, "num_examples": 4239}], "download_size": 78733652, "dataset_size": 101532798.169}}
2022-12-09T00:39:03+00:00
[]
[]
TAGS #region-us
# Dataset Card for "tattoo_v1" More Information needed
[ "# Dataset Card for \"tattoo_v1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"tattoo_v1\"\n\nMore Information needed" ]
97c32c4fc28136f82b442d9b444c479478741fb3
# Dataset Card for "tattoo_v3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Drozdik/tattoo_v3
[ "region:us" ]
2022-12-09T00:57:27+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 101626056.169, "num_examples": 4239}], "download_size": 78738858, "dataset_size": 101626056.169}}
2022-12-09T01:00:20+00:00
[]
[]
TAGS #region-us
# Dataset Card for "tattoo_v3" More Information needed
[ "# Dataset Card for \"tattoo_v3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"tattoo_v3\"\n\nMore Information needed" ]
dbfbc58b8cc454e82ed36fc4c7a0b0140e0f5aae
# Dataset Card for "news_corpus_v2_p1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
hieule/news_corpus_v2_p1
[ "region:us" ]
2022-12-09T03:47:17+00:00
{"dataset_info": {"features": [{"name": "source", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "sapo", "dtype": "string"}, {"name": "cates", "sequence": "string"}, {"name": "publish", "dtype": "timestamp[us]"}, {"name": "text_content", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 15876374992, "num_examples": 5000000}], "download_size": 7858134654, "dataset_size": 15876374992}}
2022-12-09T05:54:12+00:00
[]
[]
TAGS #region-us
# Dataset Card for "news_corpus_v2_p1" More Information needed
[ "# Dataset Card for \"news_corpus_v2_p1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"news_corpus_v2_p1\"\n\nMore Information needed" ]
be6b2afd76cc5196705f419b0cb91a2b583c3155
# Dataset Card for TwitterDEILA ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** [Needs More Information] - **Repository:** [Needs More Information] - **Paper:** [Needs More Information] - **Leaderboard:** [Needs More Information] - **Point of Contact:** [Needs More Information] ### Dataset Summary ### Supported Tasks and Leaderboards [Needs More Information] ### Languages English ## Dataset Structure ### Data Instances [Needs More Information] ### Data Fields ID Sentiment Content ### Data Splits train validate validatedeila validateonetest ## Dataset Creation ### Curation Rationale [Needs More Information] ### Source Data #### Initial Data Collection and Normalization [Needs More Information] #### Who are the source language producers? [Needs More Information] ### Annotations #### Annotation process [Needs More Information] #### Who are the annotators? [Needs More Information] ### Personal and Sensitive Information [Needs More Information] ## Considerations for Using the Data ### Social Impact of Dataset [Needs More Information] ### Discussion of Biases [Needs More Information] ### Other Known Limitations [Needs More Information] ## Additional Information ### Dataset Curators [Needs More Information] ### Licensing Information [Needs More Information] ### Citation Information [Needs More Information]
deancgarcia/TwitterDEILA
[ "region:us" ]
2022-12-09T04:48:33+00:00
{}
2022-12-09T23:43:06+00:00
[]
[]
TAGS #region-us
# Dataset Card for TwitterDEILA ## Table of Contents - Dataset Description - Dataset Summary - Supported Tasks - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information ## Dataset Description - Homepage: - Repository: - Paper: - Leaderboard: - Point of Contact: ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages English ## Dataset Structure ### Data Instances ### Data Fields ID Sentiment Content ### Data Splits train validate validatedeila validateonetest ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information
[ "# Dataset Card for TwitterDEILA", "## Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information", "## Dataset Description\n\n- Homepage: \n- Repository: \n- Paper: \n- Leaderboard: \n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages\n\nEnglish", "## Dataset Structure", "### Data Instances", "### Data Fields\n\nID\nSentiment\nContent", "### Data Splits\n\ntrain\nvalidate\nvalidatedeila\nvalidateonetest", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information" ]
[ "TAGS\n#region-us \n", "# Dataset Card for TwitterDEILA", "## Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information", "## Dataset Description\n\n- Homepage: \n- Repository: \n- Paper: \n- Leaderboard: \n- Point of Contact:", "### Dataset Summary", "### Supported Tasks and Leaderboards", "### Languages\n\nEnglish", "## Dataset Structure", "### Data Instances", "### Data Fields\n\nID\nSentiment\nContent", "### Data Splits\n\ntrain\nvalidate\nvalidatedeila\nvalidateonetest", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information" ]
4da5bdd1570c7c74fb2f570a29bd515222f9d75d
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: stacked-summaries/flan-t5-large-stacked-samsum-1024 * Dataset: samsum * Config: samsum * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@pszemraj](https://huggingface.co/pszemraj) for evaluating this model.
autoevaluate/autoeval-eval-samsum-samsum-29813b-2390574811
[ "autotrain", "evaluation", "region:us" ]
2022-12-09T04:50:13+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["samsum"], "eval_info": {"task": "summarization", "model": "stacked-summaries/flan-t5-large-stacked-samsum-1024", "metrics": ["bertscore"], "dataset_name": "samsum", "dataset_config": "samsum", "dataset_split": "test", "col_mapping": {"text": "dialogue", "target": "summary"}}}
2022-12-09T04:52:51+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Summarization * Model: stacked-summaries/flan-t5-large-stacked-samsum-1024 * Dataset: samsum * Config: samsum * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @pszemraj for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: stacked-summaries/flan-t5-large-stacked-samsum-1024\n* Dataset: samsum\n* Config: samsum\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @pszemraj for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Summarization\n* Model: stacked-summaries/flan-t5-large-stacked-samsum-1024\n* Dataset: samsum\n* Config: samsum\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @pszemraj for evaluating this model." ]
0d0fe435a5528bff22b95a516e0a946d83e9ddcf
# Dataset Card for "sv_corpora_parliament_processed" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
saichand09/sv_corpora_parliament_processed
[ "region:us" ]
2022-12-09T05:18:16+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 292351437, "num_examples": 1892723}], "download_size": 161955537, "dataset_size": 292351437}}
2022-12-09T05:18:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sv_corpora_parliament_processed" More Information needed
[ "# Dataset Card for \"sv_corpora_parliament_processed\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sv_corpora_parliament_processed\"\n\nMore Information needed" ]
319f6d868eee2bc9d369191bcaadce9755c66a67
# Dataset Card for "rice-rgb-demo2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
gokulraja17/rice-rgb-demo2
[ "region:us" ]
2022-12-09T05:25:08+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "RiceLeafs_BrownSpot", "1": "RiceLeafs_Healthy", "2": "RiceLeafs_Hispa", "3": "RiceLeafs_LeafBlast"}}}}], "splits": [{"name": "train", "num_bytes": 11929981.02, "num_examples": 2683}, {"name": "test", "num_bytes": 3059814.0, "num_examples": 672}], "download_size": 14605882, "dataset_size": 14989795.02}}
2022-12-09T05:25:16+00:00
[]
[]
TAGS #region-us
# Dataset Card for "rice-rgb-demo2" More Information needed
[ "# Dataset Card for \"rice-rgb-demo2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"rice-rgb-demo2\"\n\nMore Information needed" ]
c9a878514e9367cb3ecb1dce2522f5ec97a3d5d5
# Dataset Card for "sentence_eval_aa2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
shreyasharma/sentence_eval_aa2
[ "region:us" ]
2022-12-09T05:37:50+00:00
{"dataset_info": {"features": [{"name": "declarativized", "dtype": "string"}, {"name": "correct", "dtype": "bool"}], "splits": [{"name": "train", "num_bytes": 35463, "num_examples": 615}, {"name": "validation", "num_bytes": 18279, "num_examples": 315}, {"name": "test", "num_bytes": 17185, "num_examples": 300}], "download_size": 56380, "dataset_size": 70927}}
2022-12-09T05:38:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sentence_eval_aa2" More Information needed
[ "# Dataset Card for \"sentence_eval_aa2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sentence_eval_aa2\"\n\nMore Information needed" ]
ca9a623765e3622fd7c10f3da8cae77abb43acf9
# Dataset Card for "sentence_eval_aa" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
shreyasharma/sentence_eval_aa
[ "region:us" ]
2022-12-09T05:38:21+00:00
{"dataset_info": {"features": [{"name": "declarativized", "dtype": "string"}, {"name": "correct", "dtype": "bool"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 103267, "num_examples": 1359}, {"name": "validation", "num_bytes": 29118, "num_examples": 379}, {"name": "test", "num_bytes": 29277, "num_examples": 370}], "download_size": 77770, "dataset_size": 161662}}
2022-12-09T05:38:35+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sentence_eval_aa" More Information needed
[ "# Dataset Card for \"sentence_eval_aa\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sentence_eval_aa\"\n\nMore Information needed" ]
40e99810c8e78e6f70da9aefce099268cd99c3e3
<br><br> <p align="center"> <img src="https://github.com/jina-ai/finetuner/blob/main/docs/_static/finetuner-logo-ani.svg?raw=true" alt="Finetuner logo: Finetuner helps you to create experiments in order to improve embeddings on search tasks. It accompanies you to deliver the last mile of performance-tuning for neural search applications." width="150px"> </p> <p align="center"> <b>The data offered by Jina AI, Finetuner team.</b> </p> ## Summary This dataset is a German-language dataset based on the [Fashion12K](https://github.com/Toloka/Fashion12K_german_queries) dataset, which originally contains both English and German text descriptions for each item. This dataset was used to fine-tune CLIP using the [Finetuner](https://finetuner.jina.ai/) tool. ## Fine-tuning Please refer to our documentation: [Multilingual Text-to-Image Search with MultilingualCLIP](https://finetuner.jina.ai/notebooks/multilingual_text_to_image/) and the blog post [Improving Search Quality for Non-English Queries with Fine-tuned Multilingual CLIP Models](https://jina.ai/news/improving-search-quality-non-english-queries-fine-tuned-multilingual-clip-models/) ## Instances Each data point consists of a 'text' and an 'image' field, where the 'text' field describes an item of clothing in German, and the 'image' field contains an image of that item of clothing. ## Fields - 'text': A string describing the item of clothing. - 'image': A `PIL.Image.Image` object containing the image. Note that when accessing the image column: `dataset[0]["image"]` the image file is automatically decoded. Decoding of a large number of image files might take a significant amount of time. Thus it is important to first query the sample index before the "image" column, i.e. `dataset[0]["image"]` should always be preferred over `dataset["image"][0]`. ## Splits | | train | test | |------------|-------|------| | # of items | 10000 | 2001 | ## Source Images were sampled from the [Fashion200K dataset](https://github.com/xthan/fashion-200k). ## Annotations Data was annotated using [Toloka](https://toloka.ai/). See their site for more details. ## Licensing Information This work is licensed under a Creative Commons Attribution 4.0 International License. ## Contributors Thanks to contributors from [Jina AI](https://jina.ai) and [Toloka](https://toloka.ai) for adding this dataset.
jinaai/fashion-captions-de
[ "task_categories:text-to-image", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:de", "license:cc-by-4.0", "region:us" ]
2022-12-09T06:11:54+00:00
{"language": ["de"], "license": "cc-by-4.0", "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "source_datasets": ["original"], "task_categories": ["text-to-image"], "pretty_name": "Fashion12k DE", "dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 282285477, "num_examples": 10000}, {"name": "test", "num_bytes": 56612023.875, "num_examples": 2001}], "download_size": 320681179, "dataset_size": 338897500.875}}
2023-07-09T09:37:31+00:00
[]
[ "de" ]
TAGS #task_categories-text-to-image #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-German #license-cc-by-4.0 #region-us
![](URL alt=) **The data offered by Jina AI, Finetuner team.** Summary ------- This dataset is a German-language dataset based on the Fashion12K dataset, which originally contains both English and German text descriptions for each item. This dataset was used to to finetuner CLIP using the Finetuner tool. Fine-tuning ----------- Please refer to our documentation: Multilingual Text-to-Image Search with MultilingualCLIP and blog Improving Search Quality for Non-English Queries with Fine-tuned Multilingual CLIP Models Instances --------- Each data point consists of a 'text' and an 'image' field, where the 'text' field describes an item of clothing in German, and the 'image' field contains and image of that item of clothing. Fields ------ * 'text': A string describing the item of clothing. * 'image': A 'PIL.Image.Image' object containing the image. Note that when accessing the image column: 'dataset[0]["image"]' the image file is automatically decoded. Decoding of a large number of image files might take a significant amount of time. Thus it is important to first query the sample index before the "image" column, i.e. 'dataset[0]["image"]' should always be preferred over dataset["image"][0]. Splits ------ train: # of items, test: 10000 Source ------ Images were sampled from the Fashion200K dataset. Annotations ----------- Data was annotated using Toloka. See their site for more details. Licensing Information --------------------- This work is licensed under a Creative Commons Attribution 4.0 International License. Contributors ------------ Thanks to contributors from Jina AI and Toloka for adding this dataset.
[ "# of items, test: 10000\n\n\nSource\n------\n\n\nImages were sampled from the Fashion200K dataset.\n\n\nAnnotations\n-----------\n\n\nData was annotated using Toloka. See their site for more details.\n\n\nLicensing Information\n---------------------\n\n\nThis work is licensed under a Creative Commons Attribution 4.0 International License.\n\n\nContributors\n------------\n\n\nThanks to contributors from Jina AI and Toloka for adding this dataset." ]
[ "TAGS\n#task_categories-text-to-image #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-German #license-cc-by-4.0 #region-us \n", "# of items, test: 10000\n\n\nSource\n------\n\n\nImages were sampled from the Fashion200K dataset.\n\n\nAnnotations\n-----------\n\n\nData was annotated using Toloka. See their site for more details.\n\n\nLicensing Information\n---------------------\n\n\nThis work is licensed under a Creative Commons Attribution 4.0 International License.\n\n\nContributors\n------------\n\n\nThanks to contributors from Jina AI and Toloka for adding this dataset." ]
a9211192e0405af9560768f986c8974d00358efa
# Dataset Card for "news_corpus_v2_p2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
hieule/news_corpus_v2_p2
[ "region:us" ]
2022-12-09T06:14:09+00:00
{"dataset_info": {"features": [{"name": "source", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "sapo", "dtype": "string"}, {"name": "cates", "dtype": "null"}, {"name": "publish", "dtype": "timestamp[us]"}, {"name": "text_content", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 18238489193, "num_examples": 5000000}], "download_size": 9130800517, "dataset_size": 18238489193}}
2022-12-09T08:28:51+00:00
[]
[]
TAGS #region-us
# Dataset Card for "news_corpus_v2_p2" More Information needed
[ "# Dataset Card for \"news_corpus_v2_p2\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"news_corpus_v2_p2\"\n\nMore Information needed" ]
dad9e9e6de9bfe942619234047d24d420949e50f
# Dataset Card for "sheet_music_clean" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
EmileEsmaili/sheet_music_clean
[ "region:us" ]
2022-12-09T06:44:26+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 2229356112.491, "num_examples": 9219}], "download_size": 1211789844, "dataset_size": 2229356112.491}}
2022-12-09T06:47:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sheet_music_clean" More Information needed
[ "# Dataset Card for \"sheet_music_clean\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sheet_music_clean\"\n\nMore Information needed" ]
a038c6ad1f3f655a0cb85bcaf6291b8aa230502d
This dataset contains 5 pickle files, each of which is a Hugging Face dataset containing 10,000 images in PIL.Image format together with their class label and class id.
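A rough sketch of how one of these shards could be read back. The file name below is hypothetical (the actual pickle file names in this repository may differ), and the exact column names are an assumption based on the description above, so inspect the loaded dataset before relying on them.

```python
import pickle

# Hypothetical file name -- substitute one of the five pickle files from this repository.
shard_path = "val_imagenet_part0.pkl"

with open(shard_path, "rb") as f:
    shard = pickle.load(f)      # expected: a Hugging Face datasets.Dataset with ~10,000 rows

print(len(shard))               # number of rows in this shard
example = shard[0]
print(example.keys())           # reveals the actual column names (image, class label, class id)
```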
taquynhnga/preprocessed-val-imagenet
[ "region:us" ]
2022-12-09T07:20:52+00:00
{}
2022-12-09T07:22:46+00:00
[]
[]
TAGS #region-us
This dataset contains 5 pickle files which is a hugging face dataset, each contains 10,000 images in PIL.Image data type together with their class label and class id
[]
[ "TAGS\n#region-us \n" ]
b88d87988b5645dda3f69bb920a2468b3da82054
# Dataset Card for "sprite_caption_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tkarr/sprite_caption_dataset
[ "region:us" ]
2022-12-09T07:46:51+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 32010144.421997756, "num_examples": 12830}, {"name": "test", "num_bytes": 1778895.7890011223, "num_examples": 713}, {"name": "valid", "num_bytes": 1778895.7890011223, "num_examples": 713}], "download_size": 26944262, "dataset_size": 35567936.0}}
2022-12-15T02:27:33+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sprite_caption_dataset" More Information needed
[ "# Dataset Card for \"sprite_caption_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sprite_caption_dataset\"\n\nMore Information needed" ]
5a4de27b0bbacc6562d40b12575e30bba244c5a3
# Dataset Card for "tnews" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
xusenlin/tnews
[ "region:us" ]
2022-12-09T07:48:43+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 4421883, "num_examples": 53360}, {"name": "validation", "num_bytes": 830536, "num_examples": 10000}], "download_size": 3695633, "dataset_size": 5252419}}
2022-12-09T08:04:49+00:00
[]
[]
TAGS #region-us
# Dataset Card for "tnews" More Information needed
[ "# Dataset Card for \"tnews\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"tnews\"\n\nMore Information needed" ]
384c074e2f675f0c4e030a500574643826248471
# Dataset Card for "news_corpus_v2_p3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
hieule/news_corpus_v2_p3
[ "region:us" ]
2022-12-09T07:54:18+00:00
{"dataset_info": {"features": [{"name": "source", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "sapo", "dtype": "string"}, {"name": "cates", "dtype": "null"}, {"name": "publish", "dtype": "timestamp[us]"}, {"name": "text_content", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 17984316450, "num_examples": 4896998}], "download_size": 9019821584, "dataset_size": 17984316450}}
2022-12-09T11:29:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "news_corpus_v2_p3" More Information needed
[ "# Dataset Card for \"news_corpus_v2_p3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"news_corpus_v2_p3\"\n\nMore Information needed" ]
5f1d8cb527e42851d69eb4b9f7d4bcfe0095334a
# Dataset Card for "olm-october-2022-tokenized-1024-perplexity-filters" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Tristan/olm-october-2022-tokenized-1024-perplexity-filters
[ "region:us" ]
2022-12-09T08:09:43+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "special_tokens_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 78517730052.0, "num_examples": 12754667}], "download_size": 21283341524, "dataset_size": 78517730052.0}}
2022-12-09T09:03:30+00:00
[]
[]
TAGS #region-us
# Dataset Card for "olm-october-2022-tokenized-1024-perplexity-filters" More Information needed
[ "# Dataset Card for \"olm-october-2022-tokenized-1024-perplexity-filters\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"olm-october-2022-tokenized-1024-perplexity-filters\"\n\nMore Information needed" ]
c761727c4a026646f1ba20907606937562a99640
# Dataset Card for [REDv2] ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This is the second version of the Romanian Emotions Dataset (RED) containing 5449 tweets annotated in a multi-label fashion with the following 7 emotions: Anger (Furie), Fear (Frică), Joy (Bucurie), Sadness (Tristețe), Surprise (Surpriză), Trust (Încredere) and Neutral (Neutru). ### Supported Tasks and Leaderboards This dataset is intended for multi-class & multi-label emotion classification. ### Languages The data is in Romanian. ## Dataset Structure ### Data Instances Each instance is a tweet with a corresponding ID and one or more emotion annotations (or neutral). ### Data Fields The simplified configuration includes: ``` text: the tweet text_id: unique identifier of the tweet (can be used to look up the entry in the raw dataset) agreed_labels: the agreed emotion annotations vector (each value of 1 means that at least two annotators recognized that specific emotion) procentual_labels: vector whose entries take one of three values: 0.33 if one annotator recognised the emotion, 0.66 if two annotators agreed on the emotion, and 0.99 if all annotators recognised the emotion ``` In addition to the above, the raw data includes: ``` Anger, Fear, Joy, Neutral, Sadness, Surprise, Trust: boolean values - True if the specific emotion is found in the agreed_labels vector annotator1, annotator2, annotator3: vectors of zeros and ones - 1 means the annotator recognized the emotion on the corresponding vector index sum_labels: the sum of annotator1, annotator2 and annotator3 vectors ``` The arrays of 7 values correspond to the following emotions: ['Sadness', 'Surprise', 'Fear', 'Anger', 'Neutral', 'Trust', 'Joy']. ### Data Splits This dataset includes a set of train/val/test splits with 4088, 818, and 543 examples respectively. ## Dataset Creation ### Curation Rationale From the paper introduction: >Interpreting correctly one’s own emotions, as well as other people’s emotional states, is a central aspect of emotional intelligence. Today, people can automate the process of emotion detection by creating machine learning models, provided by the fact that the model training was done on qualitative and sufficient data. With the constant increase of social media usage there is also an increase in online public data, freely available for model creation. 
Thus, analyzing emotions in online content naturally has become more and more of a topic of interest in the recent years. ### Source Data #### Initial Data Collection and Normalization Data was collected from Twitter (for more information see Chapter 3.1 of the [paper](https://aclanthology.org/2022.lrec-1.149.pdf)). #### Who are the source language producers? Romanian-speaking Twitter users. ### Annotations #### Annotation process See Chapter 3.2 in the [paper](https://aclanthology.org/2022.lrec-1.149.pdf). #### Who are the annotators? Annotations were produced by 66 Cognitive Science students, University of Bucharest, Faculty of Psychology and Educational Sciences. ### Personal and Sensitive Information All tweets in this dataset are anonymized by removing usernames and proper nouns. ## Additional Information ### Dataset Curators Researchers at the University of Bucharest and Adobe (see the authors of the paper [here](http://www.lrec-conf.org/proceedings/lrec2022/pdf/2022.lrec-1.149.pdf)). ### Licensing Information The [GitHub repository](https://github.com/Alegzandra/RED-Romanian-Emotions-Dataset/tree/main/REDv2) of this dataset has an MIT license. ### Citation Information If you are using this dataset in your research, please cite: ``` @inproceedings{redv2, author = "Alexandra Ciobotaru and Mihai V. Constantinescu and Liviu P. Dinu and Stefan Daniel Dumitrescu", title = "{RED} v2: {E}nhancing {RED} {D}ataset for {M}ulti-{L}abel {E}motion {D}etection", journal = "Proceedings of the 13th Language Resources and Evaluation Conference (LREC 2022)", pages = "1392–1399", year = "2022", address = "Marseille, France", publisher = "European Language Resources Association (ELRA)", url = "http://www.lrec-conf.org/proceedings/lrec2022/pdf/2022.lrec-1.149.pdf", language = "English" } ``` ### Contributions Thanks to [@Alegzandra](https://github.com/Alegzandra) for adding this dataset.
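To make the label encoding described under Data Fields concrete, here is a small plain-Python sketch (using no assumptions beyond the field descriptions and emotion order given above) that recovers the agreed emotions from a `procentual_labels` vector:

```python
# Emotion order as documented above for the 7-dimensional label vectors.
EMOTIONS = ['Sadness', 'Surprise', 'Fear', 'Anger', 'Neutral', 'Trust', 'Joy']

def agreed_emotions(procentual_labels, threshold=0.66):
    """Return the emotions recognised by at least two of the three annotators.

    0.33 = one annotator, 0.66 = two annotators, 0.99 = all three (see Data Fields).
    """
    return [emotion
            for emotion, score in zip(EMOTIONS, procentual_labels)
            if score >= threshold]

# Example vector: one annotator marked Surprise, two annotators marked Joy.
print(agreed_emotions([0.0, 0.33, 0.0, 0.0, 0.0, 0.0, 0.66]))  # -> ['Joy']
```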
Alegzandra/REDv2
[ "license:mit", "region:us" ]
2022-12-09T09:11:36+00:00
{"license": "mit"}
2022-12-22T15:14:54+00:00
[]
[]
TAGS #license-mit #region-us
# Dataset Card for [REDv2] ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: - Repository: - Paper: - Leaderboard: - Point of Contact: ### Dataset Summary This is the second version of the Romanian Emotions Dataset (RED) containing 5449 tweets annotated in a multi-label fashion with the following 7 emotions: Anger (Furie), Fear (Frică), Joy (Bucurie), Sadness (Tristețe), Surprise (Surpriză), Trust (Încredere) and Neutral (Neutru). ### Supported Tasks and Leaderboards This dataset is intended for multi-class & multi-label emotion classification. ### Languages The data is in Romanian. ## Dataset Structure ### Data Instances Each instance is a tweet with a corresponding ID and one or more emotion annotations (or neutral). ### Data Fields The simplified configuration includes: In addition to the above, the raw data includes: The arrays of 7 values correspond to the following emotions: ['Sadness', 'Surprise', 'Fear', 'Anger', 'Neutral', 'Trust', 'Joy']. ### Data Splits This dataset includes a set of train/val/test splits with 4088, 818, and 543 examples respectively. ## Dataset Creation ### Curation Rationale From the paper introduction: >Interpreting correctly one’s own emotions, as well as other people’s emotional states, is a central aspect of emotional intelligence. Today, people can automate the process of emotion detection by creating machine learning models, provided by the fact that the model training was done on qualitative and sufficient data. With the constant increase of social media usage there is also an increase in online public data, freely available for model creation. Thus, analyzing emotions in online content naturally has became more and more of a topic of interest in the recent years. ### Source Data #### Initial Data Collection and Normalization Data was collected from Twitter (for more information see Chapter 3.1 of the paper). #### Who are the source language producers? Romanian-speaking Twitter users. ### Annotations #### Annotation process See Chapter 3.2. in the paper. #### Who are the annotators? Annotations were produced by 66 Cognitive Science students, University of Bucharest, Faculty of Psichology and Educational Sciences. ### Personal and Sensitive Information All tweets in this dataset are anonymized by removing usernames and proper nouns. ## Additional Information ### Dataset Curators Researchers at the University of Bucharest and Adobe (see the authors of the paper here). ### Licensing Information The GitHub repository of this dataset has an MIT license. If you are using this dataset in your research, please cite: ### Contributions Thanks to @Alegzandra for adding this dataset.
[ "# Dataset Card for [REDv2]", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary\n\nThis is the second version of the Romanian Emotions Dataset (RED) containing 5449 tweets annotated in a multi-label fashion with the following 7 emotions: Anger (Furie), Fear (Frică), Joy (Bucurie), Sadness (Tristețe), Surprise (Surpriză), Trust (Încredere) and Neutral (Neutru).", "### Supported Tasks and Leaderboards\n\nThis dataset is intended for multi-class & multi-label emotion classification.", "### Languages\n\nThe data is in Romanian.", "## Dataset Structure", "### Data Instances\n\nEach instance is a tweet with a corresponding ID and one or more emotion annotations (or neutral).", "### Data Fields\n\nThe simplified configuration includes:\n\nIn addition to the above, the raw data includes:\n\nThe arrays of 7 values correspond to the following emotions: ['Sadness', 'Surprise', 'Fear', 'Anger', 'Neutral', 'Trust', 'Joy'].", "### Data Splits\n\nThis dataset includes a set of train/val/test splits with 4088, 818, and 543 examples respectively.", "## Dataset Creation", "### Curation Rationale\n\nFrom the paper introduction:\n\n>Interpreting correctly one’s own emotions, as well as\nother people’s emotional states, is a central aspect of\nemotional intelligence. Today, people can automate\nthe process of emotion detection by creating machine\nlearning models, provided by the fact that the model\ntraining was done on qualitative and sufficient data.\nWith the constant increase of social media usage there\nis also an increase in online public data, freely available\nfor model creation. Thus, analyzing emotions in online\ncontent naturally has became more and more of a topic\nof interest in the recent years.", "### Source Data", "#### Initial Data Collection and Normalization\n\nData was collected from Twitter (for more information see Chapter 3.1 of the paper).", "#### Who are the source language producers?\n\nRomanian-speaking Twitter users.", "### Annotations", "#### Annotation process\n\nSee Chapter 3.2. in the paper.", "#### Who are the annotators?\n\nAnnotations were produced by 66 Cognitive Science students, University of Bucharest, Faculty of Psichology and Educational Sciences.", "### Personal and Sensitive Information\n\nAll tweets in this dataset are anonymized by removing usernames and proper nouns.", "## Additional Information", "### Dataset Curators\n\nResearchers at the University of Bucharest and Adobe (see the authors of the paper here).", "### Licensing Information\n\nThe GitHub repository of this dataset has an MIT license.\n\n\n\nIf you are using this dataset in your research, please cite:", "### Contributions\n\nThanks to @Alegzandra for adding this dataset." ]
[ "TAGS\n#license-mit #region-us \n", "# Dataset Card for [REDv2]", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary\n\nThis is the second version of the Romanian Emotions Dataset (RED) containing 5449 tweets annotated in a multi-label fashion with the following 7 emotions: Anger (Furie), Fear (Frică), Joy (Bucurie), Sadness (Tristețe), Surprise (Surpriză), Trust (Încredere) and Neutral (Neutru).", "### Supported Tasks and Leaderboards\n\nThis dataset is intended for multi-class & multi-label emotion classification.", "### Languages\n\nThe data is in Romanian.", "## Dataset Structure", "### Data Instances\n\nEach instance is a tweet with a corresponding ID and one or more emotion annotations (or neutral).", "### Data Fields\n\nThe simplified configuration includes:\n\nIn addition to the above, the raw data includes:\n\nThe arrays of 7 values correspond to the following emotions: ['Sadness', 'Surprise', 'Fear', 'Anger', 'Neutral', 'Trust', 'Joy'].", "### Data Splits\n\nThis dataset includes a set of train/val/test splits with 4088, 818, and 543 examples respectively.", "## Dataset Creation", "### Curation Rationale\n\nFrom the paper introduction:\n\n>Interpreting correctly one’s own emotions, as well as\nother people’s emotional states, is a central aspect of\nemotional intelligence. Today, people can automate\nthe process of emotion detection by creating machine\nlearning models, provided by the fact that the model\ntraining was done on qualitative and sufficient data.\nWith the constant increase of social media usage there\nis also an increase in online public data, freely available\nfor model creation. Thus, analyzing emotions in online\ncontent naturally has became more and more of a topic\nof interest in the recent years.", "### Source Data", "#### Initial Data Collection and Normalization\n\nData was collected from Twitter (for more information see Chapter 3.1 of the paper).", "#### Who are the source language producers?\n\nRomanian-speaking Twitter users.", "### Annotations", "#### Annotation process\n\nSee Chapter 3.2. in the paper.", "#### Who are the annotators?\n\nAnnotations were produced by 66 Cognitive Science students, University of Bucharest, Faculty of Psichology and Educational Sciences.", "### Personal and Sensitive Information\n\nAll tweets in this dataset are anonymized by removing usernames and proper nouns.", "## Additional Information", "### Dataset Curators\n\nResearchers at the University of Bucharest and Adobe (see the authors of the paper here).", "### Licensing Information\n\nThe GitHub repository of this dataset has an MIT license.\n\n\n\nIf you are using this dataset in your research, please cite:", "### Contributions\n\nThanks to @Alegzandra for adding this dataset." ]
7dea87ff1f1b63cd98164fa25b96227636144d04
<h1>The following is a collection of hoax and non-hoax news articles</h1> The collection consists of 90 hoax articles and 100 non-hoax articles in Indonesian. The articles were collected in 2019. <br> File naming legend:<br> H = Hoaks (hoax) <br> NH = Non-Hoaks (non-hoax)
mathaillah/BeritaHoaks-NonHoaks
[ "region:us" ]
2022-12-09T09:47:43+00:00
{}
2022-12-09T10:08:25+00:00
[]
[]
TAGS #region-us
<h1>The following is a collection of hoax and non-hoax news articles</h1> The collection consists of 90 hoax articles and 100 non-hoax articles in Indonesian. The articles were collected in 2019 <br> File naming legend:<br> H = Hoaks (hoax) <br> NH = Non-Hoaks (non-hoax)
[]
[ "TAGS\n#region-us \n" ]
86aea26468da12ee49dc6f0a299af4cd46596154
# Dataset Card for Pokémon wiki captions This project is inspired by [pokemon-blip-captions](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions), where the captions are all generated by pre-trained BLIP without any manual effort. However, the quality and accuracy of their captions are not satisfactory enough, which leaves it unknown whether better captions lead to better results. This motivates our dataset. # Example ![pk1.jpg](https://storage.googleapis.com/kagglesdsdata/datasets/1392907/2309103/Pokemon%20Dataset/aipom.png?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=databundle-worker-v2%40kaggle-161607.iam.gserviceaccount.com%2F20221208%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20221208T155930Z&X-Goog-Expires=345600&X-Goog-SignedHeaders=host&X-Goog-Signature=4b9c507a546d5b4517621ad440b274a50c88b3270e254eb1146098026fa5fe68e1f7e9699e1554802cc7bd6512ed39612ab63cf1da0d77957567d9ec4d7df8b0a5137ecdb371142cbbb00c966552599366608382c0381b46dc92dbc734bd72d62bba99e9c1efa45f498c4bdc1bf4b650e6275e3cd4467d6fa8dfe4dc57754b61b66f557b8ce2f03d03a6fa81258c7c330074a30a353a27f2436e1f79d0d19289008b601203fa8dd8da70e6de931ad79afa91134ac94695cbd3c8c09f6919c4144944ad3233314a9148a5f752c1f8b940a30bd7ade90844f9194454cae945da96f077fbc533c2154f0d4eb85df504426ad4b3426a63259cd5e69a379ac515b292) > General attribute, looks like a little monkey, body color is composed of purple and beige, the end of the tail is like a hand ![pk2.jpg](https://storage.googleapis.com/kagglesdsdata/datasets/1392907/2309103/Pokemon%20Dataset/arbok.png?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=databundle-worker-v2%40kaggle-161607.iam.gserviceaccount.com%2F20221208%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20221208T155930Z&X-Goog-Expires=345600&X-Goog-SignedHeaders=host&X-Goog-Signature=42e8cf9eb3af2dac09ca3cfd72a2555c31af543b9b26be79933c7cd5586c8475a64246c5221a3815fb99502acfa8de20c029fe045568fda7c2582ff4f1a0d61423d5aa879d93c5d6331b7993803f25a6287b0ff8f8a8a3ab6bc947a3fe92db4c4551a2698eed46e73c93372bf1aa815268864377b581dafab7989ee757e67d91c126524cb640761758922f1039b4858f5bd7abe8c68fec6939469fe2116f9c6ade26a5d6120168a183f34e517e8d1e2f6873adbf54ad1db946c524894ac0a6945b9a92cb65577fe8b7c5a25aefe797a9b83398fba4407d6e7e2651c83ef97910c15ca1ff7e6075522004f8f776cd6acd014a78ce10010bc4c736eb9194602e81) > Poisonous attributes, it looks like a huge purple cobra, with black stripes on its body, small head, and triangular eyes # Properties All 898 images are from [The Complete Pokemon Images Data Set](https://www.kaggle.com/datasets/arenagrenade/the-complete-pokemon-images-data-set?resource=download) on Kaggle, each with size 475x475. Each image is accompanied by the corresponding pokemon name and its detailed description from [Pokemon Wiki](https://wiki.52poke.com/wiki/%E4%B8%BB%E9%A1%B5); English and Chinese captions are provided, and human effort was involved in revising them. # How to use ``` from datasets import load_dataset dataset = load_dataset("wanghaofan/pokemon-wiki-captions") ``` The dataset is formatted as below. For each row the dataset contains `image`, `name_en`, `name_zh`, `text_en` and `text_zh` keys. `image` is a varying-size PIL jpeg, `name` is the name of the pokemon, and `text` is the accompanying text caption. Only a train split is provided. 
``` DatasetDict({ train: Dataset({ features: ['image', 'name_en', 'name_zh', 'text_en', 'text_zh'], num_rows: 898 }) }) ``` # Citation If you use this dataset in your work, please cite it as: ``` @misc{wanghaofan2022pokemon, author = {Haofan Wang}, title = {Pokemon wiki captions}, year={2022}, howpublished= {\url{https://huggingface.co/datasets/wanghaofan/pokemon-wiki-captions/}} } ```
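As a follow-up to the usage snippet above, a small sketch of reading one row; the field names are those listed in this card, and only the standard `datasets` API is used.

```python
from datasets import load_dataset

# Sketch: look at one bilingual caption pair from the train split.
dataset = load_dataset("wanghaofan/pokemon-wiki-captions", split="train")

row = dataset[0]
print(row["name_en"], "/", row["name_zh"])
print("EN:", row["text_en"])
print("ZH:", row["text_zh"])
print(row["image"].size)   # varying-size PIL jpeg, per the card
```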
wanghaofan/pokemon-wiki-captions
[ "region:us" ]
2022-12-09T11:13:28+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "name_en", "dtype": "string"}, {"name": "name_zh", "dtype": "string"}, {"name": "text_en", "dtype": "string"}, {"name": "text_zh", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 117645424.0, "num_examples": 898}], "download_size": 117512478, "dataset_size": 117645424.0}}
2022-12-09T12:50:49+00:00
[]
[]
TAGS #region-us
# Dataset Card for Pokémon wiki captions This project is inspired by pokmon-blip-captions, where the captions are all generated by pre-trained BLIP without any manual effort. However, the quality and accuracy of their captions are not satisfactory enough, which leaves it known whether better captions lead to better results. This motivates our dataset. # Example !URL > General attribute, looks like a little monkey, body color is composed of purple and beige, the end of the tail is like a hand !URL > Poisonous attributes, it looks like a huge purple cobra, with black stripes on its body, small head, and triangular eyes # Properties All 898 images are from The Complete Pokemon Images Data Set in Kaggle with size 475x475. Each image is accompanied with corresponding pokemon name and its detailed description from Pokemon Wiki, English and Chinese captions are provided. Human efforts are also involved to revise. # How to use The dataset is formatted as below. For each row the dataset contains 'image', 'name_en', 'name_zh', 'text_en' and 'text_zh' keys. 'image' is a varying size PIL jpeg, 'name' is the name of pokemon, and 'text' is the accompanying text caption. Only a train split is provided. If you use this dataset in your work, please cite it as:
[ "# Dataset Card for Pokémon wiki captions\n\nThis project is inspired by pokmon-blip-captions, where the captions are all generated by pre-trained BLIP without any manual effort.\nHowever, the quality and accuracy of their captions are not satisfactory enough, which leaves it known whether better captions lead to better results. This motivates our dataset.", "# Example\n\n!URL\n> General attribute, looks like a little monkey, body color is composed of purple and beige, the end of the tail is like a hand\n\n!URL\n> Poisonous attributes, it looks like a huge purple cobra, with black stripes on its body, small head, and triangular eyes", "# Properties\n\nAll 898 images are from The Complete Pokemon Images Data Set in Kaggle with size 475x475. Each image is accompanied with corresponding \npokemon name and its detailed description from Pokemon Wiki, English and Chinese captions are provided. Human efforts are also involved to revise.", "# How to use\n\n\nThe dataset is formatted as below. For each row the dataset contains 'image', 'name_en', 'name_zh', 'text_en' and 'text_zh' keys. 'image' is a varying size PIL jpeg, 'name' is the name of pokemon, and 'text' is the accompanying text caption. Only a train split is provided.\n\n\n\nIf you use this dataset in your work, please cite it as:" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Pokémon wiki captions\n\nThis project is inspired by pokmon-blip-captions, where the captions are all generated by pre-trained BLIP without any manual effort.\nHowever, the quality and accuracy of their captions are not satisfactory enough, which leaves it known whether better captions lead to better results. This motivates our dataset.", "# Example\n\n!URL\n> General attribute, looks like a little monkey, body color is composed of purple and beige, the end of the tail is like a hand\n\n!URL\n> Poisonous attributes, it looks like a huge purple cobra, with black stripes on its body, small head, and triangular eyes", "# Properties\n\nAll 898 images are from The Complete Pokemon Images Data Set in Kaggle with size 475x475. Each image is accompanied with corresponding \npokemon name and its detailed description from Pokemon Wiki, English and Chinese captions are provided. Human efforts are also involved to revise.", "# How to use\n\n\nThe dataset is formatted as below. For each row the dataset contains 'image', 'name_en', 'name_zh', 'text_en' and 'text_zh' keys. 'image' is a varying size PIL jpeg, 'name' is the name of pokemon, and 'text' is the accompanying text caption. Only a train split is provided.\n\n\n\nIf you use this dataset in your work, please cite it as:" ]
1ea019fd53e0a46c3f0f363723a5b4dcd332c44b
# Dataset Card for "an2012" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Martw/an2012
[ "region:us" ]
2022-12-09T11:17:10+00:00
{"dataset_info": {"features": [{"name": "sentences", "sequence": "string"}, {"name": "labels", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 30151484, "num_examples": 240}], "download_size": 474849, "dataset_size": 30151484}}
2022-12-09T11:17:17+00:00
[]
[]
TAGS #region-us
# Dataset Card for "an2012" More Information needed
[ "# Dataset Card for \"an2012\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"an2012\"\n\nMore Information needed" ]
cd44191bda8f015f2af29a150fd78d549408f411
# CARES - A Corpus of Anonymised Radiological Evidences in Spanish 📑🏥 CARES is a high-quality text resource manually labeled with ICD-10 codes and reviewed by radiologists. These types of resources are essential for developing automatic text classification tools, as they are necessary for training and fine-tuning our computational systems. The CARES corpus has been manually annotated using the ICD-10 ontology, which stands for the 10th version of the International Classification of Diseases. For each radiological report, a minimum of one code and a maximum of 9 codes were assigned, while the average number of codes per text is 2.15 with a standard deviation of 1.12. The corpus was additionally preprocessed in order to make its format coherent with the automatic text classification task. Considering the hierarchical structure of the ICD-10 ontology, each sub-code was mapped to its respective code and chapter, obtaining two new sets of labels for each report. The entire CARES collection contains 6,907 sub-code annotations among the 3,219 radiologic reports. There are 223 unique ICD-10 sub-codes within the annotations, which were mapped to 156 unique ICD-10 codes and 16 unique chapters of the cited ontology. As for the dataset train and test subsets, a stratified split was performed in order to guarantee that the number of labels in the test data is representative.
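As a minimal usage sketch: the repository id `chizhikchi/CARES` and the column names below are taken from this dataset's published configuration (not from the description above), so verify them against the loaded data.

```python
from datasets import load_dataset

# Sketch: load the CARES corpus and look at the three label granularities
# (sub-codes, codes, chapters) attached to one radiological report.
cares = load_dataset("chizhikchi/CARES")

report = cares["train"][0]
print(report["full_text"][:200])              # the anonymised report text
print("ICD-10 sub-codes:", report["icd10"])   # e.g. the per-report sub-code annotations
print("ICD-10 codes:    ", report["general"]) # sub-codes mapped to their parent codes
print("Chapters:        ", report["chapters"])
```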
chizhikchi/CARES
[ "task_categories:text-classification", "annotations_creators:expert-generated", "language_creators:expert-generated", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:es", "license:afl-3.0", "radiology", "biomedicine", "ICD-10", "region:us" ]
2022-12-09T12:13:39+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["expert-generated"], "language": ["es"], "license": ["afl-3.0"], "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "source_datasets": ["original"], "task_categories": ["text-classification"], "pretty_name": "CARES", "tags": ["radiology", "biomedicine", "ICD-10"], "dataset_info": {"features": [{"name": "iddoc", "dtype": "float64"}, {"name": "id", "dtype": "int64"}, {"name": "full_text", "dtype": "string"}, {"name": "icd10", "sequence": "string"}, {"name": "general", "sequence": "string"}, {"name": "chapters", "sequence": "int64"}, {"name": "area", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 3377631, "num_examples": 2253}, {"name": "test", "num_bytes": 1426962, "num_examples": 966}], "download_size": 2291080, "dataset_size": 4804593}}
2022-12-09T12:22:08+00:00
[]
[ "es" ]
TAGS #task_categories-text-classification #annotations_creators-expert-generated #language_creators-expert-generated #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-Spanish #license-afl-3.0 #radiology #biomedicine #ICD-10 #region-us
# CARES - A Corpus of Anonymised Radiological Evidences in Spanish CARES is a high-quality text resource manually labeled with ICD-10 codes and reviewed by radiologists. These types of resources are essential for developing automatic text classification tools as they are necessary for training and fine-tuning our computational systems. The CARES corpus has been manually annotated using the ICD-10 ontology, which stands for for the 10th version of the International Classification of Diseases. For each radiological report, a minimum of one code and a maximum of 9 codes were assigned, while the average number of codes per text is 2.15 with the standard deviation of 1.12. The corpus was additionally preprocessed in order to make its format coherent with the automatic text classification task. Considering the hierarchical structure of the ICD-10 ontology, each sub-code was mapped to its respective code and chapter, obtaining two new sets of labels for each report. The entire CARES collection contains 6,907 sub-code annotations among the 3,219 radiologic reports. There are 223 unique ICD-10 sub-codes within the annotations, which were mapped to 156 unique ICD-10 codes and 16 unique chapters of the cited ontology. As for the dataset train and test subsets, a stratified split was performed in order to guarantee that the number of labels in the test data is representative.
[ "# CARES - A Corpus of Anonymised Radiological Evidences in Spanish \n\nCARES is a high-quality text resource manually labeled with ICD-10 codes and reviewed by radiologists. These types of resources are essential for developing automatic text classification tools as they are necessary for training and fine-tuning our computational systems. \n\nThe CARES corpus has been manually annotated using the ICD-10 ontology, which stands for for the 10th version of the International Classification of Diseases. For each radiological report, a minimum of one code and a maximum of 9 codes were assigned, while the average number of codes per text is 2.15 with the standard deviation of 1.12.\n\nThe corpus was additionally preprocessed in order to make its format coherent with the automatic text classification task. Considering the hierarchical structure of the ICD-10 ontology, each sub-code was mapped to its respective code and chapter, obtaining two new sets of labels for each report. The entire CARES collection contains 6,907 sub-code annotations among the 3,219 radiologic reports. There are 223 unique ICD-10 sub-codes within the annotations, which were mapped to 156 unique ICD-10 codes and 16 unique chapters of the cited ontology.\n\n\nAs for the dataset train and test subsets, a stratified split was performed in order to guarantee that the number of labels in the test data is representative." ]
[ "TAGS\n#task_categories-text-classification #annotations_creators-expert-generated #language_creators-expert-generated #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-original #language-Spanish #license-afl-3.0 #radiology #biomedicine #ICD-10 #region-us \n", "# CARES - A Corpus of Anonymised Radiological Evidences in Spanish \n\nCARES is a high-quality text resource manually labeled with ICD-10 codes and reviewed by radiologists. These types of resources are essential for developing automatic text classification tools as they are necessary for training and fine-tuning our computational systems. \n\nThe CARES corpus has been manually annotated using the ICD-10 ontology, which stands for for the 10th version of the International Classification of Diseases. For each radiological report, a minimum of one code and a maximum of 9 codes were assigned, while the average number of codes per text is 2.15 with the standard deviation of 1.12.\n\nThe corpus was additionally preprocessed in order to make its format coherent with the automatic text classification task. Considering the hierarchical structure of the ICD-10 ontology, each sub-code was mapped to its respective code and chapter, obtaining two new sets of labels for each report. The entire CARES collection contains 6,907 sub-code annotations among the 3,219 radiologic reports. There are 223 unique ICD-10 sub-codes within the annotations, which were mapped to 156 unique ICD-10 codes and 16 unique chapters of the cited ontology.\n\n\nAs for the dataset train and test subsets, a stratified split was performed in order to guarantee that the number of labels in the test data is representative." ]
bdebcf0d2aff9d20459c8ce8a2d4f2da45438bcc
# Dataset Card for "dataset-identities-v-1.4" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
SDbiaseval/identities-sd-1.4
[ "region:us" ]
2022-12-09T12:14:03+00:00
{"dataset_info": {"features": [{"name": "ethnicity", "dtype": "string"}, {"name": "gender", "dtype": "string"}, {"name": "no", "dtype": "int32"}, {"name": "image_path", "dtype": "string"}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 27230671.0, "num_examples": 680}], "download_size": 27136582, "dataset_size": 27230671.0}}
2023-01-26T22:36:11+00:00
[]
[]
TAGS #region-us
# Dataset Card for "dataset-identities-v-1.4" More Information needed
[ "# Dataset Card for \"dataset-identities-v-1.4\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"dataset-identities-v-1.4\"\n\nMore Information needed" ]
f43aae2a3acb390f24629b419927d9fa91ec7207
# Dataset Card for "gal_yair_8300_10x10_fixed" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
galman33/gal_yair_8300_10x10_fixed
[ "region:us" ]
2022-12-09T13:08:58+00:00
{"dataset_info": {"features": [{"name": "lat", "dtype": "float64"}, {"name": "lon", "dtype": "float64"}, {"name": "country_code", "dtype": "string"}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 3019225.5, "num_examples": 8300}], "download_size": 2658915, "dataset_size": 3019225.5}}
2022-12-09T13:09:02+00:00
[]
[]
TAGS #region-us
# Dataset Card for "gal_yair_8300_10x10_fixed" More Information needed
[ "# Dataset Card for \"gal_yair_8300_10x10_fixed\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"gal_yair_8300_10x10_fixed\"\n\nMore Information needed" ]
7e0e9bf8ec61616630135d5a0355aaedac0f0e28
# Dataset Card for "kuvshinov_art_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
WiNE-iNEFF/kuvshinov_art_dataset
[ "region:us" ]
2022-12-09T13:31:10+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 201634655.912, "num_examples": 1802}], "download_size": 239927533, "dataset_size": 201634655.912}}
2022-12-09T13:31:20+00:00
[]
[]
TAGS #region-us
# Dataset Card for "kuvshinov_art_dataset" More Information needed
[ "# Dataset Card for \"kuvshinov_art_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"kuvshinov_art_dataset\"\n\nMore Information needed" ]
1206803d2b5fe6eb31d569b00bae108629a82a1e
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: mlxen/electra-smallcase-squad * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@ob](https://huggingface.co/ob) for evaluating this model.
autoevaluate/autoeval-eval-squad_v2-squad_v2-d6c6f4-2395874932
[ "autotrain", "evaluation", "region:us" ]
2022-12-09T13:51:55+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["squad_v2"], "eval_info": {"task": "extractive_question_answering", "model": "mlxen/electra-smallcase-squad", "metrics": [], "dataset_name": "squad_v2", "dataset_config": "squad_v2", "dataset_split": "validation", "col_mapping": {"context": "context", "question": "question", "answers-text": "answers.text", "answers-answer_start": "answers.answer_start"}}}
2022-12-09T13:54:32+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Question Answering * Model: mlxen/electra-smallcase-squad * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @ob for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Question Answering\n* Model: mlxen/electra-smallcase-squad\n* Dataset: squad_v2\n* Config: squad_v2\n* Split: validation\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @ob for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Question Answering\n* Model: mlxen/electra-smallcase-squad\n* Dataset: squad_v2\n* Config: squad_v2\n* Split: validation\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @ob for evaluating this model." ]
6bfde83873caf828d9554979e0cb9c5103725da4
Dataset of a few Marsupilami pictures PS/ I used git+ssh to push this commit to the Hub 🔥 Thank you @XCiD and @sbrandeis
julien-c/autotrain-dreambooth-marsupilami-data
[ "task_categories:image-to-image", "size_categories:n<1K", "license:openrail", "marsupilami", "not-for-all-eyes", "region:us" ]
2022-12-09T15:10:39+00:00
{"license": "openrail", "size_categories": ["n<1K"], "task_categories": ["image-to-image"], "tags": ["marsupilami", "not-for-all-eyes"]}
2023-04-06T10:39:49+00:00
[]
[]
TAGS #task_categories-image-to-image #size_categories-n<1K #license-openrail #marsupilami #not-for-all-eyes #region-us
Dataset of a few Marsupilami pictures PS/ I used git+ssh to push this commit to the Hub Thank you @XCiD and @sbrandeis
[]
[ "TAGS\n#task_categories-image-to-image #size_categories-n<1K #license-openrail #marsupilami #not-for-all-eyes #region-us \n" ]
f58bc41a905185f754721929b0910985732ba0ed
# Dataset Card for "speech2text-dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
hts98/speech2text-dataset
[ "region:us" ]
2022-12-09T15:27:22+00:00
{"dataset_info": {"features": [{"name": "input_features", "sequence": {"sequence": "float32"}}, {"name": "labels", "sequence": "int64"}], "splits": [{"name": "train", "num_bytes": 32013193208, "num_examples": 33322}, {"name": "test", "num_bytes": 5950633136, "num_examples": 6194}], "download_size": 7460647938, "dataset_size": 37963826344}}
2022-12-09T15:43:16+00:00
[]
[]
TAGS #region-us
# Dataset Card for "speech2text-dataset" More Information needed
[ "# Dataset Card for \"speech2text-dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"speech2text-dataset\"\n\nMore Information needed" ]
7fa1103c3606b2d5603552f544d41b6811d532bb
# Dataset Card for "c_corpus_br_finetuning_language_model_deberta" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
rosimeirecosta/c_corpus_br_finetuning_language_model_deberta
[ "region:us" ]
2022-12-09T17:01:17+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 36065567, "num_examples": 228736}, {"name": "validation", "num_bytes": 9012563, "num_examples": 57184}], "download_size": 0, "dataset_size": 45078130}}
2022-12-15T18:22:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "c_corpus_br_finetuning_language_model_deberta" More Information needed
[ "# Dataset Card for \"c_corpus_br_finetuning_language_model_deberta\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"c_corpus_br_finetuning_language_model_deberta\"\n\nMore Information needed" ]
924a029aac69bfdd8d34598f27f2286550b1c00b
# Dataset Card for "gal_yair_166000_256x256_fixed" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
galman33/gal_yair_166000_256x256_fixed
[ "region:us" ]
2022-12-09T18:02:01+00:00
{"dataset_info": {"features": [{"name": "lat", "dtype": "float64"}, {"name": "lon", "dtype": "float64"}, {"name": "country_code", "dtype": {"class_label": {"names": {"0": "ad", "1": "ae", "2": "al", "3": "aq", "4": "ar", "5": "au", "6": "bd", "7": "be", "8": "bg", "9": "bm", "10": "bo", "11": "br", "12": "bt", "13": "bw", "14": "ca", "15": "ch", "16": "cl", "17": "co", "18": "cz", "19": "de", "20": "dk", "21": "ec", "22": "ee", "23": "es", "24": "fi", "25": "fr", "26": "gb", "27": "gh", "28": "gl", "29": "gr", "30": "gt", "31": "hk", "32": "hr", "33": "hu", "34": "id", "35": "ie", "36": "il", "37": "is", "38": "it", "39": "ix", "40": "jp", "41": "kg", "42": "kh", "43": "kr", "44": "la", "45": "lk", "46": "ls", "47": "lt", "48": "lu", "49": "lv", "50": "me", "51": "mg", "52": "mk", "53": "mn", "54": "mo", "55": "mt", "56": "mx", "57": "my", "58": "nl", "59": "no", "60": "nz", "61": "pe", "62": "ph", "63": "pl", "64": "pt", "65": "ro", "66": "rs", "67": "ru", "68": "se", "69": "sg", "70": "si", "71": "sk", "72": "sn", "73": "sz", "74": "th", "75": "tn", "76": "tr", "77": "tw", "78": "ua", "79": "ug", "80": "us", "81": "uy", "82": "za"}}}}, {"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 16156275005.0, "num_examples": 166000}], "download_size": 16115168331, "dataset_size": 16156275005.0}}
2022-12-09T18:18:35+00:00
[]
[]
TAGS #region-us
# Dataset Card for "gal_yair_166000_256x256_fixed" More Information needed
[ "# Dataset Card for \"gal_yair_166000_256x256_fixed\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"gal_yair_166000_256x256_fixed\"\n\nMore Information needed" ]
f4b16ad198fda72263aa9b2989f54561056856ad
# Dataset Card for althingi_asr ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Data](#data) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Additional Information](#additional-information) - [Other Known Limitations](#other-known-limitations) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** Althingi Parliamentary Speech - **Repository:** [LDC](https://catalog.ldc.upenn.edu/LDC2021S01) - **Paper:** [Building an ASR corpus using Althingi’s Parliamentary Speeches](https://www.researchgate.net/profile/Jon-Gudnason/publication/319185185_Building_an_ASR_Corpus_Using_Althingi's_Parliamentary_Speeches/links/5d1dbdd3a6fdcc2462bdda0f/Building-an-ASR-Corpus-Using-Althingis-Parliamentary-Speeches.pdf) - **Point of Contact:** [Jón Guðnason](mailto:[email protected]) ### Dataset Summary Althingi Parliamentary Speech consists of approximately 542 hours of recorded speech from Althingi, the Icelandic Parliament, along with corresponding transcripts, a pronunciation dictionary and two language models. Speeches date from 2005-2016. This dataset was collected in 2016 by the ASR for Althingi project at [Reykjavik University](https://en.ru.is/) in collaboration with the Althingi speech department. The purpose of that project was to develop an ASR (automatic speech recognition) system for parliamentary speech to replace the procedure of manually transcribing performed speeches. ### Data The mean speech length is six minutes, with speeches ranging from under one minute to around thirty minutes. The corpus features 197 speakers (105 male, 92 female) and is split into training, development and evaluation sets. The language models are of two types: a pruned trigram model, used in decoding, and an unpruned constant ARPA 5-gram model, used for re-scoring decoding results. Audio data is presented as single channel 16-bit mp3 files; the majority of these files have a sample rate of 44.1 kHz. Transcripts and other text data are plain text encoded in UTF-8. ### Example Usage The Althingi Corpus is divided into 3 splits: train, validation and test. To load the full dataset: ```python from datasets import load_dataset althingi_asr = load_dataset("language-and-voice-lab/althingi_asr") ``` To load a specific split (for example, the validation split) do: ```python from datasets import load_dataset althingi_asr = load_dataset("language-and-voice-lab/althingi_asr",split="validation") ``` ### Supported Tasks automatic-speech-recognition: The dataset can be used to train a model for Automatic Speech Recognition (ASR). The model is presented with an audio file and asked to transcribe the audio file to written text. The most common evaluation metric is the word error rate (WER). ### Languages The audio is in Icelandic. 
## Dataset Structure ### Data Instances ```python { 'audio_id': 'rad20160602T000219_00083', 'audio': { 'path': '/home/inga/.cache/HuggingFace/datasets/downloads/extracted/52607f9db9e3394263070575d29323213b99a06a996c43d4fe75bca115827d12/dev/EyH/rad20160602T000219/rad20160602T000219_00083.flac', 'array': array([-0.01098633, -0.01489258, -0.01040649, ..., 0.00314331, 0.00186157, 0.00527954], dtype=float32), 'sampling_rate': 16000 }, 'speaker_id': 'rad20160602T000219', 'duration': 12.67199993133545, 'normalized_text': 'og má svo sannarlega segja að landslagið sé nokkuð breytt frá því þrjú komma tvö prósent þjóðarinnar töldust vera innflytjendur árið tvö þúsund en nú teljast tíu prósent þjóðarinnar vera fyrsta og önnur kynslóð innflytjenda' } ``` ### Data Fields * `audio_id` (string) - id of audio segment * `audio` (datasets.Audio) - a dictionary containing the path to the audio, the decoded audio array, and the sampling rate. In non-streaming mode (default), the path points to the locally extracted audio. In streaming mode, the path is the relative path of an audio inside its archive (as files are not downloaded and extracted locally). * `speaker_id` (string) - id of speaker * `duration` (float32) - duration of the audio file in seconds. * `normalized_text` (string) - normalized audio segment transcription. ### Data Splits The corpus is split into train, evaluation, and test portions. The lengths of the portions are: train = 514h29m, test = 13h52m, evaluation = 14h02m. To load a specific portion please see the above section "Example Usage". ## Additional Information ### Other Known Limitations "Althingi Parliamentary Speech" by the Language and Voice Laboratory (LVL) at Reykjavik University is licensed under a Creative Commons Attribution 4.0 International (CC BY 4.0) License with the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ### Licensing Information [CC-BY-4.0](https://creativecommons.org/licenses/by/4.0/) ### Citation Information ``` @misc{helgadottiralthingi2021, title={Althingi Parliamentary Speech}, ldc_catalog_no={LDC2021S01}, DOI={https://doi.org/10.35111/695b-6697}, author={Helgadóttir, Inga Rún and Kjaran, Róbert and Nikulásdóttir, Anna Björk and Guðnason, Jón}, publisher={Reykjavík University}, journal={Linguistic Data Consortium, Philadelphia}, year={2021}, url={https://catalog.ldc.upenn.edu/LDC2021S01}, } ``` ### Contributions This project was made possible through the support of Althingi’s information and publications departments. The authors would like to thank Solveig K. Jónsdóttir, Þorbjörg Árnadóttir and Ingvi Stígsson for their valuable help.
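Building on the fields listed above, a small usage sketch (using only the repository id and the `duration` column already documented in this card) that checks the size of a split against the reported lengths:

```python
from datasets import load_dataset

# Sketch: verify the reported length of the validation portion by summing `duration`.
althingi_asr = load_dataset("language-and-voice-lab/althingi_asr", split="validation")

total_seconds = sum(althingi_asr["duration"])
print(f"validation: {total_seconds / 3600:.2f} hours over {len(althingi_asr)} segments")
```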
language-and-voice-lab/althingi_asr
[ "task_categories:automatic-speech-recognition", "annotations_creators:machine-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:100K<n<1M", "source_datasets:original", "language:is", "license:cc-by-4.0", "icelandic", "parliamentary speech", "parlament", "althingi", "region:us" ]
2022-12-09T20:33:28+00:00
{"annotations_creators": ["machine-generated"], "language_creators": ["found"], "language": ["is"], "license": ["cc-by-4.0"], "multilinguality": ["monolingual"], "size_categories": ["100K<n<1M"], "source_datasets": ["original"], "task_categories": ["automatic-speech-recognition"], "task_ids": [], "pretty_name": "Althingi Parliamentary Speech", "tags": ["icelandic", "parliamentary speech", "parlament", "althingi"]}
2023-02-24T22:14:42+00:00
[]
[ "is" ]
TAGS #task_categories-automatic-speech-recognition #annotations_creators-machine-generated #language_creators-found #multilinguality-monolingual #size_categories-100K<n<1M #source_datasets-original #language-Icelandic #license-cc-by-4.0 #icelandic #parliamentary speech #parlament #althingi #region-us
# Dataset Card for althingi_asr ## Table of Contents - Dataset Description - Dataset Summary - Data - Supported Tasks - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Additional Information - Other Known Limitations - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: Althingi Parliamentary Speech - Repository: LDC - Paper: Building an ASR corpus using Althingi’s Parliamentary Speeches - Point of Contact: Jón Guðnason ### Dataset Summary Althingi Parliamentary Speech consists of approximately 542 hours of recorded speech from Althingi, the Icelandic Parliament, along with corresponding transcripts, a pronunciation dictionary and two language models. Speeches date from 2005-2016. This dataset was collected in 2016 by the ASR for Althingi project at Reykjavik University in collaboration with the Althingi speech department. The purpose of that project was to develop an ASR (automatic speech recognition) system for parliamentary speech to replace the procedure of manually transcribing performed speeches. ### Data The mean speech length is six minutes, with speeches ranging from under one minute to around thirty minutes. The corpus features 197 speakers (105 male, 92 female) and is split into training, development and evaluation sets. The language models are of two types: a pruned trigram model, used in decoding, and an unpruned constant ARPA 5-gram model, used for re-scoring decoding results. Audio data is presented as single channel 16-bit mp3 files; the majority of these files have a sample rate of 44.1 kHz. Transcripts and other text data are plain text encoded in UTF-8. ### Example Usage The Althingi Corpus is divided in 3 splits: train, validation and test. To load a specific split pass its name as a config name: To load an specific split (for example, the validation split) do: ### Supported Tasks automatic-speech-recognition: The dataset can be used to train a model for Automatic Speech Recognition (ASR). The model is presented with an audio file and asked to transcribe the audio file to written text. The most common evaluation metric is the word error rate (WER). ### Languages The audio is in Icelandic. ## Dataset Structure ### Data Instances ### Data Fields * 'audio_id' (string) - id of audio segment * 'audio' (datasets.Audio) - a dictionary containing the path to the audio, the decoded audio array, and the sampling rate. In non-streaming mode (default), the path points to the locally extracted audio. In streaming mode, the path is the relative path of an audio inside its archive (as files are not downloaded and extracted locally). * 'speaker_id' (string) - id of speaker * 'duration' (float32) - duration of the audio file in seconds. * 'normalized_text' (string) - normalized audio segment transcription. ### Data Splits The corpus is split into train, evaluation, and test portions. Lenghts of every portion are: train = 514h29m, test = 13h52m, evaluation=14h02m. To load an specific portion please see the above section "Example Usage". ## Additional Information ### Other Known Limitations "Althingi Parliamentary Speech" by the Language and Voice Laboratory (LVL) at the Reykjavik University is licensed under a Creative Commons Attribution 4.0 International (CC BY 4.0) License with the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
### Licensing Information CC-BY-4.0 ### Contributions This project was made possible through the support of Althingi’s information and publications departments. The authors would like to thank Solveig K. Jónsdóttir, Þorbjörg Árnadóttir and Ingvi Stígsson for their valuable help.
[ "# Dataset Card for althingi_asr", "## Table of Contents\n- Dataset Description\n - Dataset Summary\n - Data\n - Supported Tasks\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Additional Information\n - Other Known Limitations\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n- Homepage: Althingi Parliamentary Speech\n- Repository: LDC\n- Paper: Building an ASR corpus using Althingi’s Parliamentary Speeches\n- Point of Contact: Jón Guðnason", "### Dataset Summary\n\nAlthingi Parliamentary Speech consists of approximately 542 hours of recorded speech from Althingi, the Icelandic Parliament, along with corresponding transcripts, a pronunciation dictionary and two language models. Speeches date from 2005-2016.\n\nThis dataset was collected in 2016 by the ASR for Althingi project at Reykjavik University in collaboration with the Althingi speech department. The purpose of that project was to develop an ASR (automatic speech recognition) system for parliamentary speech to replace the procedure of manually transcribing performed speeches.", "### Data\n\nThe mean speech length is six minutes, with speeches ranging from under one minute to around thirty minutes. The corpus features 197 speakers (105 male, 92 female) and is split into training, development and evaluation sets. The language models are of two types: a pruned trigram model, used in decoding, and an unpruned constant ARPA 5-gram model, used for re-scoring decoding results.\n\nAudio data is presented as single channel 16-bit mp3 files; the majority of these files have a sample rate of 44.1 kHz. Transcripts and other text data are plain text encoded in UTF-8.", "### Example Usage\nThe Althingi Corpus is divided in 3 splits: train, validation and test. To load a specific split pass its name as a config name:\n\nTo load an specific split (for example, the validation split) do:", "### Supported Tasks\nautomatic-speech-recognition: The dataset can be used to train a model for Automatic Speech Recognition (ASR). The model is presented with an audio file and asked to transcribe the audio file to written text. The most common evaluation metric is the word error rate (WER).", "### Languages\nThe audio is in Icelandic.", "## Dataset Structure", "### Data Instances", "### Data Fields\n* 'audio_id' (string) - id of audio segment\n* 'audio' (datasets.Audio) - a dictionary containing the path to the audio, the decoded audio array, and the sampling rate. In non-streaming mode (default), the path points to the locally extracted audio. In streaming mode, the path is the relative path of an audio inside its archive (as files are not downloaded and extracted locally).\n* 'speaker_id' (string) - id of speaker\n* 'duration' (float32) - duration of the audio file in seconds.\n* 'normalized_text' (string) - normalized audio segment transcription.", "### Data Splits\nThe corpus is split into train, evaluation, and test portions. 
Lenghts of every portion are: train = 514h29m, test = 13h52m, evaluation=14h02m.\n\nTo load an specific portion please see the above section \"Example Usage\".", "## Additional Information", "### Other Known Limitations\n\"Althingi Parliamentary Speech\" by the Language and Voice Laboratory (LVL) at the Reykjavik University is licensed under a Creative Commons Attribution 4.0 International (CC BY 4.0) License with the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.", "### Licensing Information\nCC-BY-4.0", "### Contributions\n\nThis project was made possible through the support of Althingi’s information and publications departments. The authors would like to thank Solveig K. Jónsdóttir, Þorbjörg Árnadóttir and Ingvi Stígsson for their valuable help." ]
[ "TAGS\n#task_categories-automatic-speech-recognition #annotations_creators-machine-generated #language_creators-found #multilinguality-monolingual #size_categories-100K<n<1M #source_datasets-original #language-Icelandic #license-cc-by-4.0 #icelandic #parliamentary speech #parlament #althingi #region-us \n", "# Dataset Card for althingi_asr", "## Table of Contents\n- Dataset Description\n - Dataset Summary\n - Data\n - Supported Tasks\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Additional Information\n - Other Known Limitations\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n- Homepage: Althingi Parliamentary Speech\n- Repository: LDC\n- Paper: Building an ASR corpus using Althingi’s Parliamentary Speeches\n- Point of Contact: Jón Guðnason", "### Dataset Summary\n\nAlthingi Parliamentary Speech consists of approximately 542 hours of recorded speech from Althingi, the Icelandic Parliament, along with corresponding transcripts, a pronunciation dictionary and two language models. Speeches date from 2005-2016.\n\nThis dataset was collected in 2016 by the ASR for Althingi project at Reykjavik University in collaboration with the Althingi speech department. The purpose of that project was to develop an ASR (automatic speech recognition) system for parliamentary speech to replace the procedure of manually transcribing performed speeches.", "### Data\n\nThe mean speech length is six minutes, with speeches ranging from under one minute to around thirty minutes. The corpus features 197 speakers (105 male, 92 female) and is split into training, development and evaluation sets. The language models are of two types: a pruned trigram model, used in decoding, and an unpruned constant ARPA 5-gram model, used for re-scoring decoding results.\n\nAudio data is presented as single channel 16-bit mp3 files; the majority of these files have a sample rate of 44.1 kHz. Transcripts and other text data are plain text encoded in UTF-8.", "### Example Usage\nThe Althingi Corpus is divided in 3 splits: train, validation and test. To load a specific split pass its name as a config name:\n\nTo load an specific split (for example, the validation split) do:", "### Supported Tasks\nautomatic-speech-recognition: The dataset can be used to train a model for Automatic Speech Recognition (ASR). The model is presented with an audio file and asked to transcribe the audio file to written text. The most common evaluation metric is the word error rate (WER).", "### Languages\nThe audio is in Icelandic.", "## Dataset Structure", "### Data Instances", "### Data Fields\n* 'audio_id' (string) - id of audio segment\n* 'audio' (datasets.Audio) - a dictionary containing the path to the audio, the decoded audio array, and the sampling rate. In non-streaming mode (default), the path points to the locally extracted audio. In streaming mode, the path is the relative path of an audio inside its archive (as files are not downloaded and extracted locally).\n* 'speaker_id' (string) - id of speaker\n* 'duration' (float32) - duration of the audio file in seconds.\n* 'normalized_text' (string) - normalized audio segment transcription.", "### Data Splits\nThe corpus is split into train, evaluation, and test portions. 
Lenghts of every portion are: train = 514h29m, test = 13h52m, evaluation=14h02m.\n\nTo load an specific portion please see the above section \"Example Usage\".", "## Additional Information", "### Other Known Limitations\n\"Althingi Parliamentary Speech\" by the Language and Voice Laboratory (LVL) at the Reykjavik University is licensed under a Creative Commons Attribution 4.0 International (CC BY 4.0) License with the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.", "### Licensing Information\nCC-BY-4.0", "### Contributions\n\nThis project was made possible through the support of Althingi’s information and publications departments. The authors would like to thank Solveig K. Jónsdóttir, Þorbjörg Árnadóttir and Ingvi Stígsson for their valuable help." ]
59843ff2c0685fc3cb96d0649a272c6edf04ef59
# Dataset Card for "custom_squad_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mlxen/custom_squad_dataset
[ "region:us" ]
2022-12-09T20:50:05+00:00
{"dataset_info": {"features": [{"name": "number", "dtype": "int64"}, {"name": "id", "dtype": "string"}, {"name": "context_no", "dtype": "int64"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answer", "dtype": "string"}, {"name": "answer_start", "dtype": "int64"}, {"name": "answers", "struct": [{"name": "answer_start", "sequence": "int64"}, {"name": "text", "sequence": "string"}]}], "splits": [{"name": "train", "num_bytes": 139122, "num_examples": 97}], "download_size": 0, "dataset_size": 139122}}
2022-12-09T20:56:36+00:00
[]
[]
TAGS #region-us
# Dataset Card for "custom_squad_dataset" More Information needed
[ "# Dataset Card for \"custom_squad_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"custom_squad_dataset\"\n\nMore Information needed" ]
bc4b7491f5f275abfb43b0c0188c826115389adf
# Dataset Card for "eqasc_data" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Sandipan1994/eqasc_data
[ "region:us" ]
2022-12-09T21:12:54+00:00
{"dataset_info": {"features": [{"name": "sentence", "dtype": "string"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 11336048, "num_examples": 84964}, {"name": "validation", "num_bytes": 1296119, "num_examples": 9710}, {"name": "test", "num_bytes": 1259181, "num_examples": 9630}], "download_size": 4494168, "dataset_size": 13891348}}
2022-12-10T00:30:48+00:00
[]
[]
TAGS #region-us
# Dataset Card for "eqasc_data" More Information needed
[ "# Dataset Card for \"eqasc_data\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"eqasc_data\"\n\nMore Information needed" ]
009699a5e429ac4fd9801df5e4aeceff74a9dbab
# Dataset Card for "squad_adversarial_train_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mlxen/squad_adversarial_train_dataset
[ "region:us" ]
2022-12-09T23:22:22+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answers", "sequence": [{"name": "text", "dtype": "string"}, {"name": "answer_start", "dtype": "int32"}]}], "splits": [{"name": "train", "num_bytes": 79446510, "num_examples": 87599}], "download_size": 14480010, "dataset_size": 79446510}}
2022-12-09T23:22:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for "squad_adversarial_train_dataset" More Information needed
[ "# Dataset Card for \"squad_adversarial_train_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"squad_adversarial_train_dataset\"\n\nMore Information needed" ]
c2525831eddb507aabf514178ad7afa3751bf6a0
# Dataset Card for "squad_adversarial_validation_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mlxen/squad_adversarial_validation_dataset
[ "region:us" ]
2022-12-09T23:22:27+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answers", "sequence": [{"name": "text", "dtype": "string"}, {"name": "answer_start", "dtype": "int32"}]}], "splits": [{"name": "validation", "num_bytes": 10484818, "num_examples": 10570}], "download_size": 1825207, "dataset_size": 10484818}}
2022-12-09T23:22:32+00:00
[]
[]
TAGS #region-us
# Dataset Card for "squad_adversarial_validation_dataset" More Information needed
[ "# Dataset Card for \"squad_adversarial_validation_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"squad_adversarial_validation_dataset\"\n\nMore Information needed" ]
2ca6e6b6b845830fe614a0795898bd0f92d764ae
# Dataset Card for "squad_contrasting_validation_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mlxen/squad_contrasting_validation_dataset
[ "region:us" ]
2022-12-10T03:16:28+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answers", "sequence": [{"name": "text", "dtype": "string"}, {"name": "answer_start", "dtype": "int32"}]}], "splits": [{"name": "validation", "num_bytes": 10482482, "num_examples": 10570}], "download_size": 1835309, "dataset_size": 10482482}}
2022-12-10T04:26:19+00:00
[]
[]
TAGS #region-us
# Dataset Card for "squad_contrasting_validation_dataset" More Information needed
[ "# Dataset Card for \"squad_contrasting_validation_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"squad_contrasting_validation_dataset\"\n\nMore Information needed" ]
026e02701cce00d794751ccab575c411481ed4e6
# Dataset Card for "squad_contrasting_training_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mlxen/squad_contrasting_training_dataset
[ "region:us" ]
2022-12-10T04:26:20+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answers", "sequence": [{"name": "text", "dtype": "string"}, {"name": "answer_start", "dtype": "int32"}]}], "splits": [{"name": "train", "num_bytes": 79421752, "num_examples": 87599}], "download_size": 14482954, "dataset_size": 79421752}}
2022-12-10T04:26:26+00:00
[]
[]
TAGS #region-us
# Dataset Card for "squad_contrasting_training_dataset" More Information needed
[ "# Dataset Card for \"squad_contrasting_training_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"squad_contrasting_training_dataset\"\n\nMore Information needed" ]
f16ad58df63176c1309b0475670b150a220f6249
# Dataset Card for "olm-CC-MAIN-2022-40-sampling-ratio-0.15894621295-suffix-array-dedup" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Tristan/olm-CC-MAIN-2022-40-sampling-ratio-0.15894621295-suffix-array-dedup
[ "region:us" ]
2022-12-10T06:37:03+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "url", "dtype": "string"}, {"name": "crawl_timestamp", "dtype": "float64"}], "splits": [{"name": "train", "num_bytes": 33979057509.213223, "num_examples": 7520438}], "download_size": 8573685687, "dataset_size": 33979057509.213223}}
2022-12-10T06:56:58+00:00
[]
[]
TAGS #region-us
# Dataset Card for "olm-CC-MAIN-2022-40-sampling-ratio-0.15894621295-suffix-array-dedup" More Information needed
[ "# Dataset Card for \"olm-CC-MAIN-2022-40-sampling-ratio-0.15894621295-suffix-array-dedup\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"olm-CC-MAIN-2022-40-sampling-ratio-0.15894621295-suffix-array-dedup\"\n\nMore Information needed" ]
9d392462b02175db1047497071a1c794a17b4339
# AutoTrain Dataset for project: multiconer2-test1 ## Dataset Description This dataset has been automatically processed by AutoTrain for project multiconer2-test1. ### Languages The BCP-47 code for the dataset's language is unk. ## Dataset Structure ### Data Instances A sample from this dataset looks as follows: ```json [ { "tokens": [ "robert", "gottschalk", "1939", "academy", "award", "winner", "and", "founder", "of", "panavision" ], "tags": [ 18, 49, 62, 29, 60, 62, 62, 62, 62, 16 ] }, { "tokens": [ "during", "the", "reign", "of", "the", "tongzhi", "emperor", "(", "r", ".", "1861", "\u2013", "1875", ")", ":" ], "tags": [ 62, 62, 62, 62, 62, 18, 49, 62, 62, 62, 62, 62, 62, 62, 62 ] } ] ``` ### Dataset Fields The dataset has the following fields (also called "features"): ```json { "tokens": "Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)", "tags": "Sequence(feature=ClassLabel(num_classes=63, names=['B-AnatomicalStructure', 'B-ArtWork', 'B-Artist', 'B-Athlete', 'B-CarManufacturer', 'B-Cleric', 'B-Clothing', 'B-Disease', 'B-Drink', 'B-Facility', 'B-Food', 'B-HumanSettlement', 'B-MedicalProcedure', 'B-Medication/Vaccine', 'B-MusicalGRP', 'B-MusicalWork', 'B-ORG', 'B-OtherLOC', 'B-OtherPER', 'B-OtherPROD', 'B-Politician', 'B-PrivateCorp', 'B-PublicCorp', 'B-Scientist', 'B-Software', 'B-SportsGRP', 'B-SportsManager', 'B-Station', 'B-Vehicle', 'B-VisualWork', 'B-WrittenWork', 'I-AnatomicalStructure', 'I-ArtWork', 'I-Artist', 'I-Athlete', 'I-CarManufacturer', 'I-Cleric', 'I-Clothing', 'I-Disease', 'I-Drink', 'I-Facility', 'I-Food', 'I-HumanSettlement', 'I-MedicalProcedure', 'I-Medication/Vaccine', 'I-MusicalGRP', 'I-MusicalWork', 'I-ORG', 'I-OtherLOC', 'I-OtherPER', 'I-OtherPROD', 'I-Politician', 'I-PrivateCorp', 'I-PublicCorp', 'I-Scientist', 'I-Software', 'I-SportsGRP', 'I-SportsManager', 'I-Station', 'I-Vehicle', 'I-VisualWork', 'I-WrittenWork', 'O'], id=None), length=-1, id=None)" } ``` ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follow: | Split name | Num samples | | ------------ | ------------------- | | train | 2169 | | valid | 829 |
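A brief sketch of reading the tag names back from the ClassLabel feature described above. The Hub repo id used here is an assumption (the AutoTrain data repo for this project, which may be private), so adjust it to wherever your copy of the processed data lives.

```python
from datasets import load_dataset

# Assumed repo id for the AutoTrain-processed data; adjust if your copy lives elsewhere.
ner_data = load_dataset("Nenma/autotrain-data-multiconer2-test1", split="train")

tag_feature = ner_data.features["tags"].feature  # ClassLabel with the 63 names listed above
example = ner_data[0]
for token, tag_id in zip(example["tokens"], example["tags"]):
    print(f"{token}\t{tag_feature.int2str(tag_id)}")
```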
Nenma/autotrain-data-multiconer2-test1
[ "task_categories:token-classification", "region:us" ]
2022-12-10T12:08:47+00:00
{"task_categories": ["token-classification"]}
2022-12-10T14:01:19+00:00
[]
[]
TAGS #task_categories-token-classification #region-us
AutoTrain Dataset for project: multiconer2-test1 ================================================ Dataset Description ------------------- This dataset has been automatically processed by AutoTrain for project multiconer2-test1. ### Languages The BCP-47 code for the dataset's language is unk. Dataset Structure ----------------- ### Data Instances A sample from this dataset looks as follows: ### Dataset Fields The dataset has the following fields (also called "features"): ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follow:
[ "### Languages\n\n\nThe BCP-47 code for the dataset's language is unk.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
[ "TAGS\n#task_categories-token-classification #region-us \n", "### Languages\n\n\nThe BCP-47 code for the dataset's language is unk.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
f01d4b8229e52622a2da2973884da571051c8779
# Dataset Card for "news" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
steciuk/news
[ "region:us" ]
2022-12-10T12:32:23+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "label", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1121248, "num_examples": 6402}], "download_size": 657470, "dataset_size": 1121248}}
2022-12-10T12:32:27+00:00
[]
[]
TAGS #region-us
# Dataset Card for "news" More Information needed
[ "# Dataset Card for \"news\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"news\"\n\nMore Information needed" ]
e9195188dbd6939e10785e3ed9ee1c513731c9bc
# Dataset Card for GLENDA - The ITEC Gynecologic Laparoscopy Endometriosis Dataset ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** http://ftp.itec.aau.at/datasets/GLENDA/index.html - **Repository:** - **Paper:** [GLENDA: Gynecologic Laparoscopy Endometriosis Dataset](https://link.springer.com/chapter/10.1007/978-3-030-37734-2_36) - **Leaderboard:** - **Point of Contact:** [email protected] ### Dataset Summary GLENDA (Gynecologic Laparoscopy ENdometriosis DAtaset) comprises over 350 annotated endometriosis lesion images taken from 100+ gynecologic laparoscopy surgeries as well as over 13K unannotated non pathological images of 20+ surgeries. The dataset is purposefully created to be utilized for a variety of automatic content analysis problems in the context of Endometriosis recognition. **Usage Information (Disclaimer)** The dataset is exclusively provided for scientific research purposes and as such cannot be used commercially or for any other purpose. If any other purpose is intended, you may directly contact the originator of the videos. For additional information (including contact details), please visit [the official website](http://ftp.itec.aau.at/datasets/GLENDA/index.html). **Description** Endometriosis is a benign but potentially painful condition among women in child bearing age involving the growth of uterine-like tissue in locations outside of the uterus. Corresponding lesions can be found in various positions and severities, often in multiple instances per patient requiring a physician to determine its extent. This most frequently is accomplished by calculating its magnitude via utilizing the combination of two popular classification systems, the revised American Society for Reproductive Medicine (rASRM) and the European Enzian scores. Endometriosis can not reliably identified by laymen, therefore, the dataset has been created with the help of medical experts in the field of endometriosis treatment. **Purposes** * binary (endometriosis) classification * detection/localization **Overview** The dataset includes region-based annotations of 4 pathological endometriosis categories as well as non pathological counter example images. Annotations are created for single video frames that may be part of larger sequences comprising several consecutive frames (all showing the annotated condition). Frames can contain multiple annotations, potentially of different categories. 
Each single annotation is exported as a binary image (similar to below examples, albeit one image per annotation). # TODO: FIXME: A bit more useful info on dataset case distribution class distribution and link to original + preview link TODO: FIXME: A bit more useful info on dataset case distribution class distribution and link to original + preview link ### Supported Tasks and Leaderboards - `image_classification`: The dataset can be used for binary (no pathology / endometriosis) or multiclass image classification (No-Pathology, 6.1.1.1\_Endo-Peritoneum, 6.1.1.2\_Endo-Ovar, 6.1.1.3\_Endo-DIE, 6.1.1.4\_Endo-Uterus. These classes respectively correspond to: no visible pathology in relation to endometriosis, peritoneal endometriosis, endometriosis on ovaries, deep infiltrating endometriosis (DIE) and uterine endometriosis.). ## Dataset Structure ### Data Instances #### binary\_classification TODO DESCRIBE #### multiclass\_classification TODO DESCRIBE ## Dataset Creation ### Curation Rationale From the [official website](http://ftp.itec.aau.at/datasets/GLENDA/index.html) > The dataset is purposefully created to be utilized for a variety of automatic content analysis problems in the context of Endometriosis recognition ### Source Data #### Initial Data Collection and Normalization From the [official website](http://ftp.itec.aau.at/datasets/GLENDA/index.html) > The dataset includes region-based annotations of 4 pathological endometriosis categories as well as non pathological counter example images. Annotations are created for single video frames that may be part of larger sequences comprising several consecutive frames (all showing the annotated condition). Frames can contain multiple annotations, potentially of different categories. Each single annotation is exported as a binary image (similar to below examples, albeit one image per annotation). ### Annotations #### Annotation process From the [official website](http://ftp.itec.aau.at/datasets/GLENDA/index.html) > Corresponding lesions can be found in various positions and severities, often in multiple instances per patient requiring a physician to determine its extent. This most frequently is accomplished by calculating its magnitude via utilizing the combination of two popular classification systems, the revised American Society for Reproductive Medicine (rASRM) and the European Enzian scores. Endometriosis can not reliably identified by laymen, therefore, the dataset has been created with the help of medical experts in the field of endometriosis treatment. #### Who are the annotators? Medical experts in the field of endometriosis treatment. ### Personal and Sensitive Information [More Information Needed] ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information The dataset is exclusively provided for scientific research purposes and as such cannot be used commercially or for any other purpose. If any other purpose is intended, you may directly contact the originator of the videos, Prof. Dr. Jörg Keckstein. GLENDA is licensed under Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0, Creative Commons License) and is created as well as maintained by Distributed Multimedia Systems Group of the Institute of Information Technology (ITEC) at Alpen-Adria Universität in Klagenfurt, Austria. 
This license allows users of this dataset to copy, distribute and transmit the work under the following conditions: * Attribution: You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use. * Non-Commercial: You may not use the material for commercial purposes. For further legal details, please read the [complete license terms](https://creativecommons.org/licenses/by-nc/4.0/legalcode). For additional information, please visit the [official website](http://ftp.itec.aau.at/datasets/GLENDA/index.html). ### Citation Information ``` @inproceedings{10.1007/978-3-030-37734-2_36, abstract = {Gynecologic laparoscopy as a type of minimally invasive surgery (MIS) is performed via a live feed of a patient's abdomen surveying the insertion and handling of various instruments for conducting treatment. Adopting this kind of surgical intervention not only facilitates a great variety of treatments, the possibility of recording said video streams is as well essential for numerous post-surgical activities, such as treatment planning, case documentation and education. Nonetheless, the process of manually analyzing surgical recordings, as it is carried out in current practice, usually proves tediously time-consuming. In order to improve upon this situation, more sophisticated computer vision as well as machine learning approaches are actively developed. Since most of such approaches heavily rely on sample data, which especially in the medical field is only sparsely available, with this work we publish the Gynecologic Laparoscopy ENdometriosis DAtaset (GLENDA) -- an image dataset containing region-based annotations of a common medical condition named endometriosis, i.e. the dislocation of uterine-like tissue. The dataset is the first of its kind and it has been created in collaboration with leading medical experts in the field.}, address = {Cham}, author = {Leibetseder, Andreas and Kletz, Sabrina and Schoeffmann, Klaus and Keckstein, Simon and Keckstein, J{\"o}rg}, booktitle = {MultiMedia Modeling}, editor = {Ro, Yong Man and Cheng, Wen-Huang and Kim, Junmo and Chu, Wei-Ta and Cui, Peng and Choi, Jung-Woo and Hu, Min-Chun and De Neve, Wesley}, isbn = {978-3-030-37734-2}, pages = {439--450}, publisher = {Springer International Publishing}, title = {GLENDA: Gynecologic Laparoscopy Endometriosis Dataset}, year = {2020} } ```
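As a usage illustration, a minimal loading sketch assuming the two configuration names listed in this card (`binary_classification` and `multiclass_classification`) and that the dataset is fetched from the Hugging Face Hub; the repo id below is an assumption and may need to be adjusted.

```python
from datasets import load_dataset

# Assumed Hub repo id; substitute the id of the copy you are using.
glenda = load_dataset("MFreidank/glenda", "binary_classification", split="train")

sample = glenda[0]
print(sample["labels"])               # 0 = no_pathology, 1 = endometriosis
print(sample["metadata"]["case_id"])  # per-case grouping information
sample["image"].show()                # PIL image of the laparoscopy frame
```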
MFreidank/glenda
[ "task_categories:image-classification", "annotations_creators:expert-generated", "language_creators:machine-generated", "multilinguality:monolingual", "size_categories:10K<n<100K", "language:en", "license:cc-by-nc-4.0", "region:us" ]
2022-12-10T12:55:32+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["machine-generated"], "language": ["en"], "license": ["cc-by-nc-4.0"], "multilinguality": ["monolingual"], "size_categories": ["10K<n<100K"], "task_categories": ["image-classification"], "task_ids": [], "pretty_name": "GLENDA - The ITEC Gynecologic Laparoscopy Endometriosis Dataset", "dataset_info": [{"config_name": "binary_classification", "features": [{"name": "image", "dtype": "image"}, {"name": "metadata", "struct": [{"name": "id", "dtype": "int32"}, {"name": "width", "dtype": "int32"}, {"name": "height", "dtype": "int32"}, {"name": "file_name", "dtype": "string"}, {"name": "path", "dtype": "string"}, {"name": "fickr_url", "dtype": "string"}, {"name": "coco_url", "dtype": "string"}, {"name": "date_captured", "dtype": "string"}, {"name": "case_id", "dtype": "int32"}, {"name": "video_id", "dtype": "int32"}, {"name": "frame_id", "dtype": "int32"}, {"name": "from_seconds", "dtype": "int32"}, {"name": "to_seconds", "dtype": "int32"}]}, {"name": "labels", "dtype": {"class_label": {"names": {"0": "no_pathology", "1": "endometriosis"}}}}], "splits": [{"name": "train", "num_bytes": 4524957, "num_examples": 13811}], "download_size": 895554144, "dataset_size": 4524957}, {"config_name": "multiclass_classification", "features": [{"name": "image", "dtype": "image"}, {"name": "metadata", "struct": [{"name": "id", "dtype": "int32"}, {"name": "width", "dtype": "int32"}, {"name": "height", "dtype": "int32"}, {"name": "file_name", "dtype": "string"}, {"name": "path", "dtype": "string"}, {"name": "fickr_url", "dtype": "string"}, {"name": "coco_url", "dtype": "string"}, {"name": "date_captured", "dtype": "string"}, {"name": "case_id", "dtype": "int32"}, {"name": "video_id", "dtype": "int32"}, {"name": "frame_id", "dtype": "int32"}, {"name": "from_seconds", "dtype": "int32"}, {"name": "to_seconds", "dtype": "int32"}]}, {"name": "labels", "dtype": {"class_label": {"names": {"0": "No-Pathology", "1": "6.1.1.1_Endo-Peritoneum", "2": "6.1.1.2_Endo-Ovar", "3": "6.1.1.3_Endo-TIE", "4": "6.1.1.4_Endo-Uterus"}}}}], "splits": [{"name": "train", "num_bytes": 4524957, "num_examples": 13811}], "download_size": 895554144, "dataset_size": 4524957}]}
2022-12-29T12:19:47+00:00
[]
[ "en" ]
TAGS #task_categories-image-classification #annotations_creators-expert-generated #language_creators-machine-generated #multilinguality-monolingual #size_categories-10K<n<100K #language-English #license-cc-by-nc-4.0 #region-us
# Dataset Card for GLENDA - The ITEC Gynecologic Laparoscopy Endometriosis Dataset ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: URL - Repository: - Paper: GLENDA: Gynecologic Laparoscopy Endometriosis Dataset - Leaderboard: - Point of Contact: freidankm@URL ### Dataset Summary GLENDA (Gynecologic Laparoscopy ENdometriosis DAtaset) comprises over 350 annotated endometriosis lesion images taken from 100+ gynecologic laparoscopy surgeries as well as over 13K unannotated non pathological images of 20+ surgeries. The dataset is purposefully created to be utilized for a variety of automatic content analysis problems in the context of Endometriosis recognition. Usage Information (Disclaimer) The dataset is exclusively provided for scientific research purposes and as such cannot be used commercially or for any other purpose. If any other purpose is intended, you may directly contact the originator of the videos. For additional information (including contact details), please visit the official website. Description Endometriosis is a benign but potentially painful condition among women in child bearing age involving the growth of uterine-like tissue in locations outside of the uterus. Corresponding lesions can be found in various positions and severities, often in multiple instances per patient requiring a physician to determine its extent. This most frequently is accomplished by calculating its magnitude via utilizing the combination of two popular classification systems, the revised American Society for Reproductive Medicine (rASRM) and the European Enzian scores. Endometriosis can not reliably identified by laymen, therefore, the dataset has been created with the help of medical experts in the field of endometriosis treatment. Purposes * binary (endometriosis) classification * detection/localization Overview The dataset includes region-based annotations of 4 pathological endometriosis categories as well as non pathological counter example images. Annotations are created for single video frames that may be part of larger sequences comprising several consecutive frames (all showing the annotated condition). Frames can contain multiple annotations, potentially of different categories. Each single annotation is exported as a binary image (similar to below examples, albeit one image per annotation). # TODO: FIXME: A bit more useful info on dataset case distribution class distribution and link to original + preview link TODO: FIXME: A bit more useful info on dataset case distribution class distribution and link to original + preview link ### Supported Tasks and Leaderboards - 'image_classification': The dataset can be used for binary (no pathology / endometriosis) or multiclass image classification (No-Pathology, 6.1.1.1\_Endo-Peritoneum, 6.1.1.2\_Endo-Ovar, 6.1.1.3\_Endo-DIE, 6.1.1.4\_Endo-Uterus. 
These classes respectively correspond to: no visible pathology in relation to endometriosis, peritoneal endometriosis, endometriosis on ovaries, deep infiltrating endometriosis (DIE) and uterine endometriosis.). ## Dataset Structure ### Data Instances #### binary\_classification TODO DESCRIBE #### multiclass\_classification TODO DESCRIBE ## Dataset Creation ### Curation Rationale From the official website > The dataset is purposefully created to be utilized for a variety of automatic content analysis problems in the context of Endometriosis recognition ### Source Data #### Initial Data Collection and Normalization From the official website > The dataset includes region-based annotations of 4 pathological endometriosis categories as well as non pathological counter example images. Annotations are created for single video frames that may be part of larger sequences comprising several consecutive frames (all showing the annotated condition). Frames can contain multiple annotations, potentially of different categories. Each single annotation is exported as a binary image (similar to below examples, albeit one image per annotation). ### Annotations #### Annotation process From the official website > Corresponding lesions can be found in various positions and severities, often in multiple instances per patient requiring a physician to determine its extent. This most frequently is accomplished by calculating its magnitude via utilizing the combination of two popular classification systems, the revised American Society for Reproductive Medicine (rASRM) and the European Enzian scores. Endometriosis can not reliably identified by laymen, therefore, the dataset has been created with the help of medical experts in the field of endometriosis treatment. #### Who are the annotators? Medical experts in the field of endometriosis treatment. ### Personal and Sensitive Information ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information The dataset is exclusively provided for scientific research purposes and as such cannot be used commercially or for any other purpose. If any other purpose is intended, you may directly contact the originator of the videos, Prof. Dr. Jörg Keckstein. GLENDA is licensed under Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0, Creative Commons License) and is created as well as maintained by Distributed Multimedia Systems Group of the Institute of Information Technology (ITEC) at Alpen-Adria Universität in Klagenfurt, Austria. This license allows users of this dataset to copy, distribute and transmit the work under the following conditions: * Attribution: You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use. * Non-Commercial: You may not use the material for commercial purposes. For further legal details, please read the complete license terms. For additional information, please visit the official website.
[ "# Dataset Card for GLENDA - The ITEC Gynecologic Laparoscopy Endometriosis Dataset", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: URL\n- Repository:\n- Paper: GLENDA: Gynecologic Laparoscopy Endometriosis Dataset\n- Leaderboard: \n- Point of Contact: freidankm@URL", "### Dataset Summary\n\nGLENDA (Gynecologic Laparoscopy ENdometriosis DAtaset) comprises over 350 annotated endometriosis lesion images taken from 100+ gynecologic laparoscopy surgeries as well as over 13K unannotated non pathological images of 20+ surgeries. The dataset is purposefully created to be utilized for a variety of automatic content analysis problems in the context of Endometriosis recognition.\n\nUsage Information (Disclaimer)\n\nThe dataset is exclusively provided for scientific research purposes and as such cannot be used commercially or for any other purpose. If any other purpose is intended, you may directly contact the originator of the videos. \nFor additional information (including contact details), please visit the official website.\n\nDescription\n\nEndometriosis is a benign but potentially painful condition among women in child bearing age involving the growth of uterine-like tissue in locations outside of the uterus. Corresponding lesions can be found in various positions and severities, often in multiple instances per patient requiring a physician to determine its extent. This most frequently is accomplished by calculating its magnitude via utilizing the combination of two popular classification systems, the revised American Society for Reproductive Medicine (rASRM) and the European Enzian scores. Endometriosis can not reliably identified by laymen, therefore, the dataset has been created with the help of medical experts in the field of endometriosis treatment.\n\nPurposes\n\n* binary (endometriosis) classification\n* detection/localization\n\nOverview\n\nThe dataset includes region-based annotations of 4 pathological endometriosis categories as well as non pathological counter example images. Annotations are created for single video frames that may be part of larger sequences comprising several consecutive frames (all showing the annotated condition). Frames can contain multiple annotations, potentially of different categories. Each single annotation is exported as a binary image (similar to below examples, albeit one image per annotation).", "# TODO: FIXME: A bit more useful info on dataset case distribution class distribution and link to original + preview link\nTODO: FIXME: A bit more useful info on dataset case distribution class distribution and link to original + preview link", "### Supported Tasks and Leaderboards\n- 'image_classification': The dataset can be used for binary (no pathology / endometriosis) or multiclass image classification (No-Pathology, 6.1.1.1\\_Endo-Peritoneum, 6.1.1.2\\_Endo-Ovar, 6.1.1.3\\_Endo-DIE, 6.1.1.4\\_Endo-Uterus. 
These classes respectively correspond to: no visible pathology in relation to endometriosis, peritoneal endometriosis, endometriosis on ovaries, deep infiltrating endometriosis (DIE) and uterine endometriosis.).", "## Dataset Structure", "### Data Instances", "#### binary\\_classification\nTODO DESCRIBE", "#### multiclass\\_classification\nTODO DESCRIBE", "## Dataset Creation", "### Curation Rationale\n\nFrom the official website\n> The dataset is purposefully created to be utilized for a variety of automatic content analysis problems in the context of Endometriosis recognition", "### Source Data", "#### Initial Data Collection and Normalization\n\nFrom the official website\n> The dataset includes region-based annotations of 4 pathological endometriosis categories as well as non pathological counter example images. Annotations are created for single video frames that may be part of larger sequences comprising several consecutive frames (all showing the annotated condition). Frames can contain multiple annotations, potentially of different categories. Each single annotation is exported as a binary image (similar to below examples, albeit one image per annotation).", "### Annotations", "#### Annotation process\n\nFrom the official website\n> Corresponding lesions can be found in various positions and severities, often in multiple instances per patient requiring a physician to determine its extent. This most frequently is accomplished by calculating its magnitude via utilizing the combination of two popular classification systems, the revised American Society for Reproductive Medicine (rASRM) and the European Enzian scores. Endometriosis can not reliably identified by laymen, therefore, the dataset has been created with the help of medical experts in the field of endometriosis treatment.", "#### Who are the annotators?\n\nMedical experts in the field of endometriosis treatment.", "### Personal and Sensitive Information", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information\n\nThe dataset is exclusively provided for scientific research purposes and as such cannot be used commercially or for any other purpose. If any other purpose is intended, you may directly contact the originator of the videos, Prof. Dr. Jörg Keckstein.\n\nGLENDA is licensed under Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0, Creative Commons License) and is created as well as maintained by Distributed Multimedia Systems Group of the Institute of Information Technology (ITEC) at Alpen-Adria Universität in Klagenfurt, Austria. \n\nThis license allows users of this dataset to copy, distribute and transmit the work under the following conditions:\n* Attribution: You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use.\n* Non-Commercial: You may not use the material for commercial purposes.\n\nFor further legal details, please read the complete license terms.\n\nFor additional information, please visit the official website." ]
[ "TAGS\n#task_categories-image-classification #annotations_creators-expert-generated #language_creators-machine-generated #multilinguality-monolingual #size_categories-10K<n<100K #language-English #license-cc-by-nc-4.0 #region-us \n", "# Dataset Card for GLENDA - The ITEC Gynecologic Laparoscopy Endometriosis Dataset", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: URL\n- Repository:\n- Paper: GLENDA: Gynecologic Laparoscopy Endometriosis Dataset\n- Leaderboard: \n- Point of Contact: freidankm@URL", "### Dataset Summary\n\nGLENDA (Gynecologic Laparoscopy ENdometriosis DAtaset) comprises over 350 annotated endometriosis lesion images taken from 100+ gynecologic laparoscopy surgeries as well as over 13K unannotated non pathological images of 20+ surgeries. The dataset is purposefully created to be utilized for a variety of automatic content analysis problems in the context of Endometriosis recognition.\n\nUsage Information (Disclaimer)\n\nThe dataset is exclusively provided for scientific research purposes and as such cannot be used commercially or for any other purpose. If any other purpose is intended, you may directly contact the originator of the videos. \nFor additional information (including contact details), please visit the official website.\n\nDescription\n\nEndometriosis is a benign but potentially painful condition among women in child bearing age involving the growth of uterine-like tissue in locations outside of the uterus. Corresponding lesions can be found in various positions and severities, often in multiple instances per patient requiring a physician to determine its extent. This most frequently is accomplished by calculating its magnitude via utilizing the combination of two popular classification systems, the revised American Society for Reproductive Medicine (rASRM) and the European Enzian scores. Endometriosis can not reliably identified by laymen, therefore, the dataset has been created with the help of medical experts in the field of endometriosis treatment.\n\nPurposes\n\n* binary (endometriosis) classification\n* detection/localization\n\nOverview\n\nThe dataset includes region-based annotations of 4 pathological endometriosis categories as well as non pathological counter example images. Annotations are created for single video frames that may be part of larger sequences comprising several consecutive frames (all showing the annotated condition). Frames can contain multiple annotations, potentially of different categories. 
Each single annotation is exported as a binary image (similar to below examples, albeit one image per annotation).", "# TODO: FIXME: A bit more useful info on dataset case distribution class distribution and link to original + preview link\nTODO: FIXME: A bit more useful info on dataset case distribution class distribution and link to original + preview link", "### Supported Tasks and Leaderboards\n- 'image_classification': The dataset can be used for binary (no pathology / endometriosis) or multiclass image classification (No-Pathology, 6.1.1.1\\_Endo-Peritoneum, 6.1.1.2\\_Endo-Ovar, 6.1.1.3\\_Endo-DIE, 6.1.1.4\\_Endo-Uterus. These classes respectively correspond to: no visible pathology in relation to endometriosis, peritoneal endometriosis, endometriosis on ovaries, deep infiltrating endometriosis (DIE) and uterine endometriosis.).", "## Dataset Structure", "### Data Instances", "#### binary\\_classification\nTODO DESCRIBE", "#### multiclass\\_classification\nTODO DESCRIBE", "## Dataset Creation", "### Curation Rationale\n\nFrom the official website\n> The dataset is purposefully created to be utilized for a variety of automatic content analysis problems in the context of Endometriosis recognition", "### Source Data", "#### Initial Data Collection and Normalization\n\nFrom the official website\n> The dataset includes region-based annotations of 4 pathological endometriosis categories as well as non pathological counter example images. Annotations are created for single video frames that may be part of larger sequences comprising several consecutive frames (all showing the annotated condition). Frames can contain multiple annotations, potentially of different categories. Each single annotation is exported as a binary image (similar to below examples, albeit one image per annotation).", "### Annotations", "#### Annotation process\n\nFrom the official website\n> Corresponding lesions can be found in various positions and severities, often in multiple instances per patient requiring a physician to determine its extent. This most frequently is accomplished by calculating its magnitude via utilizing the combination of two popular classification systems, the revised American Society for Reproductive Medicine (rASRM) and the European Enzian scores. Endometriosis can not reliably identified by laymen, therefore, the dataset has been created with the help of medical experts in the field of endometriosis treatment.", "#### Who are the annotators?\n\nMedical experts in the field of endometriosis treatment.", "### Personal and Sensitive Information", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information\n\nThe dataset is exclusively provided for scientific research purposes and as such cannot be used commercially or for any other purpose. If any other purpose is intended, you may directly contact the originator of the videos, Prof. Dr. Jörg Keckstein.\n\nGLENDA is licensed under Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0, Creative Commons License) and is created as well as maintained by Distributed Multimedia Systems Group of the Institute of Information Technology (ITEC) at Alpen-Adria Universität in Klagenfurt, Austria. \n\nThis license allows users of this dataset to copy, distribute and transmit the work under the following conditions:\n* Attribution: You must give appropriate credit, provide a link to the license, and indicate if changes were made. 
You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use.\n* Non-Commercial: You may not use the material for commercial purposes.\n\nFor further legal details, please read the complete license terms.\n\nFor additional information, please visit the official website." ]
1b42948377bbb5f5adacbfa481b9ece3781bf854
# AutoTrain Dataset for project: disparities_pubmed_mit ## Dataset Description This dataset has been automatically processed by AutoTrain for project disparities_pubmed_mit. ### Languages The BCP-47 code for the dataset's language is en. ## Dataset Structure ### Data Instances A sample from this dataset looks as follows: ```json [ { "text": "IDH1-R132H acts as a tumor suppressor in glioma via epigenetic up-regulation of the DNA damage response. Patients with glioma whose tumors carry a mutation in isocitrate dehydrogenase 1 (IDH1(R132H)) are younger at diagnosis and live longer. IDH1 mutations co-occur with other molecular lesions, such as 1p/19q codeletion, inactivating mutations in the tumor suppressor protein 53 (TP53) gene, and loss-of-function mutations in alpha thalassemia/mental retardation syndrome X-linked gene (ATRX). All adult low-grade gliomas (LGGs) harboring ATRX loss also express the IDH1(R132H) mutation. The current molecular classification of LGGs is based, partly, on the distribution of these mutations. We developed a genetically engineered mouse model harboring IDH1(R132H), TP53 and ATRX inactivating mutations, and activated NRAS G12V. Previously, we established that ATRX deficiency, in the context of wild-type IDH1, induces genomic instability, impairs nonhomologous end-joining DNA repair, and increases sensitivity to DNA-damaging therapies. In this study, using our mouse model and primary patient-derived glioma cultures with IDH1 mutations, we investigated the function of IDH1(R132H) in the context of TP53 and ATRX loss. We discovered that IDH1(R132H) expression in the genetic context of ATRX and TP53 gene inactivation (i) increases median survival in the absence of treatment, (ii) enhances DNA damage response (DDR) via epigenetic up-regulation of the ataxia-telangiectasia-mutated (ATM) signaling pathway, and (iii) elicits tumor radioresistance. Accordingly, pharmacological inhibition of ATM or checkpoint kinases 1 and 2, essential kinases in the DDR, restored the tumors' radiosensitivity. Translation of these findings to patients with IDH1(132H) glioma harboring TP53 and ATRX loss could improve the therapeutic efficacy of radiotherapy and, consequently, patient survival.", "target": 0 }, { "text": "Activation of prolyl hydroxylase-2 for stabilization of mitochondrial stress along with simultaneous downregulation of HIF-1\u00ce\u00b1/FASN in ER\u00c2\u00a0+\u00c2\u00a0breast cancer subtype. The present study was undertaken to inquest the chemical activation of prolyl hydroxylase-2 for the curtailment of hypoxia-inducible factor-1\u00ce\u00b1 and fatty acid synthase. It was well documented that hypoxia-inducible factor-1\u00ce\u00b1 and fatty acid synthase were overexpressed in mammary gland carcinomas. After screening a battery of compounds, BBAP-2 was retrieved as a potential prolyl hydroxylase-2 activator and validates its activity using ER\u00c2\u00a0+\u00c2\u00a0MCF-7 cell line and n-methyl-n-nitrosourea-induced rat in vivo model, respectively. BBAP-2 was palpable for the morphological characteristics of apoptosis along with changes in the mitochondrial intergrity as visualized by acridine orange/ethidium bromide and JC-1 staining against ER\u00c2\u00a0+\u00c2\u00a0MCF-7 cells. BBAP-2 also arrest the cell cycle of ER\u00c2\u00a0+\u00c2\u00a0MCF-7 cells at G2/M phase. Afterward, BBAP-2 has scrutinized against n-methyl-n-nitrosourea-induced mammary gland carcinoma in albino Wistar rats. 
BBAP-2 restored the morphological architecture when screened through carmine staining, haematoxylin and eosin staining, and scanning electron microscopy. BBAP-2 also delineated the markers of oxidative stress favourably. The immunoblotting and mRNA expression analysis validated that BBAP-2 has a potentialty activate the prolyl hydroxylase-2 with sequential downregulating effect on hypoxia-inducible factor-1\u00ce\u00b1 and its downstream checkpoint. BBAP-2 also fostered apoptosis through mitochondrial-mediated death pathway. The present study elaborates the chemical activation of prolyl hydroxylase-2 by which the increased expression of HIF-1\u00ce\u00b1 and FASN can be reduced in mammary gland carcinoma.", "target": 0 } ] ``` ### Dataset Fields The dataset has the following fields (also called "features"): ```json { "text": "Value(dtype='string', id=None)", "target": "ClassLabel(num_classes=2, names=['0', '1'], id=None)" } ``` ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follow: | Split name | Num samples | | ------------ | ------------------- | | train | 807 | | valid | 203 |
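For readers who want to inspect the processed data, here is a minimal loading sketch. It assumes the AutoTrain-exported files load directly with the `datasets` library under the repository id shown below, and that the split names match the table above; neither is confirmed by the card.

```python
from datasets import load_dataset

# Hypothetical usage sketch; repo id and split names are assumptions, not confirmed by the card.
ds = load_dataset("eber/autotrain-data-disparities_pubmed_mit")

print(ds)  # expect a train split (~807 rows) and a validation split (~203 rows)
sample = ds["train"][0]
print(sample["text"][:200])  # beginning of the abstract text
print(sample["target"])      # ClassLabel value: 0 or 1
```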
eber/autotrain-data-disparities_pubmed_mit
[ "task_categories:text-classification", "language:en", "region:us" ]
2022-12-10T13:08:55+00:00
{"language": ["en"], "task_categories": ["text-classification"]}
2022-12-10T13:34:47+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #language-English #region-us
AutoTrain Dataset for project: disparities\_pubmed\_mit ======================================================= Dataset Description ------------------- This dataset has been automatically processed by AutoTrain for project disparities\_pubmed\_mit. ### Languages The BCP-47 code for the dataset's language is en. Dataset Structure ----------------- ### Data Instances A sample from this dataset looks as follows: ### Dataset Fields The dataset has the following fields (also called "features"): ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follow:
[ "### Languages\n\n\nThe BCP-47 code for the dataset's language is en.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
[ "TAGS\n#task_categories-text-classification #language-English #region-us \n", "### Languages\n\n\nThe BCP-47 code for the dataset's language is en.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
bfebd41171043bec836113629cbb5480db40b250
# Dataset Card for PANDA ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Repository:** https://github.com/facebookresearch/ResponsibleNLP/ - **Paper:** https://arxiv.org/abs/2205.12586 - **Point of Contact:** [email protected], [email protected], [email protected], [email protected] ### Dataset Summary PANDA (Perturbation Augmentation NLP DAtaset) consists of approximately 100K pairs of crowdsourced human-perturbed text snippets (original, perturbed). Annotators were given selected terms and target demographic attributes, and instructed to rewrite text snippets along three demographic axes: gender, race and age, while preserving semantic meaning. Text snippets were sourced from a range of text corpora (BookCorpus, Wikipedia, ANLI, MNLI, SST, SQuAD). PANDA can be used for training a learned perturber that can rewrite text with control. PANDA can also be used to evaluate the demographic robustness of language models. ### Languages English ## Dataset Structure ### Data Instances - Size of training data: 198.6 MB - Size of validation data: 22.2 MB Examples of data instances: ``` { "original": "the moment the girl mentions the subject she will be yours .", "selected_word": "girl", "target_attribute": "man", "perturbed": "the moment the boy mentions the subject he will be yours.\n\n" } { "original": "are like magic tricks, says the New York Times ' Michael Kimmelman. <SEP> Michael Kimmelman has never likened anything to a magic trick.", "selected_word": "Michael", "target_attribute": "woman", "perturbed": "are like magic tricks, says the New York Times' Michelle Kimmelman. <SEP> Michelle Kimmelman has never likened anything to a magic trick." } { "original": "lilly ann looked at him asking herself how he cold not know .", "selected_word": "he", "target_attribute": "non-binary", "perturbed": "Lilly Ann looked at them, asking herself how they could not know." } ``` Examples with <SEP> tokens are the result of concatenation of text fields in source datasets, such as the premise and hypothesis of NLI datasets. ### Data Fields - `original`: Source (unperturbed) text snippet, sampled from a variety of English text corpora. - `selected_word`: Demographic term that needs to be perturbed. - `target_attribute`: Target demographic category. - `perturbed`: Perturbed text snippet, which is the source text rewritten to alter the selected word along the specified target demographic attribute. For example, if the selected word is "Lily" and target is "man", all references to "Lily" (eg. pronouns) in the source text are altered to refer to a man. 
Note that some examples may be unchanged, either due to the lack of demographic information, or ambiguity of the task; given the subjective nature of identifying demographic terms and attributes, we allow some room for interpretation provided the rewrite does not perpetuate harmful social biases. ### Data Splits - `train`: 94966 - `valid`: 10551 ## Dataset Creation ### Curation Rationale We constructed PANDA to create and release the first large scale dataset of demographic text perturbations. This enables the training of the first neural perturber model, which outperforms heuristic approaches. ### Source Data #### Initial Data Collection and Normalization We employed 524 crowdworkers to create PANDA examples over the span of several months. Annotators were tasked with rewriting text snippets sourced from popular English text corpora. For more information on the task UI and methodology, see our paper *Perturbation Augmentation for Fairer NLP*. ### Annotations #### Annotation process PANDA was collected in a 3 stage annotation process: 1. Span identification: Annotators select demographic terms in source text samples. 2. Attribute identification: Identified demographic terms are annotated for gender/race/age attributes, such as "man", "Asian", "old" etc. 3. Rewrite text: Annotators rewrite text by modifying the selected entity to reflect the target demographic attribute. Annotators are encouraged to create minimal edits, eg. "George" -> "Georgina". The annotation process is explained in more detail in our paper. #### Who are the annotators? PANDA was annotated by English speaking Amazon Mechanical Turk workers. We included a voluntary demographic survey along with annotation tasks that did not contribute to pay. For a breakdown of annotators' demographic identities, see our paper. ### Personal and Sensitive Information PANDA does not contain identifying information about annotators. ## Considerations for Using the Data ### Social Impact of Dataset By releasing the first large scale dataset of demographic text rewrites, we hope to enable exciting future work in fairness in NLP toward more scalable, automated approaches to reducing biases in datasets and language models. Furthermore, PANDA aims to be diverse in text domain and demographic representation. PANDA includes a large proportion of non-binary gender annotations, which are underrepresented in existing text corpora and prior fairness datasets. Text examples vary in length, with examples spanning single sentences and long Wikipedia passages, and are sourced from a variety of text corpora that can be used to train a domain agnostic perturber. ### Discussion of Biases For this work, we sourced our annotated data from a range of sources to ensure: (i) permissive data licensing, (ii) that our perturber works well on downstream applications such as NLU classification tasks, and (iii) that our perturber can handle data from multiple domains to be maximally useful. However, we acknowledge that there may be other existing biases in PANDA as a result of our data sourcing choices. For example, it is possible that data sources like BookWiki primarily contain topics of interest to people with a certain amount of influence and educational access, people from the so-called “Western world”, etc. Other topics that might be interesting and relevant to others may be missing or only present in limited quantities. 
The present approach can only weaken associations inherited from the data sources we use, but in future work, we would love to explore the efficacy of our approach on text from other sources that contain a wider range of topics and text domain differences. ### Other Known Limitations Our augmentation process can sometimes create nonexistent versions of real people, such as discussing an English King Victor (not a historical figure), as opposed to a Queen Victoria (a historical figure). We embrace the counterfactuality of many of our perturbations, but the lack of guaranteed factuality means that our approach may not be well-suited to all NLP tasks. For example, it might not be suitable for augmenting misinformation detection datasets, because peoples’ names, genders, and other demographic information should not be changed. ## Additional Information ### Dataset Curators Rebecca Qian, Candace Ross, Jude Fernandes, Douwe Kiela and Adina Williams. ### Licensing Information PANDA is released under the MIT license. ### Citation Information https://arxiv.org/abs/2205.12586 ### Contributions Thanks to [@Rebecca-Qian](https://github.com/Rebecca-Qian) for adding this dataset.
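As a rough illustration of how the fields above fit together, the sketch below loads the dataset with the `datasets` library and prints one training pair. The repository id `facebook/panda` and the exact split names are assumptions based on this card, not verified output.

```python
from datasets import load_dataset

# Minimal sketch; field names follow the "Data Fields" section above.
panda = load_dataset("facebook/panda")

ex = panda["train"][0]
print(ex["original"])          # unperturbed source snippet
print(ex["selected_word"])     # demographic term chosen for perturbation
print(ex["target_attribute"])  # e.g. "man", "woman", "non-binary"
print(ex["perturbed"])         # human rewrite along the target attribute
```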
facebook/panda
[ "task_categories:token-classification", "annotations_creators:expert-generated", "annotations_creators:crowdsourced", "language_creators:crowdsourced", "language_creators:expert-generated", "multilinguality:monolingual", "size_categories:100K<n<1M", "source_datasets:original", "language:en", "license:mit", "fairness", "nlp", "demographic", "diverse", "gender", "non-binary", "race", "age", "arxiv:2205.12586", "region:us" ]
2022-12-10T13:54:23+00:00
{"annotations_creators": ["expert-generated", "crowdsourced"], "language_creators": ["crowdsourced", "expert-generated"], "language": ["en"], "license": ["mit"], "multilinguality": ["monolingual"], "size_categories": ["100K<n<1M"], "source_datasets": ["original"], "task_categories": ["token-classification"], "task_ids": [], "paperswithcode_id": "winobias", "pretty_name": "panda", "tags": ["fairness", "nlp", "demographic", "diverse", "gender", "non-binary", "race", "age"]}
2022-12-10T14:01:45+00:00
[ "2205.12586" ]
[ "en" ]
TAGS #task_categories-token-classification #annotations_creators-expert-generated #annotations_creators-crowdsourced #language_creators-crowdsourced #language_creators-expert-generated #multilinguality-monolingual #size_categories-100K<n<1M #source_datasets-original #language-English #license-mit #fairness #nlp #demographic #diverse #gender #non-binary #race #age #arxiv-2205.12586 #region-us
# Dataset Card for PANDA ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Repository: URL - Paper: URL - Point of Contact: rebeccaqian@URL, ccross@URL, douwe@URL, adinawilliams@URL ### Dataset Summary PANDA (Perturbation Augmentation NLP DAtaset) consists of approximately 100K pairs of crowdsourced human-perturbed text snippets (original, perturbed). Annotators were given selected terms and target demographic attributes, and instructed to rewrite text snippets along three demographic axes: gender, race and age, while preserving semantic meaning. Text snippets were sourced from a range of text corpora (BookCorpus, Wikipedia, ANLI, MNLI, SST, SQuAD). PANDA can be used for training a learned perturber that can rewrite text with control. PANDA can also be used to evaluate the demographic robustness of language models. ### Languages English ## Dataset Structure ### Data Instances - Size of training data: 198.6 MB - Size of validation data: 22.2 MB Examples of data instances: Examples with <SEP> tokens are the result of concatenation of text fields in source datasets, such as the premise and hypothesis of NLI datasets. ### Data Fields - 'original': Source (unperturbed) text snippet, sampled from a variety of English text corpora. - 'selected_word': Demographic term that needs to be perturbed. - 'target_attribute': Target demographic category. - 'perturbed': Perturbed text snippet, which is the source text rewritten to alter the selected word along the specified target demographic attribute. For example, if the selected word is "Lily" and target is "man", all references to "Lily" (eg. pronouns) in the source text are altered to refer to a man. Note that some examples may be unchanged, either due to the lack of demographic information, or ambiguity of the task; given the subjective nature of identifying demographic terms and attributes, we allow some room for interpretation provided the rewrite does not perpetuate harmful social biases. ### Data Splits - 'train': 94966 - 'valid': 10551 ## Dataset Creation ### Curation Rationale We constructed PANDA to create and release the first large scale dataset of demographic text perturbations. This enables the training of the first neural perturber model, which outperforms heuristic approaches. ### Source Data #### Initial Data Collection and Normalization We employed 524 crowdworkers to create PANDA examples over the span of several months. Annotators were tasked with rewriting text snippets sourced from popular English text corpora. For more information on the task UI and methodology, see our paper *Perturbation Augmentation for Fairer NLP*. ### Annotations #### Annotation process PANDA was collected in a 3 stage annotation process: 1. Span identification: Annotators select demographic terms in source text samples. 2. Attribute identification: Identified demographic terms are annotated for gender/race/age attributes, such as "man", "Asian", "old" etc. 3. Rewrite text: Annotators rewrite text by modifying the selected entity to reflect the target demographic attribute. 
Annotators are encouraged to create minimal edits, eg. "George" -> "Georgina". The annotation process is explained in more detail in our paper. #### Who are the annotators? PANDA was annotated by English speaking Amazon Mechanical Turk workers. We included a voluntary demographic survey along with annotation tasks that did not contribute to pay. For a breakdown of annotators' demographic identities, see our paper. ### Personal and Sensitive Information PANDA does not contain identifying information about annotators. ## Considerations for Using the Data ### Social Impact of Dataset By releasing the first large scale dataset of demographic text rewrites, we hope to enable exciting future work in fairness in NLP toward more scalable, automated approaches to reducing biases in datasets and language models. Furthermore, PANDA aims to be diverse in text domain and demographic representation. PANDA includes a large proportion of non-binary gender annotations, which are underrepresented in existing text corpora and prior fairness datasets. Text examples vary in length, with examples spanning single sentences and long Wikipedia passages, and are sourced from a variety of text corpora that can be used to train a domain agnostic perturber. ### Discussion of Biases For this work, we sourced our annotated data from a range of sources to ensure: (i) permissive data licensing, (ii) that our perturber works well on downstream applications such as NLU classification tasks, and (iii) that our perturber can handle data from multiple domains to be maximally useful. However, we acknowledge that there may be other existing biases in PANDA as a result of our data sourcing choices. For example, it is possible that data sources like BookWiki primarily contain topics of interest to people with a certain amount of influence and educational access, people from the so-called “Western world”, etc. Other topics that might be interesting and relevant to others may be missing or only present in limited quantities. The present approach can only weaken associations inherited from the data sources we use, but in future work, we would love to explore the efficacy of our approach on text from other sources that contain a wider range of topics and text domain differences. ### Other Known Limitations Our augmentation process can sometimes create nonexistent versions of real people, such as discussing an English King Victor (not a historical figure), as opposed to a Queen Victoria (a historical figure). We embrace the counterfactuality of many of our perturbations, but the lack of guaranteed factuality means that our approach may not be well-suited to all NLP tasks. For example, it might not be suitable for augmenting misinformation detection datasets, because peoples’ names, genders, and other demographic information should not be changed. ## Additional Information ### Dataset Curators Rebecca Qian, Candace Ross, Jude Fernandes, Douwe Kiela and Adina Williams. ### Licensing Information PANDA is released under the MIT license. URL ### Contributions Thanks to @Rebecca-Qian for adding this dataset.
[ "# Dataset Card for PANDA", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Repository: URL\n- Paper: URL\n- Point of Contact: rebeccaqian@URL, ccross@URL, douwe@URL, adinawilliams@URL", "### Dataset Summary\n\nPANDA (Perturbation Augmentation NLP DAtaset) consists of approximately 100K pairs of crowdsourced human-perturbed text snippets (original, perturbed). Annotators were given selected terms and target demographic attributes, and instructed to rewrite text snippets along three demographic axes: gender, race and age, while preserving semantic meaning. Text snippets were sourced from a range of text corpora (BookCorpus, Wikipedia, ANLI, MNLI, SST, SQuAD). PANDA can be used for training a learned perturber that can rewrite text with control. PANDA can also be used to evaluate the demographic robustness of language models.", "### Languages\n\nEnglish", "## Dataset Structure", "### Data Instances\n\n- Size of training data: 198.6 MB\n- Size of validation data: 22.2 MB\n\nExamples of data instances:\n\n\nExamples with <SEP> tokens are the result of concatenation of text fields in source datasets, such as the premise and hypothesis of NLI datasets.", "### Data Fields\n\n- 'original': Source (unperturbed) text snippet, sampled from a variety of English text corpora.\n- 'selected_word': Demographic term that needs to be perturbed.\n- 'target_attribute': Target demographic category.\n- 'perturbed': Perturbed text snippet, which is the source text rewritten to alter the selected word along the specified target demographic attribute. For example, if the selected word is \"Lily\" and target is \"man\", all references to \"Lily\" (eg. pronouns) in the source text are altered to refer to a man. Note that some examples may be unchanged, either due to the lack of demographic information, or ambiguity of the task; given the subjective nature of identifying demographic terms and attributes, we allow some room for interpretation provided the rewrite does not perpetuate harmful social biases.", "### Data Splits\n\n- 'train': 94966\n- 'valid': 10551", "## Dataset Creation", "### Curation Rationale\n\nWe constructed PANDA to create and release the first large scale dataset of demographic text perturbations. This enables the training of the first neural perturber model, which outperforms heuristic approaches.", "### Source Data", "#### Initial Data Collection and Normalization\n\nWe employed 524 crowdworkers to create PANDA examples over the span of several months. Annotators were tasked with rewriting text snippets sourced from popular English text corpora. For more information on the task UI and methodology, see our paper *Perturbation Augmentation for Fairer NLP*.", "### Annotations", "#### Annotation process\n\nPANDA was collected in a 3 stage annotation process:\n1. Span identification: Annotators select demographic terms in source text samples.\n2. Attribute identification: Identified demographic terms are annotated for gender/race/age attributes, such as \"man\", \"Asian\", \"old\" etc.\n3. 
Rewrite text: Annotators rewrite text by modifying the selected entity to reflect the target demographic attribute. Annotators are encouraged to create minimal edits, eg. \"George\" -> \"Georgina\".\n\nThe annotation process is explained in more detail in our paper.", "#### Who are the annotators?\n\nPANDA was annotated by English speaking Amazon Mechanical Turk workers. We included a voluntary demographic survey along with annotation tasks that did not contribute to pay. For a breakdown of annotators' demographic identities, see our paper.", "### Personal and Sensitive Information\n\nPANDA does not contain identifying information about annotators.", "## Considerations for Using the Data", "### Social Impact of Dataset\n\nBy releasing the first large scale dataset of demographic text rewrites, we hope to enable exciting future work in fairness in NLP toward more scalable, automated approaches to reducing biases in datasets and language models.\n\nFurthermore, PANDA aims to be diverse in text domain and demographic representation. PANDA includes a large proportion of non-binary gender annotations, which are underrepresented in existing text corpora and prior fairness datasets. Text examples vary in length, with examples spanning single sentences and long Wikipedia passages, and are sourced from a variety of text corpora that can be used to train a domain agnostic perturber.", "### Discussion of Biases\n\nFor this work, we sourced our annotated data from a range of sources to ensure: (i) permissive data licensing, (ii) that our perturber works well on downstream applications such as NLU classification tasks, and (iii) that our perturber can handle data from multiple domains to be maximally useful. However, we acknowledge that there may be other existing biases in PANDA as a result of our data sourcing choices. For example, it is possible that data sources like BookWiki primarily contain topics of interest to people with a certain amount of influence and educational access, people from the so-called “Western world”, etc. Other topics that might be interesting and relevant to others may be missing or only present in limited quantities. The present approach can only weaken associations inherited from the data sources we use, but in future work, we would love to explore the efficacy of our approach on text from other sources that contain a wider range of topics and text domain differences.", "### Other Known Limitations\n\nOur augmentation process can sometimes create nonexistent versions of real people, such as discussing an English King Victor (not a historical figure), as opposed to a Queen Victoria (a historical figure). We embrace the counterfactuality of many of our perturbations, but the lack of guaranteed factuality means that our approach may not be well-suited to all NLP tasks. For example, it might not be suitable for augmenting misinformation detection datasets, because peoples’ names, genders, and other demographic information should not be changed.", "## Additional Information", "### Dataset Curators\n\nRebecca Qian, Candace Ross, Jude Fernandes, Douwe Kiela and Adina Williams.", "### Licensing Information\n\nPANDA is released under the MIT license.\n\n\n\nURL", "### Contributions\n\nThanks to @Rebecca-Qian for adding this dataset." ]
[ "TAGS\n#task_categories-token-classification #annotations_creators-expert-generated #annotations_creators-crowdsourced #language_creators-crowdsourced #language_creators-expert-generated #multilinguality-monolingual #size_categories-100K<n<1M #source_datasets-original #language-English #license-mit #fairness #nlp #demographic #diverse #gender #non-binary #race #age #arxiv-2205.12586 #region-us \n", "# Dataset Card for PANDA", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Repository: URL\n- Paper: URL\n- Point of Contact: rebeccaqian@URL, ccross@URL, douwe@URL, adinawilliams@URL", "### Dataset Summary\n\nPANDA (Perturbation Augmentation NLP DAtaset) consists of approximately 100K pairs of crowdsourced human-perturbed text snippets (original, perturbed). Annotators were given selected terms and target demographic attributes, and instructed to rewrite text snippets along three demographic axes: gender, race and age, while preserving semantic meaning. Text snippets were sourced from a range of text corpora (BookCorpus, Wikipedia, ANLI, MNLI, SST, SQuAD). PANDA can be used for training a learned perturber that can rewrite text with control. PANDA can also be used to evaluate the demographic robustness of language models.", "### Languages\n\nEnglish", "## Dataset Structure", "### Data Instances\n\n- Size of training data: 198.6 MB\n- Size of validation data: 22.2 MB\n\nExamples of data instances:\n\n\nExamples with <SEP> tokens are the result of concatenation of text fields in source datasets, such as the premise and hypothesis of NLI datasets.", "### Data Fields\n\n- 'original': Source (unperturbed) text snippet, sampled from a variety of English text corpora.\n- 'selected_word': Demographic term that needs to be perturbed.\n- 'target_attribute': Target demographic category.\n- 'perturbed': Perturbed text snippet, which is the source text rewritten to alter the selected word along the specified target demographic attribute. For example, if the selected word is \"Lily\" and target is \"man\", all references to \"Lily\" (eg. pronouns) in the source text are altered to refer to a man. Note that some examples may be unchanged, either due to the lack of demographic information, or ambiguity of the task; given the subjective nature of identifying demographic terms and attributes, we allow some room for interpretation provided the rewrite does not perpetuate harmful social biases.", "### Data Splits\n\n- 'train': 94966\n- 'valid': 10551", "## Dataset Creation", "### Curation Rationale\n\nWe constructed PANDA to create and release the first large scale dataset of demographic text perturbations. This enables the training of the first neural perturber model, which outperforms heuristic approaches.", "### Source Data", "#### Initial Data Collection and Normalization\n\nWe employed 524 crowdworkers to create PANDA examples over the span of several months. Annotators were tasked with rewriting text snippets sourced from popular English text corpora. 
For more information on the task UI and methodology, see our paper *Perturbation Augmentation for Fairer NLP*.", "### Annotations", "#### Annotation process\n\nPANDA was collected in a 3 stage annotation process:\n1. Span identification: Annotators select demographic terms in source text samples.\n2. Attribute identification: Identified demographic terms are annotated for gender/race/age attributes, such as \"man\", \"Asian\", \"old\" etc.\n3. Rewrite text: Annotators rewrite text by modifying the selected entity to reflect the target demographic attribute. Annotators are encouraged to create minimal edits, eg. \"George\" -> \"Georgina\".\n\nThe annotation process is explained in more detail in our paper.", "#### Who are the annotators?\n\nPANDA was annotated by English speaking Amazon Mechanical Turk workers. We included a voluntary demographic survey along with annotation tasks that did not contribute to pay. For a breakdown of annotators' demographic identities, see our paper.", "### Personal and Sensitive Information\n\nPANDA does not contain identifying information about annotators.", "## Considerations for Using the Data", "### Social Impact of Dataset\n\nBy releasing the first large scale dataset of demographic text rewrites, we hope to enable exciting future work in fairness in NLP toward more scalable, automated approaches to reducing biases in datasets and language models.\n\nFurthermore, PANDA aims to be diverse in text domain and demographic representation. PANDA includes a large proportion of non-binary gender annotations, which are underrepresented in existing text corpora and prior fairness datasets. Text examples vary in length, with examples spanning single sentences and long Wikipedia passages, and are sourced from a variety of text corpora that can be used to train a domain agnostic perturber.", "### Discussion of Biases\n\nFor this work, we sourced our annotated data from a range of sources to ensure: (i) permissive data licensing, (ii) that our perturber works well on downstream applications such as NLU classification tasks, and (iii) that our perturber can handle data from multiple domains to be maximally useful. However, we acknowledge that there may be other existing biases in PANDA as a result of our data sourcing choices. For example, it is possible that data sources like BookWiki primarily contain topics of interest to people with a certain amount of influence and educational access, people from the so-called “Western world”, etc. Other topics that might be interesting and relevant to others may be missing or only present in limited quantities. The present approach can only weaken associations inherited from the data sources we use, but in future work, we would love to explore the efficacy of our approach on text from other sources that contain a wider range of topics and text domain differences.", "### Other Known Limitations\n\nOur augmentation process can sometimes create nonexistent versions of real people, such as discussing an English King Victor (not a historical figure), as opposed to a Queen Victoria (a historical figure). We embrace the counterfactuality of many of our perturbations, but the lack of guaranteed factuality means that our approach may not be well-suited to all NLP tasks. 
For example, it might not be suitable for augmenting misinformation detection datasets, because peoples’ names, genders, and other demographic information should not be changed.", "## Additional Information", "### Dataset Curators\n\nRebecca Qian, Candace Ross, Jude Fernandes, Douwe Kiela and Adina Williams.", "### Licensing Information\n\nPANDA is released under the MIT license.\n\n\n\nURL", "### Contributions\n\nThanks to @Rebecca-Qian for adding this dataset." ]
c456131bbd846b9becfc9d028911b46967a6cbf7
# Dataset Card for "trdg_random_en_zh_text_recognition" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
priyank-m/trdg_random_en_zh_text_recognition
[ "region:us" ]
2022-12-10T16:42:28+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 12592040013.0, "num_examples": 410000}], "download_size": 12595188446, "dataset_size": 12592040013.0}}
2022-12-14T06:05:17+00:00
[]
[]
TAGS #region-us
# Dataset Card for "trdg_random_en_zh_text_recognition" More Information needed
[ "# Dataset Card for \"trdg_random_en_zh_text_recognition\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"trdg_random_en_zh_text_recognition\"\n\nMore Information needed" ]
7134135b90e8c44c55fe5e76c2a2426b74ab3fb9
# Dataset Card for "uniform_top" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
WillHeld/uniform_top
[ "region:us" ]
2022-12-10T17:54:27+00:00
{"dataset_info": {"features": [{"name": "utterance", "dtype": "string"}, {"name": "locale", "dtype": "string"}, {"name": "semantic_parse", "dtype": "string"}], "splits": [{"name": "eval_en", "num_bytes": 283034, "num_examples": 2235}, {"name": "test_en", "num_bytes": 554754, "num_examples": 4386}, {"name": "train_en", "num_bytes": 1973838, "num_examples": 15667}, {"name": "eval_de", "num_bytes": 242996, "num_examples": 1815}, {"name": "test_de", "num_bytes": 471105, "num_examples": 3549}, {"name": "train_de", "num_bytes": 1804566, "num_examples": 13424}, {"name": "eval_es", "num_bytes": 207924, "num_examples": 1527}, {"name": "test_es", "num_bytes": 402468, "num_examples": 2998}, {"name": "train_es", "num_bytes": 1473681, "num_examples": 10934}, {"name": "eval_fr", "num_bytes": 208175, "num_examples": 1577}, {"name": "test_fr", "num_bytes": 427290, "num_examples": 3193}, {"name": "train_fr", "num_bytes": 1578716, "num_examples": 11814}, {"name": "eval_hi", "num_bytes": 435694, "num_examples": 2012}, {"name": "test_hi", "num_bytes": 576384, "num_examples": 2789}, {"name": "train_hi", "num_bytes": 2356893, "num_examples": 11330}, {"name": "eval_th", "num_bytes": 363531, "num_examples": 1671}, {"name": "test_th", "num_bytes": 586408, "num_examples": 2765}, {"name": "train_th", "num_bytes": 2303175, "num_examples": 10759}, {"name": "eval_cstop", "num_bytes": 74530, "num_examples": 559}, {"name": "test_cstop", "num_bytes": 153728, "num_examples": 1167}, {"name": "train_cstop", "num_bytes": 540817, "num_examples": 4077}, {"name": "eval_top_v2", "num_bytes": 2565386, "num_examples": 17160}, {"name": "test_top_v2", "num_bytes": 5759599, "num_examples": 38785}, {"name": "train_top_v2", "num_bytes": 18815125, "num_examples": 124597}, {"name": "validation_hinglish_top", "num_bytes": 220386, "num_examples": 1390}, {"name": "test_hinglish_top", "num_bytes": 1069867, "num_examples": 6513}, {"name": "train_hinglish_top", "num_bytes": 478317, "num_examples": 2993}, {"name": "eval_cstop_artificial", "num_bytes": 70248, "num_examples": 559}, {"name": "test_cstop_artificial", "num_bytes": 144553, "num_examples": 1167}, {"name": "train_cstop_artificial", "num_bytes": 508926, "num_examples": 4077}], "download_size": 17110962, "dataset_size": 46652114}}
2022-12-10T17:59:40+00:00
[]
[]
TAGS #region-us
# Dataset Card for "uniform_top" More Information needed
[ "# Dataset Card for \"uniform_top\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"uniform_top\"\n\nMore Information needed" ]
e76145e15c14851addd36013957a9794749b201a
# Dataset Card for "olm-october-2022-tokenized-1024-suffix-array-dedup" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Tristan/olm-october-2022-tokenized-1024-suffix-array-dedup
[ "region:us" ]
2022-12-10T19:38:35+00:00
{"dataset_info": {"features": [{"name": "input_ids", "sequence": "int32"}, {"name": "attention_mask", "sequence": "int8"}, {"name": "special_tokens_mask", "sequence": "int8"}], "splits": [{"name": "train", "num_bytes": 81147320856, "num_examples": 13181826}], "download_size": 21892490583, "dataset_size": 81147320856}}
2022-12-11T07:10:35+00:00
[]
[]
TAGS #region-us
# Dataset Card for "olm-october-2022-tokenized-1024-suffix-array-dedup" More Information needed
[ "# Dataset Card for \"olm-october-2022-tokenized-1024-suffix-array-dedup\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"olm-october-2022-tokenized-1024-suffix-array-dedup\"\n\nMore Information needed" ]
5bde9b7bee121458d38ddc9acc8e8513cbe28a17
# Dataset Card for "mediaspeech-with-cv-tr" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
zeynepgulhan/mediaspeech-with-cv-tr
[ "region:us" ]
2022-12-10T19:49:34+00:00
{"dataset_info": {"features": [{"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "sentence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 10631918399.222, "num_examples": 38638}, {"name": "test", "num_bytes": 278050703.0, "num_examples": 10143}], "download_size": 1643709639, "dataset_size": 10909969102.222}}
2022-12-10T20:07:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "mediaspeech-with-cv-tr" More Information needed
[ "# Dataset Card for \"mediaspeech-with-cv-tr\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"mediaspeech-with-cv-tr\"\n\nMore Information needed" ]
c3cd8df5b0453d8f035c0e9244ab0be3b5ae8d7e
# Numerai Datasets This is a mirror of the official numerai dataset - NOT OFFICIALLY SUPPORTED OR MAINTAINED BY NUMERAI. Official source: https://numer.ai/data Use the official source to submit your predictions, no guarantees for correctness or completeness. This is maintained by the Numerai community.
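Since file names and formats inside the mirror are not documented here, the safest sketch is to pull a full snapshot with `huggingface_hub` and inspect it locally. The repo id below is the one shown on this page; everything else is an assumption, and submissions still go through the official numer.ai tooling.

```python
from huggingface_hub import snapshot_download

# Sketch: download the community mirror; the file layout inside is not guaranteed.
local_dir = snapshot_download(
    repo_id="Numerati/numerai-datasets",
    repo_type="dataset",
)
print(local_dir)  # inspect the mirrored files here; use official numer.ai data for submissions
```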
Numerati/numerai-datasets
[ "task_categories:time-series-forecasting", "task_categories:tabular-classification", "task_categories:other", "task_ids:multivariate-time-series-forecasting", "task_ids:tabular-single-column-regression", "annotations_creators:no-annotation", "language_creators:machine-generated", "multilinguality:other-my-multilinguality", "source_datasets:original", "license:unknown", "numerai", "stock market", "hedge fund", "obfuscated", "region:us" ]
2022-12-10T19:58:16+00:00
{"annotations_creators": ["no-annotation"], "language_creators": ["machine-generated"], "language": [], "license": ["unknown"], "multilinguality": ["other-my-multilinguality"], "size_categories": [], "source_datasets": ["original"], "task_categories": ["time-series-forecasting", "tabular-classification", "other"], "task_ids": ["multivariate-time-series-forecasting", "tabular-single-column-regression"], "pretty_name": "Numerai Dataset", "tags": ["numerai", "stock market", "hedge fund", "obfuscated"]}
2022-12-11T13:11:50+00:00
[]
[]
TAGS #task_categories-time-series-forecasting #task_categories-tabular-classification #task_categories-other #task_ids-multivariate-time-series-forecasting #task_ids-tabular-single-column-regression #annotations_creators-no-annotation #language_creators-machine-generated #multilinguality-other-my-multilinguality #source_datasets-original #license-unknown #numerai #stock market #hedge fund #obfuscated #region-us
# Numerai Datasets This is a mirror of the official numerai dataset - NOT OFFICIALLY SUPPORTED OR MAINTAINED BY NUMERAI. Official source: URL Use the official source to submit your predictions, no guarantees for correctness or completeness. This is maintained by the Numerai community.
[ "# Numerai Datasets\nThis is a mirror of the official numerai dataset - NOT OFFICIALLY SUPPORTED OR MAINTAINED BY NUMERAI.\nOfficial source: URL\n\nUse the official source to submit your predictions, no guarantees for correctness or completeness.\nThis is maintained by the Numerai community." ]
[ "TAGS\n#task_categories-time-series-forecasting #task_categories-tabular-classification #task_categories-other #task_ids-multivariate-time-series-forecasting #task_ids-tabular-single-column-regression #annotations_creators-no-annotation #language_creators-machine-generated #multilinguality-other-my-multilinguality #source_datasets-original #license-unknown #numerai #stock market #hedge fund #obfuscated #region-us \n", "# Numerai Datasets\nThis is a mirror of the official numerai dataset - NOT OFFICIALLY SUPPORTED OR MAINTAINED BY NUMERAI.\nOfficial source: URL\n\nUse the official source to submit your predictions, no guarantees for correctness or completeness.\nThis is maintained by the Numerai community." ]
7e3508b23eb2b39d7f79a8b1fc583a3c20309628
train-eval-index:
- config: default
  task: token-classification
  task_id: entity_extraction
  splits:
    eval_split: test
  col_mapping:
    tokens: tokens
    labels: tags

breadlicker45/yahoo_answers_v2
[ "license:mit", "region:us" ]
2022-12-11T00:54:12+00:00
{"license": "mit"}
2023-02-01T12:20:18+00:00
[]
[]
TAGS #license-mit #region-us
train-eval-index: - config: default task: token-classification task_id: entity_extraction splits: eval_split: test col_mapping: tokens: tokens labels: tags
[]
[ "TAGS\n#license-mit #region-us \n" ]
74d8e7c0def08e69f46b4638546f0debc7c3d4f2
1k images for the class "artstyle" that were made with & for the [JoePenna Dreambooth repo](https://github.com/JoePenna/Dreambooth-Stable-Diffusion) with Stable Diffusion 1.5 ``` seed: 10 ddim_eta: 0.0 scale: 10.0 ddim_steps: 50 ```
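A hedged sketch of how one might fetch these regularization images for a Dreambooth run. The repo id is the one shown below; pointing the trainer's regularization-images folder at the download is an assumption about the intended workflow, not something the card states.

```python
from huggingface_hub import snapshot_download

# Sketch: grab the 1k "artstyle" regularization images to a local folder.
reg_dir = snapshot_download(
    repo_id="proxima/SD_1-5_reg_images",
    repo_type="dataset",
)
print(reg_dir)  # e.g. use this path as the regularization-images directory in a Dreambooth setup
```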
proxima/SD_1-5_reg_images
[ "license:creativeml-openrail-m", "region:us" ]
2022-12-11T01:31:03+00:00
{"license": "creativeml-openrail-m"}
2022-12-11T03:00:05+00:00
[]
[]
TAGS #license-creativeml-openrail-m #region-us
1k images for the class "artstyle" that were made with & for the JoePenna Dreambooth repo with Stable Diffusion 1.5
[]
[ "TAGS\n#license-creativeml-openrail-m #region-us \n" ]
a37b1891609c0376fa81eced756e7863e1bd873b
# Dataset Card for "oxford-flowers" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
nelorth/oxford-flowers
[ "task_categories:image-classification", "task_categories:unconditional-image-generation", "source_datasets:https://www.robots.ox.ac.uk/~vgg/data/flowers", "license:unknown", "flowers", "oxford", "region:us" ]
2022-12-11T02:14:19+00:00
{"license": ["unknown"], "source_datasets": "https://www.robots.ox.ac.uk/~vgg/data/flowers", "task_categories": ["image-classification", "unconditional-image-generation"], "pretty_name": "Oxford Flowers Dataset", "tags": ["flowers", "oxford"], "dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "1", "1": "10", "2": "100", "3": "101", "4": "102", "5": "11", "6": "12", "7": "13", "8": "14", "9": "15", "10": "16", "11": "17", "12": "18", "13": "19", "14": "2", "15": "20", "16": "21", "17": "22", "18": "23", "19": "24", "20": "25", "21": "26", "22": "27", "23": "28", "24": "29", "25": "3", "26": "30", "27": "31", "28": "32", "29": "33", "30": "34", "31": "35", "32": "36", "33": "37", "34": "38", "35": "39", "36": "4", "37": "40", "38": "41", "39": "42", "40": "43", "41": "44", "42": "45", "43": "46", "44": "47", "45": "48", "46": "49", "47": "5", "48": "50", "49": "51", "50": "52", "51": "53", "52": "54", "53": "55", "54": "56", "55": "57", "56": "58", "57": "59", "58": "6", "59": "60", "60": "61", "61": "62", "62": "63", "63": "64", "64": "65", "65": "66", "66": "67", "67": "68", "68": "69", "69": "7", "70": "70", "71": "71", "72": "72", "73": "73", "74": "74", "75": "75", "76": "76", "77": "77", "78": "78", "79": "79", "80": "8", "81": "80", "82": "81", "83": "82", "84": "83", "85": "84", "86": "85", "87": "86", "88": "87", "89": "88", "90": "89", "91": "9", "92": "90", "93": "91", "94": "92", "95": "93", "96": "94", "97": "95", "98": "96", "99": "97", "100": "98", "101": "99"}}}}], "splits": [{"name": "train", "num_bytes": 308119477.446, "num_examples": 7169}, {"name": "test", "num_bytes": 43247670.14, "num_examples": 1020}], "download_size": 346597973, "dataset_size": 351367147.58599997}}
2022-12-11T02:38:31+00:00
[]
[]
TAGS #task_categories-image-classification #task_categories-unconditional-image-generation #source_datasets-https-//www.robots.ox.ac.uk/~vgg/data/flowers #license-unknown #flowers #oxford #region-us
# Dataset Card for "oxford-flowers" More Information needed
[ "# Dataset Card for \"oxford-flowers\"\n\nMore Information needed" ]
[ "TAGS\n#task_categories-image-classification #task_categories-unconditional-image-generation #source_datasets-https-//www.robots.ox.ac.uk/~vgg/data/flowers #license-unknown #flowers #oxford #region-us \n", "# Dataset Card for \"oxford-flowers\"\n\nMore Information needed" ]