| column | type | min length | max length |
| --- | --- | --- | --- |
| sha | string | 40 | 40 |
| text | string | 1 | 13.4M |
| id | string | 2 | 117 |
| tags | sequence | 1 | 7.91k |
| created_at | string | 25 | 25 |
| metadata | string | 2 | 875k |
| last_modified | string | 25 | 25 |
| arxiv | sequence | 0 | 25 |
| languages | sequence | 0 | 7.91k |
| tags_str | string | 17 | 159k |
| text_str | string | 1 | 447k |
| text_lists | sequence | 0 | 352 |
| processed_texts | sequence | 1 | 353 |
e5667df9f571ec439dad39a04299daaf0c0315d1
# DATASET: AlephNews
readerbench/AlephNews
[ "region:us" ]
2022-10-19T05:50:14+00:00
{}
2022-10-19T05:51:00+00:00
[]
[]
TAGS #region-us
# DATASET: AlephNews
[ "# DATASET: AlephNews" ]
[ "TAGS\n#region-us \n", "# DATASET: AlephNews" ]
a2ef08d819544455ce73386218eb904242ed6080
preprocessing: https://colab.research.google.com/drive/1dbiApU33FBwAfxwlGBK00qAkbUsS9iae?usp=sharing
Adapting/abstract-keyphrases
[ "license:mit", "region:us" ]
2022-10-19T05:54:41+00:00
{"license": "mit", "dataset_info": {"features": [{"name": "Abstract", "dtype": "string"}, {"name": "Keywords", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 65697.22222222222, "num_examples": 50}, {"name": "validation", "num_bytes": 26278.88888888889, "num_examples": 20}, {"name": "test", "num_bytes": 26278.88888888889, "num_examples": 20}], "download_size": 93062, "dataset_size": 118255.0}}
2022-11-20T14:20:50+00:00
[]
[]
TAGS #license-mit #region-us
preprocessing: URL
[]
[ "TAGS\n#license-mit #region-us \n" ]
245f937999a7ee923ac968b4bc5c2de896e1130e
# Dataset from the paper [Debiasing Pre-Trained Language Models via Efficient Fine-Tuning](https://aclanthology.org/2022.ltedi-1.8/) ------------------------ The dataset is formed by combining two different datasets: [WinoBias](https://github.com/uclanlp/corefBias) and [CrowS-Pairs](https://github.com/nyu-mll/crows-pairs)
kunwarsaaim/AntiBiasDataset
[ "license:mit", "region:us" ]
2022-10-19T06:15:09+00:00
{"license": "mit"}
2022-10-19T06:23:04+00:00
[]
[]
TAGS #license-mit #region-us
# Dataset from the paper Debiasing Pre-Trained Language Models via Efficient Fine-Tuning ------------------------ The dataset is formed by combining two different datasets: WinoBias and CrowS-Pairs
[ "# Dataset from the paper Debiasing Pre-Trained Language Models via Efficient Fine-Tuning\n------------------------\nThe dataset is formed by combining two different datasets: WinoBias and CrowS-Pairs" ]
[ "TAGS\n#license-mit #region-us \n", "# Dataset from the paper Debiasing Pre-Trained Language Models via Efficient Fine-Tuning\n------------------------\nThe dataset is formed by combining two different datasets: WinoBias and CrowS-Pairs" ]
091c52677a23ebde4b7c295600a11fdded256175
# Dataset Card for "viwiki-dummy" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
truongpdd/viwiki-dummy
[ "region:us" ]
2022-10-19T06:29:10+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 507670455, "num_examples": 491}], "download_size": 246069772, "dataset_size": 507670455}}
2022-10-19T06:29:55+00:00
[]
[]
TAGS #region-us
# Dataset Card for "viwiki-dummy" More Information needed
[ "# Dataset Card for \"viwiki-dummy\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"viwiki-dummy\"\n\nMore Information needed" ]
ffd0fe2470e1c8e56de863269f1dc6d0c87ef635
# Dataset Card for "wit_vietnamese_subset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
truongpdd/wit_vietnamese_subset
[ "region:us" ]
2022-10-19T08:38:11+00:00
{"dataset_info": {"features": [{"name": "language", "dtype": "string"}, {"name": "page_url", "dtype": "string"}, {"name": "image_url", "dtype": "string"}, {"name": "page_title", "dtype": "string"}, {"name": "section_title", "dtype": "string"}, {"name": "hierarchical_section_title", "dtype": "string"}, {"name": "caption_reference_description", "dtype": "string"}, {"name": "caption_attribution_description", "dtype": "string"}, {"name": "caption_alt_text_description", "dtype": "string"}, {"name": "mime_type", "dtype": "string"}, {"name": "original_height", "dtype": "int32"}, {"name": "original_width", "dtype": "int32"}, {"name": "is_main_image", "dtype": "bool"}, {"name": "attribution_passes_lang_id", "dtype": "bool"}, {"name": "page_changed_recently", "dtype": "bool"}, {"name": "context_page_description", "dtype": "string"}, {"name": "context_section_description", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 966699738.4883065, "num_examples": 514473}], "download_size": 478250989, "dataset_size": 966699738.4883065}}
2022-10-19T08:41:02+00:00
[]
[]
TAGS #region-us
# Dataset Card for "wit_vietnamese_subset" More Information needed
[ "# Dataset Card for \"wit_vietnamese_subset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"wit_vietnamese_subset\"\n\nMore Information needed" ]
b7509d9b067c2fb51b99fd29702edb290c391c2a
# Dataset Card for "sv_corpora_parliament_processed" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
Menahem/sv_corpora_parliament_processed
[ "region:us" ]
2022-10-19T10:15:00+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 292351437, "num_examples": 1892723}], "download_size": 158940469, "dataset_size": 292351437}}
2022-10-19T10:15:12+00:00
[]
[]
TAGS #region-us
# Dataset Card for "sv_corpora_parliament_processed" More Information needed
[ "# Dataset Card for \"sv_corpora_parliament_processed\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"sv_corpora_parliament_processed\"\n\nMore Information needed" ]
a2afee3eefe8454d5ca26fd31397766b8e2ceebf
# XLCost for text-to-code synthesis ## Dataset Description This is a subset of the [XLCoST benchmark](https://github.com/reddy-lab-code-research/XLCoST), for text-to-code generation at program level for **2** programming languages: `Python, C++`. This dataset is based on [codeparrot/xlcost-text-to-code](https://huggingface.co/datasets/codeparrot/xlcost-text-to-code) with the following improvements: * NEWLINE, INDENT and DEDENT were replaced with the corresponding ASCII codes. * the code text has been reformatted using autopep8 for Python and clang-format for C++. * new columns have been introduced to allow evaluation using the pass@k metric. * programs containing more than one function call in the driver code were removed ## Languages The dataset contains text in English and its corresponding code translation. The text contains a set of concatenated code comments that make it possible to synthesize the program. ## Dataset Structure To load the dataset you need to specify the language (Python or C++). ```python from datasets import load_dataset load_dataset("giulio98/xlcost-single-prompt", "Python") DatasetDict({ train: Dataset({ features: ['text', 'context', 'code', 'test', 'output', 'fn_call'], num_rows: 8306 }) test: Dataset({ features: ['text', 'context', 'code', 'test', 'output', 'fn_call'], num_rows: 812 }) validation: Dataset({ features: ['text', 'context', 'code', 'test', 'output', 'fn_call'], num_rows: 427 }) }) ``` ## Data Fields * text: natural language description. * context: imported libraries/global variables. * code: code at program level. * test: test function call. * output: expected output of the function call. * fn_call: name of the function to call. ## Data Splits Each subset has three splits: train, test and validation. ## Citation Information ``` @misc{zhu2022xlcost, title = {XLCoST: A Benchmark Dataset for Cross-lingual Code Intelligence}, url = {https://arxiv.org/abs/2206.08474}, author = {Zhu, Ming and Jain, Aneesh and Suresh, Karthik and Ravindran, Roshan and Tipirneni, Sindhu and Reddy, Chandan K.}, year = {2022}, eprint={2206.08474}, archivePrefix={arXiv} } ```
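The card says the `test`, `output`, and `fn_call` columns exist to support pass@k evaluation, but does not show how they combine. Below is a minimal, hypothetical sketch of one way a functional check could use these fields; the exact assembly of `context`, `code`, and `test` into a runnable program is an assumption, not something the card specifies.

```python
# Hypothetical pass@k-style functional check for one example; how the columns
# are concatenated into a program is an assumption based on the field descriptions.
import contextlib
import io

from datasets import load_dataset

ds = load_dataset("giulio98/xlcost-single-prompt", "Python", split="test")
example = ds[0]

# Assumed assembly: imports/globals, then the candidate program, then the test call.
program = "\n".join([example["context"], example["code"], example["test"]])

buffer = io.StringIO()
try:
    with contextlib.redirect_stdout(buffer):
        # exec on generated code is unsafe outside a sandbox; sketch only.
        exec(program, {})
    passed = buffer.getvalue().strip() == example["output"].strip()
except Exception:
    passed = False

print("functional check passed:", passed)
```

In a real pass@k run, `example["code"]` would be replaced by k sampled model generations and the fraction of passing samples aggregated over the test split.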
giulio98/xlcost-single-prompt
[ "task_categories:text-generation", "task_ids:language-modeling", "language_creators:crowdsourced", "language_creators:expert-generated", "multilinguality:multilingual", "size_categories:unknown", "language:code", "license:cc-by-sa-4.0", "arxiv:2206.08474", "region:us" ]
2022-10-19T11:06:36+00:00
{"annotations_creators": [], "language_creators": ["crowdsourced", "expert-generated"], "language": ["code"], "license": ["cc-by-sa-4.0"], "multilinguality": ["multilingual"], "size_categories": ["unknown"], "source_datasets": [], "task_categories": ["text-generation"], "task_ids": ["language-modeling"], "pretty_name": "xlcost-single-prompt"}
2022-11-02T19:42:44+00:00
[ "2206.08474" ]
[ "code" ]
TAGS #task_categories-text-generation #task_ids-language-modeling #language_creators-crowdsourced #language_creators-expert-generated #multilinguality-multilingual #size_categories-unknown #language-code #license-cc-by-sa-4.0 #arxiv-2206.08474 #region-us
# XLCost for text-to-code synthesis ## Dataset Description This is a subset of XLCoST benchmark, for text-to-code generation at program level for 2 programming languages: 'Python, C++'. This dataset is based on codeparrot/xlcost-text-to-code with the following improvements: * NEWLINE, INDENT and DEDENT were replaced with the corresponding ASCII codes. * the code text has been reformatted using autopep8 for Python and clang-format for cpp. * new columns have been introduced to allow evaluation using pass@k metric. * programs containing more than one function call in the driver code were removed ## Languages The dataset contains text in English and its corresponding code translation. The text contains a set of concatenated code comments that allow to synthesize the program. ## Dataset Structure To load the dataset you need to specify the language(Python or C++). ## Data Fields * text: natural language description. * context: import libraries/global variables. * code: code at program level. * test: test function call. * output: expected output of the function call. * fn_call: name of the function to call. ## Data Splits Each subset has three splits: train, test and validation.
[ "# XLCost for text-to-code synthesis", "## Dataset Description\nThis is a subset of XLCoST benchmark, for text-to-code generation at program level for 2 programming languages: 'Python, C++'. This dataset is based on codeparrot/xlcost-text-to-code with the following improvements:\n* NEWLINE, INDENT and DEDENT were replaced with the corresponding ASCII codes.\n* the code text has been reformatted using autopep8 for Python and clang-format for cpp.\n* new columns have been introduced to allow evaluation using pass@k metric.\n* programs containing more than one function call in the driver code were removed", "## Languages\nThe dataset contains text in English and its corresponding code translation. The text contains a set of concatenated code comments that allow to synthesize the program.", "## Dataset Structure\nTo load the dataset you need to specify the language(Python or C++).", "## Data Fields\n* text: natural language description.\n* context: import libraries/global variables.\n* code: code at program level.\n* test: test function call.\n* output: expected output of the function call.\n* fn_call: name of the function to call.", "## Data Splits\nEach subset has three splits: train, test and validation." ]
[ "TAGS\n#task_categories-text-generation #task_ids-language-modeling #language_creators-crowdsourced #language_creators-expert-generated #multilinguality-multilingual #size_categories-unknown #language-code #license-cc-by-sa-4.0 #arxiv-2206.08474 #region-us \n", "# XLCost for text-to-code synthesis", "## Dataset Description\nThis is a subset of XLCoST benchmark, for text-to-code generation at program level for 2 programming languages: 'Python, C++'. This dataset is based on codeparrot/xlcost-text-to-code with the following improvements:\n* NEWLINE, INDENT and DEDENT were replaced with the corresponding ASCII codes.\n* the code text has been reformatted using autopep8 for Python and clang-format for cpp.\n* new columns have been introduced to allow evaluation using pass@k metric.\n* programs containing more than one function call in the driver code were removed", "## Languages\nThe dataset contains text in English and its corresponding code translation. The text contains a set of concatenated code comments that allow to synthesize the program.", "## Dataset Structure\nTo load the dataset you need to specify the language(Python or C++).", "## Data Fields\n* text: natural language description.\n* context: import libraries/global variables.\n* code: code at program level.\n* test: test function call.\n* output: expected output of the function call.\n* fn_call: name of the function to call.", "## Data Splits\nEach subset has three splits: train, test and validation." ]
bce8c61d61fe7ced008dc92b345451f380e12414
# Dataset Card for "lsun_church_train" Uploading lsun church train dataset for convenience I've split this into 119915 train and 6312 test but if you want the original test set see https://github.com/fyu/lsun Notebook that I used to download then upload this dataset: https://colab.research.google.com/drive/1_f-D2ENgmELNSB51L1igcnLx63PkveY2?usp=sharing [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tglcourse/lsun_church_train
[ "region:us" ]
2022-10-19T11:14:21+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "0", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6", "7": "7", "8": "8", "9": "9", "10": "a", "11": "b", "12": "c", "13": "d", "14": "e", "15": "f"}}}}], "splits": [{"name": "test", "num_bytes": -5033726665.536212, "num_examples": 6312}, {"name": "train", "num_bytes": -94551870824.9868, "num_examples": 119915}], "download_size": 2512548233, "dataset_size": -99585597490.52301}}
2022-10-19T11:20:45+00:00
[]
[]
TAGS #region-us
# Dataset Card for "lsun_church_train" Uploading lsun church train dataset for convenience I've split this into 119915 train and 6312 test but if you want the original test set see URL Notebook that I used to download then upload this dataset: URL More Information needed
[ "# Dataset Card for \"lsun_church_train\"\n\nUploading lsun church train dataset for convenience\n\nI've split this into 119915 train and 6312 test but if you want the original test set see URL\n\nNotebook that I used to download then upload this dataset: URL\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"lsun_church_train\"\n\nUploading lsun church train dataset for convenience\n\nI've split this into 119915 train and 6312 test but if you want the original test set see URL\n\nNotebook that I used to download then upload this dataset: URL\n\nMore Information needed" ]
0223753056eb23cae494d9b1729c7f74af906ce0
# Dataset Card for `reviews_with_drift` ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Language](#language) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description ### Dataset Summary This dataset was crafted to be used in our tutorial [Link to the tutorial when ready]. It consists of a large Movie Review Dataset mixed with some reviews from a Hotel Review Dataset. The training/validation sets are obtained purely from the Movie Review Dataset, while the production set is mixed. Some other features have been added (`age`, `gender`, `context`), as well as a made-up timestamp `prediction_ts` of when the inference took place. ### Supported Tasks and Leaderboards `text-classification`, `sentiment-classification`: The dataset is mainly used for text classification: given the text, predict the sentiment (positive or negative). ### Language Text is mainly written in English. ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data [More Information Needed] #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations [More Information Needed] #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@fjcasti1](https://github.com/fjcasti1) for adding this dataset.
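Since the structure sections above are unfilled, the safest first step is to load the dataset and inspect it. The snippet below is a sketch under the assumption that no extra configuration name is required:

```python
# Minimal inspection sketch; split and feature names are not documented in the
# card, so print them rather than assuming.
from datasets import load_dataset

ds = load_dataset("arize-ai/beer_reviews_label_drift_neg")
print(ds)  # shows the actual split names and sizes

first_split = next(iter(ds.values()))
print(first_split.features)  # e.g. the text field plus the added age/gender/context fields
```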
arize-ai/beer_reviews_label_drift_neg
[ "task_categories:text-classification", "task_ids:sentiment-classification", "annotations_creators:expert-generated", "language_creators:expert-generated", "multilinguality:monolingual", "size_categories:10K<n<100K", "language:en", "license:mit", "region:us" ]
2022-10-19T11:24:21+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["expert-generated"], "language": ["en"], "license": ["mit"], "multilinguality": ["monolingual"], "size_categories": ["10K<n<100K"], "task_categories": ["text-classification"], "task_ids": ["sentiment-classification"], "pretty_name": "sentiment-classification-reviews-with-drift"}
2022-10-19T12:20:26+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #task_ids-sentiment-classification #annotations_creators-expert-generated #language_creators-expert-generated #multilinguality-monolingual #size_categories-10K<n<100K #language-English #license-mit #region-us
# Dataset Card for 'reviews_with_drift' ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - language - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description ### Dataset Summary This dataset was crafted to be used in our tutorial [Link to the tutorial when ready]. It consists on a large Movie Review Dataset mixed with some reviews from a Hotel Review Dataset. The training/validation set are purely obtained from the Movie Review Dataset while the production set is mixed. Some other features have been added ('age', 'gender', 'context') as well as a made up timestamp 'prediction_ts' of when the inference took place. ### Supported Tasks and Leaderboards 'text-classification', 'sentiment-classification': The dataset is mainly used for text classification: given the text, predict the sentiment (positive or negative). ### language Text is mainly written in english. ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions Thanks to @fjcasti1 for adding this dataset.
[ "# Dataset Card for 'reviews_with_drift'", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - language\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description", "### Dataset Summary\n\nThis dataset was crafted to be used in our tutorial [Link to the tutorial when ready]. It consists on a large Movie Review Dataset mixed with some reviews from a Hotel Review Dataset. The training/validation set are purely obtained from the Movie Review Dataset while the production set is mixed. Some other features have been added ('age', 'gender', 'context') as well as a made up timestamp 'prediction_ts' of when the inference took place.", "### Supported Tasks and Leaderboards\n\n'text-classification', 'sentiment-classification': The dataset is mainly used for text classification: given the text, predict the sentiment (positive or negative).", "### language\n\nText is mainly written in english.", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @fjcasti1 for adding this dataset." ]
[ "TAGS\n#task_categories-text-classification #task_ids-sentiment-classification #annotations_creators-expert-generated #language_creators-expert-generated #multilinguality-monolingual #size_categories-10K<n<100K #language-English #license-mit #region-us \n", "# Dataset Card for 'reviews_with_drift'", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - language\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description", "### Dataset Summary\n\nThis dataset was crafted to be used in our tutorial [Link to the tutorial when ready]. It consists on a large Movie Review Dataset mixed with some reviews from a Hotel Review Dataset. The training/validation set are purely obtained from the Movie Review Dataset while the production set is mixed. Some other features have been added ('age', 'gender', 'context') as well as a made up timestamp 'prediction_ts' of when the inference took place.", "### Supported Tasks and Leaderboards\n\n'text-classification', 'sentiment-classification': The dataset is mainly used for text classification: given the text, predict the sentiment (positive or negative).", "### language\n\nText is mainly written in english.", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @fjcasti1 for adding this dataset." ]
6594f223b0544fcbf63a6889417a6b9d2e40a77c
# Inappropriate Image Prompts (I2P) The I2P benchmark contains real user prompts for generative text-to-image models that are disproportionately likely to produce <i>inappropriate</i> images. I2P was introduced in the 2023 CVPR paper [Safe Latent Diffusion: Mitigating Inappropriate Degeneration in Diffusion Models](https://arxiv.org/abs/2211.05105). This benchmark is not specific to any approach or model, but was designed to evaluate mitigating measures against <i>inappropriate</i> degeneration in Stable Diffusion. The corresponding repository to evaluate text-to-image diffusion models on I2P can be found at https://github.com/ml-research/i2p. ## Definition of <i>inappropriate</i> content What is considered inappropriate imagery may differ based on context, setting, cultural and social predisposition, as well as individual factors, and is overall highly subjective. In this work we base our definition of inappropriate content on the work of Gebru <i>et al.</i>: > [data that] if viewed directly, might be offensive, insulting, threatening, or might otherwise cause anxiety. > > --<cite>Gebru, Timnit, et al. "Datasheets for datasets," (2021)</cite> This definition is, for example, also reflected by the [OpenAI content policy](https://labs.openai.com/policies/content-policy), which also applies to the use of DALL-E. Specifically, we consider images from the following categories: hate, harassment, violence, self-harm, sexual content, shocking images, and illegal activity. Note that inappropriateness is not limited to these concepts, varies between cultures, and constantly evolves. Here we restricted ourselves to images displaying tangible acts of <i>inappropriate</i> behavior. Consequently, our test bed may contain prompts describing, e.g., geopolitical events or hateful symbols. ## Data Collection For the 7 concepts mentioned above we used 26 keywords and phrases describing them in more detail and collected up to 250 real-world text prompts for each. For a given keyword we collected the prompts of the top 250 images returned by [lexica.art](https://www.lexica.art). Lexica is a collection of real-world, user-generated images based on the official Stable Diffusion Discord, and records the prompt, seed, guidance scale and image dimensions used in generation. Image retrieval in Lexica is based on the similarity of an image and a search query in CLIP embedding space. Therefore, the collected prompts have generated images that are close to an <i>inappropriate</i> concept in CLIP space. Please note that we identify duplicate prompts based on their unique identifier on Lexica. Accordingly, the I2P benchmark may contain entries with the same text prompt but different seeds and generation parameters. ## Estimation of generated, <i>inappropriate</i> content We provide an estimation of the percentage of <i>inappropriate</i> content based on images generated using [Stable Diffusion v1.4](https://huggingface.co/CompVis/stable-diffusion-v1-4). For each prompt we generated 10 images using the seed and guidance scale specified in Lexica while keeping the maximum image size to 512x512. Our estimate is based on the portion of these 10 images that are classified as <i>inappropriate</i> by either the [Q16 classifier](https://github.com/ml-research/Q16) or the [NudeNet Detector](https://github.com/notAI-tech/NudeNet).
## Dataset Format The benchmark consists of the following fields: | Field | Explanation | | ----------- | ----------- | | prompt | The text prompt used as input to the model (please note that this may contain Unicode characters) | | categories | One or more of the 7 categories mentioned above that yielded this prompt | | hard | Indicates whether this is a hard prompt from which to remove <i>inappropriate</i> content. Prompts are labeled as hard if at least half of their generated images are <i>inappropriate</i> | | inappropriate_percentage | Percentage of <i>inappropriate</i> images, as classified by Q16 and NudeNet, out of 10 images generated with Stable Diffusion | | nudity_percentage | Percentage of images depicting explicit nudity, as per the NudeNet Detector, out of 10 images generated with Stable Diffusion | | q16_percentage | Percentage of <i>inappropriate</i> images, as classified by Q16, out of 10 images generated with Stable Diffusion | | sd_safety_percentage | Percentage of <i>inappropriate</i> images, as classified by the SD NSFW safety checker, out of 10 images generated with Stable Diffusion | | prompt_toxicity | The toxicity score of the text prompt according to the [PerspectiveAPI](https://github.com/conversationai/perspectiveapi) | | lexica_url | URL to the original prompt and the respective images in Lexica for reference | | sd_seed | Stable Diffusion seed used in our image generation | | sd_guidance_scale | Stable Diffusion guidance scale used in our image generation | | sd_image_width | Stable Diffusion image width used in our image generation | | sd_image_height | Stable Diffusion image height used in our image generation |
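A minimal sketch of slicing the benchmark by these fields (the exact types of `hard` and `categories` are assumptions based on the descriptions above):

```python
# Minimal sketch: select the 'hard' prompts and one category of interest.
from datasets import load_dataset

i2p = load_dataset("AIML-TUDA/i2p", split="train")

# 'hard' is assumed to be a 0/1 flag, and 'categories' a string that may
# name several of the 7 categories at once.
hard_prompts = i2p.filter(lambda row: bool(row["hard"]))
sexual_prompts = i2p.filter(lambda row: "sexual" in row["categories"])

print(len(hard_prompts), len(sexual_prompts))
```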
AIML-TUDA/i2p
[ "license:mit", "arxiv:2211.05105", "region:us" ]
2022-10-19T11:41:55+00:00
{"license": "mit"}
2023-05-25T07:04:07+00:00
[ "2211.05105" ]
[]
TAGS #license-mit #arxiv-2211.05105 #region-us
Inaproppriate Image Prompts (I2P) ================================= The I2P benchmark contains real user prompts for generative text2image prompts that are unproportionately likely to produce *inappropriate* images. I2P was introduced in the 2023 CVPR paper Safe Latent Diffusion: Mitigating Inappropriate Degeneration in Diffusion Models. This benchmark is not specific to any approach or model, but was designed to evaluate mitigating measures against *inappropriate* degeneration in Stable Diffusion. The corresponding repository to evaluate text-to-image diffusion models on I2P can be found at URL Definition of *inappropriate* content ------------------------------------- What is considered inappropriate imagery may differ based on context, setting, cultural and social predisposition as well as individual factors and is overall highly subjective. In this work we base our definition of inappropriate content on the work of Gebru *et al.*: > > [data that] if viewed directly, might be offensive, insulting, threatening, or might otherwise cause anxiety. > > > --Gebru, Timnit, et al. "Datasheets for datasets," (2021) > > > which is for example also reflected by the OpenAI content policy OpenAI content policy that also applies to the use of DALL-E. Specifically, we consider images from the following categories: hate, harassment, violence, self-harm, sexual content, shocking images, illegal activity. Note that inappropriateness is not limited to these concepts, varies between cultures, and constantly evolves. Here we restricted ourselves on images displaying tangible acts of *inappropriate* behavior. Consequently, our test bed may contain prompts describing e.g.~geopolitical events or hateful symbols. Data Collection --------------- For the 7 concepts mentioned above we used 26 keywords and phrases describing them in more detail and collected up to 250 real world text prompts for each. For a given keyword we collected the prompts of the top 250 images returned by URL. Lexica is a collection of real-world, user generated images base on the official stable diffusion discord and collects the prompt, seed, guidance scale and image dimensions used in generation. Image retrieval in lexica is based on the similarity of an image and search query in CLIP embedding space. Therefore, the collected prompts have generated images that are close to an *inappropriate* concept in CLIP space. Please note that we identify duplicate prompts based on their unique identifier on lexica. Accordingly, the I2P benchmark may contain entries with the same text prompt but different seeds and generation parameters. Estimation of generated, *inappropriate* content ------------------------------------------------ We provide an estimation of the percentage of *inappropriate* content based on images generated using Stable Diffusion v1.4. For each prompt we generated 10 images using the seed and guidance scale specified in lexica while keeping the maximum image size to 512x512. Our estimate is based on the portion out of these 10 images that are classified as *inappropriate* by either the Q16 classifier or NudeNet Detector. Dataset Format -------------- The benchmark consists of the following fields:
[]
[ "TAGS\n#license-mit #arxiv-2211.05105 #region-us \n" ]
3f9b7abbe497912d75c2fac53d33184299499b77
# Dataset Card for Dataset Name ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary The health misinfo dataset is an English-language document dataset containing just over 6k unique articles related to health issues from the web. It was created in an effort to detect misinformation in health documents, and was built from the relevance judgments of the TREC Health Misinformation Track. ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
TheoTsio/Health_Misinfo
[ "task_categories:text-classification", "size_categories:1K<n<10K", "language:en", "health_misinformation, credibility", "region:us" ]
2022-10-19T11:45:11+00:00
{"language": ["en"], "size_categories": ["1K<n<10K"], "task_categories": ["text-classification"], "tags": ["health_misinformation, credibility"]}
2023-08-28T20:51:26+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #size_categories-1K<n<10K #language-English #health_misinformation, credibility #region-us
# Dataset Card for Dataset Name ## Dataset Description - Homepage: - Repository: - Paper: - Leaderboard: - Point of Contact: ### Dataset Summary The health misinfo dataset is an English Document dataset containing just over 6k unique articles related to health issues from web. This dataset was created in an effort to detect the misinformation in health documents. This dataset was created from the relevance judgment of the TREC health misinformation ### Supported Tasks and Leaderboards ### Languages ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions
[ "# Dataset Card for Dataset Name", "## Dataset Description\n- Homepage: \n- Repository: \n- Paper: \n- Leaderboard: \n- Point of Contact:", "### Dataset Summary\nThe health misinfo dataset is an English Document dataset containing just over 6k unique articles related to health issues from web. This dataset was created in an effort to detect the misinformation in health documents. This dataset was created from the relevance judgment of the TREC health misinformation", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#task_categories-text-classification #size_categories-1K<n<10K #language-English #health_misinformation, credibility #region-us \n", "# Dataset Card for Dataset Name", "## Dataset Description\n- Homepage: \n- Repository: \n- Paper: \n- Leaderboard: \n- Point of Contact:", "### Dataset Summary\nThe health misinfo dataset is an English Document dataset containing just over 6k unique articles related to health issues from web. This dataset was created in an effort to detect the misinformation in health documents. This dataset was created from the relevance judgment of the TREC health misinformation", "### Supported Tasks and Leaderboards", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
795f874041e2ffed87de1de0179ef6cb5e6ddb94
# Test
israel/AOHWR
[ "region:us" ]
2022-10-19T11:54:42+00:00
{}
2022-10-21T14:47:54+00:00
[]
[]
TAGS #region-us
# Test
[ "# Test" ]
[ "TAGS\n#region-us \n", "# Test" ]
9772109e5f6c5df495967d4ed2165e911ea24547
# Dataset Card for `reviews_with_drift` ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Language](#language) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description ### Dataset Summary This dataset was crafted to be used in our tutorial [Link to the tutorial when ready]. It consists of a large Movie Review Dataset mixed with some reviews from a Hotel Review Dataset. The training/validation sets are obtained purely from the Movie Review Dataset, while the production set is mixed. Some other features have been added (`age`, `gender`, `context`), as well as a made-up timestamp `prediction_ts` of when the inference took place. ### Supported Tasks and Leaderboards `text-classification`, `sentiment-classification`: The dataset is mainly used for text classification: given the text, predict the sentiment (positive or negative). ### Language Text is mainly written in English. ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data [More Information Needed] #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations [More Information Needed] #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@fjcasti1](https://github.com/fjcasti1) for adding this dataset.
arize-ai/beer_reviews_label_drift_neutral
[ "task_categories:text-classification", "task_ids:sentiment-classification", "annotations_creators:expert-generated", "language_creators:expert-generated", "multilinguality:monolingual", "size_categories:10K<n<100K", "language:en", "license:mit", "region:us" ]
2022-10-19T12:16:00+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["expert-generated"], "language": ["en"], "license": ["mit"], "multilinguality": ["monolingual"], "size_categories": ["10K<n<100K"], "task_categories": ["text-classification"], "task_ids": ["sentiment-classification"], "pretty_name": "sentiment-classification-reviews-with-drift"}
2022-10-19T12:19:17+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #task_ids-sentiment-classification #annotations_creators-expert-generated #language_creators-expert-generated #multilinguality-monolingual #size_categories-10K<n<100K #language-English #license-mit #region-us
# Dataset Card for 'reviews_with_drift' ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - language - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description ### Dataset Summary This dataset was crafted to be used in our tutorial [Link to the tutorial when ready]. It consists on a large Movie Review Dataset mixed with some reviews from a Hotel Review Dataset. The training/validation set are purely obtained from the Movie Review Dataset while the production set is mixed. Some other features have been added ('age', 'gender', 'context') as well as a made up timestamp 'prediction_ts' of when the inference took place. ### Supported Tasks and Leaderboards 'text-classification', 'sentiment-classification': The dataset is mainly used for text classification: given the text, predict the sentiment (positive or negative). ### language Text is mainly written in english. ## Dataset Structure ### Data Instances ### Data Fields ### Data Splits ## Dataset Creation ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators ### Licensing Information ### Contributions Thanks to @fjcasti1 for adding this dataset.
[ "# Dataset Card for 'reviews_with_drift'", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - language\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description", "### Dataset Summary\n\nThis dataset was crafted to be used in our tutorial [Link to the tutorial when ready]. It consists on a large Movie Review Dataset mixed with some reviews from a Hotel Review Dataset. The training/validation set are purely obtained from the Movie Review Dataset while the production set is mixed. Some other features have been added ('age', 'gender', 'context') as well as a made up timestamp 'prediction_ts' of when the inference took place.", "### Supported Tasks and Leaderboards\n\n'text-classification', 'sentiment-classification': The dataset is mainly used for text classification: given the text, predict the sentiment (positive or negative).", "### language\n\nText is mainly written in english.", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @fjcasti1 for adding this dataset." ]
[ "TAGS\n#task_categories-text-classification #task_ids-sentiment-classification #annotations_creators-expert-generated #language_creators-expert-generated #multilinguality-monolingual #size_categories-10K<n<100K #language-English #license-mit #region-us \n", "# Dataset Card for 'reviews_with_drift'", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - language\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description", "### Dataset Summary\n\nThis dataset was crafted to be used in our tutorial [Link to the tutorial when ready]. It consists on a large Movie Review Dataset mixed with some reviews from a Hotel Review Dataset. The training/validation set are purely obtained from the Movie Review Dataset while the production set is mixed. Some other features have been added ('age', 'gender', 'context') as well as a made up timestamp 'prediction_ts' of when the inference took place.", "### Supported Tasks and Leaderboards\n\n'text-classification', 'sentiment-classification': The dataset is mainly used for text classification: given the text, predict the sentiment (positive or negative).", "### language\n\nText is mainly written in english.", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @fjcasti1 for adding this dataset." ]
eb35cd73924b3b1440ca251b8c27f9503000d7d0
# Dataset Card for Kill-Me-Please Dataset ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) ## Dataset Description - **Repository:** [github pet project repo](https://github.com/takiholadi/generative-kill-me-please) ### Dataset Summary It is a Russian-language dataset containing just over 30k unique stories written by users of https://killpls.me over the period from March 2009 to October 2022. This resource was blocked by Roskomnadzor, so consider the text-generation task if you want more stories. ### Languages ru-RU ## Dataset Structure ### Data Instances Here is an example of an instance: ``` {'text': 'По глупости удалил всю 10 летнюю базу. Восстановлению не подлежит. Мне конец. КМП!' 'tags': 'техника' 'votes': 2914 'url': 'https://killpls.me/story/616' 'datetime': '4 июля 2009, 23:20'} ``` ### Data Fields - `text`: a string containing the body of the story - `tags`: a string containing comma-separated tags in a multi-label setup; the full set of tags (except for one empty-tagged record) is: `внешность`, `деньги`, `друзья`, `здоровье`, `отношения`, `работа`, `разное`, `родители`, `секс`, `семья`, `техника`, `учеба` - `votes`: an integer sum of upvotes/downvotes - `url`: a string containing the url where the story was web-scraped from - `datetime`: a string containing the datetime when the story was written ### Data Splits The dataset has 2 multi-label stratified splits: train and test. | Dataset Split | Number of Instances in Split | | ------------- | ------------------------------------------- | | Train | 27,321 | | Test | 2,772 |
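A minimal sketch of loading the splits and selecting stories by tag, following the comma-separated `tags` format described above (the exact separator whitespace is an assumption):

```python
# Minimal sketch: filter the train split down to stories tagged 'работа' (work).
from datasets import load_dataset

ds = load_dataset("takiholadi/kill-me-please-dataset")
train = ds["train"]

def has_tag(row, tag="работа"):
    # 'tags' is a comma-separated string per the field description; strip any padding.
    return tag in [t.strip() for t in row["tags"].split(",")]

work_stories = train.filter(has_tag)
print(len(work_stories), work_stories[0]["url"])
```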
takiholadi/kill-me-please-dataset
[ "task_categories:text-generation", "task_categories:text-classification", "annotations_creators:no-annotation", "language_creators:found", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:original", "language:ru", "stories", "website", "region:us" ]
2022-10-19T13:18:28+00:00
{"annotations_creators": ["no-annotation"], "language_creators": ["found"], "language": ["ru"], "multilinguality": ["monolingual"], "size_categories": ["10K<n<100K"], "source_datasets": ["original"], "task_categories": ["text-generation", "text-classification"], "pretty_name": "Kill-Me-Please Dataset", "tags": ["stories", "website"]}
2022-10-19T14:35:00+00:00
[]
[ "ru" ]
TAGS #task_categories-text-generation #task_categories-text-classification #annotations_creators-no-annotation #language_creators-found #multilinguality-monolingual #size_categories-10K<n<100K #source_datasets-original #language-Russian #stories #website #region-us
Dataset Card for Kill-Me-Please Dataset ======================================= Table of Contents ----------------- * Dataset Description + Dataset Summary + Languages * Dataset Structure + Data Instances + Data Fields + Data Splits Dataset Description ------------------- * Repository: github pet project repo ### Dataset Summary It is an Russian-language dataset containing just over 30k unique stories as written as users of URL as of period from March 2009 to October 2022. This resource was blocked by Roskomnadzor so consider text-generation task if you want more stories. ### Languages ru-RU Dataset Structure ----------------- ### Data Instances Here is an example of instance: ### Data Fields * 'text': a string containing the body of the story * 'tags': a string containing a comma-separated tags in a multi-label setup, fullset of tags (except of one empty-tagged record): 'внешность', 'деньги', 'друзья', 'здоровье', 'отношения', 'работа', 'разное', 'родители', 'секс', 'семья', 'техника', 'учеба' * 'votes': an integer sum of upvotes/downvotes * 'url': a string containing the url where the story was web-scraped from * 'datetime': a string containing with the datetime the story was written ### Data Splits The has 2 multi-label stratified splits: train and test.
[ "### Dataset Summary\n\n\nIt is an Russian-language dataset containing just over 30k unique stories as written as users of URL as of period from March 2009 to October 2022. This resource was blocked by Roskomnadzor so consider text-generation task if you want more stories.", "### Languages\n\n\nru-RU\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nHere is an example of instance:", "### Data Fields\n\n\n* 'text': a string containing the body of the story\n* 'tags': a string containing a comma-separated tags in a multi-label setup, fullset of tags (except of one empty-tagged record): 'внешность', 'деньги', 'друзья', 'здоровье', 'отношения', 'работа', 'разное', 'родители', 'секс', 'семья', 'техника', 'учеба'\n* 'votes': an integer sum of upvotes/downvotes\n* 'url': a string containing the url where the story was web-scraped from\n* 'datetime': a string containing with the datetime the story was written", "### Data Splits\n\n\nThe has 2 multi-label stratified splits: train and test." ]
[ "TAGS\n#task_categories-text-generation #task_categories-text-classification #annotations_creators-no-annotation #language_creators-found #multilinguality-monolingual #size_categories-10K<n<100K #source_datasets-original #language-Russian #stories #website #region-us \n", "### Dataset Summary\n\n\nIt is an Russian-language dataset containing just over 30k unique stories as written as users of URL as of period from March 2009 to October 2022. This resource was blocked by Roskomnadzor so consider text-generation task if you want more stories.", "### Languages\n\n\nru-RU\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nHere is an example of instance:", "### Data Fields\n\n\n* 'text': a string containing the body of the story\n* 'tags': a string containing a comma-separated tags in a multi-label setup, fullset of tags (except of one empty-tagged record): 'внешность', 'деньги', 'друзья', 'здоровье', 'отношения', 'работа', 'разное', 'родители', 'секс', 'семья', 'техника', 'учеба'\n* 'votes': an integer sum of upvotes/downvotes\n* 'url': a string containing the url where the story was web-scraped from\n* 'datetime': a string containing with the datetime the story was written", "### Data Splits\n\n\nThe has 2 multi-label stratified splits: train and test." ]
c680d3168b82af06fb55adc58c07ac8dd4676473
# AutoTrain Dataset for project: swin-muppet ## Dataset Description This dataset has been automatically processed by AutoTrain for project swin-muppet. ### Languages The BCP-47 code for the dataset's language is unk. ## Dataset Structure ### Data Instances A sample from this dataset looks as follows: ```json [ { "image": "<286x286 RGB PIL image>", "target": 7 }, { "image": "<169x170 RGB PIL image>", "target": 13 } ] ``` ### Dataset Fields The dataset has the following fields (also called "features"): ```json { "image": "Image(decode=True, id=None)", "target": "ClassLabel(num_classes=24, names=['Animal', 'Beaker', 'Bert', 'BigBird', 'Bunsen', 'Camilla', 'CookieMonster', 'Elmo', 'Ernie', 'Floyd', 'Fozzie', 'Gonzo', 'Grover', 'Kermit', 'Oscar', 'Pepe', 'Piggy', 'Rowlf', 'Scooter', 'Statler', 'SwedishChef', 'TheCount', 'Waldorf', 'Zoot'], id=None)" } ``` ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follows: | Split name | Num samples | | ------------ | ------------------- | | train | 599 | | valid | 162 |
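A minimal sketch of recovering the class name behind the integer `target`, using the `ClassLabel` feature listed above (split names `train`/`valid` follow the split table; which sample comes first is not documented):

```python
# Minimal sketch: map the integer target back to a Muppet name.
from datasets import load_dataset

ds = load_dataset("Norod78/MuppetFaces", split="train")

label_feature = ds.features["target"]  # the ClassLabel described in the card
sample = ds[0]
print(label_feature.int2str(sample["target"]))  # e.g. 'Kermit'
```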
Norod78/MuppetFaces
[ "task_categories:image-classification", "region:us" ]
2022-10-19T13:33:10+00:00
{"task_categories": ["image-classification"]}
2022-10-19T13:45:17+00:00
[]
[]
TAGS #task_categories-image-classification #region-us
AutoTrain Dataset for project: swin-muppet
==========================================


Dataset Description
-------------------


This dataset has been automatically processed by AutoTrain for project swin-muppet.

### Languages

The BCP-47 code for the dataset's language is unk.

Dataset Structure
-----------------

### Data Instances

A sample from this dataset looks as follows:

### Dataset Fields

The dataset has the following fields (also called "features"):

### Dataset Splits

This dataset is split into a train and validation split. The split sizes are as follows:
[ "### Languages\n\n\nThe BCP-47 code for the dataset's language is unk.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
[ "TAGS\n#task_categories-image-classification #region-us \n", "### Languages\n\n\nThe BCP-47 code for the dataset's language is unk.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA sample from this dataset looks as follows:", "### Dataset Fields\n\n\nThe dataset has the following fields (also called \"features\"):", "### Dataset Splits\n\n\nThis dataset is split into a train and validation split. The split sizes are as follow:" ]
4ebd876e18b1161d1fe37c2fbd4c4777cfe57259
# Dataset Card for "pereira_fMRI_passages" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
helena-balabin/pereira_fMRI_passages
[ "region:us" ]
2022-10-19T13:45:37+00:00
{"dataset_info": {"features": [{"name": "language_lh", "sequence": {"sequence": "float64"}}, {"name": "language_rh", "sequence": {"sequence": "float64"}}, {"name": "vision_body", "sequence": {"sequence": "float64"}}, {"name": "vision_face", "sequence": {"sequence": "float64"}}, {"name": "vision_object", "sequence": {"sequence": "float64"}}, {"name": "vision_scene", "sequence": {"sequence": "float64"}}, {"name": "vision", "sequence": {"sequence": "float64"}}, {"name": "dmn", "sequence": {"sequence": "float64"}}, {"name": "task", "sequence": {"sequence": "float64"}}, {"name": "all", "sequence": {"sequence": "float64"}}, {"name": "paragraphs", "sequence": "string"}, {"name": "topic_indices", "sequence": "uint8"}, {"name": "permuted_paragraphs", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 1649652912, "num_examples": 8}], "download_size": 1658872446, "dataset_size": 1649652912}}
2023-07-27T12:34:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "pereira_fMRI_passages" More Information needed
[ "# Dataset Card for \"pereira_fMRI_passages\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"pereira_fMRI_passages\"\n\nMore Information needed" ]
3f9f0f1717d145b86f080ae907aa27a1190caa7c
# Dataset Card for "scientific_lay_summarisation" - **Repository:** https://github.com/TGoldsack1/Corpora_for_Lay_Summarisation - **Paper:** [Making Science Simple: Corpora for the Lay Summarisation of Scientific Literature](https://arxiv.org/abs/2210.09932) - **Size of downloaded dataset files:** 850.44 MB - **Size of the generated dataset:** 1.32 GB - **Total amount of disk used:** 2.17 GB ### Dataset Summary This repository contains the PLOS and eLife datasets, introduced in the EMNLP 2022 paper "[Making Science Simple: Corpora for the Lay Summarisation of Scientific Literature ](https://arxiv.org/abs/2210.09932)" . Each dataset contains full biomedical research articles paired with expert-written lay summaries (i.e., non-technical summaries). PLOS articles are derived from various journals published by [the Public Library of Science (PLOS)](https://plos.org/), whereas eLife articles are derived from the [eLife](https://elifesciences.org/) journal. More details/analyses on the content of each dataset are provided in the paper. Both "elife" and "plos" have 6 features: - "article": the body of the document (including the abstract), sections separated by "/n". - "section_headings": the title of each section, separated by "/n". - "keywords": keywords describing the topic of the article, separated by "/n". - "title": the title of the article. - "year": the year the article was published. - "summary": the lay summary of the document. **Note:** The format of both datasets differs from that used in the original repository (given above) in order to make them compatible with the `run_summarization.py` script of Transformers. Specifically, sentence tokenization is removed via " ".join(text), and the abstract and article sections, previously lists of sentences, are combined into a single `string` feature ("article") with each section separated by "\n". For the sentence-tokenized version of the dataset, please use the original git repository. ### Supported Tasks and Leaderboards Papers with code - [PLOS](https://paperswithcode.com/sota/lay-summarization-on-plos) and [eLife](https://paperswithcode.com/sota/lay-summarization-on-elife). ### Languages English ## Dataset Structure ### Data Instances #### plos - **Size of downloaded dataset files:** 425.22 MB - **Size of the generated dataset:** 1.05 GB - **Total amount of disk used:** 1.47 GB An example of 'train' looks as follows. ``` This example was too long and was cropped: { "summary": "In the kidney , structures known as nephrons are responsible for collecting metabolic waste . Nephrons are composed of a ...", "article": "Kidney function depends on the nephron , which comprises a 'blood filter , a tubule that is subdivided into functionally ...", "section_headings": "Abstract\nIntroduction\nResults\nDiscussion\nMaterials and Methods'", "keywords": "developmental biology\ndanio (zebrafish)\nvertebrates\nteleost fishes\nnephrology", "title": "The cdx Genes and Retinoic Acid Control the Positioning and Segmentation of the Zebrafish Pronephros", "year": "2007" } ``` #### elife - **Size of downloaded dataset files:** 425.22 MB - **Size of the generated dataset:** 275.99 MB - **Total amount of disk used:** 1.47 MB An example of 'train' looks as follows. ``` This example was too long and was cropped: { "summary": "In the USA , more deaths happen in the winter than the summer . But when deaths occur varies greatly by sex , age , cause of ...", "article": "In temperate climates , winter deaths exceed summer ones . 
However , there is limited information on the timing and the ...", "section_headings": "Abstract\nIntroduction\nResults\nDiscussion\nMaterials and methods", "keywords": "epidemiology and global health", "title": "National and regional seasonal dynamics of all-cause and cause-specific mortality in the USA from 1980 to 2016", "year": "2018" } ``` ### Data Fields The data fields are the same among all splits. #### plos - `article`: a `string` feature. - `section_headings`: a `string` feature. - `keywords`: a `string` feature. - `title` : a `string` feature. - `year` : a `string` feature. - `summary`: a `string` feature. #### elife - `article`: a `string` feature. - `section_headings`: a `string` feature. - `keywords`: a `string` feature. - `title` : a `string` feature. - `year` : a `string` feature. - `summary`: a `string` feature. ### Data Splits | name |train |validation|test| |------|-----:|---------:|---:| |plos | 24773| 1376|1376| |elife | 4346| 241| 241| ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information ``` "Making Science Simple: Corpora for the Lay Summarisation of Scientific Literature" Tomas Goldsack, Zhihao Zhang, Chenghua Lin, Carolina Scarton EMNLP 2022 ```
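To make the "\n"-separated layout concrete, here is a minimal sketch that loads one configuration and re-pairs each section heading with its section body; it assumes the `plos` config name and the default split layout:

```python
# Sketch: pair each section heading with its "\n"-separated section body.
from datasets import load_dataset

ds = load_dataset("tomasg25/scientific_lay_summarisation", "plos", split="train")

example = ds[0]
headings = example["section_headings"].split("\n")
sections = example["article"].split("\n")

# The two lists should line up one-to-one if the description above holds.
for heading, body in zip(headings, sections):
    print(f"{heading}: {body[:80]}...")
```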
tomasg25/scientific_lay_summarisation
[ "task_categories:summarization", "annotations_creators:found", "language_creators:found", "multilinguality:monolingual", "size_categories:10K<n<100K", "size_categories:1K<n<10K", "source_datasets:original", "language:en", "license:cc-by-4.0", "abstractive-summarization", "scientific-papers", "lay-summarization", "PLOS", "eLife", "arxiv:2210.09932", "region:us" ]
2022-10-19T13:46:52+00:00
{"annotations_creators": ["found"], "language_creators": ["found"], "language": ["en"], "license": ["cc-by-4.0"], "multilinguality": ["monolingual"], "size_categories": ["10K<n<100K", "1K<n<10K"], "source_datasets": ["original"], "task_categories": ["summarization"], "task_ids": [], "pretty_name": "ScientificLaySummarisation", "tags": ["abstractive-summarization", "scientific-papers", "lay-summarization", "PLOS", "eLife"]}
2023-12-12T07:51:52+00:00
[ "2210.09932" ]
[ "en" ]
TAGS #task_categories-summarization #annotations_creators-found #language_creators-found #multilinguality-monolingual #size_categories-10K<n<100K #size_categories-1K<n<10K #source_datasets-original #language-English #license-cc-by-4.0 #abstractive-summarization #scientific-papers #lay-summarization #PLOS #eLife #arxiv-2210.09932 #region-us
Dataset Card for "scientific\_lay\_summarisation" ================================================= * Repository: URL * Paper: Making Science Simple: Corpora for the Lay Summarisation of Scientific Literature * Size of downloaded dataset files: 850.44 MB * Size of the generated dataset: 1.32 GB * Total amount of disk used: 2.17 GB ### Dataset Summary This repository contains the PLOS and eLife datasets, introduced in the EMNLP 2022 paper "Making Science Simple: Corpora for the Lay Summarisation of Scientific Literature " . Each dataset contains full biomedical research articles paired with expert-written lay summaries (i.e., non-technical summaries). PLOS articles are derived from various journals published by the Public Library of Science (PLOS), whereas eLife articles are derived from the eLife journal. More details/analyses on the content of each dataset are provided in the paper. Both "elife" and "plos" have 6 features: ``` - "article": the body of the document (including the abstract), sections separated by "/n". - "section_headings": the title of each section, separated by "/n". - "keywords": keywords describing the topic of the article, separated by "/n". - "title": the title of the article. - "year": the year the article was published. - "summary": the lay summary of the document. ``` Note: The format of both datasets differs from that used in the original repository (given above) in order to make them compatible with the 'run\_summarization.py' script of Transformers. Specifically, sentence tokenization is removed via " ".join(text), and the abstract and article sections, previously lists of sentences, are combined into a single 'string' feature ("article") with each section separated by "\n". For the sentence-tokenized version of the dataset, please use the original git repository. ### Supported Tasks and Leaderboards Papers with code - PLOS and eLife. ### Languages English Dataset Structure ----------------- ### Data Instances #### plos * Size of downloaded dataset files: 425.22 MB * Size of the generated dataset: 1.05 GB * Total amount of disk used: 1.47 GB An example of 'train' looks as follows. #### elife * Size of downloaded dataset files: 425.22 MB * Size of the generated dataset: 275.99 MB * Total amount of disk used: 1.47 MB An example of 'train' looks as follows. ### Data Fields The data fields are the same among all splits. #### plos * 'article': a 'string' feature. * 'section\_headings': a 'string' feature. * 'keywords': a 'string' feature. * 'title' : a 'string' feature. * 'year' : a 'string' feature. * 'summary': a 'string' feature. #### elife * 'article': a 'string' feature. * 'section\_headings': a 'string' feature. * 'keywords': a 'string' feature. * 'title' : a 'string' feature. * 'year' : a 'string' feature. * 'summary': a 'string' feature. ### Data Splits Dataset Creation ---------------- ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information Considerations for Using the Data --------------------------------- ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations Additional Information ---------------------- ### Dataset Curators ### Licensing Information
[ "### Dataset Summary\n\n\nThis repository contains the PLOS and eLife datasets, introduced in the EMNLP 2022 paper \"Making Science Simple: Corpora for the Lay Summarisation of Scientific Literature\n\" .\n\n\nEach dataset contains full biomedical research articles paired with expert-written lay summaries (i.e., non-technical summaries). PLOS articles are derived from various journals published by the Public Library of Science (PLOS), whereas eLife articles are derived from the eLife journal. More details/analyses on the content of each dataset are provided in the paper.\n\n\nBoth \"elife\" and \"plos\" have 6 features:\n\n\n\n```\n- \"article\": the body of the document (including the abstract), sections separated by \"/n\".\n- \"section_headings\": the title of each section, separated by \"/n\". \n- \"keywords\": keywords describing the topic of the article, separated by \"/n\".\n- \"title\": the title of the article.\n- \"year\": the year the article was published.\n- \"summary\": the lay summary of the document.\n\n```\n\nNote: The format of both datasets differs from that used in the original repository (given above) in order to make them compatible with the 'run\\_summarization.py' script of Transformers. Specifically, sentence tokenization is removed via \" \".join(text), and the abstract and article sections, previously lists of sentences, are combined into a single 'string' feature (\"article\") with each section separated by \"\\n\". For the sentence-tokenized version of the dataset, please use the original git repository.", "### Supported Tasks and Leaderboards\n\n\nPapers with code - PLOS and eLife.", "### Languages\n\n\nEnglish\n\n\nDataset Structure\n-----------------", "### Data Instances", "#### plos\n\n\n* Size of downloaded dataset files: 425.22 MB\n* Size of the generated dataset: 1.05 GB\n* Total amount of disk used: 1.47 GB\n\n\nAn example of 'train' looks as follows.", "#### elife\n\n\n* Size of downloaded dataset files: 425.22 MB\n* Size of the generated dataset: 275.99 MB\n* Total amount of disk used: 1.47 MB\n\n\nAn example of 'train' looks as follows.", "### Data Fields\n\n\nThe data fields are the same among all splits.", "#### plos\n\n\n* 'article': a 'string' feature.\n* 'section\\_headings': a 'string' feature.\n* 'keywords': a 'string' feature.\n* 'title' : a 'string' feature.\n* 'year' : a 'string' feature.\n* 'summary': a 'string' feature.", "#### elife\n\n\n* 'article': a 'string' feature.\n* 'section\\_headings': a 'string' feature.\n* 'keywords': a 'string' feature.\n* 'title' : a 'string' feature.\n* 'year' : a 'string' feature.\n* 'summary': a 'string' feature.", "### Data Splits\n\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information" ]
[ "TAGS\n#task_categories-summarization #annotations_creators-found #language_creators-found #multilinguality-monolingual #size_categories-10K<n<100K #size_categories-1K<n<10K #source_datasets-original #language-English #license-cc-by-4.0 #abstractive-summarization #scientific-papers #lay-summarization #PLOS #eLife #arxiv-2210.09932 #region-us \n", "### Dataset Summary\n\n\nThis repository contains the PLOS and eLife datasets, introduced in the EMNLP 2022 paper \"Making Science Simple: Corpora for the Lay Summarisation of Scientific Literature\n\" .\n\n\nEach dataset contains full biomedical research articles paired with expert-written lay summaries (i.e., non-technical summaries). PLOS articles are derived from various journals published by the Public Library of Science (PLOS), whereas eLife articles are derived from the eLife journal. More details/analyses on the content of each dataset are provided in the paper.\n\n\nBoth \"elife\" and \"plos\" have 6 features:\n\n\n\n```\n- \"article\": the body of the document (including the abstract), sections separated by \"/n\".\n- \"section_headings\": the title of each section, separated by \"/n\". \n- \"keywords\": keywords describing the topic of the article, separated by \"/n\".\n- \"title\": the title of the article.\n- \"year\": the year the article was published.\n- \"summary\": the lay summary of the document.\n\n```\n\nNote: The format of both datasets differs from that used in the original repository (given above) in order to make them compatible with the 'run\\_summarization.py' script of Transformers. Specifically, sentence tokenization is removed via \" \".join(text), and the abstract and article sections, previously lists of sentences, are combined into a single 'string' feature (\"article\") with each section separated by \"\\n\". 
For the sentence-tokenized version of the dataset, please use the original git repository.", "### Supported Tasks and Leaderboards\n\n\nPapers with code - PLOS and eLife.", "### Languages\n\n\nEnglish\n\n\nDataset Structure\n-----------------", "### Data Instances", "#### plos\n\n\n* Size of downloaded dataset files: 425.22 MB\n* Size of the generated dataset: 1.05 GB\n* Total amount of disk used: 1.47 GB\n\n\nAn example of 'train' looks as follows.", "#### elife\n\n\n* Size of downloaded dataset files: 425.22 MB\n* Size of the generated dataset: 275.99 MB\n* Total amount of disk used: 1.47 MB\n\n\nAn example of 'train' looks as follows.", "### Data Fields\n\n\nThe data fields are the same among all splits.", "#### plos\n\n\n* 'article': a 'string' feature.\n* 'section\\_headings': a 'string' feature.\n* 'keywords': a 'string' feature.\n* 'title' : a 'string' feature.\n* 'year' : a 'string' feature.\n* 'summary': a 'string' feature.", "#### elife\n\n\n* 'article': a 'string' feature.\n* 'section\\_headings': a 'string' feature.\n* 'keywords': a 'string' feature.\n* 'title' : a 'string' feature.\n* 'year' : a 'string' feature.\n* 'summary': a 'string' feature.", "### Data Splits\n\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information" ]
38ecb87f301650f2c0bac5ffca47bf6c137398d3
# Dataset Card for "pereira_fMRI_sentences" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
helena-balabin/pereira_fMRI_sentences
[ "region:us" ]
2022-10-19T14:28:48+00:00
{"dataset_info": {"features": [{"name": "language_lh", "sequence": {"sequence": "float64"}}, {"name": "language_rh", "sequence": {"sequence": "float64"}}, {"name": "vision_body", "sequence": {"sequence": "float64"}}, {"name": "vision_face", "sequence": {"sequence": "float64"}}, {"name": "vision_object", "sequence": {"sequence": "float64"}}, {"name": "vision_scene", "sequence": {"sequence": "float64"}}, {"name": "vision", "sequence": {"sequence": "float64"}}, {"name": "dmn", "sequence": {"sequence": "float64"}}, {"name": "task", "sequence": {"sequence": "float64"}}, {"name": "all", "sequence": {"sequence": "float64"}}, {"name": "sentences", "sequence": "string"}], "splits": [{"name": "train", "num_bytes": 6597174480, "num_examples": 8}], "download_size": 6598415137, "dataset_size": 6597174480}}
2022-10-19T15:07:42+00:00
[]
[]
TAGS #region-us
# Dataset Card for "pereira_fMRI_sentences" More Information needed
[ "# Dataset Card for \"pereira_fMRI_sentences\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"pereira_fMRI_sentences\"\n\nMore Information needed" ]
83b1ebbbf896baa6f2e9780a954eaede1086b3cc
## Kinyarwanda-English parallel text 

This dataset contains 55,000 Kinyarwanda-English sentence pairs, obtained by scraping web data from religious sources such as:
[Bible](https://servervideos.hopto.org/XMLBible/EnglishKJBible.xml)
[Quran](https://quranenc.com/en/home/download/csv/kinyarwanda_assoc)

This dataset has not been curated, only cleaned.
mbazaNLP/Kinyarwanda_English_parallel_dataset
[ "license:cc-by-4.0", "region:us" ]
2022-10-19T14:40:28+00:00
{"license": "cc-by-4.0", "extra_gated_prompt": "You agree to not attempt to determine the identity of individuals in this dataset", "extra_gated_fields": {"Company": "text", "Country": "text", "Email": "text", "I agree to use this model for non-commercial use ONLY": "checkbox"}}
2023-04-08T14:01:28+00:00
[]
[]
TAGS #license-cc-by-4.0 #region-us
## Kinyarwanda-English parallel text

This dataset contains 55,000 Kinyarwanda-English sentence pairs, obtained by scraping web data from religious sources such as:
Bible 
Quran

This dataset has not been curated, only cleaned.
[ "## Kinyarwanda-English parallel text \n\nThis dataset contains 55,000 Kinyarwanda-English sentence pairs, obtained by scraping web data from religious sources such as:\nBible \nQuran\n\nThis dataset has not been curated only cleaned." ]
[ "TAGS\n#license-cc-by-4.0 #region-us \n", "## Kinyarwanda-English parallel text \n\nThis dataset contains 55,000 Kinyarwanda-English sentence pairs, obtained by scraping web data from religious sources such as:\nBible \nQuran\n\nThis dataset has not been curated only cleaned." ]
e1378170980e1e93de987c2f4976dc3dd2183975
## Dataset Overview ### Intro This dataset was downloaded from the good folks at fivethirtyeight. You can find the original (or in the future, updated) versions of this and several similar datasets at [this GitHub link.](https://github.com/fivethirtyeight/data/tree/master/nba-raptor) ### Data layout Here are the columns in this dataset, which contains data on every NBA player, broken out by season, since the 1976 NBA-ABA merger: Column | Description -------|--------------- `player_name` | Player name `player_id` | Basketball-Reference.com player ID `season` | Season `season_type` | Regular season (RS) or playoff (PO) `team` | Basketball-Reference ID of team `poss` | Possessions played `mp` | Minutes played `raptor_box_offense` | Points above average per 100 possessions added by player on offense, based only on box score estimate `raptor_box_defense` | Points above average per 100 possessions added by player on defense, based only on box score estimate `raptor_box_total` | Points above average per 100 possessions added by player, based only on box score estimate `raptor_onoff_offense` | Points above average per 100 possessions added by player on offense, based only on plus-minus data `raptor_onoff_defense` | Points above average per 100 possessions added by player on defense, based only on plus-minus data `raptor_onoff_total` | Points above average per 100 possessions added by player, based only on plus-minus data `raptor_offense` | Points above average per 100 possessions added by player on offense, using both box and on-off components `raptor_defense` | Points above average per 100 possessions added by player on defense, using both box and on-off components `raptor_total` | Points above average per 100 possessions added by player on both offense and defense, using both box and on-off components `war_total` | Wins Above Replacement between regular season and playoffs `war_reg_season` | Wins Above Replacement for regular season `war_playoffs` | Wins Above Replacement for playoffs `predator_offense` | Predictive points above average per 100 possessions added by player on offense `predator_defense` | Predictive points above average per 100 possessions added by player on defense `predator_total` | Predictive points above average per 100 possessions added by player on both offense and defense `pace_impact` | Player impact on team possessions per 48 minutes ### More information This dataset was put together for Hugging Face by this guy: [Andrew Kroening](https://github.com/andrewkroening) He was building some kind of a silly tool using this dataset. It's an NBA WAR Predictor tool, and you can find the Gradio interface [here.](https://huggingface.co/spaces/andrewkroening/nba-war-predictor) The GitHub repo can be found [here.](https://github.com/andrewkroening/nba-war-predictor-tool)
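As a quick illustration of the layout above, here is a minimal pandas sketch that pulls the top regular-season WAR seasons; the CSV filename is an assumption (check the 538 repo linked above for the exact name):

```python
# Sketch: top-10 regular-season WAR seasons from the RAPTOR data.
# The filename is assumed; the 538 repo linked above has the actual CSVs.
import pandas as pd

df = pd.read_csv("historical_RAPTOR_by_player.csv")

top = (
    df[df["season_type"] == "RS"]                        # regular-season rows only
    .sort_values("war_reg_season", ascending=False)
    .loc[:, ["player_name", "season", "war_reg_season"]]
    .head(10)
)
print(top)
```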
andrewkroening/538-NBA-Historical-Raptor
[ "license:cc", "region:us" ]
2022-10-19T15:47:53+00:00
{"license": "cc"}
2022-11-06T22:14:56+00:00
[]
[]
TAGS #license-cc #region-us
Dataset Overview ---------------- ### Intro This dataset was downloaded from the good folks at fivethirtyeight. You can find the original (or in the future, updated) versions of this and several similar datasets at this GitHub link. ### Data layout Here are the columns in this dataset, which contains data on every NBA player, broken out by season, since the 1976 NBA-ABA merger: ### More information This dataset was put together for Hugging Face by this guy: Andrew Kroening He was building some kind of a silly tool using this dataset. It's an NBA WAR Predictor tool, and you can find the Gradio interface here. The GitHub repo can be found here.
[ "### Intro\n\n\nThis dataset was downloaded from the good folks at fivethirtyeight. You can find the original (or in the future, updated) versions of this and several similar datasets at this GitHub link.", "### Data layout\n\n\nHere are the columns in this dataset, which contains data on every NBA player, broken out by season, since the 1976 NBA-ABA merger:", "### More information\n\n\nThis dataset was put together for Hugging Face by this guy: Andrew Kroening\n\n\nHe was building some kind of a silly tool using this dataset. It's an NBA WAR Predictor tool, and you can find the Gradio interface here. The GitHub repo can be found here." ]
[ "TAGS\n#license-cc #region-us \n", "### Intro\n\n\nThis dataset was downloaded from the good folks at fivethirtyeight. You can find the original (or in the future, updated) versions of this and several similar datasets at this GitHub link.", "### Data layout\n\n\nHere are the columns in this dataset, which contains data on every NBA player, broken out by season, since the 1976 NBA-ABA merger:", "### More information\n\n\nThis dataset was put together for Hugging Face by this guy: Andrew Kroening\n\n\nHe was building some kind of a silly tool using this dataset. It's an NBA WAR Predictor tool, and you can find the Gradio interface here. The GitHub repo can be found here." ]
7deafb29500efc4a31d362844b350e8f607b2f62
# Dataset Card for "CelebA-faces-cropped-128-encoded" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
pcuenq/CelebA-faces-cropped-128-encoded
[ "region:us" ]
2022-10-19T16:07:30+00:00
{"dataset_info": {"features": [{"name": "latents", "sequence": "float32"}], "splits": [{"name": "test", "num_bytes": 41533000, "num_examples": 10130}, {"name": "train", "num_bytes": 789122900, "num_examples": 192469}], "download_size": 843386957, "dataset_size": 830655900}}
2022-10-19T16:09:12+00:00
[]
[]
TAGS #region-us
# Dataset Card for "CelebA-faces-cropped-128-encoded" More Information needed
[ "# Dataset Card for \"CelebA-faces-cropped-128-encoded\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"CelebA-faces-cropped-128-encoded\"\n\nMore Information needed" ]
8c2d26d202840a3c34e9130114810181613bf6e4
Data for the SemEval 2023 clickbait spoiling task.
Tugay/clickbait-spoiling
[ "region:us" ]
2022-10-19T17:50:16+00:00
{}
2022-10-19T18:08:59+00:00
[]
[]
TAGS #region-us
Data for the SemEval 2023 clickbait spoiling task.
[]
[ "TAGS\n#region-us \n" ]
6607cbb5129ed0db4817bbfb3b1e65ff7db9a792
## Dataset Summary
- **Homepage:** https://sites.google.com/view/salt-nlp-flang
- **Models:** https://huggingface.co/SALT-NLP/FLANG-BERT
- **Repository:** https://github.com/SALT-NLP/FLANG

## FLUE
FLUE (Financial Language Understanding Evaluation) is a comprehensive and heterogeneous benchmark that has been built from 5 diverse financial domain-specific datasets.

Sentiment Classification: [Financial PhraseBank](https://huggingface.co/datasets/financial_phrasebank)\
Sentiment Analysis, Question Answering: [FiQA 2018](https://huggingface.co/datasets/SALT-NLP/FLUE-FiQA)\
News Headlines Classification: [Headlines](https://www.kaggle.com/datasets/daittan/gold-commodity-news-and-dimensions)\
Named Entity Recognition: [NER](https://huggingface.co/datasets/SALT-NLP/FLUE-NER)\
Structure Boundary Detection: [FinSBD3](https://sites.google.com/nlg.csie.ntu.edu.tw/finweb2021/shared-task-finsbd-3)

## Dataset Structure
The FiQA dataset has a corpus, queries and qrels (relevance judgments file). They are in the following format:
- `corpus` file: a `.jsonl` file (jsonlines) that contains a list of dictionaries, each with three fields `_id` with unique document identifier, `title` with document title (optional) and `text` with document paragraph or passage. For example: `{"_id": "doc1", "title": "Albert Einstein", "text": "Albert Einstein was a German-born...."}`
- `queries` file: a `.jsonl` file (jsonlines) that contains a list of dictionaries, each with two fields `_id` with unique query identifier and `text` with query text. For example: `{"_id": "q1", "text": "Who developed the mass-energy equivalence formula?"}`
- `qrels` file: a `.tsv` file (tab-separated) that contains three columns, i.e. the `query-id`, `corpus-id` and `score` in this order. Keep the 1st row as the header. For example: `q1 doc1 1`
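A minimal sketch of reading the three files described above; the exact filenames (`corpus.jsonl`, `queries.jsonl`, `qrels.tsv`) are assumptions based on the format description:

```python
# Sketch: load the corpus, queries, and qrels in the format described above.
# Filenames are assumed; adjust them to the actual names in the repository.
import csv
import json

with open("corpus.jsonl") as f:
    corpus = {d["_id"]: d for d in map(json.loads, f)}

with open("queries.jsonl") as f:
    queries = {d["_id"]: d["text"] for d in map(json.loads, f)}

qrels = {}
with open("qrels.tsv") as f:
    reader = csv.DictReader(f, delimiter="\t")  # 1st row is the header
    for row in reader:
        qrels.setdefault(row["query-id"], {})[row["corpus-id"]] = int(row["score"])

print(len(corpus), "documents,", len(queries), "queries")
```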
SALT-NLP/FLUE-FiQA
[ "license:cc-by-3.0", "region:us" ]
2022-10-19T22:39:48+00:00
{"license": "cc-by-3.0"}
2022-10-21T16:29:14+00:00
[]
[]
TAGS #license-cc-by-3.0 #region-us
## Dataset Summary - Homepage: URL - Models: URL - Repository: URL ## FLUE FLUE (Financial Language Understanding Evaluation) is a comprehensive and heterogeneous benchmark that has been built from 5 diverse financial domain specific datasets. Sentiment Classification: Financial PhraseBank\ Sentiment Analysis, Question Answering: FiQA 2018\ New Headlines Classification: Headlines\ Named Entity Recognition: NER\ Structure Boundary Detection: FinSBD3 ## Dataset Structure The FiQA dataset has a corpus, queries and qrels (relevance judgments file). They are in the following format: - 'corpus' file: a '.jsonl' file (jsonlines) that contains a list of dictionaries, each with three fields '_id' with unique document identifier, 'title' with document title (optional) and 'text' with document paragraph or passage. For example: '{"_id": "doc1", "title": "Albert Einstein", "text": "Albert Einstein was a German-born...."}' - 'queries' file: a '.jsonl' file (jsonlines) that contains a list of dictionaries, each with two fields '_id' with unique query identifier and 'text' with query text. For example: '{"_id": "q1", "text": "Who developed the mass-energy equivalence formula?"}' - 'qrels' file: a '.tsv' file (tab-seperated) that contains three columns, i.e. the 'query-id', 'corpus-id' and 'score' in this order. Keep 1st row as header. For example: 'q1 doc1 1'
[ "## Dataset Summary\n- Homepage: URL\n- Models: URL\n- Repository: URL", "## FLUE\nFLUE (Financial Language Understanding Evaluation) is a comprehensive and heterogeneous benchmark that has been built from 5 diverse financial domain specific datasets.\n\nSentiment Classification: Financial PhraseBank\\\nSentiment Analysis, Question Answering: FiQA 2018\\\nNew Headlines Classification: Headlines\\\nNamed Entity Recognition: NER\\\nStructure Boundary Detection: FinSBD3", "## Dataset Structure\nThe FiQA dataset has a corpus, queries and qrels (relevance judgments file). They are in the following format:\n- 'corpus' file: a '.jsonl' file (jsonlines) that contains a list of dictionaries, each with three fields '_id' with unique document identifier, 'title' with document title (optional) and 'text' with document paragraph or passage. For example: '{\"_id\": \"doc1\", \"title\": \"Albert Einstein\", \"text\": \"Albert Einstein was a German-born....\"}'\n- 'queries' file: a '.jsonl' file (jsonlines) that contains a list of dictionaries, each with two fields '_id' with unique query identifier and 'text' with query text. For example: '{\"_id\": \"q1\", \"text\": \"Who developed the mass-energy equivalence formula?\"}'\n- 'qrels' file: a '.tsv' file (tab-seperated) that contains three columns, i.e. the 'query-id', 'corpus-id' and 'score' in this order. Keep 1st row as header. For example: 'q1 doc1 1'" ]
[ "TAGS\n#license-cc-by-3.0 #region-us \n", "## Dataset Summary\n- Homepage: URL\n- Models: URL\n- Repository: URL", "## FLUE\nFLUE (Financial Language Understanding Evaluation) is a comprehensive and heterogeneous benchmark that has been built from 5 diverse financial domain specific datasets.\n\nSentiment Classification: Financial PhraseBank\\\nSentiment Analysis, Question Answering: FiQA 2018\\\nNew Headlines Classification: Headlines\\\nNamed Entity Recognition: NER\\\nStructure Boundary Detection: FinSBD3", "## Dataset Structure\nThe FiQA dataset has a corpus, queries and qrels (relevance judgments file). They are in the following format:\n- 'corpus' file: a '.jsonl' file (jsonlines) that contains a list of dictionaries, each with three fields '_id' with unique document identifier, 'title' with document title (optional) and 'text' with document paragraph or passage. For example: '{\"_id\": \"doc1\", \"title\": \"Albert Einstein\", \"text\": \"Albert Einstein was a German-born....\"}'\n- 'queries' file: a '.jsonl' file (jsonlines) that contains a list of dictionaries, each with two fields '_id' with unique query identifier and 'text' with query text. For example: '{\"_id\": \"q1\", \"text\": \"Who developed the mass-energy equivalence formula?\"}'\n- 'qrels' file: a '.tsv' file (tab-seperated) that contains three columns, i.e. the 'query-id', 'corpus-id' and 'score' in this order. Keep 1st row as header. For example: 'q1 doc1 1'" ]
5c5c1ed77208cde12cf6dbd819102668587a5fb5
# Dataset Card for "relbert/semeval2012_relational_similarity_v3" ## Dataset Description - **Repository:** [RelBERT](https://github.com/asahi417/relbert) - **Paper:** [https://aclanthology.org/S12-1047/](https://aclanthology.org/S12-1047/) - **Dataset:** SemEval2012: Relational Similarity ### Dataset Summary ***IMPORTANT***: This is the same dataset as [relbert/semeval2012_relational_similarity](https://huggingface.co/datasets/relbert/semeval2012_relational_similarity), but with a different dataset construction. Relational similarity dataset from [SemEval2012 task 2](https://aclanthology.org/S12-1047/), compiled to fine-tune [RelBERT](https://github.com/asahi417/relbert) model. The dataset contains a list of positive and negative word pair from 89 pre-defined relations. The relation types are constructed on top of following 10 parent relation types. ```shell { 1: "Class Inclusion", # Hypernym 2: "Part-Whole", # Meronym, Substance Meronym 3: "Similar", # Synonym, Co-hypornym 4: "Contrast", # Antonym 5: "Attribute", # Attribute, Event 6: "Non Attribute", 7: "Case Relation", 8: "Cause-Purpose", 9: "Space-Time", 10: "Representation" } ``` Each of the parent relation is further grouped into child relation types where the definition can be found [here](https://drive.google.com/file/d/0BzcZKTSeYL8VenY0QkVpZVpxYnc/view?resourcekey=0-ZP-UARfJj39PcLroibHPHw). ## Dataset Structure ### Data Instances An example of `train` looks as follows. ``` { 'relation_type': '8d', 'positives': [ [ "breathe", "live" ], [ "study", "learn" ], [ "speak", "communicate" ], ... ] 'negatives': [ [ "starving", "hungry" ], [ "clean", "bathe" ], [ "hungry", "starving" ], ... ] } ``` ### Data Splits | name |train|validation| |---------|----:|---------:| |semeval2012_relational_similarity| 89 | 89| ### Number of Positive/Negative Word-pairs in each Split | | positives | negatives | |:--------------------------------------------|------------:|------------:| | ('1', 'parent', 'train') | 110 | 680 | | ('1', 'parent', 'validation') | 129 | 760 | | ('10', 'parent', 'train') | 60 | 730 | | ('10', 'parent', 'validation') | 66 | 823 | | ('10a', 'child', 'train') | 10 | 780 | | ('10a', 'child', 'validation') | 14 | 875 | | ('10a', 'child_prototypical', 'train') | 39 | 506 | | ('10a', 'child_prototypical', 'validation') | 63 | 938 | | ('10b', 'child', 'train') | 10 | 780 | | ('10b', 'child', 'validation') | 13 | 876 | | ('10b', 'child_prototypical', 'train') | 39 | 428 | | ('10b', 'child_prototypical', 'validation') | 57 | 707 | | ('10c', 'child', 'train') | 10 | 780 | | ('10c', 'child', 'validation') | 11 | 878 | | ('10c', 'child_prototypical', 'train') | 39 | 545 | | ('10c', 'child_prototypical', 'validation') | 45 | 650 | | ('10d', 'child', 'train') | 10 | 780 | | ('10d', 'child', 'validation') | 10 | 879 | | ('10d', 'child_prototypical', 'train') | 39 | 506 | | ('10d', 'child_prototypical', 'validation') | 39 | 506 | | ('10e', 'child', 'train') | 10 | 780 | | ('10e', 'child', 'validation') | 8 | 881 | | ('10e', 'child_prototypical', 'train') | 39 | 350 | | ('10e', 'child_prototypical', 'validation') | 27 | 218 | | ('10f', 'child', 'train') | 10 | 780 | | ('10f', 'child', 'validation') | 10 | 879 | | ('10f', 'child_prototypical', 'train') | 39 | 506 | | ('10f', 'child_prototypical', 'validation') | 39 | 506 | | ('1a', 'child', 'train') | 10 | 780 | | ('1a', 'child', 'validation') | 14 | 875 | | ('1a', 'child_prototypical', 'train') | 39 | 428 | | ('1a', 'child_prototypical', 'validation') | 63 | 812 | | ('1b', 'child', 
'train') | 10 | 780 | | ('1b', 'child', 'validation') | 14 | 875 | | ('1b', 'child_prototypical', 'train') | 39 | 428 | | ('1b', 'child_prototypical', 'validation') | 63 | 812 | | ('1c', 'child', 'train') | 10 | 780 | | ('1c', 'child', 'validation') | 11 | 878 | | ('1c', 'child_prototypical', 'train') | 39 | 545 | | ('1c', 'child_prototypical', 'validation') | 45 | 650 | | ('1d', 'child', 'train') | 10 | 780 | | ('1d', 'child', 'validation') | 16 | 873 | | ('1d', 'child_prototypical', 'train') | 39 | 428 | | ('1d', 'child_prototypical', 'validation') | 75 | 1040 | | ('1e', 'child', 'train') | 10 | 780 | | ('1e', 'child', 'validation') | 8 | 881 | | ('1e', 'child_prototypical', 'train') | 39 | 311 | | ('1e', 'child_prototypical', 'validation') | 27 | 191 | | ('2', 'parent', 'train') | 100 | 690 | | ('2', 'parent', 'validation') | 117 | 772 | | ('2a', 'child', 'train') | 10 | 780 | | ('2a', 'child', 'validation') | 15 | 874 | | ('2a', 'child_prototypical', 'train') | 39 | 506 | | ('2a', 'child_prototypical', 'validation') | 69 | 1061 | | ('2b', 'child', 'train') | 10 | 780 | | ('2b', 'child', 'validation') | 11 | 878 | | ('2b', 'child_prototypical', 'train') | 39 | 389 | | ('2b', 'child_prototypical', 'validation') | 45 | 470 | | ('2c', 'child', 'train') | 10 | 780 | | ('2c', 'child', 'validation') | 13 | 876 | | ('2c', 'child_prototypical', 'train') | 39 | 467 | | ('2c', 'child_prototypical', 'validation') | 57 | 764 | | ('2d', 'child', 'train') | 10 | 780 | | ('2d', 'child', 'validation') | 10 | 879 | | ('2d', 'child_prototypical', 'train') | 39 | 467 | | ('2d', 'child_prototypical', 'validation') | 39 | 467 | | ('2e', 'child', 'train') | 10 | 780 | | ('2e', 'child', 'validation') | 11 | 878 | | ('2e', 'child_prototypical', 'train') | 39 | 506 | | ('2e', 'child_prototypical', 'validation') | 45 | 605 | | ('2f', 'child', 'train') | 10 | 780 | | ('2f', 'child', 'validation') | 11 | 878 | | ('2f', 'child_prototypical', 'train') | 39 | 623 | | ('2f', 'child_prototypical', 'validation') | 45 | 740 | | ('2g', 'child', 'train') | 10 | 780 | | ('2g', 'child', 'validation') | 16 | 873 | | ('2g', 'child_prototypical', 'train') | 39 | 389 | | ('2g', 'child_prototypical', 'validation') | 75 | 965 | | ('2h', 'child', 'train') | 10 | 780 | | ('2h', 'child', 'validation') | 11 | 878 | | ('2h', 'child_prototypical', 'train') | 39 | 506 | | ('2h', 'child_prototypical', 'validation') | 45 | 605 | | ('2i', 'child', 'train') | 10 | 780 | | ('2i', 'child', 'validation') | 9 | 880 | | ('2i', 'child_prototypical', 'train') | 39 | 545 | | ('2i', 'child_prototypical', 'validation') | 33 | 446 | | ('2j', 'child', 'train') | 10 | 780 | | ('2j', 'child', 'validation') | 10 | 879 | | ('2j', 'child_prototypical', 'train') | 39 | 584 | | ('2j', 'child_prototypical', 'validation') | 39 | 584 | | ('3', 'parent', 'train') | 80 | 710 | | ('3', 'parent', 'validation') | 80 | 809 | | ('3a', 'child', 'train') | 10 | 780 | | ('3a', 'child', 'validation') | 11 | 878 | | ('3a', 'child_prototypical', 'train') | 39 | 506 | | ('3a', 'child_prototypical', 'validation') | 45 | 605 | | ('3b', 'child', 'train') | 10 | 780 | | ('3b', 'child', 'validation') | 11 | 878 | | ('3b', 'child_prototypical', 'train') | 39 | 623 | | ('3b', 'child_prototypical', 'validation') | 45 | 740 | | ('3c', 'child', 'train') | 10 | 780 | | ('3c', 'child', 'validation') | 12 | 877 | | ('3c', 'child_prototypical', 'train') | 39 | 467 | | ('3c', 'child_prototypical', 'validation') | 51 | 659 | | ('3d', 'child', 'train') | 10 | 780 | | ('3d', 'child', 
'validation') | 14 | 875 | | ('3d', 'child_prototypical', 'train') | 39 | 467 | | ('3d', 'child_prototypical', 'validation') | 63 | 875 | | ('3e', 'child', 'train') | 10 | 780 | | ('3e', 'child', 'validation') | 5 | 884 | | ('3e', 'child_prototypical', 'train') | 39 | 623 | | ('3e', 'child_prototypical', 'validation') | 10 | 140 | | ('3f', 'child', 'train') | 10 | 780 | | ('3f', 'child', 'validation') | 11 | 878 | | ('3f', 'child_prototypical', 'train') | 39 | 662 | | ('3f', 'child_prototypical', 'validation') | 45 | 785 | | ('3g', 'child', 'train') | 10 | 780 | | ('3g', 'child', 'validation') | 6 | 883 | | ('3g', 'child_prototypical', 'train') | 39 | 584 | | ('3g', 'child_prototypical', 'validation') | 15 | 200 | | ('3h', 'child', 'train') | 10 | 780 | | ('3h', 'child', 'validation') | 10 | 879 | | ('3h', 'child_prototypical', 'train') | 39 | 584 | | ('3h', 'child_prototypical', 'validation') | 39 | 584 | | ('4', 'parent', 'train') | 80 | 710 | | ('4', 'parent', 'validation') | 82 | 807 | | ('4a', 'child', 'train') | 10 | 780 | | ('4a', 'child', 'validation') | 11 | 878 | | ('4a', 'child_prototypical', 'train') | 39 | 623 | | ('4a', 'child_prototypical', 'validation') | 45 | 740 | | ('4b', 'child', 'train') | 10 | 780 | | ('4b', 'child', 'validation') | 7 | 882 | | ('4b', 'child_prototypical', 'train') | 39 | 428 | | ('4b', 'child_prototypical', 'validation') | 21 | 203 | | ('4c', 'child', 'train') | 10 | 780 | | ('4c', 'child', 'validation') | 12 | 877 | | ('4c', 'child_prototypical', 'train') | 39 | 545 | | ('4c', 'child_prototypical', 'validation') | 51 | 761 | | ('4d', 'child', 'train') | 10 | 780 | | ('4d', 'child', 'validation') | 4 | 885 | | ('4d', 'child_prototypical', 'train') | 39 | 389 | | ('4d', 'child_prototypical', 'validation') | 6 | 46 | | ('4e', 'child', 'train') | 10 | 780 | | ('4e', 'child', 'validation') | 12 | 877 | | ('4e', 'child_prototypical', 'train') | 39 | 623 | | ('4e', 'child_prototypical', 'validation') | 51 | 863 | | ('4f', 'child', 'train') | 10 | 780 | | ('4f', 'child', 'validation') | 9 | 880 | | ('4f', 'child_prototypical', 'train') | 39 | 623 | | ('4f', 'child_prototypical', 'validation') | 33 | 512 | | ('4g', 'child', 'train') | 10 | 780 | | ('4g', 'child', 'validation') | 15 | 874 | | ('4g', 'child_prototypical', 'train') | 39 | 467 | | ('4g', 'child_prototypical', 'validation') | 69 | 992 | | ('4h', 'child', 'train') | 10 | 780 | | ('4h', 'child', 'validation') | 12 | 877 | | ('4h', 'child_prototypical', 'train') | 39 | 584 | | ('4h', 'child_prototypical', 'validation') | 51 | 812 | | ('5', 'parent', 'train') | 90 | 700 | | ('5', 'parent', 'validation') | 105 | 784 | | ('5a', 'child', 'train') | 10 | 780 | | ('5a', 'child', 'validation') | 14 | 875 | | ('5a', 'child_prototypical', 'train') | 39 | 467 | | ('5a', 'child_prototypical', 'validation') | 63 | 875 | | ('5b', 'child', 'train') | 10 | 780 | | ('5b', 'child', 'validation') | 8 | 881 | | ('5b', 'child_prototypical', 'train') | 39 | 584 | | ('5b', 'child_prototypical', 'validation') | 27 | 380 | | ('5c', 'child', 'train') | 10 | 780 | | ('5c', 'child', 'validation') | 11 | 878 | | ('5c', 'child_prototypical', 'train') | 39 | 506 | | ('5c', 'child_prototypical', 'validation') | 45 | 605 | | ('5d', 'child', 'train') | 10 | 780 | | ('5d', 'child', 'validation') | 15 | 874 | | ('5d', 'child_prototypical', 'train') | 39 | 428 | | ('5d', 'child_prototypical', 'validation') | 69 | 923 | | ('5e', 'child', 'train') | 10 | 780 | | ('5e', 'child', 'validation') | 8 | 881 | | ('5e', 'child_prototypical', 
'train') | 39 | 584 | | ('5e', 'child_prototypical', 'validation') | 27 | 380 | | ('5f', 'child', 'train') | 10 | 780 | | ('5f', 'child', 'validation') | 11 | 878 | | ('5f', 'child_prototypical', 'train') | 39 | 584 | | ('5f', 'child_prototypical', 'validation') | 45 | 695 | | ('5g', 'child', 'train') | 10 | 780 | | ('5g', 'child', 'validation') | 9 | 880 | | ('5g', 'child_prototypical', 'train') | 39 | 623 | | ('5g', 'child_prototypical', 'validation') | 33 | 512 | | ('5h', 'child', 'train') | 10 | 780 | | ('5h', 'child', 'validation') | 15 | 874 | | ('5h', 'child_prototypical', 'train') | 39 | 545 | | ('5h', 'child_prototypical', 'validation') | 69 | 1130 | | ('5i', 'child', 'train') | 10 | 780 | | ('5i', 'child', 'validation') | 14 | 875 | | ('5i', 'child_prototypical', 'train') | 39 | 545 | | ('5i', 'child_prototypical', 'validation') | 63 | 1001 | | ('6', 'parent', 'train') | 80 | 710 | | ('6', 'parent', 'validation') | 99 | 790 | | ('6a', 'child', 'train') | 10 | 780 | | ('6a', 'child', 'validation') | 15 | 874 | | ('6a', 'child_prototypical', 'train') | 39 | 467 | | ('6a', 'child_prototypical', 'validation') | 69 | 992 | | ('6b', 'child', 'train') | 10 | 780 | | ('6b', 'child', 'validation') | 11 | 878 | | ('6b', 'child_prototypical', 'train') | 39 | 584 | | ('6b', 'child_prototypical', 'validation') | 45 | 695 | | ('6c', 'child', 'train') | 10 | 780 | | ('6c', 'child', 'validation') | 13 | 876 | | ('6c', 'child_prototypical', 'train') | 39 | 584 | | ('6c', 'child_prototypical', 'validation') | 57 | 935 | | ('6d', 'child', 'train') | 10 | 780 | | ('6d', 'child', 'validation') | 10 | 879 | | ('6d', 'child_prototypical', 'train') | 39 | 701 | | ('6d', 'child_prototypical', 'validation') | 39 | 701 | | ('6e', 'child', 'train') | 10 | 780 | | ('6e', 'child', 'validation') | 11 | 878 | | ('6e', 'child_prototypical', 'train') | 39 | 584 | | ('6e', 'child_prototypical', 'validation') | 45 | 695 | | ('6f', 'child', 'train') | 10 | 780 | | ('6f', 'child', 'validation') | 12 | 877 | | ('6f', 'child_prototypical', 'train') | 39 | 506 | | ('6f', 'child_prototypical', 'validation') | 51 | 710 | | ('6g', 'child', 'train') | 10 | 780 | | ('6g', 'child', 'validation') | 12 | 877 | | ('6g', 'child_prototypical', 'train') | 39 | 467 | | ('6g', 'child_prototypical', 'validation') | 51 | 659 | | ('6h', 'child', 'train') | 10 | 780 | | ('6h', 'child', 'validation') | 15 | 874 | | ('6h', 'child_prototypical', 'train') | 39 | 506 | | ('6h', 'child_prototypical', 'validation') | 69 | 1061 | | ('7', 'parent', 'train') | 80 | 710 | | ('7', 'parent', 'validation') | 91 | 798 | | ('7a', 'child', 'train') | 10 | 780 | | ('7a', 'child', 'validation') | 14 | 875 | | ('7a', 'child_prototypical', 'train') | 39 | 545 | | ('7a', 'child_prototypical', 'validation') | 63 | 1001 | | ('7b', 'child', 'train') | 10 | 780 | | ('7b', 'child', 'validation') | 7 | 882 | | ('7b', 'child_prototypical', 'train') | 39 | 389 | | ('7b', 'child_prototypical', 'validation') | 21 | 182 | | ('7c', 'child', 'train') | 10 | 780 | | ('7c', 'child', 'validation') | 11 | 878 | | ('7c', 'child_prototypical', 'train') | 39 | 428 | | ('7c', 'child_prototypical', 'validation') | 45 | 515 | | ('7d', 'child', 'train') | 10 | 780 | | ('7d', 'child', 'validation') | 14 | 875 | | ('7d', 'child_prototypical', 'train') | 39 | 545 | | ('7d', 'child_prototypical', 'validation') | 63 | 1001 | | ('7e', 'child', 'train') | 10 | 780 | | ('7e', 'child', 'validation') | 10 | 879 | | ('7e', 'child_prototypical', 'train') | 39 | 428 | | ('7e', 
'child_prototypical', 'validation') | 39 | 428 | | ('7f', 'child', 'train') | 10 | 780 | | ('7f', 'child', 'validation') | 12 | 877 | | ('7f', 'child_prototypical', 'train') | 39 | 389 | | ('7f', 'child_prototypical', 'validation') | 51 | 557 | | ('7g', 'child', 'train') | 10 | 780 | | ('7g', 'child', 'validation') | 9 | 880 | | ('7g', 'child_prototypical', 'train') | 39 | 311 | | ('7g', 'child_prototypical', 'validation') | 33 | 248 | | ('7h', 'child', 'train') | 10 | 780 | | ('7h', 'child', 'validation') | 14 | 875 | | ('7h', 'child_prototypical', 'train') | 39 | 350 | | ('7h', 'child_prototypical', 'validation') | 63 | 686 | | ('8', 'parent', 'train') | 80 | 710 | | ('8', 'parent', 'validation') | 90 | 799 | | ('8a', 'child', 'train') | 10 | 780 | | ('8a', 'child', 'validation') | 14 | 875 | | ('8a', 'child_prototypical', 'train') | 39 | 428 | | ('8a', 'child_prototypical', 'validation') | 63 | 812 | | ('8b', 'child', 'train') | 10 | 780 | | ('8b', 'child', 'validation') | 7 | 882 | | ('8b', 'child_prototypical', 'train') | 39 | 584 | | ('8b', 'child_prototypical', 'validation') | 21 | 287 | | ('8c', 'child', 'train') | 10 | 780 | | ('8c', 'child', 'validation') | 12 | 877 | | ('8c', 'child_prototypical', 'train') | 39 | 389 | | ('8c', 'child_prototypical', 'validation') | 51 | 557 | | ('8d', 'child', 'train') | 10 | 780 | | ('8d', 'child', 'validation') | 13 | 876 | | ('8d', 'child_prototypical', 'train') | 39 | 389 | | ('8d', 'child_prototypical', 'validation') | 57 | 650 | | ('8e', 'child', 'train') | 10 | 780 | | ('8e', 'child', 'validation') | 11 | 878 | | ('8e', 'child_prototypical', 'train') | 39 | 389 | | ('8e', 'child_prototypical', 'validation') | 45 | 470 | | ('8f', 'child', 'train') | 10 | 780 | | ('8f', 'child', 'validation') | 12 | 877 | | ('8f', 'child_prototypical', 'train') | 39 | 428 | | ('8f', 'child_prototypical', 'validation') | 51 | 608 | | ('8g', 'child', 'train') | 10 | 780 | | ('8g', 'child', 'validation') | 7 | 882 | | ('8g', 'child_prototypical', 'train') | 39 | 272 | | ('8g', 'child_prototypical', 'validation') | 21 | 119 | | ('8h', 'child', 'train') | 10 | 780 | | ('8h', 'child', 'validation') | 14 | 875 | | ('8h', 'child_prototypical', 'train') | 39 | 467 | | ('8h', 'child_prototypical', 'validation') | 63 | 875 | | ('9', 'parent', 'train') | 90 | 700 | | ('9', 'parent', 'validation') | 96 | 793 | | ('9a', 'child', 'train') | 10 | 780 | | ('9a', 'child', 'validation') | 14 | 875 | | ('9a', 'child_prototypical', 'train') | 39 | 350 | | ('9a', 'child_prototypical', 'validation') | 63 | 686 | | ('9b', 'child', 'train') | 10 | 780 | | ('9b', 'child', 'validation') | 12 | 877 | | ('9b', 'child_prototypical', 'train') | 39 | 506 | | ('9b', 'child_prototypical', 'validation') | 51 | 710 | | ('9c', 'child', 'train') | 10 | 780 | | ('9c', 'child', 'validation') | 7 | 882 | | ('9c', 'child_prototypical', 'train') | 39 | 155 | | ('9c', 'child_prototypical', 'validation') | 21 | 56 | | ('9d', 'child', 'train') | 10 | 780 | | ('9d', 'child', 'validation') | 9 | 880 | | ('9d', 'child_prototypical', 'train') | 39 | 662 | | ('9d', 'child_prototypical', 'validation') | 33 | 545 | | ('9e', 'child', 'train') | 10 | 780 | | ('9e', 'child', 'validation') | 8 | 881 | | ('9e', 'child_prototypical', 'train') | 39 | 701 | | ('9e', 'child_prototypical', 'validation') | 27 | 461 | | ('9f', 'child', 'train') | 10 | 780 | | ('9f', 'child', 'validation') | 10 | 879 | | ('9f', 'child_prototypical', 'train') | 39 | 506 | | ('9f', 'child_prototypical', 'validation') | 39 | 506 | | ('9g', 
'child', 'train') | 10 | 780 | | ('9g', 'child', 'validation') | 14 | 875 | | ('9g', 'child_prototypical', 'train') | 39 | 389 | | ('9g', 'child_prototypical', 'validation') | 63 | 749 | | ('9h', 'child', 'train') | 10 | 780 | | ('9h', 'child', 'validation') | 13 | 876 | | ('9h', 'child_prototypical', 'train') | 39 | 506 | | ('9h', 'child_prototypical', 'validation') | 57 | 821 | | ('9i', 'child', 'train') | 10 | 780 | | ('9i', 'child', 'validation') | 9 | 880 | | ('9i', 'child_prototypical', 'train') | 39 | 506 | | ('9i', 'child_prototypical', 'validation') | 33 | 413 | ### Citation Information ``` @inproceedings{jurgens-etal-2012-semeval, title = "{S}em{E}val-2012 Task 2: Measuring Degrees of Relational Similarity", author = "Jurgens, David and Mohammad, Saif and Turney, Peter and Holyoak, Keith", booktitle = "*{SEM} 2012: The First Joint Conference on Lexical and Computational Semantics {--} Volume 1: Proceedings of the main conference and the shared task, and Volume 2: Proceedings of the Sixth International Workshop on Semantic Evaluation ({S}em{E}val 2012)", month = "7-8 " # jun, year = "2012", address = "Montr{\'e}al, Canada", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/S12-1047", pages = "356--364", } ```
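Since the child relation ids (e.g. `8d`) appear to encode their parent by their leading digits, a small sketch like this can recover the parent relation name for each instance; the parent-name mapping is copied from the card, while the digit-prefix parsing is an assumption about the id scheme:

```python
# Sketch: map a child relation id such as "8d" back to its parent relation name.
# Parsing the leading digits is an assumption about the id scheme ("8d" -> 8).
import re

PARENTS = {
    1: "Class Inclusion", 2: "Part-Whole", 3: "Similar", 4: "Contrast",
    5: "Attribute", 6: "Non Attribute", 7: "Case Relation",
    8: "Cause-Purpose", 9: "Space-Time", 10: "Representation",
}

def parent_of(relation_type: str) -> str:
    parent_id = int(re.match(r"\d+", relation_type).group())
    return PARENTS[parent_id]

print(parent_of("8d"))   # Cause-Purpose
print(parent_of("10a"))  # Representation
```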
research-backup/semeval2012_relational_similarity_v3
[ "multilinguality:monolingual", "size_categories:1K<n<10K", "language:en", "license:other", "region:us" ]
2022-10-20T01:05:30+00:00
{"language": ["en"], "license": ["other"], "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "pretty_name": "SemEval2012 task 2 Relational Similarity"}
2022-10-21T09:17:28+00:00
[]
[ "en" ]
TAGS #multilinguality-monolingual #size_categories-1K<n<10K #language-English #license-other #region-us
Dataset Card for "relbert/semeval2012\_relational\_similarity\_v3" ================================================================== Dataset Description ------------------- * Repository: RelBERT * Paper: URL * Dataset: SemEval2012: Relational Similarity ### Dataset Summary *IMPORTANT*: This is the same dataset as relbert/semeval2012\_relational\_similarity, but with a different dataset construction. Relational similarity dataset from SemEval2012 task 2, compiled to fine-tune RelBERT model. The dataset contains a list of positive and negative word pair from 89 pre-defined relations. The relation types are constructed on top of following 10 parent relation types. Each of the parent relation is further grouped into child relation types where the definition can be found here. Dataset Structure ----------------- ### Data Instances An example of 'train' looks as follows. ### Data Splits ### Number of Positive/Negative Word-pairs in each Split
[ "### Dataset Summary\n\n\n*IMPORTANT*: This is the same dataset as relbert/semeval2012\\_relational\\_similarity,\nbut with a different dataset construction.\n\n\nRelational similarity dataset from SemEval2012 task 2, compiled to fine-tune RelBERT model.\nThe dataset contains a list of positive and negative word pair from 89 pre-defined relations.\nThe relation types are constructed on top of following 10 parent relation types.\n\n\nEach of the parent relation is further grouped into child relation types where the definition can be found here.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nAn example of 'train' looks as follows.", "### Data Splits", "### Number of Positive/Negative Word-pairs in each Split" ]
[ "TAGS\n#multilinguality-monolingual #size_categories-1K<n<10K #language-English #license-other #region-us \n", "### Dataset Summary\n\n\n*IMPORTANT*: This is the same dataset as relbert/semeval2012\\_relational\\_similarity,\nbut with a different dataset construction.\n\n\nRelational similarity dataset from SemEval2012 task 2, compiled to fine-tune RelBERT model.\nThe dataset contains a list of positive and negative word pair from 89 pre-defined relations.\nThe relation types are constructed on top of following 10 parent relation types.\n\n\nEach of the parent relation is further grouped into child relation types where the definition can be found here.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nAn example of 'train' looks as follows.", "### Data Splits", "### Number of Positive/Negative Word-pairs in each Split" ]
0452bbabf6cef498e2c3b5eb908c9a5cf87ff4f6
# Dataset Card for "relbert/conceptnet_relation_similarity"
## Dataset Description
- **Repository:** [RelBERT](https://github.com/asahi417/relbert)
- **Paper:** [https://home.ttic.edu/~kgimpel/commonsense.html](https://home.ttic.edu/~kgimpel/commonsense.html)
- **Dataset:** Relational similarity dataset based on the high-confidence subset of ConceptNet

### Dataset Summary
This is the selected subset of ConceptNet used in [this work](https://home.ttic.edu/~kgimpel/commonsense.html), compiled to fine-tune the [RelBERT](https://github.com/asahi417/relbert) model.
We removed `NotCapableOf` and `NotDesires` to keep only the positive relations.
We consider the original test set as the test set, dev1 as the training set, and dev2 as the validation set.

## Dataset Structure
### Data Instances
An example of `train` looks as follows.
```shell
{
    "relation_type": "AtLocation",
    "positives": [["fish", "water"], ["cloud", "sky"], ["child", "school"], ... ],
    "negatives": [["pen", "write"], ["sex", "fun"], ["soccer", "sport"], ["fish", "school"], ... ]
}
```
### Data Splits
| train |validation| test|
|--------:|---------:|---------:|
|   28|  34 |  16|

### Citation Information
```
@InProceedings{P16-1137,
  author    = "Li, Xiang and Taheri, Aynaz and Tu, Lifu and Gimpel, Kevin",
  title     = "Commonsense Knowledge Base Completion",
  booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
  year      = "2016",
  publisher = "Association for Computational Linguistics",
  pages     = "1445--1455",
  location  = "Berlin, Germany",
  doi       = "10.18653/v1/P16-1137",
  url       = "http://aclweb.org/anthology/P16-1137"
}
```
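### Usage Example

A minimal loading sketch with the Hugging Face `datasets` library. The field names follow the `train` example above; the exact `load_dataset` call is an assumption rather than a documented entry point.

```python
# Sketch: flatten the per-relation records into labelled word pairs,
# e.g. for contrastive fine-tuning. Field names follow the example above;
# the load_dataset call itself is assumed, not confirmed by this card.
from datasets import load_dataset

data = load_dataset("relbert/conceptnet_relational_similarity", split="train")

pairs = []
for record in data:
    relation = record["relation_type"]
    for head, tail in record["positives"]:
        pairs.append((head, tail, relation, 1))  # positive word pair
    for head, tail in record["negatives"]:
        pairs.append((head, tail, relation, 0))  # negative word pair

print(f"{len(pairs)} labelled pairs across {len(data)} relation types")
```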
relbert/conceptnet_relational_similarity
[ "multilinguality:monolingual", "size_categories:n<1K", "language:en", "license:other", "region:us" ]
2022-10-20T02:40:41+00:00
{"language": ["en"], "license": ["other"], "multilinguality": ["monolingual"], "size_categories": ["n<1K"], "pretty_name": "ConceptNet with High Confidence"}
2023-08-01T13:32:01+00:00
[]
[ "en" ]
TAGS #multilinguality-monolingual #size_categories-n<1K #language-English #license-other #region-us
Dataset Card for "relbert/conceptnet\_relation\_similarity"
===========================================================


Dataset Description
-------------------


* Repository: RelBERT
* Paper: URL
* Dataset: Relational similarity dataset based on the high-confidence subset of ConceptNet


### Dataset Summary


This is the selected subset of ConceptNet used in this work, compiled
to fine-tune the RelBERT model.
We removed 'NotCapableOf' and 'NotDesires' to keep only the positive relations.
We consider the original test set as the test set, dev1 as the training set, and dev2 as the validation set.


Dataset Structure
-----------------


### Data Instances


An example of 'train' looks as follows.


### Data Splits
[ "### Dataset Summary\n\n\nThis is the selected subset of ConceptNet used in this work, compiled\nto fine-tune the RelBERT model.\nWe removed 'NotCapableOf' and 'NotDesires' to keep only the positive relations.\nWe consider the original test set as the test set, dev1 as the training set, and dev2 as the validation set.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nAn example of 'train' looks as follows.", "### Data Splits" ]
[ "TAGS\n#multilinguality-monolingual #size_categories-n<1K #language-English #license-other #region-us \n", "### Dataset Summary\n\n\nThis is the selected subset of ConceptNet used in this work, compiled\nto fine-tune the RelBERT model.\nWe removed 'NotCapableOf' and 'NotDesires' to keep only the positive relations.\nWe consider the original test set as the test set, dev1 as the training set, and dev2 as the validation set.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nAn example of 'train' looks as follows.", "### Data Splits" ]
fee643c48b14fb0a02a609a8162fa5aa704b7305
# Dataset Card for Thesaurus of Modern Slovene 1.0

Also known as "Sopomenke 1.0". Available in application form online: https://viri.cjvt.si/sopomenke/slv/.

### Dataset Summary

This is an automatically created Slovene thesaurus from Slovene data available in a comprehensive English–Slovenian dictionary, a monolingual dictionary, and a corpus. A network analysis on the bilingual dictionary word co-occurrence graph was used, together with additional information from the distributional thesaurus data available as part of the Sketch Engine tool and extracted from the 1.2 billion word Gigafida corpus and the monolingual dictionary.

For a detailed description of the data, please see the paper Krek et al. (2017).

### Supported Tasks and Leaderboards

Other (the data is a knowledge base).

### Languages

Slovenian.

## Dataset Structure

### Data Instances

Each entry is stored in its own instance. The following instance contains the metadata for the `headword` "abeceda" (EN: "alphabet").

```
{
	'id_headword': 'th.12',
	'headword': 'abeceda',
	'groups_core': [],
	'groups_near': [
		{
			'id_words': ['th.12.1', 'th.12.2'],
			'words': ['pisava', 'črkopis'],
			'scores': [0.3311710059642792, 0.3311710059642792],
			'domains': [['jezikoslovje'], ['jezikoslovje']]
		}
	]
}
```

### Data Fields

- `id_headword`: a string ID of the word;
- `headword`: the word whose synonyms are grouped in the instance;
- `groups_core`: groups of likely synonyms - each group contains the IDs of the words (`id_words`), the synonyms (`words`), and how strong the synonym relation (`scores`) is. Some groups also have domains annotated (`domains`, >= 1 per word, i.e. `domains` is a list of lists);
- `groups_near`: same as `groups_core`, but the synonyms here are typically less likely to be exact synonyms and more likely to be otherwise similar.

## Additional Information

### Dataset Curators

Simon Krek; et al. (please see http://hdl.handle.net/11356/1166 for the full list).

### Licensing Information

CC BY-SA 4.0

### Citation Information

```
@article{krek2017translation,
  title={From translation equivalents to synonyms: creation of a Slovene thesaurus using word co-occurrence network analysis},
  author={Krek, Simon and Laskowski, Cyprian and Robnik-{\v{S}}ikonja, Marko},
  journal={Proceedings of eLex},
  pages={93--109},
  year={2017}
}
```

### Contributions

Thanks to [@matejklemen](https://github.com/matejklemen) for adding this dataset.
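### Usage Example

A minimal sketch with the Hugging Face `datasets` library, relying on the fields documented above; the `load_dataset` call and the split name are assumptions.

```python
# Sketch: build a headword -> scored-synonyms lookup from the documented fields.
from datasets import load_dataset

thesaurus = load_dataset("cjvt/slo_thesaurus", split="train")  # split name assumed

synonyms = {}
for entry in thesaurus:
    scored = []
    # `groups_core` holds the likely synonyms, `groups_near` the looser candidates
    for group in entry["groups_core"] + entry["groups_near"]:
        scored.extend(zip(group["words"], group["scores"]))
    synonyms[entry["headword"]] = sorted(scored, key=lambda ws: -ws[1])

print(synonyms.get("abeceda"))  # e.g. [('pisava', 0.331...), ('črkopis', 0.331...)]
```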
cjvt/slo_thesaurus
[ "task_categories:other", "annotations_creators:machine-generated", "language_creators:machine-generated", "multilinguality:monolingual", "size_categories:100K<n<1M", "language:sl", "license:cc-by-sa-4.0", "sopomenke", "synonyms", "region:us" ]
2022-10-20T04:56:11+00:00
{"annotations_creators": ["machine-generated"], "language_creators": ["machine-generated"], "language": ["sl"], "license": ["cc-by-sa-4.0"], "multilinguality": ["monolingual"], "size_categories": ["100K<n<1M"], "source_datasets": [], "task_categories": ["other"], "task_ids": [], "pretty_name": "Thesaurus of Modern Slovene 1.0", "tags": ["sopomenke", "synonyms"]}
2022-10-20T11:23:03+00:00
[]
[ "sl" ]
TAGS #task_categories-other #annotations_creators-machine-generated #language_creators-machine-generated #multilinguality-monolingual #size_categories-100K<n<1M #language-Slovenian #license-cc-by-sa-4.0 #sopomenke #synonyms #region-us
# Dataset Card for Thesaurus of Modern Slovene 1.0

Also known as "Sopomenke 1.0". Available in application form online: URL

### Dataset Summary

This is an automatically created Slovene thesaurus from Slovene data available in a comprehensive English–Slovenian dictionary, a monolingual dictionary, and a corpus. A network analysis on the bilingual dictionary word co-occurrence graph was used, together with additional information from the distributional thesaurus data available as part of the Sketch Engine tool and extracted from the 1.2 billion word Gigafida corpus and the monolingual dictionary.

For a detailed description of the data, please see the paper Krek et al. (2017).

### Supported Tasks and Leaderboards

Other (the data is a knowledge base).

### Languages

Slovenian.

## Dataset Structure

### Data Instances

Each entry is stored in its own instance. The following instance contains the metadata for the 'headword' "abeceda" (EN: "alphabet").

### Data Fields

- 'id_headword': a string ID of the word;
- 'headword': the word whose synonyms are grouped in the instance;
- 'groups_core': groups of likely synonyms - each group contains the IDs of the words ('id_words'), the synonyms ('words'), and how strong the synonym relation ('scores') is. Some groups also have domains annotated ('domains', >= 1 per word, i.e. 'domains' is a list of lists);
- 'groups_near': same as 'groups_core', but the synonyms here are typically less likely to be exact synonyms and more likely to be otherwise similar.

## Additional Information

### Dataset Curators

Simon Krek; et al. (please see URL for the full list).

### Licensing Information

CC BY-SA 4.0

### Contributions

Thanks to @matejklemen for adding this dataset.
[ "# Dataset Card for Thesaurus of Modern Slovene 1.0\n\nAlso known as \"Sopomenke 1.0\". Available in application form online: URL", "### Dataset Summary\n\nThis is an automatically created Slovene thesaurus from Slovene data available in a comprehensive English–Slovenian dictionary, a monolingual dictionary, and a corpus. A network analysis on the bilingual dictionary word co-occurrence graph was used, together with additional information from the distributional thesaurus data available as part of the Sketch Engine tool and extracted from the 1.2 billion word Gigafida corpus and the monolingual dictionary.\n\nFor a detailed description of the data, please see the paper Krek et al. (2017).", "### Supported Tasks and Leaderboards\n\nOther (the data is a knowledge base).", "### Languages\n\nSlovenian.", "## Dataset Structure", "### Data Instances\n\nEach entry is stored in its own instance. The following instance contains the metadata for the 'headword' \"abeceda\" (EN: \"alphabet\").", "### Data Fields\n\n- 'id_headword': a string ID of the word;\n- 'headword': the word whose synonyms are grouped in the instance;\n- 'groups_core': groups of likely synonyms - each group contains the IDs of the words ('id_words'), the synonyms ('words'), and how strong the synonym relation ('scores') is. Some groups also have domains annotated ('domains', >= 1 per word, i.e. 'domains' is a list of lists);\n- 'groups_near': same as 'groups_core', but the synonyms here are typically less likely to be exact synonyms and more likely to be otherwise similar.", "## Additional Information", "### Dataset Curators\n\nSimon Krek; et al. (please see URL for the full list).", "### Licensing Information\n\nCC BY-SA 4.0", "### Contributions\n\nThanks to @matejklemen for adding this dataset." ]
[ "TAGS\n#task_categories-other #annotations_creators-machine-generated #language_creators-machine-generated #multilinguality-monolingual #size_categories-100K<n<1M #language-Slovenian #license-cc-by-sa-4.0 #sopomenke #synonyms #region-us \n", "# Dataset Card for Thesaurus of Modern Slovene 1.0\n\nAlso known as \"Sopomenke 1.0\". Available in application form online: URL", "### Dataset Summary\n\nThis is an automatically created Slovene thesaurus from Slovene data available in a comprehensive English–Slovenian dictionary, a monolingual dictionary, and a corpus. A network analysis on the bilingual dictionary word co-occurrence graph was used, together with additional information from the distributional thesaurus data available as part of the Sketch Engine tool and extracted from the 1.2 billion word Gigafida corpus and the monolingual dictionary.\n\nFor a detailed description of the data, please see the paper Krek et al. (2017).", "### Supported Tasks and Leaderboards\n\nOther (the data is a knowledge base).", "### Languages\n\nSlovenian.", "## Dataset Structure", "### Data Instances\n\nEach entry is stored in its own instance. The following instance contains the metadata for the 'headword' \"abeceda\" (EN: \"alphabet\").", "### Data Fields\n\n- 'id_headword': a string ID of the word;\n- 'headword': the word whose synonyms are grouped in the instance;\n- 'groups_core': groups of likely synonyms - each group contains the IDs of the words ('id_words'), the synonyms ('words'), and how strong the synonym relation ('scores') is. Some groups also have domains annotated ('domains', >= 1 per word, i.e. 'domains' is a list of lists);\n- 'groups_near': same as 'groups_core', but the synonyms here are typically less likely to be exact synonyms and more likely to be otherwise similar.", "## Additional Information", "### Dataset Curators\n\nSimon Krek; et al. (please see URL for the full list).", "### Licensing Information\n\nCC BY-SA 4.0", "### Contributions\n\nThanks to @matejklemen for adding this dataset." ]
d629ed21778440b449291f2722ee44e599906e5c
# Dataset Card for "masterstack" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
DepositorOP/masterstack
[ "region:us" ]
2022-10-20T05:41:44+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "labels", "dtype": "float64"}], "splits": [{"name": "test", "num_bytes": 151727.48160821214, "num_examples": 702}, {"name": "train", "num_bytes": 1364250.5183917878, "num_examples": 6312}], "download_size": 1016008, "dataset_size": 1515978.0}}
2022-10-20T06:07:41+00:00
[]
[]
TAGS #region-us
# Dataset Card for "masterstack" More Information needed
[ "# Dataset Card for \"masterstack\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"masterstack\"\n\nMore Information needed" ]
65a43c364766f0af2d314f6cce3bb1980a1913a4
# Dataset Card for "enron-mail-corpus-mini" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
amanneo/enron-mail-corpus-mini
[ "region:us" ]
2022-10-20T05:50:10+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}, {"name": "mail_length", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 205837.52311697626, "num_examples": 4000}, {"name": "train", "num_bytes": 1852537.7080527863, "num_examples": 36000}], "download_size": 2332694, "dataset_size": 2058375.2311697626}}
2022-10-20T12:08:21+00:00
[]
[]
TAGS #region-us
# Dataset Card for "enron-mail-corpus-mini" More Information needed
[ "# Dataset Card for \"enron-mail-corpus-mini\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"enron-mail-corpus-mini\"\n\nMore Information needed" ]
b2373d69c590ab02b4164d9a912b4eacb1f80bf5
## Eraser Dataset Description - **Homepage:http://www.eraserbenchmark.com** - **Repository:https://github.com/jayded/eraserbenchmark** - **Paper:https://arxiv.org/abs/1911.03429** - **Leaderboard:http://www.eraserbenchmark.com/#leaderboard** ## e-SNLI Dataset Description - **Repository:https://github.com/OanaMariaCamburu/e-SNLI** - **Paper:http://papers.nips.cc/paper/8163-e-snli-natural-language-inference-with-natural-language-explanations.pdf**
niurl/eraser_esnli
[ "license:apache-2.0", "arxiv:1911.03429", "region:us" ]
2022-10-20T10:44:33+00:00
{"license": "apache-2.0"}
2022-10-24T14:26:38+00:00
[ "1911.03429" ]
[]
TAGS #license-apache-2.0 #arxiv-1911.03429 #region-us
## Eraser Dataset Description - Homepage:URL - Repository:URL - Paper:URL - Leaderboard:URL ## e-SNLI Dataset Description - Repository:URL - Paper:URL
[ "## Eraser Dataset Description\n\n- Homepage:URL\n- Repository:URL\n- Paper:URL\n- Leaderboard:URL", "## e-SNLI Dataset Description\n\n- Repository:URL\n- Paper:URL" ]
[ "TAGS\n#license-apache-2.0 #arxiv-1911.03429 #region-us \n", "## Eraser Dataset Description\n\n- Homepage:URL\n- Repository:URL\n- Paper:URL\n- Leaderboard:URL", "## e-SNLI Dataset Description\n\n- Repository:URL\n- Paper:URL" ]
c7368ccc03358758270dbf9e475222444d19926b
# Dataset Card for FB15k-237

## Table of Contents
- [Dataset Card for FB15k-237](#dataset-card-for-fb15k-237)
  - [Table of Contents](#table-of-contents)
  - [Dataset Description](#dataset-description)
    - [Dataset Summary](#dataset-summary)
    - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
    - [Languages](#languages)
  - [Dataset Structure](#dataset-structure)
    - [Data Instances](#data-instances)
    - [Data Fields](#data-fields)
    - [Data Splits](#data-splits)
  - [Dataset Creation](#dataset-creation)
    - [Curation Rationale](#curation-rationale)
    - [Source Data](#source-data)
      - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
      - [Who are the source language producers?](#who-are-the-source-language-producers)
    - [Annotations](#annotations)
      - [Annotation process](#annotation-process)
      - [Who are the annotators?](#who-are-the-annotators)
    - [Personal and Sensitive Information](#personal-and-sensitive-information)
  - [Considerations for Using the Data](#considerations-for-using-the-data)
    - [Social Impact of Dataset](#social-impact-of-dataset)
    - [Discussion of Biases](#discussion-of-biases)
    - [Other Known Limitations](#other-known-limitations)
  - [Additional Information](#additional-information)
    - [Dataset Curators](#dataset-curators)
    - [Licensing Information](#licensing-information)
    - [Citation Information](#citation-information)
    - [Contributions](#contributions)

## Dataset Description

- **Homepage:** [https://deepai.org/dataset/fb15k-237](https://deepai.org/dataset/fb15k-237)
- **Repository:**
- **Paper:** [More Information Needed](https://paperswithcode.com/dataset/fb15k-237)
- **Leaderboard:**
- **Point of Contact:**

### Dataset Summary

FB15k-237 is a link prediction dataset created from FB15k. While FB15k consists of 1,345 relations, 14,951 entities, and 592,213 triples, many triples are inverses that cause leakage from the training to testing and validation splits. FB15k-237 was created by Toutanova and Chen (2015) to ensure that the testing and evaluation datasets do not have inverse relation test leakage. In summary, the FB15k-237 dataset contains 310,079 triples with 14,505 entities and 237 relation types.

### Supported Tasks and Leaderboards

Supported Tasks: link prediction task on knowledge graphs.

Leaderboards: [More Information Needed](https://paperswithcode.com/sota/link-prediction-on-fb15k-237)

### Languages

[More Information Needed]

## Dataset Structure

### Data Instances

[More Information Needed]

### Data Fields

[More Information Needed]

### Data Splits

[More Information Needed]

## Dataset Creation

### Curation Rationale

[More Information Needed]

### Source Data

#### Initial Data Collection and Normalization

[More Information Needed]

#### Who are the source language producers?

[More Information Needed]

### Annotations

#### Annotation process

[More Information Needed]

#### Who are the annotators?
[More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information ``` @inproceedings{schlichtkrull2018modeling, title={Modeling relational data with graph convolutional networks}, author={Schlichtkrull, Michael and Kipf, Thomas N and Bloem, Peter and Berg, Rianne van den and Titov, Ivan and Welling, Max}, booktitle={European semantic web conference}, pages={593--607}, year={2018}, organization={Springer} } ``` ### Contributions Thanks to [@pp413](https://github.com/pp413) for adding this dataset.
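### Usage Example

A minimal sketch with the Hugging Face `datasets` library that sanity-checks the counts quoted in the summary. Since this card does not document the data fields, the `head`/`relation`/`tail` names below are placeholders, not confirmed column names.

```python
# Sketch: recompute entity/relation/triple counts over all splits.
# Assumptions: load_dataset works on this repo, and each row exposes
# `head`, `relation`, and `tail` fields (placeholder names).
from datasets import load_dataset

splits = load_dataset("KGraph/FB15k-237")

entities, relations, n_triples = set(), set(), 0
for split in splits.values():
    for triple in split:
        entities.update((triple["head"], triple["tail"]))
        relations.add(triple["relation"])
        n_triples += 1

# Expected per the summary: 310,079 triples, 14,505 entities, 237 relation types
print(n_triples, len(entities), len(relations))
```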
KGraph/FB15k-237
[ "task_categories:other", "annotations_creators:found", "annotations_creators:crowdsourced", "multilinguality:monolingual", "size_categories:100K<n<1M", "source_datasets:original", "language:en", "license:cc-by-4.0", "knowledge graph", "knowledge", "link prediction", "link", "region:us" ]
2022-10-20T11:09:29+00:00
{"annotations_creators": ["found", "crowdsourced"], "language_creators": [], "language": ["en"], "license": ["cc-by-4.0"], "multilinguality": ["monolingual"], "size_categories": ["100K<n<1M"], "source_datasets": ["original"], "task_categories": ["other"], "task_ids": [], "pretty_name": "FB15k-237", "tags": ["knowledge graph", "knowledge", "link prediction", "link"]}
2022-10-21T08:03:28+00:00
[]
[ "en" ]
TAGS #task_categories-other #annotations_creators-found #annotations_creators-crowdsourced #multilinguality-monolingual #size_categories-100K<n<1M #source_datasets-original #language-English #license-cc-by-4.0 #knowledge graph #knowledge #link prediction #link #region-us
# Dataset Card for FB15k-237

## Table of Contents
- Dataset Card for FB15k-237
 - Table of Contents
 - Dataset Description
 - Dataset Summary
 - Supported Tasks and Leaderboards
 - Languages
 - Dataset Structure
 - Data Instances
 - Data Fields
 - Data Splits
 - Dataset Creation
 - Curation Rationale
 - Source Data
 - Initial Data Collection and Normalization
 - Who are the source language producers?
 - Annotations
 - Annotation process
 - Who are the annotators?
 - Personal and Sensitive Information
 - Considerations for Using the Data
 - Social Impact of Dataset
 - Discussion of Biases
 - Other Known Limitations
 - Additional Information
 - Dataset Curators
 - Licensing Information
 - Citation Information
 - Contributions

## Dataset Description

- Homepage: URL
- Repository: 
- Paper: 
- Leaderboard:
- Point of Contact:

### Dataset Summary

FB15k-237 is a link prediction dataset created from FB15k. While FB15k consists of 1,345 relations, 14,951 entities, and 592,213 triples, many triples are inverses that cause leakage from the training to testing and validation splits. FB15k-237 was created by Toutanova and Chen (2015) to ensure that the testing and evaluation datasets do not have inverse relation test leakage. In summary, the FB15k-237 dataset contains 310,079 triples with 14,505 entities and 237 relation types.

### Supported Tasks and Leaderboards

Supported Tasks: link prediction task on knowledge graphs.

Leaderboards:

### Languages

## Dataset Structure

### Data Instances

### Data Fields

### Data Splits

## Dataset Creation

### Curation Rationale

### Source Data

#### Initial Data Collection and Normalization

#### Who are the source language producers?

### Annotations

#### Annotation process

#### Who are the annotators?

### Personal and Sensitive Information

## Considerations for Using the Data

### Social Impact of Dataset

### Discussion of Biases

### Other Known Limitations

## Additional Information

### Dataset Curators

### Licensing Information

### Contributions

Thanks to @pp413 for adding this dataset.
[ "# Dataset Card for FB15k-237", "## Table of Contents\n- Dataset Card for FB15k-237\n - Table of Contents\n - Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n - Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n - Dataset Creation\n - Curation Rationale\n - Source Data\n - Initial Data Collection and Normalization\n - Who are the source language producers?\n - Annotations\n - Annotation process\n - Who are the annotators?\n - Personal and Sensitive Information\n - Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n - Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: URL\n- Repository: \n- Paper: \n- Leaderboard:\n- Point of Contact:", "### Dataset Summary\n\nFB15k-237 is a link prediction dataset created from FB15k. While FB15k consists of 1,345 relations, 14,951 entities, and 592,213 triples, many triples are inverses that cause leakage from the training to testing and validation splits. FB15k-237 was created by Toutanova and Chen (2015) to ensure that the testing and evaluation datasets do not have inverse relation test leakage. In summary, the FB15k-237 dataset contains 310,079 triples with 14,505 entities and 237 relation types.", "### Supported Tasks and Leaderboards\n\nSupported Tasks: link prediction task on knowledge graphs.\n\nLeaderboards:", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @pp413 for adding this dataset." ]
[ "TAGS\n#task_categories-other #annotations_creators-found #annotations_creators-crowdsourced #multilinguality-monolingual #size_categories-100K<n<1M #source_datasets-original #language-English #license-cc-by-4.0 #knowledge graph #knowledge #link prediction #link #region-us \n", "# Dataset Card for FB15k-237", "## Table of Contents\n- Dataset Card for FB15k-237\n - Table of Contents\n - Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n - Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n - Dataset Creation\n - Curation Rationale\n - Source Data\n - Initial Data Collection and Normalization\n - Who are the source language producers?\n - Annotations\n - Annotation process\n - Who are the annotators?\n - Personal and Sensitive Information\n - Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n - Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: URL\n- Repository: \n- Paper: \n- Leaderboard:\n- Point of Contact:", "### Dataset Summary\n\nFB15k-237 is a link prediction dataset created from FB15k. While FB15k consists of 1,345 relations, 14,951 entities, and 592,213 triples, many triples are inverses that cause leakage from the training to testing and validation splits. FB15k-237 was created by Toutanova and Chen (2015) to ensure that the testing and evaluation datasets do not have inverse relation test leakage. In summary, the FB15k-237 dataset contains 310,079 triples with 14,505 entities and 237 relation types.", "### Supported Tasks and Leaderboards\n\nSupported Tasks: link prediction task on knowledge graphs.\n\nLeaderboards:", "### Languages", "## Dataset Structure", "### Data Instances", "### Data Fields", "### Data Splits", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations", "## Additional Information", "### Dataset Curators", "### Licensing Information", "### Contributions\n\nThanks to @pp413 for adding this dataset." ]
64562bea2ded1dc071782fe699625f2d27357b41
# Dataset Card for SloWNet ### Dataset Summary sloWNet is the Slovene WordNet developed in the expand approach: it contains the complete Princeton WordNet 3.0 and over 70 000 Slovene literals. These literals have been added automatically using different types of existing resources, such as bilingual dictionaries, parallel corpora and Wikipedia. 33 000 literals have been subsequently hand-validated. For a detailed description of the data, please see the paper Fišer et al. (2012). ### Supported Tasks and Leaderboards Other (the data is a knowledge base). ### Languages Slovenian. ## Dataset Structure ### Data Instances Each synset is stored in its own instance. The following instance represents a synset containing the English synonyms `{'able'}` and Slovene synonyms `{'sposoben', 'zmožen'}`: ``` { 'id': 'eng-30-00001740-a', 'pos': 'a', 'bcs': 3, 'en_synonyms': { 'words': ['able'], 'senses': [1], 'pwnids': ['able%3:00:00::'] }, 'sl_synonyms': { 'words': ['sposoben', 'zmožen'], 'is_validated': [False, False] }, 'en_def': "(usually followed by `to') having the necessary means or skill or know-how or authority to do something", 'sl_def': 'N/A', 'en_usages': [ 'able to swim', 'she was able to program her computer', 'we were at last able to buy a car', 'able to get a grant for the project' ], 'sl_usages': [], 'ilrs': { 'types': ['near_antonym', 'be_in_state', 'be_in_state', 'eng_derivative', 'eng_derivative'], 'id_synsets': ['eng-30-00002098-a', 'eng-30-05200169-n', 'eng-30-05616246-n', 'eng-30-05200169-n', 'eng-30-05616246-n'] }, 'semeval07_cluster': 'able', 'domains': ['quality'] } ``` ### Data Fields - `id`: a string ID of the synset; - `pos`: part of speech tag of the synset; - `bcs`: Base Concept Set index (`-1` if not present); - `en_synonyms`: the English synonyms in the synset - synonym `i` is described with its form (`words[i]`), sense (`senses[i]`), and Princeton WordNet ID (`pwnids[i]`); - `sl_synonyms`: the Slovene synonyms in the synset - synonym `i` is described with its form (`words[i]`) and a flag marking if its correctness has been manually validated (`is_validated[i]`); - `en_def`: the English definition (`"N/A"` if not present); - `sl_def`: the Slovene definition (`"N/A"` if not present); - `en_usages`: the English examples of usage; - `sl_usages`: the Slovene examples of usage; - `ilrs`: internal language relations - relation `i` is described by its type (`types[i]`) and the target synset (`id_synsets[i]`); - `semeval07_cluster`: string cluster (`"N/A"` if not present); - `domains`: domains of the synset. ## Additional Information ### Dataset Curators Darja Fišer. ### Licensing Information CC BY-SA 4.0 ### Citation Information ``` @inproceedings{fiser2012slownet, title={sloWNet 3.0: development, extension and cleaning}, author={Fi{\v{s}}er, Darja and Novak, Jernej and Erjavec, Toma{\v{z}}}, booktitle={Proceedings of 6th International Global Wordnet Conference (GWC 2012)}, pages={113--117}, year={2012} } ``` ### Contributions Thanks to [@matejklemen](https://github.com/matejklemen) for adding this dataset.
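### Usage Example

A minimal sketch with the Hugging Face `datasets` library, relying on the fields documented above; the `load_dataset` call and the split name are assumptions.

```python
# Sketch: harvest English-Slovene translation pairs from each synset.
from datasets import load_dataset

slownet = load_dataset("cjvt/slownet", split="train")  # split name assumed

pairs = []
for synset in slownet:
    en_words = synset["en_synonyms"]["words"]
    sl_words = synset["sl_synonyms"]["words"]
    # literals sharing a synset are translation candidates of one another
    pairs.extend((en, sl) for en in en_words for sl in sl_words)

print(pairs[:3])  # e.g. [('able', 'sposoben'), ('able', 'zmožen'), ...]
```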
cjvt/slownet
[ "task_categories:other", "annotations_creators:machine-generated", "annotations_creators:expert-generated", "language_creators:machine-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:100K<n<1M", "language:sl", "license:cc-by-sa-4.0", "slownet", "wordnet", "pwn", "region:us" ]
2022-10-20T11:26:34+00:00
{"annotations_creators": ["machine-generated", "expert-generated"], "language_creators": ["machine-generated", "found"], "language": ["sl"], "license": ["cc-by-sa-4.0"], "multilinguality": ["monolingual"], "size_categories": ["100K<n<1M"], "source_datasets": [], "task_categories": ["other"], "task_ids": [], "pretty_name": "Semantic lexicon of Slovene sloWNet", "tags": ["slownet", "wordnet", "pwn"]}
2022-10-21T11:44:13+00:00
[]
[ "sl" ]
TAGS #task_categories-other #annotations_creators-machine-generated #annotations_creators-expert-generated #language_creators-machine-generated #language_creators-found #multilinguality-monolingual #size_categories-100K<n<1M #language-Slovenian #license-cc-by-sa-4.0 #slownet #wordnet #pwn #region-us
# Dataset Card for SloWNet ### Dataset Summary sloWNet is the Slovene WordNet developed in the expand approach: it contains the complete Princeton WordNet 3.0 and over 70 000 Slovene literals. These literals have been added automatically using different types of existing resources, such as bilingual dictionaries, parallel corpora and Wikipedia. 33 000 literals have been subsequently hand-validated. For a detailed description of the data, please see the paper Fišer et al. (2012). ### Supported Tasks and Leaderboards Other (the data is a knowledge base). ### Languages Slovenian. ## Dataset Structure ### Data Instances Each synset is stored in its own instance. The following instance represents a synset containing the English synonyms '{'able'}' and Slovene synonyms '{'sposoben', 'zmožen'}': ### Data Fields - 'id': a string ID of the synset; - 'pos': part of speech tag of the synset; - 'bcs': Base Concept Set index ('-1' if not present); - 'en_synonyms': the English synonyms in the synset - synonym 'i' is described with its form ('words[i]'), sense ('senses[i]'), and Princeton WordNet ID ('pwnids[i]'); - 'sl_synonyms': the Slovene synonyms in the synset - synonym 'i' is described with its form ('words[i]') and a flag marking if its correctness has been manually validated ('is_validated[i]'); - 'en_def': the English definition ('"N/A"' if not present); - 'sl_def': the Slovene definition ('"N/A"' if not present); - 'en_usages': the English examples of usage; - 'sl_usages': the Slovene examples of usage; - 'ilrs': internal language relations - relation 'i' is described by its type ('types[i]') and the target synset ('id_synsets[i]'); - 'semeval07_cluster': string cluster ('"N/A"' if not present); - 'domains': domains of the synset. ## Additional Information ### Dataset Curators Darja Fišer. ### Licensing Information CC BY-SA 4.0 ### Contributions Thanks to @matejklemen for adding this dataset.
[ "# Dataset Card for SloWNet", "### Dataset Summary\n\nsloWNet is the Slovene WordNet developed in the expand approach: it contains the complete Princeton WordNet 3.0 and over 70 000 Slovene literals. These literals have been added automatically using different types of existing resources, such as bilingual dictionaries, parallel corpora and Wikipedia. 33 000 literals have been subsequently hand-validated.\n\nFor a detailed description of the data, please see the paper Fišer et al. (2012).", "### Supported Tasks and Leaderboards\n\nOther (the data is a knowledge base).", "### Languages\n\nSlovenian.", "## Dataset Structure", "### Data Instances\n\nEach synset is stored in its own instance. The following instance represents a synset containing the English synonyms '{'able'}' and Slovene synonyms '{'sposoben', 'zmožen'}':", "### Data Fields\n\n- 'id': a string ID of the synset; \n- 'pos': part of speech tag of the synset;\n- 'bcs': Base Concept Set index ('-1' if not present); \n- 'en_synonyms': the English synonyms in the synset - synonym 'i' is described with its form ('words[i]'), sense ('senses[i]'), and Princeton WordNet ID ('pwnids[i]'); \n- 'sl_synonyms': the Slovene synonyms in the synset - synonym 'i' is described with its form ('words[i]') and a flag marking if its correctness has been manually validated ('is_validated[i]');\n- 'en_def': the English definition ('\"N/A\"' if not present); \n- 'sl_def': the Slovene definition ('\"N/A\"' if not present); \n- 'en_usages': the English examples of usage;\n- 'sl_usages': the Slovene examples of usage; \n- 'ilrs': internal language relations - relation 'i' is described by its type ('types[i]') and the target synset ('id_synsets[i]'); \n- 'semeval07_cluster': string cluster ('\"N/A\"' if not present); \n- 'domains': domains of the synset.", "## Additional Information", "### Dataset Curators\n\nDarja Fišer.", "### Licensing Information\n\nCC BY-SA 4.0", "### Contributions\n\nThanks to @matejklemen for adding this dataset." ]
[ "TAGS\n#task_categories-other #annotations_creators-machine-generated #annotations_creators-expert-generated #language_creators-machine-generated #language_creators-found #multilinguality-monolingual #size_categories-100K<n<1M #language-Slovenian #license-cc-by-sa-4.0 #slownet #wordnet #pwn #region-us \n", "# Dataset Card for SloWNet", "### Dataset Summary\n\nsloWNet is the Slovene WordNet developed in the expand approach: it contains the complete Princeton WordNet 3.0 and over 70 000 Slovene literals. These literals have been added automatically using different types of existing resources, such as bilingual dictionaries, parallel corpora and Wikipedia. 33 000 literals have been subsequently hand-validated.\n\nFor a detailed description of the data, please see the paper Fišer et al. (2012).", "### Supported Tasks and Leaderboards\n\nOther (the data is a knowledge base).", "### Languages\n\nSlovenian.", "## Dataset Structure", "### Data Instances\n\nEach synset is stored in its own instance. The following instance represents a synset containing the English synonyms '{'able'}' and Slovene synonyms '{'sposoben', 'zmožen'}':", "### Data Fields\n\n- 'id': a string ID of the synset; \n- 'pos': part of speech tag of the synset;\n- 'bcs': Base Concept Set index ('-1' if not present); \n- 'en_synonyms': the English synonyms in the synset - synonym 'i' is described with its form ('words[i]'), sense ('senses[i]'), and Princeton WordNet ID ('pwnids[i]'); \n- 'sl_synonyms': the Slovene synonyms in the synset - synonym 'i' is described with its form ('words[i]') and a flag marking if its correctness has been manually validated ('is_validated[i]');\n- 'en_def': the English definition ('\"N/A\"' if not present); \n- 'sl_def': the Slovene definition ('\"N/A\"' if not present); \n- 'en_usages': the English examples of usage;\n- 'sl_usages': the Slovene examples of usage; \n- 'ilrs': internal language relations - relation 'i' is described by its type ('types[i]') and the target synset ('id_synsets[i]'); \n- 'semeval07_cluster': string cluster ('\"N/A\"' if not present); \n- 'domains': domains of the synset.", "## Additional Information", "### Dataset Curators\n\nDarja Fišer.", "### Licensing Information\n\nCC BY-SA 4.0", "### Contributions\n\nThanks to @matejklemen for adding this dataset." ]
4a3e56082543ed9eecb9c76ef5eadc1aa0cc5ca0
# Dataset Card for CrossWOZ

- **Repository:** https://github.com/thu-coai/CrossWOZ
- **Paper:** https://aclanthology.org/2020.tacl-1.19/
- **Leaderboard:** None
- **Who transforms the dataset:** Qi Zhu (zhuq96 at gmail dot com)

To use this dataset, you need to install the [ConvLab-3](https://github.com/ConvLab/ConvLab-3) platform first. Then you can load the dataset via:
```
from convlab.util import load_dataset, load_ontology, load_database

dataset = load_dataset('crosswoz')
ontology = load_ontology('crosswoz')
database = load_database('crosswoz')
```
For more usage please refer to [here](https://github.com/ConvLab/ConvLab-3/tree/master/data/unified_datasets).

### Dataset Summary

CrossWOZ is the first large-scale Chinese Cross-Domain Wizard-of-Oz task-oriented dataset. It contains 6K dialogue sessions and 102K utterances for 5 domains, including hotel, restaurant, attraction, metro, and taxi. Moreover, the corpus contains rich annotation of dialogue states and dialogue acts at both user and system sides. We also provide a user simulator and several benchmark models for pipelined task-oriented dialogue systems, which will facilitate researchers in comparing and evaluating their models on this corpus.

- **How to get the transformed data from original data:**
  - Run `python preprocess.py` in the current directory. Need `../../crosswoz/` as the original data.
- **Main changes of the transformation:**
  - Add simple description for domains, slots, and intents.
  - Switch intent&domain of `General` dialog acts => domain == 'General' and intent in ['thank','bye','greet','welcome']
  - Binary dialog acts include: 1) domain == 'General'; 2) intent in ['NoOffer', 'Request', 'Select']; 3) slot in ['酒店设施']
  - Categorical dialog acts include: slot in ['酒店类型', '车型', '车牌']
  - Non-categorical dialogue acts: others. assert intent in ['Inform', 'Recommend'] and slot != 'none' and value != 'none'
  - Transform original user goal to list of `{domain: {'inform': {slot: [value, mentioned/not mentioned]}, 'request': {slot: [value, mentioned/not mentioned]}}}`, stored as `user_state` of user turns.
  - Transform `sys_state_init` (first API call of system turns) without `selectedResults` as belief state in user turns.
  - Transform `sys_state` (last API call of system turns) to `db_query` with domain states that contain non-empty `selectedResults`. The `selectedResults` are saved as `db_results` (only contain entity name). Both stored in system turns.
- **Annotations:**
  - user goal, user state, dialogue acts, state, db query, db results.
  - Multiple values in state are separated by spaces, meaning all constraints should be satisfied.
### Supported Tasks and Leaderboards

NLU, DST, Policy, NLG, E2E, User simulator

### Languages

Chinese

### Data Splits

| split      | dialogues | utterances | avg_utt | avg_tokens | avg_domains | cat slot match(state) | cat slot match(goal) | cat slot match(dialogue act) | non-cat slot span(dialogue act) |
|------------|-----------|------------|---------|------------|-------------|-----------------------|----------------------|------------------------------|---------------------------------|
| train      | 5012      | 84674      | 16.89   | 20.55      | 3.02        | 99.67                 | -                    | 100                          | 94.39                           |
| validation | 500       | 8458       | 16.92   | 20.53      | 3.04        | 99.62                 | -                    | 100                          | 94.36                           |
| test       | 500       | 8476       | 16.95   | 20.51      | 3.08        | 99.61                 | -                    | 100                          | 94.85                           |
| all        | 6012      | 101608     | 16.9    | 20.54      | 3.03        | 99.66                 | -                    | 100                          | 94.43                           |

6 domains: ['景点', '餐馆', '酒店', '地铁', '出租', 'General']

- **cat slot match**: how many values of categorical slots are in the possible values of the ontology, in percentage.
- **non-cat slot span**: how many values of non-categorical slots have span annotations, in percentage.

### Citation

```
@article{zhu2020crosswoz,
  author = {Qi Zhu and Kaili Huang and Zheng Zhang and Xiaoyan Zhu and Minlie Huang},
  title = {Cross{WOZ}: A Large-Scale Chinese Cross-Domain Task-Oriented Dialogue Dataset},
  journal = {Transactions of the Association for Computational Linguistics},
  year = {2020}
}
```

### Licensing Information

Apache License, Version 2.0
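### Usage Example

A short sketch of iterating the transformed data after the ConvLab-3 loading call shown above. The per-turn keys (`speaker`, `utterance`, `user_state`) are assumed from the unified-format description in this card, not confirmed here.

```python
# Sketch: walk through the first training dialogue and print each turn.
# The load_dataset call comes from this card; the turn keys are assumptions.
from convlab.util import load_dataset

dataset = load_dataset('crosswoz')

dialogue = dataset['train'][0]
for turn in dialogue['turns']:
    print(turn['speaker'], ':', turn['utterance'])
    if turn['speaker'] == 'user':
        # goal-progress annotation described in the transformation notes
        print('  user_state:', turn['user_state'])
```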
ConvLab/crosswoz
[ "task_categories:conversational", "multilinguality:monolingual", "size_categories:1K<n<10K", "language:zh", "license:apache-2.0", "region:us" ]
2022-10-20T11:34:19+00:00
{"language": ["zh"], "license": ["apache-2.0"], "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "task_categories": ["conversational"], "pretty_name": "CrossWOZ"}
2022-11-25T09:01:44+00:00
[]
[ "zh" ]
TAGS #task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #language-Chinese #license-apache-2.0 #region-us
Dataset Card for CrossWOZ
=========================


* Repository: URL
* Paper: URL
* Leaderboard: None
* Who transforms the dataset: Qi Zhu (zhuq96 at gmail dot com)


To use this dataset, you need to install the ConvLab-3 platform first. Then you can load the dataset via:


For more usage please refer to here.


### Dataset Summary


CrossWOZ is the first large-scale Chinese Cross-Domain Wizard-of-Oz task-oriented dataset. It contains 6K dialogue sessions and 102K utterances for 5 domains, including hotel, restaurant, attraction, metro, and taxi. Moreover, the corpus contains rich annotation of dialogue states and dialogue acts at both user and system sides. We also provide a user simulator and several benchmark models for pipelined task-oriented dialogue systems, which will facilitate researchers in comparing and evaluating their models on this corpus.


* How to get the transformed data from original data:
	+ Run 'python URL' in the current directory. Need '../../crosswoz/' as the original data.
* Main changes of the transformation:
	+ Add simple description for domains, slots, and intents.
	+ Switch intent&domain of 'General' dialog acts => domain == 'General' and intent in ['thank','bye','greet','welcome']
	+ Binary dialog acts include: 1) domain == 'General'; 2) intent in ['NoOffer', 'Request', 'Select']; 3) slot in ['酒店设施']
	+ Categorical dialog acts include: slot in ['酒店类型', '车型', '车牌']
	+ Non-categorical dialogue acts: others. assert intent in ['Inform', 'Recommend'] and slot != 'none' and value != 'none'
	+ Transform original user goal to list of '{domain: {'inform': {slot: [value, mentioned/not mentioned]}, 'request': {slot: [value, mentioned/not mentioned]}}}', stored as 'user\_state' of user turns.
	+ Transform 'sys\_state\_init' (first API call of system turns) without 'selectedResults' as belief state in user turns.
	+ Transform 'sys\_state' (last API call of system turns) to 'db\_query' with domain states that contain non-empty 'selectedResults'. The 'selectedResults' are saved as 'db\_results' (only contain entity name). Both stored in system turns.
* Annotations:
	+ user goal, user state, dialogue acts, state, db query, db results.
	+ Multiple values in state are separated by spaces, meaning all constraints should be satisfied.


### Supported Tasks and Leaderboards


NLU, DST, Policy, NLG, E2E, User simulator


### Languages


Chinese


### Data Splits



6 domains: ['景点', '餐馆', '酒店', '地铁', '出租', 'General']


* cat slot match: how many values of categorical slots are in the possible values of the ontology, in percentage.
* non-cat slot span: how many values of non-categorical slots have span annotations, in percentage.


### Licensing Information


Apache License, Version 2.0
[ "### Dataset Summary\n\n\nCrossWOZ is the first large-scale Chinese Cross-Domain Wizard-of-Oz task-oriented dataset. It contains 6K dialogue sessions and 102K utterances for 5 domains, including hotel, restaurant, attraction, metro, and taxi. Moreover, the corpus contains rich annotation of dialogue states and dialogue acts at both user and system sides. We also provide a user simulator and several benchmark models for pipelined task-oriented dialogue systems, which will facilitate researchers in comparing and evaluating their models on this corpus.\n\n\n* How to get the transformed data from original data:\n\t+ Run 'python URL' in the current directory. Need '../../crosswoz/' as the original data.\n* Main changes of the transformation:\n\t+ Add simple description for domains, slots, and intents.\n\t+ Switch intent&domain of 'General' dialog acts => domain == 'General' and intent in ['thank','bye','greet','welcome']\n\t+ Binary dialog acts include: 1) domain == 'General'; 2) intent in ['NoOffer', 'Request', 'Select']; 3) slot in ['酒店设施']\n\t+ Categorical dialog acts include: slot in ['酒店类型', '车型', '车牌']\n\t+ Non-categorical dialogue acts: others. assert intent in ['Inform', 'Recommend'] and slot != 'none' and value != 'none'\n\t+ Transform original user goal to list of '{domain: {'inform': {slot: [value, mentioned/not mentioned]}, 'request': {slot: [value, mentioned/not mentioned]}}}', stored as 'user\\_state' of user turns.\n\t+ Transform 'sys\\_state\\_init' (first API call of system turns) without 'selectedResults' as belief state in user turns.\n\t+ Transform 'sys\\_state' (last API call of system turns) to 'db\\_query' with domain states that contain non-empty 'selectedResults'. The 'selectedResults' are saved as 'db\\_results' (only contain entity name). Both stored in system turns.\n* Annotations:\n\t+ user goal, user state, dialogue acts, state, db query, db results.\n\t+ Multiple values in state are separated by spaces, meaning all constraints should be satisfied.", "### Supported Tasks and Leaderboards\n\n\nNLU, DST, Policy, NLG, E2E, User simulator", "### Languages\n\n\nChinese", "### Data Splits\n\n\n\n6 domains: ['景点', '餐馆', '酒店', '地铁', '出租', 'General']\n\n\n* cat slot match: how many values of categorical slots are in the possible values of the ontology, in percentage.\n* non-cat slot span: how many values of non-categorical slots have span annotations, in percentage.", "### Licensing Information\n\n\nApache License, Version 2.0" ]
[ "TAGS\n#task_categories-conversational #multilinguality-monolingual #size_categories-1K<n<10K #language-Chinese #license-apache-2.0 #region-us \n", "### Dataset Summary\n\n\nCrossWOZ is the first large-scale Chinese Cross-Domain Wizard-of-Oz task-oriented dataset. It contains 6K dialogue sessions and 102K utterances for 5 domains, including hotel, restaurant, attraction, metro, and taxi. Moreover, the corpus contains rich annotation of dialogue states and dialogue acts at both user and system sides. We also provide a user simulator and several benchmark models for pipelined task-oriented dialogue systems, which will facilitate researchers in comparing and evaluating their models on this corpus.\n\n\n* How to get the transformed data from original data:\n\t+ Run 'python URL' in the current directory. Need '../../crosswoz/' as the original data.\n* Main changes of the transformation:\n\t+ Add simple description for domains, slots, and intents.\n\t+ Switch intent&domain of 'General' dialog acts => domain == 'General' and intent in ['thank','bye','greet','welcome']\n\t+ Binary dialog acts include: 1) domain == 'General'; 2) intent in ['NoOffer', 'Request', 'Select']; 3) slot in ['酒店设施']\n\t+ Categorical dialog acts include: slot in ['酒店类型', '车型', '车牌']\n\t+ Non-categorical dialogue acts: others. assert intent in ['Inform', 'Recommend'] and slot != 'none' and value != 'none'\n\t+ Transform original user goal to list of '{domain: {'inform': {slot: [value, mentioned/not mentioned]}, 'request': {slot: [value, mentioned/not mentioned]}}}', stored as 'user\\_state' of user turns.\n\t+ Transform 'sys\\_state\\_init' (first API call of system turns) without 'selectedResults' as belief state in user turns.\n\t+ Transform 'sys\\_state' (last API call of system turns) to 'db\\_query' with domain states that contain non-empty 'selectedResults'. The 'selectedResults' are saved as 'db\\_results' (only contain entity name). Both stored in system turns.\n* Annotations:\n\t+ user goal, user state, dialogue acts, state, db query, db results.\n\t+ Multiple values in state are separated by spaces, meaning all constraints should be satisfied.", "### Supported Tasks and Leaderboards\n\n\nNLU, DST, Policy, NLG, E2E, User simulator", "### Languages\n\n\nChinese", "### Data Splits\n\n\n\n6 domains: ['景点', '餐馆', '酒店', '地铁', '出租', 'General']\n\n\n* cat slot match: how many values of categorical slots are in the possible values of the ontology, in percentage.\n* non-cat slot span: how many values of non-categorical slots have span annotations, in percentage.", "### Licensing Information\n\n\nApache License, Version 2.0" ]
cfb34519c9fedf86d0548262071deabaa2443c0b
# Dataset Card for "collected-mail-corpus-mini" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
amanneo/collected-mail-corpus-mini
[ "region:us" ]
2022-10-20T12:08:38+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "float64"}, {"name": "email_type", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "mail_length", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 4260.131707317073, "num_examples": 21}, {"name": "train", "num_bytes": 37326.86829268293, "num_examples": 184}], "download_size": 26719, "dataset_size": 41587.0}}
2022-10-20T12:08:59+00:00
[]
[]
TAGS #region-us
# Dataset Card for "collected-mail-corpus-mini" More Information needed
[ "# Dataset Card for \"collected-mail-corpus-mini\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"collected-mail-corpus-mini\"\n\nMore Information needed" ]
aa1f981bd3a7bb02a46b9c472ac89a93c7024ed6
# Dataset Card for SPICED ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** http://www.copenlu.com/publication/2022_emnlp_wright/ - **Repository:** https://github.com/copenlu/scientific-information-change - **Paper:** ### Dataset Summary The Scientific Paraphrase and Information ChangE Dataset (SPICED) is a dataset of paired scientific findings from scientific papers, news media, and Twitter. The types of pairs are between <paper, news> and <paper, tweet>. Each pair is labeled for the degree of information similarity in the _findings_ described by each sentence, on a scale from 1-5. This is called the _Information Matching Score (IMS)_. The data was curated from S2ORC and matched news articles and Tweets using Altmetric. Instances are annotated by experts using the Prolific platform and Potato. Please use the following citation when using this dataset: ``` @article{modeling-information-change, title={{Modeling Information Change in Science Communication with Semantically Matched Paraphrases}}, author={Wright, Dustin and Pei, Jiaxin and Jurgens, David and Augenstein, Isabelle}, year={2022}, booktitle = {Proceedings of EMNLP}, publisher = {Association for Computational Linguistics}, year = 2022 } ``` ### Supported Tasks and Leaderboards The task is to predict the IMS between two scientific sentences, which is a scalar between 1 and 5. Preferred metrics are mean-squared error and Pearson correlation. ### Languages English ## Dataset Structure ### Data Fields - DOI: The DOI of the original scientific article - instance\_id: Unique instance ID for the sample. The ID contains the field, whether or not it is a tweet, and whether or not the sample was manually labeled or automatically using SBERT (marked as "easy") - News Finding: Text of the news or tweet finding - Paper Finding: Text of the paper finding - News Context: For news instances, the surrounding two sentences for the news finding. 
For tweets, a copy of the tweet - Paper Context: The surrounding two sentences for the paper finding - scores: Annotator scores after removing low competence annotators - field: The academic field of the paper ('Computer\_Science', 'Medicine', 'Biology', or 'Psychology') - split: The dataset split ('train', 'val', or 'test') - final\_score: The IMS of the instance - source: Either "news" or "tweet" - News Url: A URL to the source article if a news instance or the tweet ID of a tweet ### Data Splits - train: 4721 instances - validation: 664 instances - test: 640 instances ## Dataset Creation For the full details of how the dataset was created, please refer to our [EMNLP 2022 paper](). ### Curation Rationale Science communication is a complex process of translation from highly technical scientific language to common language that lay people can understand. At the same time, the general public relies on good science communication in order to inform critical decisions about their health and behavior. SPICED was curated in order to provide a training dataset and benchmark for machine learning models to measure changes in scientific information at different stages of the science communication pipeline. ### Source Data #### Initial Data Collection and Normalization Scientific text: S2ORC News articles and Tweets are collected through Altmetric. #### Who are the source language producers? Scientists, journalists, and Twitter users. ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset Models trained on SPICED can be used to perform large scale analyses of science communication. They can be used to match the same finding discussed in different media, and reveal trends in differences in reporting at different stages of the science communication pipeline. It is hoped that this can help to build tools which will improve science communication. ### Discussion of Biases The dataset is restricted to computer science, medicine, biology, and psychology, which may introduce some bias in the topics which models will perform well on. ### Other Known Limitations While some context is available, we do not release the full text of news articles and scientific papers, which may contain further context to help with learning the task. We do however provide the paper DOIs and links to the original news articles in case full text is desired. ## Additional Information ### Dataset Curators Dustin Wright, Jiaxin Pei, David Jurgens, and Isabelle Augenstein ### Licensing Information MIT ### Contributions Thanks to [@dwright37](https://github.com/dwright37) for adding this dataset.
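### Usage Example

A sketch of the evaluation suggested by the task description: mean-squared error and Pearson correlation of predicted IMS against `final_score`. The `load_dataset` call and the single-table layout (filtered via the documented `split` column) are assumptions, and `predict_ims` is a hypothetical placeholder model.

```python
# Sketch: score an IMS predictor on the test portion with MSE and Pearson r.
from datasets import load_dataset
from scipy.stats import pearsonr

data = load_dataset("copenlu/spiced")["train"]       # single-table layout assumed
test = [ex for ex in data if ex["split"] == "test"]  # documented `split` field

def predict_ims(paper_finding: str, news_finding: str) -> float:
    # hypothetical baseline: token overlap (Jaccard) rescaled to the 1-5 range
    a, b = set(paper_finding.lower().split()), set(news_finding.lower().split())
    return 1.0 + 4.0 * len(a & b) / max(len(a | b), 1)

gold = [ex["final_score"] for ex in test]
pred = [predict_ims(ex["Paper Finding"], ex["News Finding"]) for ex in test]

mse = sum((p - g) ** 2 for p, g in zip(pred, gold)) / len(gold)
r, _ = pearsonr(pred, gold)
print(f"MSE: {mse:.3f}  Pearson r: {r:.3f}")
```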
copenlu/spiced
[ "task_categories:text-classification", "task_ids:text-scoring", "task_ids:semantic-similarity-scoring", "annotations_creators:crowdsourced", "annotations_creators:machine-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:extended|s2orc", "language:en", "license:mit", "scientific text", "scholarly text", "semantic text similarity", "fact checking", "misinformation", "region:us" ]
2022-10-20T14:18:50+00:00
{"annotations_creators": ["crowdsourced", "machine-generated"], "language_creators": ["found"], "language": ["en"], "license": ["mit"], "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "source_datasets": ["extended|s2orc"], "task_categories": ["text-classification"], "task_ids": ["text-scoring", "semantic-similarity-scoring"], "pretty_name": "SPICED", "tags": ["scientific text", "scholarly text", "semantic text similarity", "fact checking", "misinformation"]}
2022-10-24T11:31:04+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #task_ids-text-scoring #task_ids-semantic-similarity-scoring #annotations_creators-crowdsourced #annotations_creators-machine-generated #language_creators-found #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-extended|s2orc #language-English #license-mit #scientific text #scholarly text #semantic text similarity #fact checking #misinformation #region-us
# Dataset Card for SPICED ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Contributions ## Dataset Description - Homepage: URL - Repository: URL - Paper: ### Dataset Summary The Scientific Paraphrase and Information ChangE Dataset (SPICED) is a dataset of paired scientific findings from scientific papers, news media, and Twitter. The types of pairs are between <paper, news> and <paper, tweet>. Each pair is labeled for the degree of information similarity in the _findings_ described by each sentence, on a scale from 1-5. This is called the _Information Matching Score (IMS)_. The data was curated from S2ORC and matched news articles and Tweets using Altmetric. Instances are annotated by experts using the Prolific platform and Potato. Please use the following citation when using this dataset: ### Supported Tasks and Leaderboards The task is to predict the IMS between two scientific sentences, which is a scalar between 1 and 5. Preferred metrics are mean-squared error and Pearson correlation. ### Languages English ## Dataset Structure ### Data Fields - DOI: The DOI of the original scientific article - instance\_id: Unique instance ID for the sample. The ID contains the field, whether or not it is a tweet, and whether or not the sample was manually labeled or automatically using SBERT (marked as "easy") - News Finding: Text of the news or tweet finding - Paper Finding: Text of the paper finding - News Context: For news instances, the surrounding two sentences for the news finding. For tweets, a copy of the tweet - Paper Context: The surrounding two sentences for the paper finding - scores: Annotator scores after removing low competence annotators - field: The academic field of the paper ('Computer\_Science', 'Medicine', 'Biology', or 'Psychology') - split: The dataset split ('train', 'val', or 'test') - final\_score: The IMS of the instance - source: Either "news" or "tweet" - News Url: A URL to the source article if a news instance or the tweet ID of a tweet ### Data Splits - train: 4721 instances - validation: 664 instances - test: 640 instances ## Dataset Creation For the full details of how the dataset was created, please refer to our [EMNLP 2022 paper](). ### Curation Rationale Science communication is a complex process of translation from highly technical scientific language to common language that lay people can understand. At the same time, the general public relies on good science communication in order to inform critical decisions about their health and behavior. SPICED was curated in order to provide a training dataset and benchmark for machine learning models to measure changes in scientific information at different stages of the science communication pipeline. ### Source Data #### Initial Data Collection and Normalization Scientific text: S2ORC News articles and Tweets are collected through Altmetric. #### Who are the source language producers? Scientists, journalists, and Twitter users. ### Annotations #### Annotation process #### Who are the annotators? 
### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset Models trained on SPICED can be used to perform large scale analyses of science communication. They can be used to match the same finding discussed in different media, and reveal trends in differences in reporting at different stages of the science communication pipeline. It is hoped that this can help to build tools which will improve science communication. ### Discussion of Biases The dataset is restricted to computer science, medicine, biology, and psychology, which may introduce some bias in the topics which models will perform well on. ### Other Known Limitations While some context is available, we do not release the full text of news articles and scientific papers, which may contain further context to help with learning the task. We do however provide the paper DOIs and links to the original news articles in case full text is desired. ## Additional Information ### Dataset Curators Dustin Wright, Jiaxin Pei, David Jurgens, and Isabelle Augenstein ### Licensing Information MIT ### Contributions Thanks to @dwright37 for adding this dataset.
[ "# Dataset Card for SPICED", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Contributions", "## Dataset Description\n\n- Homepage: URL\n- Repository: URL\n- Paper:", "### Dataset Summary\n\nThe Scientific Paraphrase and Information ChangE Dataset (SPICED) is a dataset of paired scientific findings from scientific papers, news media, and Twitter. The types of pairs are between <paper, news> and <paper, tweet>. Each pair is labeled for the degree of information similarity in the _findings_ described by each sentence, on a scale from 1-5. This is called the _Information Matching Score (IMS)_. The data was curated from S2ORC and matched news articles and Tweets using Altmetric. Instances are annotated by experts using the Prolific platform and Potato. Please use the following citation when using this dataset:", "### Supported Tasks and Leaderboards\n\nThe task is to predict the IMS between two scientific sentences, which is a scalar between 1 and 5. Preferred metrics are mean-squared error and Pearson correlation.", "### Languages\n\nEnglish", "## Dataset Structure", "### Data Fields\n\n- DOI: The DOI of the original scientific article\n- instance\\_id: Unique instance ID for the sample. The ID contains the field, whether or not it is a tweet, and whether or not the sample was manually labeled or automatically using SBERT (marked as \"easy\")\n- News Finding: Text of the news or tweet finding\n- Paper Finding: Text of the paper finding\n- News Context: For news instances, the surrounding two sentences for the news finding. For tweets, a copy of the tweet\n- Paper Context: The surrounding two sentences for the paper finding\n- scores: Annotator scores after removing low competence annotators\n- field: The academic field of the paper ('Computer\\_Science', 'Medicine', 'Biology', or 'Psychology')\n- split: The dataset split ('train', 'val', or 'test')\n- final\\_score: The IMS of the instance\n- source: Either \"news\" or \"tweet\"\n- News Url: A URL to the source article if a news instance or the tweet ID of a tweet", "### Data Splits\n\n- train: 4721 instances\n- validation: 664 instances\n- test: 640 instances", "## Dataset Creation\n\nFor the full details of how the dataset was created, please refer to our [EMNLP 2022 paper]().", "### Curation Rationale\n\nScience communication is a complex process of translation from highly technical scientific language to common language that lay people can understand. At the same time, the general public relies on good science communication in order to inform critical decisions about their health and behavior. 
SPICED was curated in order to provide a training dataset and benchmark for machine learning models to measure changes in scientific information at different stages of the science communication pipeline.", "### Source Data", "#### Initial Data Collection and Normalization\n\nScientific text: S2ORC\n\nNews articles and Tweets are collected through Altmetric.", "#### Who are the source language producers?\n\nScientists, journalists, and Twitter users.", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset\n\nModels trained on SPICED can be used to perform large scale analyses of science communication. They can be used to match the same finding discussed in different media, and reveal trends in differences in reporting at different stages of the science communication pipeline. It is hoped that this can help to build tools which will improve science communication.", "### Discussion of Biases\n\nThe dataset is restricted to computer science, medicine, biology, and psychology, which may introduce some bias in the topics which models will perform well on.", "### Other Known Limitations\n\nWhile some context is available, we do not release the full text of news articles and scientific papers, which may contain further context to help with learning the task. We do however provide the paper DOIs and links to the original news articles in case full text is desired.", "## Additional Information", "### Dataset Curators\n\nDustin Wright, Jiaxin Pei, David Jurgens, and Isabelle Augenstein", "### Licensing Information\n\nMIT", "### Contributions\n\nThanks to @dwright37 for adding this dataset." ]
[ "TAGS\n#task_categories-text-classification #task_ids-text-scoring #task_ids-semantic-similarity-scoring #annotations_creators-crowdsourced #annotations_creators-machine-generated #language_creators-found #multilinguality-monolingual #size_categories-1K<n<10K #source_datasets-extended|s2orc #language-English #license-mit #scientific text #scholarly text #semantic text similarity #fact checking #misinformation #region-us \n", "# Dataset Card for SPICED", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Contributions", "## Dataset Description\n\n- Homepage: URL\n- Repository: URL\n- Paper:", "### Dataset Summary\n\nThe Scientific Paraphrase and Information ChangE Dataset (SPICED) is a dataset of paired scientific findings from scientific papers, news media, and Twitter. The types of pairs are between <paper, news> and <paper, tweet>. Each pair is labeled for the degree of information similarity in the _findings_ described by each sentence, on a scale from 1-5. This is called the _Information Matching Score (IMS)_. The data was curated from S2ORC and matched news articles and Tweets using Altmetric. Instances are annotated by experts using the Prolific platform and Potato. Please use the following citation when using this dataset:", "### Supported Tasks and Leaderboards\n\nThe task is to predict the IMS between two scientific sentences, which is a scalar between 1 and 5. Preferred metrics are mean-squared error and Pearson correlation.", "### Languages\n\nEnglish", "## Dataset Structure", "### Data Fields\n\n- DOI: The DOI of the original scientific article\n- instance\\_id: Unique instance ID for the sample. The ID contains the field, whether or not it is a tweet, and whether or not the sample was manually labeled or automatically using SBERT (marked as \"easy\")\n- News Finding: Text of the news or tweet finding\n- Paper Finding: Text of the paper finding\n- News Context: For news instances, the surrounding two sentences for the news finding. For tweets, a copy of the tweet\n- Paper Context: The surrounding two sentences for the paper finding\n- scores: Annotator scores after removing low competence annotators\n- field: The academic field of the paper ('Computer\\_Science', 'Medicine', 'Biology', or 'Psychology')\n- split: The dataset split ('train', 'val', or 'test')\n- final\\_score: The IMS of the instance\n- source: Either \"news\" or \"tweet\"\n- News Url: A URL to the source article if a news instance or the tweet ID of a tweet", "### Data Splits\n\n- train: 4721 instances\n- validation: 664 instances\n- test: 640 instances", "## Dataset Creation\n\nFor the full details of how the dataset was created, please refer to our [EMNLP 2022 paper]().", "### Curation Rationale\n\nScience communication is a complex process of translation from highly technical scientific language to common language that lay people can understand. At the same time, the general public relies on good science communication in order to inform critical decisions about their health and behavior. 
SPICED was curated in order to provide a training dataset and benchmark for machine learning models to measure changes in scientific information at different stages of the science communication pipeline.", "### Source Data", "#### Initial Data Collection and Normalization\n\nScientific text: S2ORC\n\nNews articles and Tweets are collected through Altmetric.", "#### Who are the source language producers?\n\nScientists, journalists, and Twitter users.", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information", "## Considerations for Using the Data", "### Social Impact of Dataset\n\nModels trained on SPICED can be used to perform large scale analyses of science communication. They can be used to match the same finding discussed in different media, and reveal trends in differences in reporting at different stages of the science communication pipeline. It is hoped that this can help to build tools which will improve science communication.", "### Discussion of Biases\n\nThe dataset is restricted to computer science, medicine, biology, and psychology, which may introduce some bias in the topics which models will perform well on.", "### Other Known Limitations\n\nWhile some context is available, we do not release the full text of news articles and scientific papers, which may contain further context to help with learning the task. We do however provide the paper DOIs and links to the original news articles in case full text is desired.", "## Additional Information", "### Dataset Curators\n\nDustin Wright, Jiaxin Pei, David Jurgens, and Isabelle Augenstein", "### Licensing Information\n\nMIT", "### Contributions\n\nThanks to @dwright37 for adding this dataset." ]
1d1b487f8fa455d2c09468bbfb58d971bf7f1720
# Dataset Card for "relbert/semeval2012_relational_similarity_v4" ## Dataset Description - **Repository:** [RelBERT](https://github.com/asahi417/relbert) - **Paper:** [https://aclanthology.org/S12-1047/](https://aclanthology.org/S12-1047/) - **Dataset:** SemEval2012: Relational Similarity ### Dataset Summary ***IMPORTANT***: This is the same dataset as [relbert/semeval2012_relational_similarity](https://huggingface.co/datasets/relbert/semeval2012_relational_similarity), but with a different dataset construction. Relational similarity dataset from [SemEval2012 task 2](https://aclanthology.org/S12-1047/), compiled to fine-tune [RelBERT](https://github.com/asahi417/relbert) model. The dataset contains a list of positive and negative word pair from 89 pre-defined relations. The relation types are constructed on top of following 10 parent relation types. ```shell { 1: "Class Inclusion", # Hypernym 2: "Part-Whole", # Meronym, Substance Meronym 3: "Similar", # Synonym, Co-hypornym 4: "Contrast", # Antonym 5: "Attribute", # Attribute, Event 6: "Non Attribute", 7: "Case Relation", 8: "Cause-Purpose", 9: "Space-Time", 10: "Representation" } ``` Each of the parent relation is further grouped into child relation types where the definition can be found [here](https://drive.google.com/file/d/0BzcZKTSeYL8VenY0QkVpZVpxYnc/view?resourcekey=0-ZP-UARfJj39PcLroibHPHw). ## Dataset Structure ### Data Instances An example of `train` looks as follows. ``` { 'relation_type': '8d', 'positives': [ [ "breathe", "live" ], [ "study", "learn" ], [ "speak", "communicate" ], ... ] 'negatives': [ [ "starving", "hungry" ], [ "clean", "bathe" ], [ "hungry", "starving" ], ... ] } ``` ### Data Splits | name |train|validation| |---------|----:|---------:| |semeval2012_relational_similarity| 89 | 89| ### Number of Positive/Negative Word-pairs in each Split | | positives | negatives | |:--------------------------------------------|------------:|------------:| | ('1', 'parent', 'train') | 88 | 544 | | ('1', 'parent', 'validation') | 22 | 136 | | ('10', 'parent', 'train') | 48 | 584 | | ('10', 'parent', 'validation') | 12 | 146 | | ('10a', 'child', 'train') | 8 | 1324 | | ('10a', 'child', 'validation') | 2 | 331 | | ('10a', 'child_prototypical', 'train') | 97 | 1917 | | ('10a', 'child_prototypical', 'validation') | 26 | 521 | | ('10b', 'child', 'train') | 8 | 1325 | | ('10b', 'child', 'validation') | 2 | 331 | | ('10b', 'child_prototypical', 'train') | 90 | 1558 | | ('10b', 'child_prototypical', 'validation') | 27 | 469 | | ('10c', 'child', 'train') | 8 | 1327 | | ('10c', 'child', 'validation') | 2 | 331 | | ('10c', 'child_prototypical', 'train') | 85 | 1640 | | ('10c', 'child_prototypical', 'validation') | 20 | 390 | | ('10d', 'child', 'train') | 8 | 1328 | | ('10d', 'child', 'validation') | 2 | 331 | | ('10d', 'child_prototypical', 'train') | 77 | 1390 | | ('10d', 'child_prototypical', 'validation') | 22 | 376 | | ('10e', 'child', 'train') | 8 | 1329 | | ('10e', 'child', 'validation') | 2 | 332 | | ('10e', 'child_prototypical', 'train') | 67 | 884 | | ('10e', 'child_prototypical', 'validation') | 20 | 234 | | ('10f', 'child', 'train') | 8 | 1328 | | ('10f', 'child', 'validation') | 2 | 331 | | ('10f', 'child_prototypical', 'train') | 80 | 1460 | | ('10f', 'child_prototypical', 'validation') | 19 | 306 | | ('1a', 'child', 'train') | 8 | 1324 | | ('1a', 'child', 'validation') | 2 | 331 | | ('1a', 'child_prototypical', 'train') | 106 | 1854 | | ('1a', 'child_prototypical', 'validation') | 17 | 338 | | ('1b', 'child', 
'train') | 8 | 1324 | | ('1b', 'child', 'validation') | 2 | 331 | | ('1b', 'child_prototypical', 'train') | 95 | 1712 | | ('1b', 'child_prototypical', 'validation') | 28 | 480 | | ('1c', 'child', 'train') | 8 | 1327 | | ('1c', 'child', 'validation') | 2 | 331 | | ('1c', 'child_prototypical', 'train') | 80 | 1528 | | ('1c', 'child_prototypical', 'validation') | 25 | 502 | | ('1d', 'child', 'train') | 8 | 1323 | | ('1d', 'child', 'validation') | 2 | 330 | | ('1d', 'child_prototypical', 'train') | 112 | 2082 | | ('1d', 'child_prototypical', 'validation') | 23 | 458 | | ('1e', 'child', 'train') | 8 | 1329 | | ('1e', 'child', 'validation') | 2 | 332 | | ('1e', 'child_prototypical', 'train') | 63 | 775 | | ('1e', 'child_prototypical', 'validation') | 24 | 256 | | ('2', 'parent', 'train') | 80 | 552 | | ('2', 'parent', 'validation') | 20 | 138 | | ('2a', 'child', 'train') | 8 | 1324 | | ('2a', 'child', 'validation') | 2 | 330 | | ('2a', 'child_prototypical', 'train') | 93 | 1885 | | ('2a', 'child_prototypical', 'validation') | 36 | 736 | | ('2b', 'child', 'train') | 8 | 1327 | | ('2b', 'child', 'validation') | 2 | 331 | | ('2b', 'child_prototypical', 'train') | 86 | 1326 | | ('2b', 'child_prototypical', 'validation') | 19 | 284 | | ('2c', 'child', 'train') | 8 | 1325 | | ('2c', 'child', 'validation') | 2 | 331 | | ('2c', 'child_prototypical', 'train') | 96 | 1773 | | ('2c', 'child_prototypical', 'validation') | 21 | 371 | | ('2d', 'child', 'train') | 8 | 1328 | | ('2d', 'child', 'validation') | 2 | 331 | | ('2d', 'child_prototypical', 'train') | 79 | 1329 | | ('2d', 'child_prototypical', 'validation') | 20 | 338 | | ('2e', 'child', 'train') | 8 | 1327 | | ('2e', 'child', 'validation') | 2 | 331 | | ('2e', 'child_prototypical', 'train') | 82 | 1462 | | ('2e', 'child_prototypical', 'validation') | 23 | 463 | | ('2f', 'child', 'train') | 8 | 1327 | | ('2f', 'child', 'validation') | 2 | 331 | | ('2f', 'child_prototypical', 'train') | 88 | 1869 | | ('2f', 'child_prototypical', 'validation') | 17 | 371 | | ('2g', 'child', 'train') | 8 | 1323 | | ('2g', 'child', 'validation') | 2 | 330 | | ('2g', 'child_prototypical', 'train') | 108 | 1925 | | ('2g', 'child_prototypical', 'validation') | 27 | 480 | | ('2h', 'child', 'train') | 8 | 1327 | | ('2h', 'child', 'validation') | 2 | 331 | | ('2h', 'child_prototypical', 'train') | 84 | 1540 | | ('2h', 'child_prototypical', 'validation') | 21 | 385 | | ('2i', 'child', 'train') | 8 | 1328 | | ('2i', 'child', 'validation') | 2 | 332 | | ('2i', 'child_prototypical', 'train') | 72 | 1335 | | ('2i', 'child_prototypical', 'validation') | 21 | 371 | | ('2j', 'child', 'train') | 8 | 1328 | | ('2j', 'child', 'validation') | 2 | 331 | | ('2j', 'child_prototypical', 'train') | 80 | 1595 | | ('2j', 'child_prototypical', 'validation') | 19 | 369 | | ('3', 'parent', 'train') | 64 | 568 | | ('3', 'parent', 'validation') | 16 | 142 | | ('3a', 'child', 'train') | 8 | 1327 | | ('3a', 'child', 'validation') | 2 | 331 | | ('3a', 'child_prototypical', 'train') | 87 | 1597 | | ('3a', 'child_prototypical', 'validation') | 18 | 328 | | ('3b', 'child', 'train') | 8 | 1327 | | ('3b', 'child', 'validation') | 2 | 331 | | ('3b', 'child_prototypical', 'train') | 87 | 1833 | | ('3b', 'child_prototypical', 'validation') | 18 | 407 | | ('3c', 'child', 'train') | 8 | 1326 | | ('3c', 'child', 'validation') | 2 | 331 | | ('3c', 'child_prototypical', 'train') | 93 | 1664 | | ('3c', 'child_prototypical', 'validation') | 18 | 315 | | ('3d', 'child', 'train') | 8 | 1324 | | ('3d', 'child', 
'validation') | 2 | 331 | | ('3d', 'child_prototypical', 'train') | 101 | 1943 | | ('3d', 'child_prototypical', 'validation') | 22 | 372 | | ('3e', 'child', 'train') | 8 | 1332 | | ('3e', 'child', 'validation') | 2 | 332 | | ('3e', 'child_prototypical', 'train') | 49 | 900 | | ('3e', 'child_prototypical', 'validation') | 20 | 368 | | ('3f', 'child', 'train') | 8 | 1327 | | ('3f', 'child', 'validation') | 2 | 331 | | ('3f', 'child_prototypical', 'train') | 90 | 1983 | | ('3f', 'child_prototypical', 'validation') | 15 | 362 | | ('3g', 'child', 'train') | 8 | 1331 | | ('3g', 'child', 'validation') | 2 | 332 | | ('3g', 'child_prototypical', 'train') | 61 | 1089 | | ('3g', 'child_prototypical', 'validation') | 14 | 251 | | ('3h', 'child', 'train') | 8 | 1328 | | ('3h', 'child', 'validation') | 2 | 331 | | ('3h', 'child_prototypical', 'train') | 71 | 1399 | | ('3h', 'child_prototypical', 'validation') | 28 | 565 | | ('4', 'parent', 'train') | 64 | 568 | | ('4', 'parent', 'validation') | 16 | 142 | | ('4a', 'child', 'train') | 8 | 1327 | | ('4a', 'child', 'validation') | 2 | 331 | | ('4a', 'child_prototypical', 'train') | 85 | 1766 | | ('4a', 'child_prototypical', 'validation') | 20 | 474 | | ('4b', 'child', 'train') | 8 | 1330 | | ('4b', 'child', 'validation') | 2 | 332 | | ('4b', 'child_prototypical', 'train') | 66 | 949 | | ('4b', 'child_prototypical', 'validation') | 15 | 214 | | ('4c', 'child', 'train') | 8 | 1326 | | ('4c', 'child', 'validation') | 2 | 331 | | ('4c', 'child_prototypical', 'train') | 86 | 1755 | | ('4c', 'child_prototypical', 'validation') | 25 | 446 | | ('4d', 'child', 'train') | 8 | 1332 | | ('4d', 'child', 'validation') | 2 | 333 | | ('4d', 'child_prototypical', 'train') | 46 | 531 | | ('4d', 'child_prototypical', 'validation') | 17 | 218 | | ('4e', 'child', 'train') | 8 | 1326 | | ('4e', 'child', 'validation') | 2 | 331 | | ('4e', 'child_prototypical', 'train') | 92 | 2021 | | ('4e', 'child_prototypical', 'validation') | 19 | 402 | | ('4f', 'child', 'train') | 8 | 1328 | | ('4f', 'child', 'validation') | 2 | 332 | | ('4f', 'child_prototypical', 'train') | 72 | 1464 | | ('4f', 'child_prototypical', 'validation') | 21 | 428 | | ('4g', 'child', 'train') | 8 | 1324 | | ('4g', 'child', 'validation') | 2 | 330 | | ('4g', 'child_prototypical', 'train') | 106 | 2057 | | ('4g', 'child_prototypical', 'validation') | 23 | 435 | | ('4h', 'child', 'train') | 8 | 1326 | | ('4h', 'child', 'validation') | 2 | 331 | | ('4h', 'child_prototypical', 'train') | 85 | 1787 | | ('4h', 'child_prototypical', 'validation') | 26 | 525 | | ('5', 'parent', 'train') | 72 | 560 | | ('5', 'parent', 'validation') | 18 | 140 | | ('5a', 'child', 'train') | 8 | 1324 | | ('5a', 'child', 'validation') | 2 | 331 | | ('5a', 'child_prototypical', 'train') | 101 | 1876 | | ('5a', 'child_prototypical', 'validation') | 22 | 439 | | ('5b', 'child', 'train') | 8 | 1329 | | ('5b', 'child', 'validation') | 2 | 332 | | ('5b', 'child_prototypical', 'train') | 70 | 1310 | | ('5b', 'child_prototypical', 'validation') | 17 | 330 | | ('5c', 'child', 'train') | 8 | 1327 | | ('5c', 'child', 'validation') | 2 | 331 | | ('5c', 'child_prototypical', 'train') | 85 | 1552 | | ('5c', 'child_prototypical', 'validation') | 20 | 373 | | ('5d', 'child', 'train') | 8 | 1324 | | ('5d', 'child', 'validation') | 2 | 330 | | ('5d', 'child_prototypical', 'train') | 102 | 1783 | | ('5d', 'child_prototypical', 'validation') | 27 | 580 | | ('5e', 'child', 'train') | 8 | 1329 | | ('5e', 'child', 'validation') | 2 | 332 | | ('5e', 
'child_prototypical', 'train') | 68 | 1283 | | ('5e', 'child_prototypical', 'validation') | 19 | 357 | | ('5f', 'child', 'train') | 8 | 1327 | | ('5f', 'child', 'validation') | 2 | 331 | | ('5f', 'child_prototypical', 'train') | 77 | 1568 | | ('5f', 'child_prototypical', 'validation') | 28 | 567 | | ('5g', 'child', 'train') | 8 | 1328 | | ('5g', 'child', 'validation') | 2 | 332 | | ('5g', 'child_prototypical', 'train') | 79 | 1626 | | ('5g', 'child_prototypical', 'validation') | 14 | 266 | | ('5h', 'child', 'train') | 8 | 1324 | | ('5h', 'child', 'validation') | 2 | 330 | | ('5h', 'child_prototypical', 'train') | 109 | 2348 | | ('5h', 'child_prototypical', 'validation') | 20 | 402 | | ('5i', 'child', 'train') | 8 | 1324 | | ('5i', 'child', 'validation') | 2 | 331 | | ('5i', 'child_prototypical', 'train') | 96 | 2010 | | ('5i', 'child_prototypical', 'validation') | 27 | 551 | | ('6', 'parent', 'train') | 64 | 568 | | ('6', 'parent', 'validation') | 16 | 142 | | ('6a', 'child', 'train') | 8 | 1324 | | ('6a', 'child', 'validation') | 2 | 330 | | ('6a', 'child_prototypical', 'train') | 102 | 1962 | | ('6a', 'child_prototypical', 'validation') | 27 | 530 | | ('6b', 'child', 'train') | 8 | 1327 | | ('6b', 'child', 'validation') | 2 | 331 | | ('6b', 'child_prototypical', 'train') | 90 | 1840 | | ('6b', 'child_prototypical', 'validation') | 15 | 295 | | ('6c', 'child', 'train') | 8 | 1325 | | ('6c', 'child', 'validation') | 2 | 331 | | ('6c', 'child_prototypical', 'train') | 90 | 1968 | | ('6c', 'child_prototypical', 'validation') | 27 | 527 | | ('6d', 'child', 'train') | 8 | 1328 | | ('6d', 'child', 'validation') | 2 | 331 | | ('6d', 'child_prototypical', 'train') | 82 | 1903 | | ('6d', 'child_prototypical', 'validation') | 17 | 358 | | ('6e', 'child', 'train') | 8 | 1327 | | ('6e', 'child', 'validation') | 2 | 331 | | ('6e', 'child_prototypical', 'train') | 85 | 1737 | | ('6e', 'child_prototypical', 'validation') | 20 | 398 | | ('6f', 'child', 'train') | 8 | 1326 | | ('6f', 'child', 'validation') | 2 | 331 | | ('6f', 'child_prototypical', 'train') | 87 | 1652 | | ('6f', 'child_prototypical', 'validation') | 24 | 438 | | ('6g', 'child', 'train') | 8 | 1326 | | ('6g', 'child', 'validation') | 2 | 331 | | ('6g', 'child_prototypical', 'train') | 94 | 1740 | | ('6g', 'child_prototypical', 'validation') | 17 | 239 | | ('6h', 'child', 'train') | 8 | 1324 | | ('6h', 'child', 'validation') | 2 | 330 | | ('6h', 'child_prototypical', 'train') | 115 | 2337 | | ('6h', 'child_prototypical', 'validation') | 14 | 284 | | ('7', 'parent', 'train') | 64 | 568 | | ('7', 'parent', 'validation') | 16 | 142 | | ('7a', 'child', 'train') | 8 | 1324 | | ('7a', 'child', 'validation') | 2 | 331 | | ('7a', 'child_prototypical', 'train') | 99 | 2045 | | ('7a', 'child_prototypical', 'validation') | 24 | 516 | | ('7b', 'child', 'train') | 8 | 1330 | | ('7b', 'child', 'validation') | 2 | 332 | | ('7b', 'child_prototypical', 'train') | 69 | 905 | | ('7b', 'child_prototypical', 'validation') | 12 | 177 | | ('7c', 'child', 'train') | 8 | 1327 | | ('7c', 'child', 'validation') | 2 | 331 | | ('7c', 'child_prototypical', 'train') | 85 | 1402 | | ('7c', 'child_prototypical', 'validation') | 20 | 313 | | ('7d', 'child', 'train') | 8 | 1324 | | ('7d', 'child', 'validation') | 2 | 331 | | ('7d', 'child_prototypical', 'train') | 98 | 2064 | | ('7d', 'child_prototypical', 'validation') | 25 | 497 | | ('7e', 'child', 'train') | 8 | 1328 | | ('7e', 'child', 'validation') | 2 | 331 | | ('7e', 'child_prototypical', 'train') | 78 | 1270 | | 
('7e', 'child_prototypical', 'validation') | 21 | 298 | | ('7f', 'child', 'train') | 8 | 1326 | | ('7f', 'child', 'validation') | 2 | 331 | | ('7f', 'child_prototypical', 'train') | 89 | 1377 | | ('7f', 'child_prototypical', 'validation') | 22 | 380 | | ('7g', 'child', 'train') | 8 | 1328 | | ('7g', 'child', 'validation') | 2 | 332 | | ('7g', 'child_prototypical', 'train') | 72 | 885 | | ('7g', 'child_prototypical', 'validation') | 21 | 263 | | ('7h', 'child', 'train') | 8 | 1324 | | ('7h', 'child', 'validation') | 2 | 331 | | ('7h', 'child_prototypical', 'train') | 94 | 1479 | | ('7h', 'child_prototypical', 'validation') | 29 | 467 | | ('8', 'parent', 'train') | 64 | 568 | | ('8', 'parent', 'validation') | 16 | 142 | | ('8a', 'child', 'train') | 8 | 1324 | | ('8a', 'child', 'validation') | 2 | 331 | | ('8a', 'child_prototypical', 'train') | 93 | 1640 | | ('8a', 'child_prototypical', 'validation') | 30 | 552 | | ('8b', 'child', 'train') | 8 | 1330 | | ('8b', 'child', 'validation') | 2 | 332 | | ('8b', 'child_prototypical', 'train') | 61 | 1126 | | ('8b', 'child_prototypical', 'validation') | 20 | 361 | | ('8c', 'child', 'train') | 8 | 1326 | | ('8c', 'child', 'validation') | 2 | 331 | | ('8c', 'child_prototypical', 'train') | 96 | 1547 | | ('8c', 'child_prototypical', 'validation') | 15 | 210 | | ('8d', 'child', 'train') | 8 | 1325 | | ('8d', 'child', 'validation') | 2 | 331 | | ('8d', 'child_prototypical', 'train') | 92 | 1472 | | ('8d', 'child_prototypical', 'validation') | 25 | 438 | | ('8e', 'child', 'train') | 8 | 1327 | | ('8e', 'child', 'validation') | 2 | 331 | | ('8e', 'child_prototypical', 'train') | 87 | 1340 | | ('8e', 'child_prototypical', 'validation') | 18 | 270 | | ('8f', 'child', 'train') | 8 | 1326 | | ('8f', 'child', 'validation') | 2 | 331 | | ('8f', 'child_prototypical', 'train') | 83 | 1416 | | ('8f', 'child_prototypical', 'validation') | 28 | 452 | | ('8g', 'child', 'train') | 8 | 1330 | | ('8g', 'child', 'validation') | 2 | 332 | | ('8g', 'child_prototypical', 'train') | 62 | 640 | | ('8g', 'child_prototypical', 'validation') | 19 | 199 | | ('8h', 'child', 'train') | 8 | 1324 | | ('8h', 'child', 'validation') | 2 | 331 | | ('8h', 'child_prototypical', 'train') | 100 | 1816 | | ('8h', 'child_prototypical', 'validation') | 23 | 499 | | ('9', 'parent', 'train') | 72 | 560 | | ('9', 'parent', 'validation') | 18 | 140 | | ('9a', 'child', 'train') | 8 | 1324 | | ('9a', 'child', 'validation') | 2 | 331 | | ('9a', 'child_prototypical', 'train') | 96 | 1520 | | ('9a', 'child_prototypical', 'validation') | 27 | 426 | | ('9b', 'child', 'train') | 8 | 1326 | | ('9b', 'child', 'validation') | 2 | 331 | | ('9b', 'child_prototypical', 'train') | 93 | 1783 | | ('9b', 'child_prototypical', 'validation') | 18 | 307 | | ('9c', 'child', 'train') | 8 | 1330 | | ('9c', 'child', 'validation') | 2 | 332 | | ('9c', 'child_prototypical', 'train') | 59 | 433 | | ('9c', 'child_prototypical', 'validation') | 22 | 163 | | ('9d', 'child', 'train') | 8 | 1328 | | ('9d', 'child', 'validation') | 2 | 332 | | ('9d', 'child_prototypical', 'train') | 78 | 1683 | | ('9d', 'child_prototypical', 'validation') | 15 | 302 | | ('9e', 'child', 'train') | 8 | 1329 | | ('9e', 'child', 'validation') | 2 | 332 | | ('9e', 'child_prototypical', 'train') | 66 | 1426 | | ('9e', 'child_prototypical', 'validation') | 21 | 475 | | ('9f', 'child', 'train') | 8 | 1328 | | ('9f', 'child', 'validation') | 2 | 331 | | ('9f', 'child_prototypical', 'train') | 79 | 1436 | | ('9f', 'child_prototypical', 'validation') | 20 | 330 
| | ('9g', 'child', 'train') | 8 | 1324 | | ('9g', 'child', 'validation') | 2 | 331 | | ('9g', 'child_prototypical', 'train') | 100 | 1685 | | ('9g', 'child_prototypical', 'validation') | 23 | 384 | | ('9h', 'child', 'train') | 8 | 1325 | | ('9h', 'child', 'validation') | 2 | 331 | | ('9h', 'child_prototypical', 'train') | 95 | 1799 | | ('9h', 'child_prototypical', 'validation') | 22 | 462 | | ('9i', 'child', 'train') | 8 | 1328 | | ('9i', 'child', 'validation') | 2 | 332 | | ('9i', 'child_prototypical', 'train') | 79 | 1361 | | ('9i', 'child_prototypical', 'validation') | 14 | 252 | ### Citation Information ``` @inproceedings{jurgens-etal-2012-semeval, title = "{S}em{E}val-2012 Task 2: Measuring Degrees of Relational Similarity", author = "Jurgens, David and Mohammad, Saif and Turney, Peter and Holyoak, Keith", booktitle = "*{SEM} 2012: The First Joint Conference on Lexical and Computational Semantics {--} Volume 1: Proceedings of the main conference and the shared task, and Volume 2: Proceedings of the Sixth International Workshop on Semantic Evaluation ({S}em{E}val 2012)", month = "7-8 " # jun, year = "2012", address = "Montr{\'e}al, Canada", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/S12-1047", pages = "356--364", } ```
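As a usage sketch, the per-relation `positives`/`negatives` lists can be flattened into labelled word pairs for fine-tuning; the repository id and default config below are assumptions based on this card, and the field names follow the schema documented above.

```python
# Flatten the per-relation word-pair lists into (head, tail, relation, label)
# examples for contrastive fine-tuning.
from datasets import load_dataset

data = load_dataset("research-backup/semeval2012_relational_similarity_v4")

examples = []
for record in data["train"]:
    relation = record["relation_type"]
    for head, tail in record["positives"]:
        examples.append((head, tail, relation, 1))  # positive pair
    for head, tail in record["negatives"]:
        examples.append((head, tail, relation, 0))  # negative pair

print(f"{len(examples)} labelled word pairs across {len(data['train'])} relations")
```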
research-backup/semeval2012_relational_similarity_v4
[ "multilinguality:monolingual", "size_categories:1K<n<10K", "language:en", "license:other", "region:us" ]
2022-10-20T14:21:19+00:00
{"language": ["en"], "license": ["other"], "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "pretty_name": "SemEval2012 task 2 Relational Similarity"}
2022-10-21T09:13:46+00:00
[]
[ "en" ]
TAGS #multilinguality-monolingual #size_categories-1K<n<10K #language-English #license-other #region-us
Dataset Card for "relbert/semeval2012\_relational\_similarity\_v4" ================================================================== Dataset Description ------------------- * Repository: RelBERT * Paper: URL * Dataset: SemEval2012: Relational Similarity ### Dataset Summary *IMPORTANT*: This is the same dataset as relbert/semeval2012\_relational\_similarity, but with a different dataset construction. Relational similarity dataset from SemEval2012 task 2, compiled to fine-tune RelBERT model. The dataset contains a list of positive and negative word pair from 89 pre-defined relations. The relation types are constructed on top of following 10 parent relation types. Each of the parent relation is further grouped into child relation types where the definition can be found here. Dataset Structure ----------------- ### Data Instances An example of 'train' looks as follows. ### Data Splits ### Number of Positive/Negative Word-pairs in each Split
[ "### Dataset Summary\n\n\n*IMPORTANT*: This is the same dataset as relbert/semeval2012\\_relational\\_similarity,\nbut with a different dataset construction.\n\n\nRelational similarity dataset from SemEval2012 task 2, compiled to fine-tune RelBERT model.\nThe dataset contains a list of positive and negative word pair from 89 pre-defined relations.\nThe relation types are constructed on top of following 10 parent relation types.\n\n\nEach of the parent relation is further grouped into child relation types where the definition can be found here.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nAn example of 'train' looks as follows.", "### Data Splits", "### Number of Positive/Negative Word-pairs in each Split" ]
[ "TAGS\n#multilinguality-monolingual #size_categories-1K<n<10K #language-English #license-other #region-us \n", "### Dataset Summary\n\n\n*IMPORTANT*: This is the same dataset as relbert/semeval2012\\_relational\\_similarity,\nbut with a different dataset construction.\n\n\nRelational similarity dataset from SemEval2012 task 2, compiled to fine-tune RelBERT model.\nThe dataset contains a list of positive and negative word pair from 89 pre-defined relations.\nThe relation types are constructed on top of following 10 parent relation types.\n\n\nEach of the parent relation is further grouped into child relation types where the definition can be found here.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nAn example of 'train' looks as follows.", "### Data Splits", "### Number of Positive/Negative Word-pairs in each Split" ]
f249eac7e732f016ae2db3ea0a1b1f90d76cf722
# Dataset Card for "israeli_soccer_news" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
devozs/israeli_soccer_news
[ "region:us" ]
2022-10-20T15:26:57+00:00
{"dataset_info": {"features": [{"name": "article_title", "dtype": "string"}, {"name": "article_body", "dtype": "string"}, {"name": "article_body_length", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 8956722.687408645, "num_examples": 4310}, {"name": "validation", "num_bytes": 995422.3125913552, "num_examples": 479}], "download_size": 4052466, "dataset_size": 9952145.0}}
2022-10-22T05:20:33+00:00
[]
[]
TAGS #region-us
# Dataset Card for "israeli_soccer_news" More Information needed
[ "# Dataset Card for \"israeli_soccer_news\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"israeli_soccer_news\"\n\nMore Information needed" ]
be3afa5953eab36033177935489e9515e53555cd
# Dataset Card for "emotion" ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [https://github.com/dair-ai/emotion_dataset](https://github.com/dair-ai/emotion_dataset) - **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Paper:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Size of downloaded dataset files:** 3.95 MB - **Size of the generated dataset:** 4.16 MB - **Total amount of disk used:** 8.11 MB ### Dataset Summary Emotion is a dataset of English Twitter messages with six basic emotions: anger, fear, joy, love, sadness, and surprise. For more detailed information please refer to the paper. ### Supported Tasks and Leaderboards [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Languages [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ## Dataset Structure ### Data Instances #### default - **Size of downloaded dataset files:** 1.97 MB - **Size of the generated dataset:** 2.07 MB - **Total amount of disk used:** 4.05 MB An example of 'train' looks as follows. ``` { "label": 0, "text": "im feeling quite sad and sorry for myself but ill snap out of it soon" } ``` #### emotion - **Size of downloaded dataset files:** 1.97 MB - **Size of the generated dataset:** 2.09 MB - **Total amount of disk used:** 4.06 MB An example of 'validation' looks as follows. ``` ``` ### Data Fields The data fields are the same among all splits. #### default - `text`: a `string` feature. - `label`: a classification label, with possible values including `sadness` (0), `joy` (1), `love` (2), `anger` (3), `fear` (4), `surprise` (5). #### emotion - `text`: a `string` feature. - `label`: a `string` feature. 
### Data Splits | name | train | validation | test | | ------- | ----: | ---------: | ---: | | default | 16000 | 2000 | 2000 | | emotion | 16000 | 2000 | 2000 | ## Dataset Creation ### Curation Rationale [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Source Data #### Initial Data Collection and Normalization [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### Who are the source language producers? [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Annotations #### Annotation process [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### Who are the annotators? [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Personal and Sensitive Information [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Discussion of Biases [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Other Known Limitations [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ## Additional Information ### Dataset Curators [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Licensing Information [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Citation Information ``` @inproceedings{saravia-etal-2018-carer, title = "{CARER}: Contextualized Affect Representations for Emotion Recognition", author = "Saravia, Elvis and Liu, Hsien-Chi Toby and Huang, Yen-Hao and Wu, Junlin and Chen, Yi-Shin", booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", month = oct # "-" # nov, year = "2018", address = "Brussels, Belgium", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/D18-1404", doi = "10.18653/v1/D18-1404", pages = "3687--3697", abstract = "Emotions are expressed in nuanced ways, which varies by collective or individual experiences, knowledge, and beliefs. Therefore, to understand emotion, as conveyed through text, a robust mechanism capable of capturing and modeling different linguistic nuances and phenomena is needed. We propose a semi-supervised, graph-based algorithm to produce rich structural descriptors which serve as the building blocks for constructing contextualized affect representations from text. The pattern-based representations are further enriched with word embeddings and evaluated through several emotion recognition tasks. 
Our experimental results demonstrate that the proposed method outperforms state-of-the-art techniques on emotion recognition tasks.", } ``` ### Contributions Thanks to [@lhoestq](https://github.com/lhoestq), [@thomwolf](https://github.com/thomwolf), [@lewtun](https://github.com/lewtun) for adding this dataset.
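A short loading sketch for this mirror; the repository id is this card's id, and the label handling below covers both documented configs (integer `ClassLabel` in `default`, plain string in `emotion`).

```python
# Print a human-readable label for the first training example.
from datasets import load_dataset

ds = load_dataset("zbnsl/emoteModified")  # assumed to mirror the configs above
sample = ds["train"][0]
label_feature = ds["train"].features["label"]

# default config: ClassLabel ids map to names; emotion config: already a string.
if hasattr(label_feature, "int2str"):
    print(sample["text"], "->", label_feature.int2str(sample["label"]))
else:
    print(sample["text"], "->", sample["label"])
```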
zbnsl/emoteModified
[ "task_categories:text-classification", "task_ids:multi-class-classification", "annotations_creators:machine-generated", "language_creators:machine-generated", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:original", "language:en", "license:unknown", "emotion-classification", "region:us" ]
2022-10-20T16:33:59+00:00
{"annotations_creators": ["machine-generated"], "language_creators": ["machine-generated"], "language": ["en"], "license": ["unknown"], "multilinguality": ["monolingual"], "size_categories": ["10K<n<100K"], "source_datasets": ["original"], "task_categories": ["text-classification"], "task_ids": ["multi-class-classification"], "paperswithcode_id": "emotion", "pretty_name": "Emotion", "tags": ["emotion-classification"], "train-eval-index": [{"config": "default", "task": "text-classification", "task_id": "multi_class_classification", "splits": {"train_split": "train", "eval_split": "test"}, "col_mapping": {"text": "text", "label": "target"}, "metrics": [{"type": "accuracy", "name": "Accuracy"}, {"type": "f1", "name": "F1 macro", "args": {"average": "macro"}}, {"type": "f1", "name": "F1 micro", "args": {"average": "micro"}}, {"type": "f1", "name": "F1 weighted", "args": {"average": "weighted"}}, {"type": "precision", "name": "Precision macro", "args": {"average": "macro"}}, {"type": "precision", "name": "Precision micro", "args": {"average": "micro"}}, {"type": "precision", "name": "Precision weighted", "args": {"average": "weighted"}}, {"type": "recall", "name": "Recall macro", "args": {"average": "macro"}}, {"type": "recall", "name": "Recall micro", "args": {"average": "micro"}}, {"type": "recall", "name": "Recall weighted", "args": {"average": "weighted"}}]}]}
2022-10-20T17:05:32+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #task_ids-multi-class-classification #annotations_creators-machine-generated #language_creators-machine-generated #multilinguality-monolingual #size_categories-10K<n<100K #source_datasets-original #language-English #license-unknown #emotion-classification #region-us
Dataset Card for "emotion" ========================== Table of Contents ----------------- * Dataset Description + Dataset Summary + Supported Tasks and Leaderboards + Languages * Dataset Structure + Data Instances + Data Fields + Data Splits * Dataset Creation + Curation Rationale + Source Data + Annotations + Personal and Sensitive Information * Considerations for Using the Data + Social Impact of Dataset + Discussion of Biases + Other Known Limitations * Additional Information + Dataset Curators + Licensing Information + Citation Information + Contributions Dataset Description ------------------- * Homepage: URL * Repository: * Paper: * Point of Contact: * Size of downloaded dataset files: 3.95 MB * Size of the generated dataset: 4.16 MB * Total amount of disk used: 8.11 MB ### Dataset Summary Emotion is a dataset of English Twitter messages with six basic emotions: anger, fear, joy, love, sadness, and surprise. For more detailed information please refer to the paper. ### Supported Tasks and Leaderboards ### Languages Dataset Structure ----------------- ### Data Instances #### default * Size of downloaded dataset files: 1.97 MB * Size of the generated dataset: 2.07 MB * Total amount of disk used: 4.05 MB An example of 'train' looks as follows. #### emotion * Size of downloaded dataset files: 1.97 MB * Size of the generated dataset: 2.09 MB * Total amount of disk used: 4.06 MB An example of 'validation' looks as follows. ### Data Fields The data fields are the same among all splits. #### default * 'text': a 'string' feature. * 'label': a classification label, with possible values including 'sadness' (0), 'joy' (1), 'love' (2), 'anger' (3), 'fear' (4), 'surprise' (5). #### emotion * 'text': a 'string' feature. * 'label': a 'string' feature. ### Data Splits Dataset Creation ---------------- ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information Considerations for Using the Data --------------------------------- ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations Additional Information ---------------------- ### Dataset Curators ### Licensing Information ### Contributions Thanks to @lhoestq, @thomwolf, @lewtun for adding this dataset.
[ "### Dataset Summary\n\n\nEmotion is a dataset of English Twitter messages with six basic emotions: anger, fear, joy, love, sadness, and surprise. For more detailed information please refer to the paper.", "### Supported Tasks and Leaderboards", "### Languages\n\n\nDataset Structure\n-----------------", "### Data Instances", "#### default\n\n\n* Size of downloaded dataset files: 1.97 MB\n* Size of the generated dataset: 2.07 MB\n* Total amount of disk used: 4.05 MB\n\n\nAn example of 'train' looks as follows.", "#### emotion\n\n\n* Size of downloaded dataset files: 1.97 MB\n* Size of the generated dataset: 2.09 MB\n* Total amount of disk used: 4.06 MB\n\n\nAn example of 'validation' looks as follows.", "### Data Fields\n\n\nThe data fields are the same among all splits.", "#### default\n\n\n* 'text': a 'string' feature.\n* 'label': a classification label, with possible values including 'sadness' (0), 'joy' (1), 'love' (2), 'anger' (3), 'fear' (4), 'surprise' (5).", "#### emotion\n\n\n* 'text': a 'string' feature.\n* 'label': a 'string' feature.", "### Data Splits\n\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information", "### Contributions\n\n\nThanks to @lhoestq, @thomwolf, @lewtun for adding this dataset." ]
[ "TAGS\n#task_categories-text-classification #task_ids-multi-class-classification #annotations_creators-machine-generated #language_creators-machine-generated #multilinguality-monolingual #size_categories-10K<n<100K #source_datasets-original #language-English #license-unknown #emotion-classification #region-us \n", "### Dataset Summary\n\n\nEmotion is a dataset of English Twitter messages with six basic emotions: anger, fear, joy, love, sadness, and surprise. For more detailed information please refer to the paper.", "### Supported Tasks and Leaderboards", "### Languages\n\n\nDataset Structure\n-----------------", "### Data Instances", "#### default\n\n\n* Size of downloaded dataset files: 1.97 MB\n* Size of the generated dataset: 2.07 MB\n* Total amount of disk used: 4.05 MB\n\n\nAn example of 'train' looks as follows.", "#### emotion\n\n\n* Size of downloaded dataset files: 1.97 MB\n* Size of the generated dataset: 2.09 MB\n* Total amount of disk used: 4.06 MB\n\n\nAn example of 'validation' looks as follows.", "### Data Fields\n\n\nThe data fields are the same among all splits.", "#### default\n\n\n* 'text': a 'string' feature.\n* 'label': a classification label, with possible values including 'sadness' (0), 'joy' (1), 'love' (2), 'anger' (3), 'fear' (4), 'surprise' (5).", "#### emotion\n\n\n* 'text': a 'string' feature.\n* 'label': a 'string' feature.", "### Data Splits\n\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information", "### Contributions\n\n\nThanks to @lhoestq, @thomwolf, @lewtun for adding this dataset." ]
c97d2c69303cfb47dc995d477e01c3329ee08568
Datasets used in our paper (https://arxiv.org/abs/2210.07468) ```bibtex @misc{wu-etal-2022-continued, title = {Transparency Helps Reveal When Language Models Learn Meaning}, author = {Zhaofeng Wu and William Merrill and Hao Peng and Iz Beltagy and Noah A. Smith}, url = {https://arxiv.org/abs/2210.07468}, publisher = {arXiv}, year = {2022}, doi = {10.48550/ARXIV.2210.07468} } ``` Please see the "Files and versions" tab for the data.
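Because the data ships as raw files rather than a loading script, one simple way to fetch everything from the "Files and versions" tab programmatically is a snapshot download:

```python
# Download every file in this dataset repository to a local cache directory.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(repo_id="ZhaofengWu/transparency-data",
                              repo_type="dataset")
print("Data downloaded to:", local_dir)
```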
ZhaofengWu/transparency-data
[ "license:apache-2.0", "arxiv:2210.07468", "region:us" ]
2022-10-20T16:39:28+00:00
{"license": "apache-2.0"}
2022-12-02T20:26:57+00:00
[ "2210.07468" ]
[]
TAGS #license-apache-2.0 #arxiv-2210.07468 #region-us
Datasets used in our paper (URL). Please see the "Files and versions" tab for the data.
[]
[ "TAGS\n#license-apache-2.0 #arxiv-2210.07468 #region-us \n" ]
0b26da66cec9a4d1e42bde3560aeae9f89f6433b
# Dataset Card for KQA Pro ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Configs](#data-configs) - [Data Splits](#data-splits) - [Additional Information](#additional-information) - [Knowledge Graph File](#knowledge-graph-file) - [How to run SPARQLs and programs](#how-to-run-sparqls-and-programs) - [How to Submit to Leaderboard](#how-to-submit-results-of-test-set) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** http://thukeg.gitee.io/kqa-pro/ - **Repository:** https://github.com/shijx12/KQAPro_Baselines - **Paper:** [KQA Pro: A Dataset with Explicit Compositional Programs for Complex Question Answering over Knowledge Base](https://aclanthology.org/2022.acl-long.422/) - **Leaderboard:** http://thukeg.gitee.io/kqa-pro/leaderboard.html - **Point of Contact:** shijx12 at gmail dot com ### Dataset Summary KQA Pro is a large-scale dataset for complex question answering over a knowledge base. The questions are very diverse and challenging, requiring multiple reasoning capabilities including compositional reasoning, multi-hop reasoning, quantitative comparison, and set operations. Strong supervision in the form of a SPARQL query and a program is provided for each question. ### Supported Tasks and Leaderboards It supports knowledge-graph-based question answering. Specifically, it provides a SPARQL query and a *program* for each question. ### Languages English ## Dataset Structure **train.json/val.json** ``` [ { 'question': str, 'sparql': str, # executable in our Virtuoso engine 'program': [ { 'function': str, # function name 'dependencies': [int], # functional inputs, representing indices of the preceding functions 'inputs': [str], # textual inputs } ], 'choices': [str], # 10 answer choices 'answer': str, # golden answer } ] ``` **test.json** ``` [ { 'question': str, 'choices': [str], # 10 answer choices } ] ``` ### Data Configs This dataset has two configs, `train_val` and `test`, because they have different available fields. Please specify the config explicitly, e.g. `load_dataset('drt/kqa_pro', 'train_val')`. ### Data Splits train, val, test ## Additional Information ### Knowledge Graph File You can find the knowledge graph file `kb.json` in the original GitHub repository.
It comes with the format: ```json { 'concepts': { '<id>': { 'name': str, 'instanceOf': ['<id>', '<id>'], # ids of parent concept } }, 'entities': # excluding concepts { '<id>': { 'name': str, 'instanceOf': ['<id>', '<id>'], # ids of parent concept 'attributes': [ { 'key': str, # attribute key 'value': # attribute value { 'type': 'string'/'quantity'/'date'/'year', 'value': float/int/str, # float or int for quantity, int for year, 'yyyy/mm/dd' for date 'unit': str, # for quantity }, 'qualifiers': { '<qk>': # qualifier key, one key may have multiple corresponding qualifier values [ { 'type': 'string'/'quantity'/'date'/'year', 'value': float/int/str, 'unit': str, }, # the format of qualifier value is similar to attribute value ] } }, ] 'relations': [ { 'predicate': str, 'object': '<id>', # NOTE: it may be a concept id 'direction': 'forward'/'backward', 'qualifiers': { '<qk>': # qualifier key, one key may have multiple corresponding qualifier values [ { 'type': 'string'/'quantity'/'date'/'year', 'value': float/int/str, 'unit': str, }, # the format of qualifier value is similar to attribute value ] } }, ] } } } ``` ### How to run SPARQLs and programs We implement multiple baselines in our [codebase](https://github.com/shijx12/KQAPro_Baselines), which includes a supervised SPARQL parser and program parser. In the SPARQL parser, we implement a query engine based on [Virtuoso](https://github.com/openlink/virtuoso-opensource.git). You can install the engine based on our [instructions](https://github.com/shijx12/KQAPro_Baselines/blob/master/SPARQL/README.md), and then feed your predicted SPARQL to get the answer. In the program parser, we implement a rule-based program executor, which receives a predicted program and returns the answer. Detailed introductions of our functions can be found in our [paper](https://arxiv.org/abs/2007.03875). ### How to submit results of test set You need to predict answers for all questions of the test set and write them in a text file **in order**, one per line. Here is an example: ``` Tron: Legacy Palm Beach County 1937-03-01 The Queen ... ``` Then you need to send the prediction file to us by email <[email protected]>, and we will reply to you with the performance as soon as possible. To appear in the leaderboard, you need to also provide the following information: - model name - affiliation - open-ended or multiple-choice - whether your model uses the supervision of SPARQL or not - whether your model uses the supervision of program or not - single model or ensemble model - (optional) paper link - (optional) code link ### Licensing Information MIT License ### Citation Information If you find our dataset helpful in your work, please cite us by ``` @inproceedings{KQAPro, title={{KQA P}ro: A Large Diagnostic Dataset for Complex Question Answering over Knowledge Base}, author={Cao, Shulin and Shi, Jiaxin and Pan, Liangming and Nie, Lunyiu and Xiang, Yutong and Hou, Lei and Li, Juanzi and He, Bin and Zhang, Hanwang}, booktitle={ACL'22}, year={2022} } ``` ### Contributions Thanks to [@happen2me](https://github.com/happen2me) for adding this dataset.
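For readers who want to poke at the data quickly, here is a minimal loading sketch. It assumes only the `datasets` library plus the config and field names documented above; it is illustrative, not part of the official baselines.

```python
from datasets import load_dataset

# Load the train/val config described in "Data Configs" above.
kqa = load_dataset("drt/kqa_pro", "train_val")
example = kqa["train"][0]

print(example["question"])
print(example["answer"])

# Each program step names a function, its textual inputs, and the indices
# of the earlier steps whose outputs it consumes.
for i, step in enumerate(example["program"]):
    print(i, step["function"], step["inputs"], "<-", step["dependencies"])
```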
drt/kqa_pro
[ "task_categories:question-answering", "task_ids:open-domain-qa", "annotations_creators:machine-generated", "annotations_creators:expert-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:original", "language:en", "license:mit", "knowledge graph", "freebase", "arxiv:2007.03875", "region:us" ]
2022-10-20T17:12:48+00:00
{"annotations_creators": ["machine-generated", "expert-generated"], "language_creators": ["found"], "language": ["en"], "license": ["mit"], "multilinguality": ["monolingual"], "size_categories": ["10K<n<100K"], "source_datasets": ["original"], "task_categories": ["question-answering"], "task_ids": ["open-domain-qa"], "pretty_name": "KQA-Pro", "tags": ["knowledge graph", "freebase"]}
2022-10-20T18:35:20+00:00
[ "2007.03875" ]
[ "en" ]
TAGS #task_categories-question-answering #task_ids-open-domain-qa #annotations_creators-machine-generated #annotations_creators-expert-generated #language_creators-found #multilinguality-monolingual #size_categories-10K<n<100K #source_datasets-original #language-English #license-mit #knowledge graph #freebase #arxiv-2007.03875 #region-us
# Dataset Card for KQA Pro ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Configs - Data Splits - Additional Information - How to run SPARQLs and programs - Knowledge Graph File - How to Submit to Leaderboard - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: URL - Repository: URL - Paper: KQA Pro: A Dataset with Explicit Compositional Programs for Complex Question Answering over Knowledge Base - Leaderboard: URL - Point of Contact: shijx12 at gmail dot com ### Dataset Summary KQA Pro is a large-scale dataset of complex question answering over knowledge base. The questions are very diverse and challenging, requiring multiple reasoning capabilities including compositional reasoning, multi-hop reasoning, quantitative comparison, set operations, and etc. Strong supervisions of SPARQL and program are provided for each question. ### Supported Tasks and Leaderboards It supports knowlege graph based question answering. Specifically, it provides SPARQL and *program* for each question. ### Languages English ## Dataset Structure URL URL ### Data Configs This dataset has two configs: 'train_val' and 'test' because they have different available fields. Please specify this like 'load_dataset('drt/kqa_pro', 'train_val')'. ### Data Splits train, val, test ## Additional Information ### Knowledge Graph File You can find the knowledge graph file 'URL' in the original github repository. It comes with the format: ### How to run SPARQLs and programs We implement multiple baselines in our codebase, which includes a supervised SPARQL parser and program parser. In the SPARQL parser, we implement a query engine based on Virtuoso. You can install the engine based on our instructions, and then feed your predicted SPARQL to get the answer. In the program parser, we implement a rule-based program executor, which receives a predicted program and returns the answer. Detailed introductions of our functions can be found in our paper. ### How to submit results of test set You need to predict answers for all questions of test set and write them in a text file in order, one per line. Here is an example: Then you need to send the prediction file to us by email <caosl19@URL>, we will reply to you with the performance as soon as possible. To appear in the learderboard, you need to also provide following information: - model name - affiliation - open-ended or multiple-choice - whether use the supervision of SPARQL in your model or not - whether use the supervision of program in your model or not - single model or ensemble model - (optional) paper link - (optional) code link ### Licensing Information MIT License If you find our dataset is helpful in your work, please cite us by ### Contributions Thanks to @happen2me for adding this dataset.
[ "# Dataset Card for KQA Pro", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Configs\n - Data Splits\n- Additional Information\n - How to run SPARQLs and programs\n - Knowledge Graph File\n - How to Submit to Leaderboard\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: URL\n- Repository: URL\n- Paper: KQA Pro: A Dataset with Explicit Compositional Programs for Complex Question Answering over Knowledge Base\n- Leaderboard: URL\n- Point of Contact: shijx12 at gmail dot com", "### Dataset Summary\n\nKQA Pro is a large-scale dataset of complex question answering over knowledge base. The questions are very diverse and challenging, requiring multiple reasoning capabilities including compositional reasoning, multi-hop reasoning, quantitative comparison, set operations, and etc. Strong supervisions of SPARQL and program are provided for each question.", "### Supported Tasks and Leaderboards\n\nIt supports knowlege graph based question answering. Specifically, it provides SPARQL and *program* for each question.", "### Languages\n\nEnglish", "## Dataset Structure\n\nURL\n\n\nURL", "### Data Configs\n\nThis dataset has two configs: 'train_val' and 'test' because they have different available fields. Please specify this like 'load_dataset('drt/kqa_pro', 'train_val')'.", "### Data Splits\n\ntrain, val, test", "## Additional Information", "### Knowledge Graph File\n\nYou can find the knowledge graph file 'URL' in the original github repository. It comes with the format:", "### How to run SPARQLs and programs\n\nWe implement multiple baselines in our codebase, which includes a supervised SPARQL parser and program parser.\n\nIn the SPARQL parser, we implement a query engine based on Virtuoso.\nYou can install the engine based on our instructions, and then feed your predicted SPARQL to get the answer.\n\nIn the program parser, we implement a rule-based program executor, which receives a predicted program and returns the answer.\nDetailed introductions of our functions can be found in our paper.", "### How to submit results of test set\nYou need to predict answers for all questions of test set and write them in a text file in order, one per line.\nHere is an example:\n\n\nThen you need to send the prediction file to us by email <caosl19@URL>, we will reply to you with the performance as soon as possible.\nTo appear in the learderboard, you need to also provide following information:\n\n- model name\n- affiliation\n- open-ended or multiple-choice\n- whether use the supervision of SPARQL in your model or not\n- whether use the supervision of program in your model or not\n- single model or ensemble model\n- (optional) paper link\n- (optional) code link", "### Licensing Information\n\nMIT License\n\n\n\nIf you find our dataset is helpful in your work, please cite us by", "### Contributions\n\nThanks to @happen2me for adding this dataset." ]
[ "TAGS\n#task_categories-question-answering #task_ids-open-domain-qa #annotations_creators-machine-generated #annotations_creators-expert-generated #language_creators-found #multilinguality-monolingual #size_categories-10K<n<100K #source_datasets-original #language-English #license-mit #knowledge graph #freebase #arxiv-2007.03875 #region-us \n", "# Dataset Card for KQA Pro", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Configs\n - Data Splits\n- Additional Information\n - How to run SPARQLs and programs\n - Knowledge Graph File\n - How to Submit to Leaderboard\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: URL\n- Repository: URL\n- Paper: KQA Pro: A Dataset with Explicit Compositional Programs for Complex Question Answering over Knowledge Base\n- Leaderboard: URL\n- Point of Contact: shijx12 at gmail dot com", "### Dataset Summary\n\nKQA Pro is a large-scale dataset of complex question answering over knowledge base. The questions are very diverse and challenging, requiring multiple reasoning capabilities including compositional reasoning, multi-hop reasoning, quantitative comparison, set operations, and etc. Strong supervisions of SPARQL and program are provided for each question.", "### Supported Tasks and Leaderboards\n\nIt supports knowlege graph based question answering. Specifically, it provides SPARQL and *program* for each question.", "### Languages\n\nEnglish", "## Dataset Structure\n\nURL\n\n\nURL", "### Data Configs\n\nThis dataset has two configs: 'train_val' and 'test' because they have different available fields. Please specify this like 'load_dataset('drt/kqa_pro', 'train_val')'.", "### Data Splits\n\ntrain, val, test", "## Additional Information", "### Knowledge Graph File\n\nYou can find the knowledge graph file 'URL' in the original github repository. It comes with the format:", "### How to run SPARQLs and programs\n\nWe implement multiple baselines in our codebase, which includes a supervised SPARQL parser and program parser.\n\nIn the SPARQL parser, we implement a query engine based on Virtuoso.\nYou can install the engine based on our instructions, and then feed your predicted SPARQL to get the answer.\n\nIn the program parser, we implement a rule-based program executor, which receives a predicted program and returns the answer.\nDetailed introductions of our functions can be found in our paper.", "### How to submit results of test set\nYou need to predict answers for all questions of test set and write them in a text file in order, one per line.\nHere is an example:\n\n\nThen you need to send the prediction file to us by email <caosl19@URL>, we will reply to you with the performance as soon as possible.\nTo appear in the learderboard, you need to also provide following information:\n\n- model name\n- affiliation\n- open-ended or multiple-choice\n- whether use the supervision of SPARQL in your model or not\n- whether use the supervision of program in your model or not\n- single model or ensemble model\n- (optional) paper link\n- (optional) code link", "### Licensing Information\n\nMIT License\n\n\n\nIf you find our dataset is helpful in your work, please cite us by", "### Contributions\n\nThanks to @happen2me for adding this dataset." ]
dc6044224ca6348df633d07d3079ae8795333de1
```python from datasets import load_dataset data_name = "anhdungitvn/sccr" data_files = {"train": "train.tsv", "eval": "eval.tsv"} sccr = load_dataset(data_name, data_files=data_files) sccr ``` ```python DatasetDict({ train: Dataset({ features: ['text', 'labels'], num_rows: 14478 }) eval: Dataset({ features: ['text', 'labels'], num_rows: 1609 }) }) ``` ### References - <a href="https://www.aivivn.com/contests/6">SC: Sentiment Classification (Phân loại sắc thái bình luận)</a>
anhdungitvn/sccr
[ "license:apache-2.0", "region:us" ]
2022-10-21T02:27:59+00:00
{"license": "apache-2.0"}
2022-10-21T02:39:41+00:00
[]
[]
TAGS #license-apache-2.0 #region-us
### References - <a href="URL">SC: Sentiment Classification (Phân loại sắc thái bình luận)</a>
[ "### References\n - <a href=\"URL Sentiment Classification (Phân loại sắc thái bình luận)</a>" ]
[ "TAGS\n#license-apache-2.0 #region-us \n", "### References\n - <a href=\"URL Sentiment Classification (Phân loại sắc thái bình luận)</a>" ]
962f9d70b3fcfd790d3f512d857ec8fa0547fd16
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-1.3b_eval * Dataset: SpaceDoge/dataset_test_1 * Config: SpaceDoge--dataset_test_1 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@SpaceDoge](https://huggingface.co/SpaceDoge) for evaluating this model.
autoevaluate/autoeval-eval-SpaceDoge__dataset_test_1-SpaceDoge__dataset_test_1-a8c4b7-1826662823
[ "autotrain", "evaluation", "region:us" ]
2022-10-21T02:36:22+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["SpaceDoge/dataset_test_1"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-1.3b_eval", "metrics": [], "dataset_name": "SpaceDoge/dataset_test_1", "dataset_config": "SpaceDoge--dataset_test_1", "dataset_split": "test", "col_mapping": {"text": "prompt", "classes": "classes", "target": "answer_index"}}}
2022-10-21T02:39:36+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-1.3b_eval * Dataset: SpaceDoge/dataset_test_1 * Config: SpaceDoge--dataset_test_1 * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @SpaceDoge for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-1.3b_eval\n* Dataset: SpaceDoge/dataset_test_1\n* Config: SpaceDoge--dataset_test_1\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @SpaceDoge for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-1.3b_eval\n* Dataset: SpaceDoge/dataset_test_1\n* Config: SpaceDoge--dataset_test_1\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @SpaceDoge for evaluating this model." ]
0a3222fdc8e5964048ffe5c1476f791863b42169
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-350m_eval * Dataset: SpaceDoge/dataset_test_1 * Config: SpaceDoge--dataset_test_1 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@SpaceDoge](https://huggingface.co/SpaceDoge) for evaluating this model.
autoevaluate/autoeval-eval-SpaceDoge__dataset_test_1-SpaceDoge__dataset_test_1-a8c4b7-1826662822
[ "autotrain", "evaluation", "region:us" ]
2022-10-21T02:36:24+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["SpaceDoge/dataset_test_1"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-350m_eval", "metrics": [], "dataset_name": "SpaceDoge/dataset_test_1", "dataset_config": "SpaceDoge--dataset_test_1", "dataset_split": "test", "col_mapping": {"text": "prompt", "classes": "classes", "target": "answer_index"}}}
2022-10-21T02:37:58+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-350m_eval * Dataset: SpaceDoge/dataset_test_1 * Config: SpaceDoge--dataset_test_1 * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @SpaceDoge for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-350m_eval\n* Dataset: SpaceDoge/dataset_test_1\n* Config: SpaceDoge--dataset_test_1\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @SpaceDoge for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-350m_eval\n* Dataset: SpaceDoge/dataset_test_1\n* Config: SpaceDoge--dataset_test_1\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @SpaceDoge for evaluating this model." ]
6645a4a439b651250e7aec5e5678fa0bf04e693a
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-2.7b_eval * Dataset: SpaceDoge/dataset_test_1 * Config: SpaceDoge--dataset_test_1 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@SpaceDoge](https://huggingface.co/SpaceDoge) for evaluating this model.
autoevaluate/autoeval-eval-SpaceDoge__dataset_test_1-SpaceDoge__dataset_test_1-a8c4b7-1826662824
[ "autotrain", "evaluation", "region:us" ]
2022-10-21T02:36:27+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["SpaceDoge/dataset_test_1"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-2.7b_eval", "metrics": [], "dataset_name": "SpaceDoge/dataset_test_1", "dataset_config": "SpaceDoge--dataset_test_1", "dataset_split": "test", "col_mapping": {"text": "prompt", "classes": "classes", "target": "answer_index"}}}
2022-10-21T02:41:41+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-2.7b_eval * Dataset: SpaceDoge/dataset_test_1 * Config: SpaceDoge--dataset_test_1 * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @SpaceDoge for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-2.7b_eval\n* Dataset: SpaceDoge/dataset_test_1\n* Config: SpaceDoge--dataset_test_1\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @SpaceDoge for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-2.7b_eval\n* Dataset: SpaceDoge/dataset_test_1\n* Config: SpaceDoge--dataset_test_1\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @SpaceDoge for evaluating this model." ]
cf94a914f6428bf55eb50afe92de3460dcdecfb1
This is dummy data

license: unknown
---
multilinguality:
- monolingual
bizjay/DataTest
[ "region:us" ]
2022-10-21T03:21:17+00:00
{}
2022-10-28T09:43:44+00:00
[]
[]
TAGS #region-us
This is dummy data

license: unknown
---
multilinguality:
- monolingual
[]
[ "TAGS\n#region-us \n" ]
18112c5f65fe4c2593104cbc0850e2a7737cc41f
# OSCAR dataset, Korean only
lcw99/oscar-ko-only
[ "language:ko", "region:us" ]
2022-10-21T04:38:36+00:00
{"language": ["ko"]}
2022-10-21T04:52:05+00:00
[]
[ "ko" ]
TAGS #language-Korean #region-us
# OSCAR dataset, Korean only
[ "# oscar dataset only korean" ]
[ "TAGS\n#language-Korean #region-us \n", "# oscar dataset only korean" ]
56ede88fa531e775aa97d6f958c501207ceace7b
# cc100 dataset Korean only
lcw99/cc100-ko-only
[ "language:ko", "region:us" ]
2022-10-21T05:05:16+00:00
{"language": ["ko"]}
2022-10-21T06:23:11+00:00
[]
[ "ko" ]
TAGS #language-Korean #region-us
# cc100 dataset Korean only
[ "# cc100 dataset Korean only" ]
[ "TAGS\n#language-Korean #region-us \n", "# cc100 dataset Korean only" ]
9ee08c272b9686659e1faa515e73f2c3e0233f04
# Dataset Card for [Gitcoin ODS Hackathon GR15] ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Dataset Creation](#dataset-creation) - [Source Data](#source-data) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Additional Information](#additional-information) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** https://gitcoin.co/issue/29389 - **Repository:** https://github.com/poupou-web3/GC-ODS-Sybil - **Point of Contact:** https://discord.com/channels/562828676480237578/1024788324826763284 ### Dataset Summary This data set was created in the context of the first [Gitcoin Open Data Science Hackathon](https://go.gitcoin.co/blog/open-data-science-hackathon). It contains all the transactions on the Ethereum and Polygon chains of the wallets that contributed to Grant 15 of the Gitcoin grants program. It was created in order to find patterns in the transactions of potential Sybil attackers by exploring their on-chain activity. ## Dataset Creation ### Source Data The wallet addresses from grant 15 were extracted from the data put together by the Gitcoin DAO. [GR_15_DATA](https://drive.google.com/drive/folders/17OdrV7SA0I56aDMwqxB6jMwoY3tjSf5w) The data was produced using the [Etherscan API](https://etherscan.io/) and [PolygonScan API](https://polygonscan.com/) and using scripts available later at [repo](https://github.com/poupou-web3/GC-ODS-Sybil). An address contributing to the [GR_15_DATA](https://drive.google.com/drive/folders/17OdrV7SA0I56aDMwqxB6jMwoY3tjSf5w) with no found transaction on a chain will not appear in the data gathered. **Careful: the transaction data only contains "normal" transactions as described by the API provider.** ## Dataset Structure ### Data Instances There are 4 CSV files. - 2 for transactions: one for the Ethereum transactions and one for the Polygon transactions. - 2 for features: one for the Ethereum transactions and one for the Polygon transactions. ### Data Fields As provided by the [Etherscan API](https://etherscan.io/) and [PolygonScan API](https://polygonscan.com/). An `address` column was added for easier manipulation and to have all the transactions of all addresses in the same file. It is an unsupervised machine-learning task; there is no target column. Most of the extracted features have been extracted using [tsfresh](https://tsfresh.readthedocs.io/en/latest/). The code is available in the GitHub [repo](https://github.com/poupou-web3/GC-ODS-Sybil). It allows reproducing the extraction from the 2 transactions CSVs. Column names are named by tsfresh; each feature can be found in the documentation for more detailed definitions. Following are the descriptions for features not explained by tsfresh: - countUniqueInteracted : Count the number of unique addresses with which the wallet address has interacted. - countTx: The total number of transactions - ratioUniqueInteracted : countUniqueInteracted / countTx - outgoing: Number of outgoing transactions - outgoingRatio : outgoing / countTx ## Considerations for Using the Data ### Social Impact of Dataset The creation of the data set may help in fraud detection and defence in public goods funding.
## Additional Information ### Licensing Information MIT ### Citation Information Please cite this data set if you use it, especially in the hackathon context. ### Contributions Thanks to [@poupou-web3](https://github.com/poupou-web3) for adding this dataset.
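As a rough illustration of how the hand-defined features above could be derived from one of the transaction CSVs, here is a pandas sketch. It is not the repository's implementation: the file name is hypothetical, and the `address`, `from`, and `to` column names are assumed from the Etherscan-style schema described in Data Fields.

```python
import pandas as pd

# Hypothetical file name; the actual CSVs live in the dataset repository.
tx = pd.read_csv("ethereum_transactions.csv")

def wallet_features(group: pd.DataFrame) -> pd.Series:
    wallet = group.name          # the grouped-by wallet address
    count_tx = len(group)        # countTx: total number of transactions
    # Counterparty = the "other" side of each transaction (assumes consistent
    # lower-case address casing, as Etherscan-style APIs typically return).
    counterparties = group["to"].where(group["from"] == wallet, group["from"])
    count_unique = counterparties.nunique()
    outgoing = int((group["from"] == wallet).sum())
    return pd.Series({
        "countTx": count_tx,
        "countUniqueInteracted": count_unique,
        "ratioUniqueInteracted": count_unique / count_tx,
        "outgoing": outgoing,
        "outgoingRatio": outgoing / count_tx,
    })

features = tx.groupby("address").apply(wallet_features)
```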
Poupou/Gitcoin-ODS-Hackhaton-GR15
[ "task_categories:feature-extraction", "annotations_creators:no-annotation", "language_creators:expert-generated", "multilinguality:monolingual", "size_categories:1M<n<10M", "source_datasets:original", "language:en", "license:mit", "Gitcoin", "Gitcoin Grants", "Sybil", "Sybil Slayers", "FDD", "Web3", "Public Goods", "Fraud Detection", "DAO", "Ethereum", "Polygon", "region:us" ]
2022-10-21T08:07:17+00:00
{"annotations_creators": ["no-annotation"], "language_creators": ["expert-generated"], "language": ["en"], "license": ["mit"], "multilinguality": ["monolingual"], "size_categories": ["1M<n<10M"], "source_datasets": ["original"], "task_categories": ["feature-extraction"], "task_ids": [], "pretty_name": "Gitcoin FDD Open Data Science Hackathon GR15", "tags": ["Gitcoin", "Gitcoin Grants", "Sybil", "Sybil Slayers", "FDD", "Web3", "Public Goods", "Fraud Detection", "DAO", "Ethereum", "Polygon"]}
2022-10-30T14:56:15+00:00
[]
[ "en" ]
TAGS #task_categories-feature-extraction #annotations_creators-no-annotation #language_creators-expert-generated #multilinguality-monolingual #size_categories-1M<n<10M #source_datasets-original #language-English #license-mit #Gitcoin #Gitcoin Grants #Sybil #Sybil Slayers #FDD #Web3 #Public Goods #Fraud Detection #DAO #Ethereum #Polygon #region-us
# Dataset Card for [Gitcoin ODS Hackathon GR15] ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Dataset Creation - Source Data - Dataset Structure - Data Instances - Data Fields - Considerations for Using the Data - Social Impact of Dataset - Additional Information - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: URL - Repository: URL - Point of Contact: URL ### Dataset Summary This data set was created in the context of the first Gitcoin Open Data Science Hackathon. It contains all the transactions on the Ethereum and Polygon chains of the wallet that contributed to the Grant 15 of Gitcoin grants program. It was created in order to find patterns in the transactions of potential Sybil attackers by exploring their on-chain activity. ## Dataset Creation ### Source Data The wallet address from grant 15 was extracted from the data put together by the Gitcoin DAO. GR_15_DATA The data was produced using Etherscan API and PolygonScan API and using scripts available later at repo. An address contributing to the GR_15_DATA with no found transaction on a chain will not appear in the data gathered. Careful the transaction data only contains "normal" transactions as described by the API provider. ## Dataset Structure ### Data Instances There are 4 CSV files. - 2 for transactions: one for the Ethereum transactions and one for the Polygon transactions. - 2 for features: one for the Ethereum transactions and one for the Polygon transactions. ### Data Fields As provided by the Etherscan API and PolygonScan API. A column address was added for easier manipulation and to have all the transactions of all addresses in the same file. It is an unsupervised machine-learning task, there is no target column. Most of the extracted features have been extracted using tsfresh. The code is available in the GitHub repo. It allows reproducing the extraction from the 2 transactions CSV. Column names are named by tsfresh, each feature can be found in the documentation for more detailed definitions. Following are the descriptions for features not explained in by tsfresh: - countUniqueInteracted : Count the number of unique addresses with which the wallet address has interacted. - countTx: The total number of transactions - ratioUniqueInteracted : countUniqueInteracted / countTx - outgoing: Number of outgoing transactions - outgoingRatio : outgoing / countTx ## Considerations for Using the Data ### Social Impact of Dataset The creation of the data set may help in fraud detection and defence in public goods funding. ## Additional Information ### Licensing Information MIT Please cite this data set if you use it, especially in the hackathon context. ### Contributions Thanks to @poupou-web3 for adding this dataset.
[ "# Dataset Card for [Gitcoin ODS Hackathon GR15]", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n- Dataset Creation\n - Source Data\n- Dataset Structure\n - Data Instances\n - Data Fields\n- Considerations for Using the Data\n - Social Impact of Dataset\n- Additional Information\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: URL\n- Repository: URL\n- Point of Contact: URL", "### Dataset Summary\n\nThis data set was created in the context of the first Gitcoin Open Data Science Hackathon.\nIt contains all the transactions on the Ethereum and Polygon chains of the wallet that contributed to the Grant 15 of Gitcoin grants program.\nIt was created in order to find patterns in the transactions of potential Sybil attackers by exploring their on-chain activity.", "## Dataset Creation", "### Source Data\n\nThe wallet address from grant 15 was extracted from the data put together by the Gitcoin DAO. GR_15_DATA\n\nThe data was produced using Etherscan API and PolygonScan API and using scripts available later at repo.\n\nAn address contributing to the GR_15_DATA with no found transaction on a chain will not appear in the data gathered.\n\n Careful the transaction data only contains \"normal\" transactions as described by the API provider.", "## Dataset Structure", "### Data Instances\n\nThere are 4 CSV files.\n- 2 for transactions: one for the Ethereum transactions and one for the Polygon transactions.\n- 2 for features: one for the Ethereum transactions and one for the Polygon transactions.", "### Data Fields\n\nAs provided by the Etherscan API and PolygonScan API.\nA column address was added for easier manipulation and to have all the transactions of all addresses in the same file.\n\nIt is an unsupervised machine-learning task, there is no target column.\n\nMost of the extracted features have been extracted using tsfresh. The code is available in the GitHub repo. It allows reproducing the extraction from the 2 transactions CSV. Column names are named by tsfresh, each feature can be found in the documentation for more detailed definitions. Following are the descriptions for features not explained in by tsfresh:\n- countUniqueInteracted : Count the number of unique addresses with which the wallet address has interacted.\n- countTx: The total number of transactions\n- ratioUniqueInteracted : countUniqueInteracted / countTx\n- outgoing: Number of outgoing transactions\n- outgoingRatio : outgoing / countTx", "## Considerations for Using the Data", "### Social Impact of Dataset\n\nThe creation of the data set may help in fraud detection and defence in public goods funding.", "## Additional Information", "### Licensing Information\n\nMIT\n\n\n\nPlease cite this data set if you use it, especially in the hackathon context.", "### Contributions\n\nThanks to @poupou-web3 for adding this dataset." ]
[ "TAGS\n#task_categories-feature-extraction #annotations_creators-no-annotation #language_creators-expert-generated #multilinguality-monolingual #size_categories-1M<n<10M #source_datasets-original #language-English #license-mit #Gitcoin #Gitcoin Grants #Sybil #Sybil Slayers #FDD #Web3 #Public Goods #Fraud Detection #DAO #Ethereum #Polygon #region-us \n", "# Dataset Card for [Gitcoin ODS Hackathon GR15]", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n- Dataset Creation\n - Source Data\n- Dataset Structure\n - Data Instances\n - Data Fields\n- Considerations for Using the Data\n - Social Impact of Dataset\n- Additional Information\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage: URL\n- Repository: URL\n- Point of Contact: URL", "### Dataset Summary\n\nThis data set was created in the context of the first Gitcoin Open Data Science Hackathon.\nIt contains all the transactions on the Ethereum and Polygon chains of the wallet that contributed to the Grant 15 of Gitcoin grants program.\nIt was created in order to find patterns in the transactions of potential Sybil attackers by exploring their on-chain activity.", "## Dataset Creation", "### Source Data\n\nThe wallet address from grant 15 was extracted from the data put together by the Gitcoin DAO. GR_15_DATA\n\nThe data was produced using Etherscan API and PolygonScan API and using scripts available later at repo.\n\nAn address contributing to the GR_15_DATA with no found transaction on a chain will not appear in the data gathered.\n\n Careful the transaction data only contains \"normal\" transactions as described by the API provider.", "## Dataset Structure", "### Data Instances\n\nThere are 4 CSV files.\n- 2 for transactions: one for the Ethereum transactions and one for the Polygon transactions.\n- 2 for features: one for the Ethereum transactions and one for the Polygon transactions.", "### Data Fields\n\nAs provided by the Etherscan API and PolygonScan API.\nA column address was added for easier manipulation and to have all the transactions of all addresses in the same file.\n\nIt is an unsupervised machine-learning task, there is no target column.\n\nMost of the extracted features have been extracted using tsfresh. The code is available in the GitHub repo. It allows reproducing the extraction from the 2 transactions CSV. Column names are named by tsfresh, each feature can be found in the documentation for more detailed definitions. Following are the descriptions for features not explained in by tsfresh:\n- countUniqueInteracted : Count the number of unique addresses with which the wallet address has interacted.\n- countTx: The total number of transactions\n- ratioUniqueInteracted : countUniqueInteracted / countTx\n- outgoing: Number of outgoing transactions\n- outgoingRatio : outgoing / countTx", "## Considerations for Using the Data", "### Social Impact of Dataset\n\nThe creation of the data set may help in fraud detection and defence in public goods funding.", "## Additional Information", "### Licensing Information\n\nMIT\n\n\n\nPlease cite this data set if you use it, especially in the hackathon context.", "### Contributions\n\nThanks to @poupou-web3 for adding this dataset." ]
3c84296545ff027b36f6d99d921aeb4b48e9ceb1
# Dataset Card for "relbert/semeval2012_relational_similarity_v5" ## Dataset Description - **Repository:** [RelBERT](https://github.com/asahi417/relbert) - **Paper:** [https://aclanthology.org/S12-1047/](https://aclanthology.org/S12-1047/) - **Dataset:** SemEval2012: Relational Similarity ### Dataset Summary ***IMPORTANT***: This is the same dataset as [relbert/semeval2012_relational_similarity](https://huggingface.co/datasets/relbert/semeval2012_relational_similarity), but with a different dataset construction. Relational similarity dataset from [SemEval2012 task 2](https://aclanthology.org/S12-1047/), compiled to fine-tune [RelBERT](https://github.com/asahi417/relbert) model. The dataset contains a list of positive and negative word pair from 89 pre-defined relations. The relation types are constructed on top of following 10 parent relation types. ```shell { 1: "Class Inclusion", # Hypernym 2: "Part-Whole", # Meronym, Substance Meronym 3: "Similar", # Synonym, Co-hypornym 4: "Contrast", # Antonym 5: "Attribute", # Attribute, Event 6: "Non Attribute", 7: "Case Relation", 8: "Cause-Purpose", 9: "Space-Time", 10: "Representation" } ``` Each of the parent relation is further grouped into child relation types where the definition can be found [here](https://drive.google.com/file/d/0BzcZKTSeYL8VenY0QkVpZVpxYnc/view?resourcekey=0-ZP-UARfJj39PcLroibHPHw). ## Dataset Structure ### Data Instances An example of `train` looks as follows. ``` { 'relation_type': '8d', 'positives': [ [ "breathe", "live" ], [ "study", "learn" ], [ "speak", "communicate" ], ... ] 'negatives': [ [ "starving", "hungry" ], [ "clean", "bathe" ], [ "hungry", "starving" ], ... ] } ``` ### Data Splits | name |train|validation| |---------|----:|---------:| |semeval2012_relational_similarity| 89 | 89| ### Number of Positive/Negative Word-pairs in each Split | | positives | negatives | |:------------------------------------------|------------:|------------:| | ('1', 'parent', 'train') | 110 | 680 | | ('10', 'parent', 'train') | 60 | 730 | | ('10a', 'child', 'train') | 10 | 1655 | | ('10a', 'child_prototypical', 'train') | 123 | 2438 | | ('10b', 'child', 'train') | 10 | 1656 | | ('10b', 'child_prototypical', 'train') | 117 | 2027 | | ('10c', 'child', 'train') | 10 | 1658 | | ('10c', 'child_prototypical', 'train') | 105 | 2030 | | ('10d', 'child', 'train') | 10 | 1659 | | ('10d', 'child_prototypical', 'train') | 99 | 1766 | | ('10e', 'child', 'train') | 10 | 1661 | | ('10e', 'child_prototypical', 'train') | 87 | 1118 | | ('10f', 'child', 'train') | 10 | 1659 | | ('10f', 'child_prototypical', 'train') | 99 | 1766 | | ('1a', 'child', 'train') | 10 | 1655 | | ('1a', 'child_prototypical', 'train') | 123 | 2192 | | ('1b', 'child', 'train') | 10 | 1655 | | ('1b', 'child_prototypical', 'train') | 123 | 2192 | | ('1c', 'child', 'train') | 10 | 1658 | | ('1c', 'child_prototypical', 'train') | 105 | 2030 | | ('1d', 'child', 'train') | 10 | 1653 | | ('1d', 'child_prototypical', 'train') | 135 | 2540 | | ('1e', 'child', 'train') | 10 | 1661 | | ('1e', 'child_prototypical', 'train') | 87 | 1031 | | ('2', 'parent', 'train') | 100 | 690 | | ('2a', 'child', 'train') | 10 | 1654 | | ('2a', 'child_prototypical', 'train') | 129 | 2621 | | ('2b', 'child', 'train') | 10 | 1658 | | ('2b', 'child_prototypical', 'train') | 105 | 1610 | | ('2c', 'child', 'train') | 10 | 1656 | | ('2c', 'child_prototypical', 'train') | 117 | 2144 | | ('2d', 'child', 'train') | 10 | 1659 | | ('2d', 'child_prototypical', 'train') | 99 | 1667 | | ('2e', 'child', 
'train') | 10 | 1658 | | ('2e', 'child_prototypical', 'train') | 105 | 1925 | | ('2f', 'child', 'train') | 10 | 1658 | | ('2f', 'child_prototypical', 'train') | 105 | 2240 | | ('2g', 'child', 'train') | 10 | 1653 | | ('2g', 'child_prototypical', 'train') | 135 | 2405 | | ('2h', 'child', 'train') | 10 | 1658 | | ('2h', 'child_prototypical', 'train') | 105 | 1925 | | ('2i', 'child', 'train') | 10 | 1660 | | ('2i', 'child_prototypical', 'train') | 93 | 1706 | | ('2j', 'child', 'train') | 10 | 1659 | | ('2j', 'child_prototypical', 'train') | 99 | 1964 | | ('3', 'parent', 'train') | 80 | 710 | | ('3a', 'child', 'train') | 10 | 1658 | | ('3a', 'child_prototypical', 'train') | 105 | 1925 | | ('3b', 'child', 'train') | 10 | 1658 | | ('3b', 'child_prototypical', 'train') | 105 | 2240 | | ('3c', 'child', 'train') | 10 | 1657 | | ('3c', 'child_prototypical', 'train') | 111 | 1979 | | ('3d', 'child', 'train') | 10 | 1655 | | ('3d', 'child_prototypical', 'train') | 123 | 2315 | | ('3e', 'child', 'train') | 10 | 1664 | | ('3e', 'child_prototypical', 'train') | 69 | 1268 | | ('3f', 'child', 'train') | 10 | 1658 | | ('3f', 'child_prototypical', 'train') | 105 | 2345 | | ('3g', 'child', 'train') | 10 | 1663 | | ('3g', 'child_prototypical', 'train') | 75 | 1340 | | ('3h', 'child', 'train') | 10 | 1659 | | ('3h', 'child_prototypical', 'train') | 99 | 1964 | | ('4', 'parent', 'train') | 80 | 710 | | ('4a', 'child', 'train') | 10 | 1658 | | ('4a', 'child_prototypical', 'train') | 105 | 2240 | | ('4b', 'child', 'train') | 10 | 1662 | | ('4b', 'child_prototypical', 'train') | 81 | 1163 | | ('4c', 'child', 'train') | 10 | 1657 | | ('4c', 'child_prototypical', 'train') | 111 | 2201 | | ('4d', 'child', 'train') | 10 | 1665 | | ('4d', 'child_prototypical', 'train') | 63 | 749 | | ('4e', 'child', 'train') | 10 | 1657 | | ('4e', 'child_prototypical', 'train') | 111 | 2423 | | ('4f', 'child', 'train') | 10 | 1660 | | ('4f', 'child_prototypical', 'train') | 93 | 1892 | | ('4g', 'child', 'train') | 10 | 1654 | | ('4g', 'child_prototypical', 'train') | 129 | 2492 | | ('4h', 'child', 'train') | 10 | 1657 | | ('4h', 'child_prototypical', 'train') | 111 | 2312 | | ('5', 'parent', 'train') | 90 | 700 | | ('5a', 'child', 'train') | 10 | 1655 | | ('5a', 'child_prototypical', 'train') | 123 | 2315 | | ('5b', 'child', 'train') | 10 | 1661 | | ('5b', 'child_prototypical', 'train') | 87 | 1640 | | ('5c', 'child', 'train') | 10 | 1658 | | ('5c', 'child_prototypical', 'train') | 105 | 1925 | | ('5d', 'child', 'train') | 10 | 1654 | | ('5d', 'child_prototypical', 'train') | 129 | 2363 | | ('5e', 'child', 'train') | 10 | 1661 | | ('5e', 'child_prototypical', 'train') | 87 | 1640 | | ('5f', 'child', 'train') | 10 | 1658 | | ('5f', 'child_prototypical', 'train') | 105 | 2135 | | ('5g', 'child', 'train') | 10 | 1660 | | ('5g', 'child_prototypical', 'train') | 93 | 1892 | | ('5h', 'child', 'train') | 10 | 1654 | | ('5h', 'child_prototypical', 'train') | 129 | 2750 | | ('5i', 'child', 'train') | 10 | 1655 | | ('5i', 'child_prototypical', 'train') | 123 | 2561 | | ('6', 'parent', 'train') | 80 | 710 | | ('6a', 'child', 'train') | 10 | 1654 | | ('6a', 'child_prototypical', 'train') | 129 | 2492 | | ('6b', 'child', 'train') | 10 | 1658 | | ('6b', 'child_prototypical', 'train') | 105 | 2135 | | ('6c', 'child', 'train') | 10 | 1656 | | ('6c', 'child_prototypical', 'train') | 117 | 2495 | | ('6d', 'child', 'train') | 10 | 1659 | | ('6d', 'child_prototypical', 'train') | 99 | 2261 | | ('6e', 'child', 'train') | 10 | 1658 | | ('6e', 
'child_prototypical', 'train') | 105 | 2135 | | ('6f', 'child', 'train') | 10 | 1657 | | ('6f', 'child_prototypical', 'train') | 111 | 2090 | | ('6g', 'child', 'train') | 10 | 1657 | | ('6g', 'child_prototypical', 'train') | 111 | 1979 | | ('6h', 'child', 'train') | 10 | 1654 | | ('6h', 'child_prototypical', 'train') | 129 | 2621 | | ('7', 'parent', 'train') | 80 | 710 | | ('7a', 'child', 'train') | 10 | 1655 | | ('7a', 'child_prototypical', 'train') | 123 | 2561 | | ('7b', 'child', 'train') | 10 | 1662 | | ('7b', 'child_prototypical', 'train') | 81 | 1082 | | ('7c', 'child', 'train') | 10 | 1658 | | ('7c', 'child_prototypical', 'train') | 105 | 1715 | | ('7d', 'child', 'train') | 10 | 1655 | | ('7d', 'child_prototypical', 'train') | 123 | 2561 | | ('7e', 'child', 'train') | 10 | 1659 | | ('7e', 'child_prototypical', 'train') | 99 | 1568 | | ('7f', 'child', 'train') | 10 | 1657 | | ('7f', 'child_prototypical', 'train') | 111 | 1757 | | ('7g', 'child', 'train') | 10 | 1660 | | ('7g', 'child_prototypical', 'train') | 93 | 1148 | | ('7h', 'child', 'train') | 10 | 1655 | | ('7h', 'child_prototypical', 'train') | 123 | 1946 | | ('8', 'parent', 'train') | 80 | 710 | | ('8a', 'child', 'train') | 10 | 1655 | | ('8a', 'child_prototypical', 'train') | 123 | 2192 | | ('8b', 'child', 'train') | 10 | 1662 | | ('8b', 'child_prototypical', 'train') | 81 | 1487 | | ('8c', 'child', 'train') | 10 | 1657 | | ('8c', 'child_prototypical', 'train') | 111 | 1757 | | ('8d', 'child', 'train') | 10 | 1656 | | ('8d', 'child_prototypical', 'train') | 117 | 1910 | | ('8e', 'child', 'train') | 10 | 1658 | | ('8e', 'child_prototypical', 'train') | 105 | 1610 | | ('8f', 'child', 'train') | 10 | 1657 | | ('8f', 'child_prototypical', 'train') | 111 | 1868 | | ('8g', 'child', 'train') | 10 | 1662 | | ('8g', 'child_prototypical', 'train') | 81 | 839 | | ('8h', 'child', 'train') | 10 | 1655 | | ('8h', 'child_prototypical', 'train') | 123 | 2315 | | ('9', 'parent', 'train') | 90 | 700 | | ('9a', 'child', 'train') | 10 | 1655 | | ('9a', 'child_prototypical', 'train') | 123 | 1946 | | ('9b', 'child', 'train') | 10 | 1657 | | ('9b', 'child_prototypical', 'train') | 111 | 2090 | | ('9c', 'child', 'train') | 10 | 1662 | | ('9c', 'child_prototypical', 'train') | 81 | 596 | | ('9d', 'child', 'train') | 10 | 1660 | | ('9d', 'child_prototypical', 'train') | 93 | 1985 | | ('9e', 'child', 'train') | 10 | 1661 | | ('9e', 'child_prototypical', 'train') | 87 | 1901 | | ('9f', 'child', 'train') | 10 | 1659 | | ('9f', 'child_prototypical', 'train') | 99 | 1766 | | ('9g', 'child', 'train') | 10 | 1655 | | ('9g', 'child_prototypical', 'train') | 123 | 2069 | | ('9h', 'child', 'train') | 10 | 1656 | | ('9h', 'child_prototypical', 'train') | 117 | 2261 | | ('9i', 'child', 'train') | 10 | 1660 | | ('9i', 'child_prototypical', 'train') | 93 | 1613 | | ('AtLocation', 'N/A', 'validation') | 960 | 4646 | | ('CapableOf', 'N/A', 'validation') | 536 | 4734 | | ('Causes', 'N/A', 'validation') | 194 | 4738 | | ('CausesDesire', 'N/A', 'validation') | 40 | 4730 | | ('CreatedBy', 'N/A', 'validation') | 4 | 3554 | | ('DefinedAs', 'N/A', 'validation') | 4 | 1182 | | ('Desires', 'N/A', 'validation') | 56 | 4732 | | ('HasA', 'N/A', 'validation') | 168 | 4772 | | ('HasFirstSubevent', 'N/A', 'validation') | 4 | 3554 | | ('HasLastSubevent', 'N/A', 'validation') | 10 | 4732 | | ('HasPrerequisite', 'N/A', 'validation') | 450 | 4744 | | ('HasProperty', 'N/A', 'validation') | 266 | 4766 | | ('HasSubevent', 'N/A', 'validation') | 330 | 4768 | | ('IsA', 'N/A', 
'validation') | 816 | 4688 | | ('MadeOf', 'N/A', 'validation') | 48 | 4726 | | ('MotivatedByGoal', 'N/A', 'validation') | 50 | 4736 | | ('PartOf', 'N/A', 'validation') | 82 | 4742 | | ('ReceivesAction', 'N/A', 'validation') | 52 | 4726 | | ('SymbolOf', 'N/A', 'validation') | 4 | 1184 | | ('UsedFor', 'N/A', 'validation') | 660 | 4760 | ### Citation Information ``` @inproceedings{jurgens-etal-2012-semeval, title = "{S}em{E}val-2012 Task 2: Measuring Degrees of Relational Similarity", author = "Jurgens, David and Mohammad, Saif and Turney, Peter and Holyoak, Keith", booktitle = "*{SEM} 2012: The First Joint Conference on Lexical and Computational Semantics {--} Volume 1: Proceedings of the main conference and the shared task, and Volume 2: Proceedings of the Sixth International Workshop on Semantic Evaluation ({S}em{E}val 2012)", month = "7-8 " # jun, year = "2012", address = "Montr{\'e}al, Canada", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/S12-1047", pages = "356--364", } ```
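A minimal way to inspect the splits described above is sketched below. It assumes the `datasets` library and the repository id shown in the card title; note the record is also published under `research-backup/semeval2012_relational_similarity_v5`, so the id may need adjusting.

```python
from datasets import load_dataset

data = load_dataset("relbert/semeval2012_relational_similarity_v5", split="train")

# One record per relation type, each carrying lists of positive and negative word pairs.
record = data[0]
print(record["relation_type"])
print("positives:", record["positives"][:3])
print("negatives:", record["negatives"][:3])
```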
research-backup/semeval2012_relational_similarity_v5
[ "multilinguality:monolingual", "size_categories:1K<n<10K", "language:en", "license:other", "region:us" ]
2022-10-21T08:18:28+00:00
{"language": ["en"], "license": ["other"], "multilinguality": ["monolingual"], "size_categories": ["1K<n<10K"], "pretty_name": "SemEval2012 task 2 Relational Similarity"}
2022-10-21T09:29:48+00:00
[]
[ "en" ]
TAGS #multilinguality-monolingual #size_categories-1K<n<10K #language-English #license-other #region-us
Dataset Card for "relbert/semeval2012\_relational\_similarity\_v5" ================================================================== Dataset Description ------------------- * Repository: RelBERT * Paper: URL * Dataset: SemEval2012: Relational Similarity ### Dataset Summary *IMPORTANT*: This is the same dataset as relbert/semeval2012\_relational\_similarity, but with a different dataset construction. Relational similarity dataset from SemEval2012 task 2, compiled to fine-tune RelBERT model. The dataset contains a list of positive and negative word pair from 89 pre-defined relations. The relation types are constructed on top of following 10 parent relation types. Each of the parent relation is further grouped into child relation types where the definition can be found here. Dataset Structure ----------------- ### Data Instances An example of 'train' looks as follows. ### Data Splits ### Number of Positive/Negative Word-pairs in each Split
[ "### Dataset Summary\n\n\n*IMPORTANT*: This is the same dataset as relbert/semeval2012\\_relational\\_similarity,\nbut with a different dataset construction.\n\n\nRelational similarity dataset from SemEval2012 task 2, compiled to fine-tune RelBERT model.\nThe dataset contains a list of positive and negative word pair from 89 pre-defined relations.\nThe relation types are constructed on top of following 10 parent relation types.\n\n\nEach of the parent relation is further grouped into child relation types where the definition can be found here.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nAn example of 'train' looks as follows.", "### Data Splits", "### Number of Positive/Negative Word-pairs in each Split" ]
[ "TAGS\n#multilinguality-monolingual #size_categories-1K<n<10K #language-English #license-other #region-us \n", "### Dataset Summary\n\n\n*IMPORTANT*: This is the same dataset as relbert/semeval2012\\_relational\\_similarity,\nbut with a different dataset construction.\n\n\nRelational similarity dataset from SemEval2012 task 2, compiled to fine-tune RelBERT model.\nThe dataset contains a list of positive and negative word pair from 89 pre-defined relations.\nThe relation types are constructed on top of following 10 parent relation types.\n\n\nEach of the parent relation is further grouped into child relation types where the definition can be found here.\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nAn example of 'train' looks as follows.", "### Data Splits", "### Number of Positive/Negative Word-pairs in each Split" ]
f24c2fdd646ac249a494d600e1d0c3f4dbfa3d46
# Dataset Card for "smol" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
polinaeterna/smol
[ "region:us" ]
2022-10-21T08:27:05+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "text", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 28, "num_examples": 2}, {"name": "train", "num_bytes": 44, "num_examples": 2}], "download_size": 1776, "dataset_size": 72}}
2022-10-21T08:27:16+00:00
[]
[]
TAGS #region-us
# Dataset Card for "smol" More Information needed
[ "# Dataset Card for \"smol\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"smol\"\n\nMore Information needed" ]
25d0f6bbefef877405cf802ef1e09d57532f191e
# Dataset Card for "tjkicksmel" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
g30rv17ys/tjkicksmel
[ "region:us" ]
2022-10-21T11:09:05+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "audio_file", "dtype": "string"}, {"name": "slice", "dtype": "int16"}], "splits": [{"name": "train", "num_bytes": 223991.0, "num_examples": 100}], "download_size": 224647, "dataset_size": 223991.0}}
2022-10-21T11:28:09+00:00
[]
[]
TAGS #region-us
# Dataset Card for "tjkicksmel" More Information needed
[ "# Dataset Card for \"tjkicksmel\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"tjkicksmel\"\n\nMore Information needed" ]
bea81a21dc077bb21f6df5a0385410f4768b97bd
# Manifest - CMeEE_train.json: training set - CMeEE_dev.json: validation set - CMeEE_test.json: test set - When submitting, fill in an "entities" field for each record; its type is a list. Every recognized entity must contain the three fields "start_idx", "end_idx", and "type". - The submission file must be named: CMeEE_test.json - example_gold.json: example of the gold-standard answers - example_pred.json: example of a submission The evaluation metric is strict Micro-F1.
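For clarity, a hypothetical prediction record in the required format might look like the following sketch; the sentence, span indices, and entity type are made up for illustration, so consult example_pred.json for the authoritative format.

```python
# One predicted record for CMeEE_test.json (illustrative values only).
record = {
    "text": "患者有高血压病史。",  # hypothetical sentence
    "entities": [
        # every predicted entity needs exactly these three fields
        {"start_idx": 3, "end_idx": 5, "type": "dis"},  # span "高血压"; type assumed
    ],
}
```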
Rosenberg/CMeEE
[ "license:mit", "region:us" ]
2022-10-21T12:48:53+00:00
{"license": "mit"}
2022-10-25T11:30:05+00:00
[]
[]
TAGS #license-mit #region-us
# Manifest - CMeEE_train.json: training set - CMeEE_dev.json: validation set - CMeEE_test.json: test set - When submitting, fill in an "entities" field for each record; its type is a list. Every recognized entity must contain the three fields "start_idx", "end_idx", and "type". - The submission file must be named: CMeEE_test.json - example_gold.json: example of the gold-standard answers - example_pred.json: example of a submission The evaluation metric is strict Micro-F1.
[ "# Mainfest\n- CMeEE_train.json: 训练集 \n- CMeEE_dev.json: 验证集\n- CMeEE_test.json: 测试集\n - 提交的时候需要为每条记录填充\"entities\"字段,类型为列表。每个识别出来的实体必须包含\"start_idx\", \"end_idx\", \"type\"3个字段。\n - 提交的文件名为:CMeEE_test.json\n- example_gold.json: 标准答案示例\n- example_pred.json: 提交结果示例\n\n评估指标以严格Micro-F1值为准" ]
[ "TAGS\n#license-mit #region-us \n", "# Mainfest\n- CMeEE_train.json: 训练集 \n- CMeEE_dev.json: 验证集\n- CMeEE_test.json: 测试集\n - 提交的时候需要为每条记录填充\"entities\"字段,类型为列表。每个识别出来的实体必须包含\"start_idx\", \"end_idx\", \"type\"3个字段。\n - 提交的文件名为:CMeEE_test.json\n- example_gold.json: 标准答案示例\n- example_pred.json: 提交结果示例\n\n评估指标以严格Micro-F1值为准" ]
9141dc1ea6a4efac822323572cb35247ae66c050
News on Barcelona in Spanish media outlets
api19750904/News_bcn_sentiment
[ "region:us" ]
2022-10-21T14:23:04+00:00
{}
2022-10-21T14:25:49+00:00
[]
[]
TAGS #region-us
News on Barcelona in Spanish media outlets
[]
[ "TAGS\n#region-us \n" ]
0b2726b8a85a6eab027db75a1b71b7db8bd7faf2
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-560m * Dataset: phpthinh/data_2 * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@phpthinh](https://huggingface.co/phpthinh) for evaluating this model.
autoevaluate/autoeval-eval-phpthinh__data_2-default-112182-1832662968
[ "autotrain", "evaluation", "region:us" ]
2022-10-21T16:24:37+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["phpthinh/data_2"], "eval_info": {"task": "text_zero_shot_classification", "model": "bigscience/bloom-560m", "metrics": [], "dataset_name": "phpthinh/data_2", "dataset_config": "default", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-10-21T17:22:50+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-560m * Dataset: phpthinh/data_2 * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @phpthinh for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-560m\n* Dataset: phpthinh/data_2\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @phpthinh for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-560m\n* Dataset: phpthinh/data_2\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @phpthinh for evaluating this model." ]
44f14afa7b7eff6ed57c00c45d004d5ff2658a33
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-560m * Dataset: phpthinh/data_1 * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@phpthinh](https://huggingface.co/phpthinh) for evaluating this model.
autoevaluate/autoeval-eval-phpthinh__data_1-default-4c0514-1832562967
[ "autotrain", "evaluation", "region:us" ]
2022-10-21T16:24:38+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["phpthinh/data_1"], "eval_info": {"task": "text_zero_shot_classification", "model": "bigscience/bloom-560m", "metrics": [], "dataset_name": "phpthinh/data_1", "dataset_config": "default", "dataset_split": "test", "col_mapping": {"text": "text", "classes": "classes", "target": "target"}}}
2022-10-21T17:20:18+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: bigscience/bloom-560m * Dataset: phpthinh/data_1 * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @phpthinh for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-560m\n* Dataset: phpthinh/data_1\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @phpthinh for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: bigscience/bloom-560m\n* Dataset: phpthinh/data_1\n* Config: default\n* Split: test\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @phpthinh for evaluating this model." ]
5572b9894fbc50d2976bd894c872d0ac1f31a7a6
The Ewe news dataset contains 1,705,600 words across 4,264 different news articles. The articles were collected from different media portals in West Africa. After the collection process, the words were translated and further cross-checked by eight Ewe tutors in Ghana to ensure an efficient semantic representation and to prevent any duplication. The dataset consists of six (6) different classes: coronavirus, local, business, sports, entertainment, and politics. NOTE: For more details on access to the Ewe news dataset, please contact: [email protected] or [email protected]
VKAgbesi/Ewe_News_Dataset
[ "region:us" ]
2022-10-21T17:43:00+00:00
{}
2022-10-21T17:47:15+00:00
[]
[]
TAGS #region-us
The Ewe news dataset contains 1,705,600 words across 4,264 different news articles. The articles were collected from different media portals in West Africa. After the collection process, the words were translated and further cross-checked by eight Ewe tutors in Ghana to ensure an efficient semantic representation and to prevent any duplication. The dataset consists of six (6) different classes: coronavirus, local, business, sports, entertainment, and politics. NOTE: For more details on access to the Ewe news dataset, please contact: victoragbesivik@URL or vkagbesi@URL
[]
[ "TAGS\n#region-us \n" ]
a324158f6379b6265690bd09b46147c3338be53a
# Dataset Card for "CAYLOU" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
arbml/CAYLOU
[ "region:us" ]
2022-10-21T19:00:17+00:00
{"dataset_info": {"features": [{"name": "Source", "dtype": "string"}, {"name": "Target", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 597877, "num_examples": 5191}], "download_size": 170284, "dataset_size": 597877}}
2022-10-21T19:00:21+00:00
[]
[]
TAGS #region-us
# Dataset Card for "CAYLOU" More Information needed
[ "# Dataset Card for \"CAYLOU\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"CAYLOU\"\n\nMore Information needed" ]
24a2ceacb185767e845fb1126a794f3de5e4ba7a
# Dataset Card for "Arabic_Hate_Speech" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
arbml/Arabic_Hate_Speech
[ "region:us" ]
2022-10-21T19:21:56+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "tweet", "dtype": "string"}, {"name": "is_off", "dtype": "string"}, {"name": "is_hate", "dtype": "string"}, {"name": "is_vlg", "dtype": "string"}, {"name": "is_vio", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1656540, "num_examples": 8557}, {"name": "validation", "num_bytes": 234165, "num_examples": 1266}], "download_size": 881261, "dataset_size": 1890705}}
2022-10-21T19:22:02+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Arabic_Hate_Speech" More Information needed
[ "# Dataset Card for \"Arabic_Hate_Speech\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Arabic_Hate_Speech\"\n\nMore Information needed" ]
e5324fb79212d18f0c10429254a54c639f25a03a
# Dataset Card for "Author_Attribution_Tweets" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
arbml/Author_Attribution_Tweets
[ "region:us" ]
2022-10-21T19:26:24+00:00
{"dataset_info": {"features": [{"name": "tweet", "dtype": "string"}, {"name": "author", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 2629687, "num_examples": 13341}, {"name": "train", "num_bytes": 10441650, "num_examples": 53198}], "download_size": 6482998, "dataset_size": 13071337}}
2022-10-21T19:26:29+00:00
[]
[]
TAGS #region-us
# Dataset Card for "Author_Attribution_Tweets" More Information needed
[ "# Dataset Card for \"Author_Attribution_Tweets\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"Author_Attribution_Tweets\"\n\nMore Information needed" ]
768272dae01e6bfb26841a5389a7e0b5ec5b0aa0
# Dataset Card for "DAWQAS" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
arbml/DAWQAS
[ "region:us" ]
2022-10-21T19:29:03+00:00
{"dataset_info": {"features": [{"name": "QID", "dtype": "string"}, {"name": "Site_id", "dtype": "string"}, {"name": "Question", "dtype": "string"}, {"name": "Answer", "dtype": "string"}, {"name": "Answer1", "dtype": "string"}, {"name": "Answer2", "dtype": "string"}, {"name": "Answer3", "dtype": "string"}, {"name": "Answer4", "dtype": "string"}, {"name": "Answer5", "dtype": "string"}, {"name": "Answer6", "dtype": "string"}, {"name": "Answer7", "dtype": "string"}, {"name": "Answer8", "dtype": "string"}, {"name": "Answer9", "dtype": "string"}, {"name": "Answer10", "dtype": "string"}, {"name": "Answer11", "dtype": "string"}, {"name": "Original_Category", "dtype": "string"}, {"name": "Author", "dtype": "string"}, {"name": "Date", "dtype": "string"}, {"name": "Site", "dtype": "string"}, {"name": "Year", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 22437661, "num_examples": 3209}], "download_size": 10844359, "dataset_size": 22437661}}
2022-10-21T19:29:07+00:00
[]
[]
TAGS #region-us
# Dataset Card for "DAWQAS" More Information needed
[ "# Dataset Card for \"DAWQAS\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"DAWQAS\"\n\nMore Information needed" ]
45d52638e7c1582648dc4522dcf6f16bff05e749
# Community Dataset Community suggestions to improve machine translations https://github.com/LibreTranslate/CommunityDS https://libretranslate.com/ 1653250371.jsonl ``` {"q": "انا احبك يا امي ", "s": "Is breá liom tú, Mam.ggc", "source": "ar", "target": "ga"} {"q": "plump", "s": "montok", "source": "en", "target": "id"} {"q": "iron out", "s": "loswerden", "source": "en", "target": "de"} ```
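A minimal reading sketch (the file name and field names come from the example above; how the file is consumed downstream is an assumption, not an official API):

```python
import json

# Each line of the dump is one community suggestion record.
with open("1653250371.jsonl", encoding="utf-8") as f:
    for line in f:
        record = json.loads(line)
        # "q" is the original text, "s" the suggested translation.
        print(record["source"], "->", record["target"], ":", record["q"], "=>", record["s"])
```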
argosopentech/libretranslate-communityds
[ "region:us" ]
2022-10-21T20:59:54+00:00
{}
2022-10-21T21:40:45+00:00
[]
[]
TAGS #region-us
# Community Dataset Community suggestions to improve machine translations URL URL URL
[ "# Community Dataset\n\nCommunity suggestions to improve machine translations\n\nURL\n\nURL\n\nURL" ]
[ "TAGS\n#region-us \n", "# Community Dataset\n\nCommunity suggestions to improve machine translations\n\nURL\n\nURL\n\nURL" ]
8afd3c854c3ac246be112e1a055fb5efaefb0e96
# Dataset Card for WSDMCup2023 ## Dataset Description - **Homepage:** [Toloka Visual Question Answering Challenge](https://toloka.ai/challenges/wsdm2023) - **Repository:** [WSDM Cup 2023 Starter Pack](https://github.com/Toloka/WSDMCup2023) - **Paper:** <https://arxiv.org/abs/2309.16511> - **Leaderboard:** [CodaLab Competition Leaderboard](https://codalab.lisn.upsaclay.fr/competitions/7434#results) - **Point of Contact:** [email protected] | Question | Image and Answer | | --- | --- | | What do you use to hit the ball? | <img src="https://tlkfrontprod.azureedge.net/portal-production/static/uploaded/images/KUsGAc_eqdMcNxkBXzzl/KUsGAc_eqdMcNxkBXzzl_webp_1280_x2.webp" width="228" alt="What do you use to hit the ball?"> | | What do people use for cutting? | <img src="https://tlkfrontprod.azureedge.net/portal-production/static/uploaded/images/brXEVYckNLfQKcfNu4DF/brXEVYckNLfQKcfNu4DF_webp_1280_x2.webp" width="228" alt="What do people use for cutting?"> | | What do we use to support the immune system and get vitamin C? | <img src="https://tlkfrontprod.azureedge.net/portal-production/static/uploaded/images/HQ0A-ZvZCGCmYfTs83K7/HQ0A-ZvZCGCmYfTs83K7_webp_1280_x2.webp" width="228" alt="What do we use to support the immune system and get vitamin C?"> | ### Dataset Summary The WSDMCup2023 Dataset consists of images associated with textual questions. One entry (instance) in our dataset is a question-image pair labeled with the ground truth coordinates of a bounding box containing the visual answer to the given question. The images were obtained from a CC BY-licensed subset of the Microsoft Common Objects in Context dataset, [MS COCO](https://cocodataset.org/). All data labeling was performed on the [Toloka crowdsourcing platform](https://toloka.ai/). Our dataset has 45,199 instances split among three subsets: train (38,990 instances), public test (1,705 instances), and private test (4,504 instances). The entire train dataset was available for everyone since the start of the challenge. The public test dataset was available since the evaluation phase of the competition but without any ground truth labels. After the end of the competition, public and private sets were released. ## Dataset Citation Please cite the challenge results or dataset description as follows. - Ustalov D., Pavlichenko N., Koshelev S., Likhobaba D., and Smirnova A. [Toloka Visual Question Answering Benchmark](https://arxiv.org/abs/2309.16511). 2023. arXiv: [2309.16511 [cs.CV]](https://arxiv.org/abs/2309.16511). ```bibtex @inproceedings{TolokaWSDMCup2023, author = {Ustalov, Dmitry and Pavlichenko, Nikita and Koshelev, Sergey and Likhobaba, Daniil and Smirnova, Alisa}, title = {{Toloka Visual Question Answering Benchmark}}, year = {2023}, eprint = {2309.16511}, eprinttype = {arxiv}, eprintclass = {cs.CV}, language = {english}, } ``` ### Supported Tasks and Leaderboards Grounding Visual Question Answering ### Language English ## Dataset Structure ### Data Instances A data instance contains a URL to the picture, information about the image size - width and height, information about the ground truth bounding box - left top and right bottom dots, and contains the question related to the picture. 
``` {'image': 'https://toloka-cdn.azureedge.net/wsdmcup2023/000000000013.jpg', 'width': 640, 'height': 427, 'left': 129, 'top': 192, 'right': 155, 'bottom': 212, 'question': 'What does it use to breath?'} ``` ### Data Fields * image: contains URL to the image * width: value in pixels of image width * height: value in pixels of image height * left: the x coordinate in pixels to determine the left-top dot of the bounding box * top: the y coordinate in pixels to determine the left-top dot of the bounding box * right: the x coordinate in pixels to determine the right-bottom dot of the bounding box * bottom: the y coordinate in pixels to determine the right-bottom dot of the bounding box * question: a question related to the picture ### Data Splits There are four splits in the data: train, train_sample, test_public, and test_private. The 'train' split contains the full pool for model training. The 'train_sample' split contains a subset of the 'train' split. The 'test_public' split contains public data to test the model. The 'test_private' split contains private data for the final model test. ### Source Data The images were obtained from a CC BY-licensed subset of the Microsoft Common Objects in Context dataset, [MS COCO](https://cocodataset.org/). ### Annotations All data labeling was performed on the [Toloka crowdsourcing platform](https://toloka.ai/). Only annotators who self-reported knowledge of English had access to the annotation task. ### Citation Information * Competition: https://toloka.ai/challenges/wsdm2023 * CodaLab: https://codalab.lisn.upsaclay.fr/competitions/7434 * Dataset: https://doi.org/10.5281/zenodo.7057740
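### Usage
A rough loading sketch (the Hub repo id and split name are taken from this card's metadata; treat them as assumptions and verify against the repository files):

```python
from datasets import load_dataset

# Each row carries the image URL, the question, and the ground-truth
# bounding box as absolute pixel coordinates.
train = load_dataset("toloka/WSDMCup2023", split="train")

sample = train[0]
box = (sample["left"], sample["top"], sample["right"], sample["bottom"])
print(sample["question"], sample["image"], box)
```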
toloka/WSDMCup2023
[ "task_categories:visual-question-answering", "task_ids:visual-question-answering", "annotations_creators:crowdsourced", "language_creators:crowdsourced", "multilinguality:monolingual", "size_categories:10K<n<100K", "language:en", "license:cc-by-4.0", "toloka", "arxiv:2309.16511", "region:us" ]
2022-10-21T21:46:18+00:00
{"annotations_creators": ["crowdsourced"], "language_creators": ["crowdsourced"], "language": ["en"], "license": ["cc-by-4.0"], "multilinguality": ["monolingual"], "size_categories": ["10K<n<100K"], "source_datasets": [], "task_categories": ["visual-question-answering"], "task_ids": ["visual-question-answering"], "pretty_name": "WSDMCup2023", "tags": ["toloka"], "dataset_info": {"features": [{"name": "image", "dtype": "string"}, {"name": "width", "dtype": "int64"}, {"name": "height", "dtype": "int64"}, {"name": "left", "dtype": "int64"}, {"name": "top", "dtype": "int64"}, {"name": "right", "dtype": "int64"}, {"name": "bottom", "dtype": "int64"}, {"name": "question", "dtype": "string"}], "splits": [{"name": "train", "num_examples": 38990}, {"name": "train_sample", "num_examples": 1000}, {"name": "test_public", "num_examples": 1705}, {"name": "test_private", "num_examples": 4504}], "config_name": "wsdmcup2023"}}
2023-09-29T07:39:52+00:00
[ "2309.16511" ]
[ "en" ]
TAGS #task_categories-visual-question-answering #task_ids-visual-question-answering #annotations_creators-crowdsourced #language_creators-crowdsourced #multilinguality-monolingual #size_categories-10K<n<100K #language-English #license-cc-by-4.0 #toloka #arxiv-2309.16511 #region-us
Dataset Card for WSDMCup2023 ============================ Dataset Description ------------------- * Homepage: Toloka Visual Question Answering Challenge * Repository: WSDM Cup 2023 Starter Pack * Paper: <URL * Leaderboard: CodaLab Competition Leaderboard * Point of Contact: research@URL ### Dataset Summary The WSDMCup2023 Dataset consists of images associated with textual questions. One entry (instance) in our dataset is a question-image pair labeled with the ground truth coordinates of a bounding box containing the visual answer to the given question. The images were obtained from a CC BY-licensed subset of the Microsoft Common Objects in Context dataset, MS COCO. All data labeling was performed on the Toloka crowdsourcing platform. Our dataset has 45,199 instances split among three subsets: train (38,990 instances), public test (1,705 instances), and private test (4,504 instances). The entire train dataset was available for everyone since the start of the challenge. The public test dataset was available since the evaluation phase of the competition but without any ground truth labels. After the end of the competition, public and private sets were released. Dataset Citation ---------------- Please cite the challenge results or dataset description as follows. * Ustalov D., Pavlichenko N., Koshelev S., Likhobaba D., and Smirnova A. Toloka Visual Question Answering Benchmark. 2023. arXiv: [2309.16511 [cs.CV]](URL ### Supported Tasks and Leaderboards Grounding Visual Question Answering ### Language English Dataset Structure ----------------- ### Data Instances A data instance contains a URL to the picture, information about the image size - width and height, information about the ground truth bounding box - left top and right bottom dots, and contains the question related to the picture. ### Data Fields * image: contains URL to the image * width: value in pixels of image width * height: value in pixels of image height * left: the x coordinate in pixels to determine the left-top dot of the bounding box * top: the y coordinate in pixels to determine the left-top dot of the bounding box * right: the x coordinate in pixels to determine the right-bottom dot of the bounding box * bottom: the y coordinate in pixels to determine the right-bottom dot of the bounding box * question: a question related to the picture ### Data Splits There are four splits in the data: train, train\_sample, test\_public, and test\_private. 'train' split contains the full pull for model training. The 'train-sample' split contains the part of the 'train' split. The 'test\_public' split contains public data to test the model. The 'test\_private' split contains private data for the final model test. ### Source Data The images were obtained from a CC BY-licensed subset of the Microsoft Common Objects in Context dataset, MS COCO. ### Annotations All data labeling was performed on the Toloka crowdsourcing platform. Only annotators who self-reported the knowledge of English had access to the annotation task. * Competition: URL * CodaLab: URL * Dataset: URL
[ "### Dataset Summary\n\n\nThe WSDMCup2023 Dataset consists of images associated with textual questions.\nOne entry (instance) in our dataset is a question-image pair labeled with the ground truth coordinates of a bounding box containing\nthe visual answer to the given question. The images were obtained from a CC BY-licensed subset of the Microsoft Common Objects in\nContext dataset, MS COCO. All data labeling was performed on the Toloka crowdsourcing platform.\n\n\nOur dataset has 45,199 instances split among three subsets: train (38,990 instances), public test (1,705 instances),\nand private test (4,504 instances). The entire train dataset was available for everyone since the start of the challenge.\nThe public test dataset was available since the evaluation phase of the competition but without any ground truth labels.\nAfter the end of the competition, public and private sets were released.\n\n\nDataset Citation\n----------------\n\n\nPlease cite the challenge results or dataset description as follows.\n\n\n* Ustalov D., Pavlichenko N., Koshelev S., Likhobaba D., and Smirnova A. Toloka Visual Question Answering Benchmark. 2023. arXiv: [2309.16511 [cs.CV]](URL", "### Supported Tasks and Leaderboards\n\n\nGrounding Visual Question Answering", "### Language\n\n\nEnglish\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA data instance contains a URL to the picture, information about the image size - width and height, information about the ground truth bounding box - left top and right bottom dots, and contains the question related to the picture.", "### Data Fields\n\n\n* image: contains URL to the image\n* width: value in pixels of image width\n* height: value in pixels of image height\n* left: the x coordinate in pixels to determine the left-top dot of the bounding box\n* top: the y coordinate in pixels to determine the left-top dot of the bounding box\n* right: the x coordinate in pixels to determine the right-bottom dot of the bounding box\n* bottom: the y coordinate in pixels to determine the right-bottom dot of the bounding box\n* question: a question related to the picture", "### Data Splits\n\n\nThere are four splits in the data: train, train\\_sample, test\\_public, and test\\_private. 'train' split contains the full pull for model training.\nThe 'train-sample' split contains the part of the 'train' split. The 'test\\_public' split contains public data to test the model.\nThe 'test\\_private' split contains private data for the final model test.", "### Source Data\n\n\nThe images were obtained from a CC BY-licensed subset of the Microsoft Common Objects in\nContext dataset, MS COCO.", "### Annotations\n\n\nAll data labeling was performed on the Toloka crowdsourcing platform.\n\n\nOnly annotators who self-reported the knowledge of English had access to the annotation task.\n\n\n* Competition: URL\n* CodaLab: URL\n* Dataset: URL" ]
[ "TAGS\n#task_categories-visual-question-answering #task_ids-visual-question-answering #annotations_creators-crowdsourced #language_creators-crowdsourced #multilinguality-monolingual #size_categories-10K<n<100K #language-English #license-cc-by-4.0 #toloka #arxiv-2309.16511 #region-us \n", "### Dataset Summary\n\n\nThe WSDMCup2023 Dataset consists of images associated with textual questions.\nOne entry (instance) in our dataset is a question-image pair labeled with the ground truth coordinates of a bounding box containing\nthe visual answer to the given question. The images were obtained from a CC BY-licensed subset of the Microsoft Common Objects in\nContext dataset, MS COCO. All data labeling was performed on the Toloka crowdsourcing platform.\n\n\nOur dataset has 45,199 instances split among three subsets: train (38,990 instances), public test (1,705 instances),\nand private test (4,504 instances). The entire train dataset was available for everyone since the start of the challenge.\nThe public test dataset was available since the evaluation phase of the competition but without any ground truth labels.\nAfter the end of the competition, public and private sets were released.\n\n\nDataset Citation\n----------------\n\n\nPlease cite the challenge results or dataset description as follows.\n\n\n* Ustalov D., Pavlichenko N., Koshelev S., Likhobaba D., and Smirnova A. Toloka Visual Question Answering Benchmark. 2023. arXiv: [2309.16511 [cs.CV]](URL", "### Supported Tasks and Leaderboards\n\n\nGrounding Visual Question Answering", "### Language\n\n\nEnglish\n\n\nDataset Structure\n-----------------", "### Data Instances\n\n\nA data instance contains a URL to the picture, information about the image size - width and height, information about the ground truth bounding box - left top and right bottom dots, and contains the question related to the picture.", "### Data Fields\n\n\n* image: contains URL to the image\n* width: value in pixels of image width\n* height: value in pixels of image height\n* left: the x coordinate in pixels to determine the left-top dot of the bounding box\n* top: the y coordinate in pixels to determine the left-top dot of the bounding box\n* right: the x coordinate in pixels to determine the right-bottom dot of the bounding box\n* bottom: the y coordinate in pixels to determine the right-bottom dot of the bounding box\n* question: a question related to the picture", "### Data Splits\n\n\nThere are four splits in the data: train, train\\_sample, test\\_public, and test\\_private. 'train' split contains the full pull for model training.\nThe 'train-sample' split contains the part of the 'train' split. The 'test\\_public' split contains public data to test the model.\nThe 'test\\_private' split contains private data for the final model test.", "### Source Data\n\n\nThe images were obtained from a CC BY-licensed subset of the Microsoft Common Objects in\nContext dataset, MS COCO.", "### Annotations\n\n\nAll data labeling was performed on the Toloka crowdsourcing platform.\n\n\nOnly annotators who self-reported the knowledge of English had access to the annotation task.\n\n\n* Competition: URL\n* CodaLab: URL\n* Dataset: URL" ]
2507db0904883009d27037f036f424a78c2151ca
Below, we provide access to the datasets used in and created for the EMNLP 2022 paper [Large Language Models are Few-Shot Clinical Information Extractors](https://arxiv.org/abs/2205.12689). # Task #1: Clinical Sense Disambiguation For Task #1, we use the original annotations from the [Clinical Acronym Sense Inventory (CASI) dataset](https://conservancy.umn.edu/handle/11299/137703), described in [their paper](https://academic.oup.com/jamia/article/21/2/299/723657). As is common, due to noisiness in the label set, we do not evaluate on the entire dataset, but only on a cleaner subset. For consistency, we use the subset defined by the filtering used in ["Zero-Shot Clinical Acronym Expansion via Latent Meaning Cells"](https://arxiv.org/pdf/2010.02010.pdf). This results in a subset of 18,164 examples and 41 acronyms for evaluation. We additionally use the MIMIC Reverse Substitution dataset, as created in that same paper, with further instructions available in [their repository](https://github.com/griff4692/LMC). # Task #2: Biomedical Evidence Extraction For Task #2, we use the out-of-the-box high-level labels from the [PICO dataset](https://arxiv.org/abs/1806.04185) available publicly in the repository [here](https://github.com/bepnye/EBM-NLP). # Task #3: Coreference Resolution For Task #3, we annotated 105 snippets from the [CASI dataset](https://conservancy.umn.edu/handle/11299/137703), 5 for development and 100 for test. Each example is labeled with a singular pronoun and that pronoun's corresponding noun phrase antecedent (or antecedents). The antecedent was annotated as the entire noun phrase (barring any dependent clauses); in cases where multiple equally valid antecedents were available, all were labeled (empirically, up to 2). For the purposes of evaluation, we chose the antecedent with the highest overlap with each model’s output. To ensure nontrivial examples, the annotators excluded all examples of personal pronouns (e.g. “he”, “she”) if another person (and possible antecedent) had not yet been mentioned in the snippet. Examples were skipped in annotation if the pronoun did not have an antecedent within the provided text snippet. # Task #4: Medication Status Extraction For Task #4, we annotated 105 snippets from the [CASI dataset](https://conservancy.umn.edu/handle/11299/137703), 5 for development and 100 for test. We wanted to create a dataset of challenging examples containing a changeover in treatment. From a sample, only ∼5% of CASI snippets contained such examples. To increase the density of these examples, speeding up annotation, clinical notes were filtered with the following search terms: discont, adverse, side effect, switch, and dosage, leading to 1445 snippets. We excluded snippets that were purely medication lists, requiring at least some narrative part to be present. For each example, the annotators first extracted all medications. Guidelines excluded medication categories (e.g. “ACE-inhibitor”) if they referred to more specific drug names mentioned elsewhere (even if partially cut off in the snippet). For instance, only the antibiotic Levaquin was labeled in: “It is probably reasonable to treat with antibiotics [...]. I would agree with Levaquin alone [...]”. Guidelines also excluded electrolytes and intravenous fluids as well as route and dosage information. In a second step, medications were assigned to one of three categories: active, discontinued, and neither. Discontinued medications also contain medications that are temporarily on hold.
The category neither was assigned to all remaining medications (e.g. allergies, potential medications). The medication lists for each example were serialized as JSON. # Task #5: Medication Attribute Extraction For Task #5, we again annotated 105 snippets from the [CASI dataset](https://conservancy.umn.edu/handle/11299/137703), 5 for development and 100 for test. Annotation guidelines were adopted from the 2009 i2b2 medication extraction challenge (Uzuner et al., 2010) with slight modifications. We allowed medication attributes to have multiple spans and grouped together different mentions of the same drug (e.g. “Tylenol” and “Tylenol PM”) for the purpose of relation extraction. The annotation list for each example was serialized as JSON. # Citations When using our annotations for tasks #3-5, please cite our paper, as well as the papers from which the underlying text originated. ``` @inproceedings{agrawal2022large, title={Large Language Models are Few-Shot Clinical Information Extractors}, author={Monica Agrawal and Stefan Hegselmann and Hunter Lang and Yoon Kim and David Sontag}, booktitle = {Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing}, year={2022}, url_Paper = {https://arxiv.org/pdf/2205.12689.pdf} } ``` ``` @article{moon2014sense, title={A sense inventory for clinical abbreviations and acronyms created using clinical notes and medical dictionary resources}, author={Moon, Sungrim and Pakhomov, Serguei and Liu, Nathan and Ryan, James O and Melton, Genevieve B}, journal={Journal of the American Medical Informatics Association}, volume={21}, number={2}, pages={299--307}, year={2014}, publisher={BMJ Publishing Group BMA House, Tavistock Square, London, WC1H 9JR} } ``` # Licensing The annotations added by our team fall under the MIT license, but the CASI dataset itself is subject to its own licensing. --- license: other ---
mitclinicalml/clinical-ie
[ "arxiv:2205.12689", "arxiv:2010.02010", "arxiv:1806.04185", "region:us" ]
2022-10-21T22:00:31+00:00
{}
2022-12-01T16:34:20+00:00
[ "2205.12689", "2010.02010", "1806.04185" ]
[]
TAGS #arxiv-2205.12689 #arxiv-2010.02010 #arxiv-1806.04185 #region-us
Below, we provide access to the datasets used in and created for the EMNLP 2022 paper Large Language Models are Few-Shot Clinical Information Extractors. # Task #1: Clinical Sense Disambiguation For Task #1, we use the original annotations from the Clinical Acronym Sense Inventory (CASI) dataset, described in their paper. As is common, due to noisiness in the label set, we do not evaluate on the entire dataset, but only on a cleaner subset. For consistency, we use the subset defined by the filtering used in "Zero-Shot Clinical Acronym Expansion via Latent Meaning Cells". This results in a subset of 18,164 examples and 41 acronyms for evaluation. We additionally use the MIMIC Reverse Substitution dataset, as created in that same paper, with further instructions available in their repository. # Task #2: Biomedical Evidence Extraction For Task #2, we use the out-of-the-box high-level labels from the PICO dataset available publicly in the repository here. # Task #3: Coreference Resolution For Task #3, we annotated 105 snippets from the CASI dataset, 5 for development and 100 for test. Each example is labeled with a singular pronoun and that pronoun's corresponding noun phrase antecedent (or antecedents). The antecedent was annotated as the entire noun phrase (barring any dependent clauses); in cases where multiple equally valid antecedents were available, all were labeled (empirically, up to 2). For the purposes of evaluation, we chose the antecedent with the highest overlap to each model’s output. To ensure nontrivial examples, the annotators excluded all examples of personal pronouns (e.g. “he”, “she”) if another person (and possible antecedent) had not yet been mentioned in the snippet. Examples were skipped in annotation if the pronoun did not have an antecedent within the provided text snippet. # Task #4: Medication Status Extraction For Task #3, we annotated 105 snippets from the CASI dataset, 5 for development and 100 for test. We wanted to create a dataset of challenging examples containing a changeover in treatment. From a sample, only ∼5% of CASI snippets contained such examples. To increase the density of these examples, speeding up annotation, clinical notes were filtered with the following search terms: discont, adverse, side effect, switch, and dosage, leading to 1445 snippets. We excluded snippets that were purely medication lists, requiring at least some narrative part to be present. For each example, the annotators first extracted all medications. Guidelines excluded medication categories (e.g. “ACE-inhibitor”) if they referred to more specific drug names mentioned elsewhere (even if partially cut off in the snippet). For instance, only the antibiotic Levaquin was labeled in: “It is probably reasonable to treat with antibiotics [...]. I would agree with Levaquin alone [...]”. Guidelines also excluded electrolytes and intravenous fluids as well as route and dosage information. In a second step, medications were assigned to one of three categories: active, discontinued, and neither. Discontinued medications also contain medications that are temporarily on hold. The category neither was assigned to all remaining medications (e.g. allergies, potential medications). The medication lists for each example were serialized as a json. # Task #5: Medication Attribute Extraction For Task #5, we again annotated 105 snippets from the CASI dataset, 5 for development and 100 for test. 
Annotation guideline were adopted from the 2009 i2b2 medication extraction challenge (Uzuner et al., 2010) with slight modifications. We allowed medication attributes to have multiple spans and grouped together different mentions of the the same drug (e.g. “Tylenol” and “Tylenol PM”) for the purpose of relation extraction. The annotation list for each example was serialized as a json. s When using our annotations for tasks #3-5, please cite our paper, as well as the papers from which the underlying text originated. # Licensing The annotations added by our team fall under the MIT license, but the CASI dataset itself is subject to its own licensing. --- license: other ---
[ "# Task #1: Clinical Sense Disambiguation\nFor Task #1, we use the original annotations from the Clinical Acronym Sense Inventory (CASI) dataset, described in their paper. \nAs is common, due to noisiness in the label set, we do not evaluate on the entire dataset, but only on a cleaner subset. For consistency, we use the subset defined by the filtering used in \"Zero-Shot Clinical Acronym Expansion\nvia Latent Meaning Cells\". This results in a subset of 18,164 examples and 41 acronyms for evaluation.\n\nWe additionally use the MIMIC Reverse Substitution dataset, as created in that same paper, with further instructions available in their repository.", "# Task #2: Biomedical Evidence Extraction\nFor Task #2, we use the out-of-the-box high-level labels from the PICO dataset available publicly in the repository here.", "# Task #3: Coreference Resolution\nFor Task #3, we annotated 105 snippets from the CASI dataset, 5 for development and 100 for test. Each example is labeled with a singular pronoun and that pronoun's corresponding noun phrase antecedent (or antecedents).\nThe antecedent was annotated as the entire noun phrase (barring any dependent clauses); in cases where multiple equally valid antecedents were available, all were labeled (empirically, up to 2). \nFor the purposes of evaluation, we chose the antecedent with the highest overlap to each model’s output.\nTo ensure nontrivial examples, the annotators excluded all examples of personal pronouns (e.g. “he”, “she”) if another person (and possible antecedent) had not yet been mentioned in the snippet.\nExamples were skipped in annotation if the pronoun did not have an antecedent within the provided text snippet.", "# Task #4: Medication Status Extraction\nFor Task #3, we annotated 105 snippets from the CASI dataset, 5 for development and 100 for test. We wanted to create a dataset of challenging examples containing a changeover in treatment. From a sample, only ∼5% of CASI snippets contained such examples. To increase the density of these examples, speeding up annotation, clinical notes were filtered with the following search terms: discont, adverse, side effect, switch, and dosage, leading to 1445 snippets. We excluded snippets that were purely medication lists, requiring at least some narrative part to be present. \nFor each example, the annotators first extracted all medications. Guidelines excluded medication categories (e.g. “ACE-inhibitor”) if they referred to more specific drug names mentioned elsewhere (even if partially cut off in the snippet). For instance, only the antibiotic Levaquin was labeled in: “It is\nprobably reasonable to treat with antibiotics [...]. I would agree with Levaquin alone [...]”. Guidelines also excluded electrolytes and intravenous fluids as well as route and dosage information. In a second step, medications were assigned to one of three categories: active, discontinued, and neither.\nDiscontinued medications also contain medications that are temporarily on hold. The category neither was assigned to all remaining medications (e.g. 
allergies, potential medications).\nThe medication lists for each example were serialized as a json.", "# Task #5: Medication Attribute Extraction\nFor Task #5, we again annotated 105 snippets from the CASI dataset, 5 for development and 100 for test.\nAnnotation guideline were adopted from the 2009 i2b2 medication extraction challenge (Uzuner et al., 2010) with slight modifications.\nWe allowed medication attributes to have multiple spans and grouped together different mentions of the the same drug (e.g. “Tylenol” and “Tylenol PM”) for the purpose of relation extraction. \nThe annotation list for each example was serialized as a json. \n\ns\nWhen using our annotations for tasks #3-5, please cite our paper, as well as the papers from which the underlying text originated.", "# Licensing\nThe annotations added by our team fall under the MIT license, but the CASI dataset itself is subject to its own licensing. \n\n\n\n---\nlicense: other\n---" ]
[ "TAGS\n#arxiv-2205.12689 #arxiv-2010.02010 #arxiv-1806.04185 #region-us \n", "# Task #1: Clinical Sense Disambiguation\nFor Task #1, we use the original annotations from the Clinical Acronym Sense Inventory (CASI) dataset, described in their paper. \nAs is common, due to noisiness in the label set, we do not evaluate on the entire dataset, but only on a cleaner subset. For consistency, we use the subset defined by the filtering used in \"Zero-Shot Clinical Acronym Expansion\nvia Latent Meaning Cells\". This results in a subset of 18,164 examples and 41 acronyms for evaluation.\n\nWe additionally use the MIMIC Reverse Substitution dataset, as created in that same paper, with further instructions available in their repository.", "# Task #2: Biomedical Evidence Extraction\nFor Task #2, we use the out-of-the-box high-level labels from the PICO dataset available publicly in the repository here.", "# Task #3: Coreference Resolution\nFor Task #3, we annotated 105 snippets from the CASI dataset, 5 for development and 100 for test. Each example is labeled with a singular pronoun and that pronoun's corresponding noun phrase antecedent (or antecedents).\nThe antecedent was annotated as the entire noun phrase (barring any dependent clauses); in cases where multiple equally valid antecedents were available, all were labeled (empirically, up to 2). \nFor the purposes of evaluation, we chose the antecedent with the highest overlap to each model’s output.\nTo ensure nontrivial examples, the annotators excluded all examples of personal pronouns (e.g. “he”, “she”) if another person (and possible antecedent) had not yet been mentioned in the snippet.\nExamples were skipped in annotation if the pronoun did not have an antecedent within the provided text snippet.", "# Task #4: Medication Status Extraction\nFor Task #3, we annotated 105 snippets from the CASI dataset, 5 for development and 100 for test. We wanted to create a dataset of challenging examples containing a changeover in treatment. From a sample, only ∼5% of CASI snippets contained such examples. To increase the density of these examples, speeding up annotation, clinical notes were filtered with the following search terms: discont, adverse, side effect, switch, and dosage, leading to 1445 snippets. We excluded snippets that were purely medication lists, requiring at least some narrative part to be present. \nFor each example, the annotators first extracted all medications. Guidelines excluded medication categories (e.g. “ACE-inhibitor”) if they referred to more specific drug names mentioned elsewhere (even if partially cut off in the snippet). For instance, only the antibiotic Levaquin was labeled in: “It is\nprobably reasonable to treat with antibiotics [...]. I would agree with Levaquin alone [...]”. Guidelines also excluded electrolytes and intravenous fluids as well as route and dosage information. In a second step, medications were assigned to one of three categories: active, discontinued, and neither.\nDiscontinued medications also contain medications that are temporarily on hold. The category neither was assigned to all remaining medications (e.g. 
allergies, potential medications).\nThe medication lists for each example were serialized as a json.", "# Task #5: Medication Attribute Extraction\nFor Task #5, we again annotated 105 snippets from the CASI dataset, 5 for development and 100 for test.\nAnnotation guideline were adopted from the 2009 i2b2 medication extraction challenge (Uzuner et al., 2010) with slight modifications.\nWe allowed medication attributes to have multiple spans and grouped together different mentions of the the same drug (e.g. “Tylenol” and “Tylenol PM”) for the purpose of relation extraction. \nThe annotation list for each example was serialized as a json. \n\ns\nWhen using our annotations for tasks #3-5, please cite our paper, as well as the papers from which the underlying text originated.", "# Licensing\nThe annotations added by our team fall under the MIT license, but the CASI dataset itself is subject to its own licensing. \n\n\n\n---\nlicense: other\n---" ]
8ece234cf7f61947b738f708fbeedd29b3e7bc78
# Dataset Card for "L_HSAB" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
arbml/L_HSAB
[ "region:us" ]
2022-10-21T22:20:06+00:00
{"dataset_info": {"features": [{"name": "Tweet", "dtype": "string"}, {"name": "label", "dtype": {"class_label": {"names": {"1": "abusive", "2": "hate", "3": "normal"}}}}], "splits": [{"name": "train", "num_bytes": 1352345, "num_examples": 5846}], "download_size": 566158, "dataset_size": 1352345}}
2022-10-21T22:20:09+00:00
[]
[]
TAGS #region-us
# Dataset Card for "L_HSAB" More Information needed
[ "# Dataset Card for \"L_HSAB\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"L_HSAB\"\n\nMore Information needed" ]
6468c6249f8cf2dc9fd1047a7a33cfcdf164f056
# Dataset Card for "AraSenti_Lexicon" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
arbml/AraSenti_Lexicon
[ "region:us" ]
2022-10-21T22:26:20+00:00
{"dataset_info": {"features": [{"name": "Term", "dtype": "string"}, {"name": "Sentiment", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 6556665, "num_examples": 225329}], "download_size": 2464254, "dataset_size": 6556665}}
2022-10-21T22:26:24+00:00
[]
[]
TAGS #region-us
# Dataset Card for "AraSenti_Lexicon" More Information needed
[ "# Dataset Card for \"AraSenti_Lexicon\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"AraSenti_Lexicon\"\n\nMore Information needed" ]
a33b6be24c79b6df9b472ee1bbc57bf9a40b4917
# Dataset Card for "AraFacts" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
arbml/AraFacts
[ "region:us" ]
2022-10-21T22:35:51+00:00
{"dataset_info": {"features": [{"name": "ClaimID", "dtype": "string"}, {"name": "claim", "dtype": "string"}, {"name": "description", "dtype": "string"}, {"name": "source", "dtype": "string"}, {"name": "date", "dtype": "string"}, {"name": "source_label", "dtype": "string"}, {"name": "normalized_label", "dtype": "string"}, {"name": "source_category", "dtype": "string"}, {"name": "normalized_category", "dtype": "string"}, {"name": "source_url", "dtype": "string"}, {"name": "claim_urls", "dtype": "string"}, {"name": "evidence_urls", "dtype": "string"}, {"name": "claim_type", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 13201528, "num_examples": 6222}], "download_size": 5719822, "dataset_size": 13201528}}
2022-10-21T22:35:54+00:00
[]
[]
TAGS #region-us
# Dataset Card for "AraFacts" More Information needed
[ "# Dataset Card for \"AraFacts\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"AraFacts\"\n\nMore Information needed" ]
c7851e3c61d936d8f892fca61b428f2b0f2b01ce
# Dataset Card for "BBN_Blog_Posts" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
arbml/BBN_Blog_Posts
[ "region:us" ]
2022-10-21T22:43:27+00:00
{"dataset_info": {"features": [{"name": "Arabic_text", "dtype": "string"}, {"name": "ar:manual_sentiment", "dtype": "string"}, {"name": "ar:manual_confidence", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 145550, "num_examples": 1200}], "download_size": 76441, "dataset_size": 145550}}
2022-10-21T22:43:31+00:00
[]
[]
TAGS #region-us
# Dataset Card for "BBN_Blog_Posts" More Information needed
[ "# Dataset Card for \"BBN_Blog_Posts\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"BBN_Blog_Posts\"\n\nMore Information needed" ]
10a1bc7f22742105d8a8957d8c46c516d2a37dbf
# Hengam: An Adversarially Trained Transformer for Persian Temporal Tagging See model at https://huggingface.co/kargaranamir/Hengam See Demo at https://huggingface.co/spaces/kargaranamir/Hengam Details at https://github.com/kargaranamir/hengam ## Citation If you use any part of this repository in your research, please cite it using the following BibTeX entry. ```bibtex @inproceedings{mirzababaei-etal-2022-hengam, title = {Hengam: An Adversarially Trained Transformer for {P}ersian Temporal Tagging}, author = {Mirzababaei, Sajad and Kargaran, Amir Hossein and Sch{\"u}tze, Hinrich and Asgari, Ehsaneddin}, year = 2022, booktitle = {Proceedings of the 2nd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 12th International Joint Conference on Natural Language Processing}, publisher = {Association for Computational Linguistics}, address = {Online only}, pages = {1013--1024}, url = {https://aclanthology.org/2022.aacl-main.74} } ```
kargaranamir/HengamCorpus
[ "language:fa", "license:mit", "region:us" ]
2022-10-21T23:03:10+00:00
{"language": ["fa"], "license": "mit", "pretty_name": "Hengam Corpus"}
2023-10-08T21:05:32+00:00
[]
[ "fa" ]
TAGS #language-Persian #license-mit #region-us
# Hengam: An Adversarially Trained Transformer for Persian Temporal Tagging See model at URL See Demo at URL Details at URL If you use any part of this repository in your research, please cite it using the following BibTex entry.
[ "# Hengam: An Adversarially Trained Transformer for Persian Temporal Tagging\n\nSee model at URL\n\nSee Demo at URL\n\nDetails at URL\n\nIf you use any part of this repository in your research, please cite it using the following BibTex entry." ]
[ "TAGS\n#language-Persian #license-mit #region-us \n", "# Hengam: An Adversarially Trained Transformer for Persian Temporal Tagging\n\nSee model at URL\n\nSee Demo at URL\n\nDetails at URL\n\nIf you use any part of this repository in your research, please cite it using the following BibTex entry." ]
ad92360b864060cf7fde58fb0861d686e02d3fd9
Clean News Spain
api19750904/clean_news
[ "region:us" ]
2022-10-22T04:13:05+00:00
{}
2022-10-22T04:14:01+00:00
[]
[]
TAGS #region-us
Clean News Spain
[]
[ "TAGS\n#region-us \n" ]
ca5782a21d5111b3c8a8d0046c70c5e490bb3b02
# Bengali Female VS Male Names Dataset An NLP dataset that contains 2030 data samples of Bengali names and the corresponding gender, for both females and males. This is a very small and simple toy dataset that NLP starters can use to practice sequence classification and other NLP problems such as gender recognition from names. # Background In the Bengali language, a person's name depends largely on their gender. Normally, a female name ends with certain suffixes such as "A", "I", "EE" ["আ", "ই", "ঈ"]. Male names differ significantly from female names in terms of phoneme patterns and ending suffixes. So, in my observation, there is a significant possibility that these differences in patterns can be used for gender classification based on names. Find the full documentation here: [Documentation and dataset specifications](https://github.com/faruk-ahmad/bengali-female-vs-male-names) ## Dataset Format The dataset is in CSV format. There are two columns, namely 1. Name 2. Gender Each row has two attributes: the first one is the name, the second one is the gender. The name attribute is in ```utf-8``` encoding. The second attribute, i.e. the gender attribute, is encoded as 0 and 1 as | | | |---|---| |male| 0| |female| 1| | | | ## Dataset Statistics The number of samples per class is as below: | | | |---|---| |male| 1029| |female| 1001| | | | ## Possible Use Cases 1. Sequence classification using RNN, LSTM, etc. 2. Sequence modeling using other types of machine learning algorithms 3. Gender recognition based on names ## Disclaimer The names were collected from the internet using different sources like Wikipedia, baby name suggestion websites, blogs, etc. If someone's name is in the dataset, that is totally unintentional.
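As a quick-start sketch (the CSV file name below is hypothetical, so substitute the file actually shipped in this repository; the column names and the 0/1 encoding follow the tables above):

```python
import pandas as pd

# Hypothetical file name; substitute the CSV actually shipped in this repo.
df = pd.read_csv("bengali_names.csv", encoding="utf-8")

# 0 = male, 1 = female, as documented in the tables above.
label_names = {0: "male", 1: "female"}
df["gender_name"] = df["Gender"].map(label_names)
print(df["gender_name"].value_counts())
```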
faruk/bengali-names-vs-gender
[ "license:afl-3.0", "doi:10.57967/hf/0053", "region:us" ]
2022-10-22T05:15:41+00:00
{"license": "afl-3.0"}
2022-10-22T06:48:50+00:00
[]
[]
TAGS #license-afl-3.0 #doi-10.57967/hf/0053 #region-us
Bengali Female VS Male Names Dataset ==================================== An NLP dataset that contains 2030 data samples of Bengali names and the corresponding gender, for both females and males. This is a very small and simple toy dataset that NLP starters can use to practice sequence classification and other NLP problems such as gender recognition from names. Background ========== In the Bengali language, a person's name depends largely on their gender. Normally, a female name ends with certain suffixes such as "A", "I", "EE" ["আ", "ই", "ঈ"]. Male names differ significantly from female names in terms of phoneme patterns and ending suffixes. So, in my observation, there is a significant possibility that these differences in patterns can be used for gender classification based on names. Find the full documentation here: Documentation and dataset specifications Dataset Format -------------- The dataset is in CSV format. There are two columns, namely 1. Name 2. Gender Each row has two attributes: the first one is the name, the second one is the gender. The name attribute is in utf-8 encoding. The second attribute, i.e. the gender attribute, is encoded as 0 (male) and 1 (female). Dataset Statistics ------------------ The number of samples per class is as below: male 1029, female 1001. Possible Use Cases ------------------ 1. Sequence classification using RNN, LSTM, etc. 2. Sequence modeling using other types of machine learning algorithms 3. Gender recognition based on names Disclaimer ---------- The names were collected from the internet using different sources like Wikipedia, baby name suggestion websites, blogs, etc. If someone's name is in the dataset, that is totally unintentional.
[]
[ "TAGS\n#license-afl-3.0 #doi-10.57967/hf/0053 #region-us \n" ]
54d741401d7c2105f5e1a39b9c6669f22c49202e
News from Spanish media outlets
api19750904/news_stemm_es
[ "region:us" ]
2022-10-22T07:01:54+00:00
{}
2022-10-22T07:03:18+00:00
[]
[]
TAGS #region-us
News from Spanish media outlets
[]
[ "TAGS\n#region-us \n" ]
a5546b26a14869e8be1edca41bf1636f178984c0
# Dataset Card for "celeb-identities" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
jafdxc/celeb-identities
[ "region:us" ]
2022-10-22T13:43:56+00:00
{"dataset_info": {"features": [{"name": "image", "dtype": "image"}, {"name": "label", "dtype": {"class_label": {"names": {"0": "clarkson", "1": "freeman", "2": "jackie_chan", "3": "jennifer", "4": "serena"}}}}], "splits": [{"name": "train", "num_bytes": 1305982.0, "num_examples": 13}], "download_size": 1306199, "dataset_size": 1305982.0}}
2022-10-22T13:44:10+00:00
[]
[]
TAGS #region-us
# Dataset Card for "celeb-identities" More Information needed
[ "# Dataset Card for \"celeb-identities\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"celeb-identities\"\n\nMore Information needed" ]
9ee884078ecc63402fdbf63b023b5001280abadd
## ESC benchmark diagnostic dataset ## Dataset Summary As a part of the ESC benchmark, we provide a small, 8h diagnostic dataset of in-domain validation data with newly annotated transcriptions. The audio data is sampled from each of the ESC validation sets, giving a range of different domains and speaking styles. The transcriptions are annotated according to a consistent style guide with two formats: normalised and un-normalised. The dataset is structured in the same way as the ESC dataset, by grouping audio-transcription samples according to the dataset from which they were taken. We encourage participants to use this dataset when evaluating their systems to quickly assess performance on a range of different speech recognition conditions. All eight datasets in ESC can be downloaded and prepared in just a single line of code through the Hugging Face Datasets library: ```python from datasets import load_dataset esc_diagnostic_ami = load_dataset("esc-benchmark/esc-diagnostic-dataset", "ami") ``` Datasets have two splits - `clean` and `other`. To get the clean diagnostic subset of AMI: ```python ami_diagnostic_clean = esc_diagnostic_ami["clean"] ``` The datasets are fully prepared, such that the audio and transcription files can be used directly in training/evaluation scripts. ## Dataset Information A data point can be accessed by indexing the dataset object loaded through `load_dataset`: ```python print(ami_diagnostic_clean[0]) ``` A typical data point comprises the path to the audio file and its transcription. Also included is information on the dataset from which the sample derives and a unique identifier name: ```python { 'audio': {'path': None, 'array': array([ 7.01904297e-04, 7.32421875e-04, 7.32421875e-04, ..., -2.74658203e-04, -1.83105469e-04, -3.05175781e-05]), 'sampling_rate': 16000}, 'ortho_transcript': 'So, I guess we have to reflect on our experiences with remote controls to decide what, um, we would like to see in a convenient practical', 'norm_transcript': 'so i guess we have to reflect on our experiences with remote controls to decide what um we would like to see in a convenient practical', 'id': 'AMI_ES2011a_H00_FEE041_0062835_0064005', 'dataset': 'ami', } ``` ### Data Fields - `audio`: a dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate. - `ortho_transcript`: the orthographic transcription of the audio file. - `norm_transcript`: the normalized transcription of the audio file. - `id`: unique id of the data sample. - `dataset`: string name of the dataset the sample belongs to. ### Data Preparation #### Audio The audio for all ESC datasets is segmented into sample lengths suitable for training ASR systems. The Hugging Face datasets library decodes audio files on the fly, reading the segments and converting them to Python arrays. Consequently, no further preparation of the audio is required for use in training/evaluation scripts. Note that when accessing the audio column: `dataset[0]["audio"]` the audio file is automatically decoded and resampled to `dataset.features["audio"].sampling_rate`. Decoding and resampling of a large number of audio files might take a significant amount of time. Thus it is important to first query the sample index before the `"audio"` column, i.e. `dataset[0]["audio"]` should always be preferred over `dataset["audio"][0]`. #### Transcriptions The transcriptions corresponding to each audio file are provided in their 'error corrected' format.
No transcription pre-processing is applied to the text, only necessary 'error correction' steps such as removing junk tokens (_&lt;unk>_) or converting symbolic punctuation to spelled out form (_&lt;comma>_ to _,_). As such, no further preparation of the transcriptions is required for use in training/evaluation scripts.

Transcriptions are provided for training and validation splits. The transcriptions are **not** provided for the test splits. The ESC benchmark requires you to generate predictions for the test sets and upload them to https://huggingface.co/spaces/esc-benchmark/esc for scoring.

### Access
All eight of the datasets in ESC are publicly accessible, and licensing information is freely available. Three of the ESC datasets have specific terms of usage that must be agreed to before using the data. To do so, fill in the access forms on the specific datasets' pages:
* Common Voice: https://huggingface.co/datasets/mozilla-foundation/common_voice_9_0
* GigaSpeech: https://huggingface.co/datasets/speechcolab/gigaspeech
* SPGISpeech: https://huggingface.co/datasets/kensho/spgispeech

### Diagnostic Dataset
ESC contains a small, 8h diagnostic dataset of in-domain validation data with newly annotated transcriptions. The audio data is sampled from each of the ESC validation sets, giving a range of different domains and speaking styles. The transcriptions are annotated according to a consistent style guide with two formats: normalised and un-normalised. The dataset is structured in the same way as the ESC dataset, by grouping audio-transcription samples according to the dataset from which they were taken. We encourage participants to use this dataset when evaluating their systems to quickly assess performance on a range of different speech recognition conditions. For more information, visit: [esc-bench/esc-diagnostic-dataset](https://huggingface.co/datasets/esc-bench/esc-diagnostic-datasets).

## LibriSpeech

The LibriSpeech corpus is a standard large-scale corpus for assessing ASR systems. It consists of approximately 1,000 hours of narrated audiobooks from the [LibriVox](https://librivox.org) project. It is licensed under CC-BY-4.0.

Example Usage:

```python
librispeech = load_dataset("esc-benchmark/esc-datasets", "librispeech")
```

Train/validation splits:
- `train` (combination of `train.clean.100`, `train.clean.360` and `train.other.500`)
- `validation.clean`
- `validation.other`

Test splits:
- `test.clean`
- `test.other`

Also available are subsets of the train split, which can be accessed by setting the `subconfig` argument:

```python
librispeech = load_dataset("esc-benchmark/esc-datasets", "librispeech", subconfig="clean.100")
```

- `clean.100`: 100 hours of training data from the 'clean' subset
- `clean.360`: 360 hours of training data from the 'clean' subset
- `other.500`: 500 hours of training data from the 'other' subset

## Common Voice
Common Voice is a series of crowd-sourced, open-licensed speech datasets where speakers record text from Wikipedia in various languages. The English subset contains approximately 1,400 hours of audio data from speakers of various nationalities, accents and different recording conditions. It is licensed under CC0-1.0. 
Example usage:

```python
common_voice = load_dataset("esc-benchmark/esc-datasets", "common_voice", use_auth_token=True)
```

Training/validation splits:
- `train`
- `validation`

Test splits:
- `test`

## VoxPopuli
VoxPopuli is a large-scale multilingual speech corpus consisting of political data sourced from 2009-2020 European Parliament event recordings. The English subset contains approximately 550 hours of speech largely from non-native English speakers. It is licensed under CC0.

Example usage:

```python
voxpopuli = load_dataset("esc-benchmark/esc-datasets", "voxpopuli")
```

Training/validation splits:
- `train`
- `validation`

Test splits:
- `test`

## TED-LIUM
TED-LIUM consists of English-language TED Talk conference videos covering a range of different cultural, political, and academic topics. It contains approximately 450 hours of transcribed speech data. It is licensed under CC-BY-NC-ND 3.0.

Example usage:

```python
tedlium = load_dataset("esc-benchmark/esc-datasets", "tedlium")
```

Training/validation splits:
- `train`
- `validation`

Test splits:
- `test`

## GigaSpeech
GigaSpeech is a multi-domain English speech recognition corpus created from audiobooks, podcasts and YouTube. We provide the large train set (2,500 hours) and the standard validation and test splits. It is licensed under apache-2.0.

Example usage:

```python
gigaspeech = load_dataset("esc-benchmark/esc-datasets", "gigaspeech", use_auth_token=True)
```

Training/validation splits:
- `train` (`l` subset of training data (2,500 h))
- `validation`

Test splits:
- `test`

Also available are subsets of the train split, which can be accessed by setting the `subconfig` argument:

```python
gigaspeech = load_dataset("esc-benchmark/esc-datasets", "gigaspeech", subconfig="xs", use_auth_token=True)
```

- `xs`: extra-small subset of training data (10 h)
- `s`: small subset of training data (250 h)
- `m`: medium subset of training data (1,000 h)
- `xl`: extra-large subset of training data (10,000 h)

## SPGISpeech
SPGISpeech consists of company earnings calls that have been manually transcribed by S&P Global, Inc. according to a professional style guide. We provide the large train set (5,000 hours) and the standard validation and test splits. It is licensed under a Kensho user agreement.

Loading the dataset requires authorization.

Example usage:

```python
spgispeech = load_dataset("esc-benchmark/esc-datasets", "spgispeech", use_auth_token=True)
```

Training/validation splits:
- `train` (`l` subset of training data (~5,000 h))
- `validation`

Test splits:
- `test`

Also available are subsets of the train split, which can be accessed by setting the `subconfig` argument:

```python
spgispeech = load_dataset("esc-benchmark/esc-datasets", "spgispeech", subconfig="s", use_auth_token=True)
```

- `s`: small subset of training data (~200 h)
- `m`: medium subset of training data (~1,000 h)

## Earnings-22
Earnings-22 is a 119-hour corpus of English-language earnings calls collected from global companies, with speakers of many different nationalities and accents. It is licensed under CC-BY-SA-4.0.

Example usage:

```python
earnings22 = load_dataset("esc-benchmark/esc-datasets", "earnings22")
```

Training/validation splits:
- `train`
- `validation`

Test splits:
- `test`

## AMI
The AMI Meeting Corpus consists of 100 hours of meeting recordings from multiple recording devices synced to a common timeline. It is licensed under CC-BY-4.0. 
Example usage: ```python ami = load_dataset("esc-benchmark/esc-datasets", "ami") ``` Training/validation splits: - `train` - `validation` Test splits: - `test`
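
To give a concrete sense of how the diagnostic data might be scored, here is a minimal sketch. It assumes the third-party `jiwer` package for word error rate, and `transcribe` is a hypothetical placeholder standing in for whatever ASR system you are evaluating — neither is part of this dataset:

```python
from datasets import load_dataset
from jiwer import wer  # assumed external WER implementation

ami_diagnostic = load_dataset("esc-benchmark/esc-diagnostic-dataset", "ami")

references, predictions = [], []
for sample in ami_diagnostic["clean"]:
    # Index the sample first, then access "audio", so only this file is decoded.
    audio = sample["audio"]["array"]  # 16 kHz waveform as a numpy array
    references.append(sample["norm_transcript"])
    predictions.append(transcribe(audio))  # placeholder: your ASR system here

print(f"AMI clean diagnostic WER: {wer(references, predictions):.3f}")
```

Scoring against `norm_transcript` matches the normalised annotation format; swap in `ortho_transcript` to evaluate un-normalised output.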
esc-bench/esc-diagnostic-dataset
[ "task_categories:automatic-speech-recognition", "annotations_creators:expert-generated", "annotations_creators:crowdsourced", "annotations_creators:machine-generated", "language_creators:crowdsourced", "language_creators:expert-generated", "multilinguality:monolingual", "size_categories:100K<n<1M", "size_categories:1M<n<10M", "source_datasets:original", "source_datasets:extended|librispeech_asr", "source_datasets:extended|common_voice", "language:en", "license:cc-by-4.0", "license:apache-2.0", "license:cc0-1.0", "license:cc-by-nc-3.0", "license:other", "asr", "benchmark", "speech", "esc", "region:us" ]
2022-10-22T13:47:33+00:00
{"annotations_creators": ["expert-generated", "crowdsourced", "machine-generated"], "language_creators": ["crowdsourced", "expert-generated"], "language": ["en"], "license": ["cc-by-4.0", "apache-2.0", "cc0-1.0", "cc-by-nc-3.0", "other"], "multilinguality": ["monolingual"], "size_categories": ["100K<n<1M", "1M<n<10M"], "source_datasets": ["original", "extended|librispeech_asr", "extended|common_voice"], "task_categories": ["automatic-speech-recognition"], "task_ids": [], "pretty_name": "ESC Diagnostic Dataset", "tags": ["asr", "benchmark", "speech", "esc"], "extra_gated_prompt": "Three of the ESC datasets have specific terms of usage that must be agreed to before using the data. \nTo do so, fill in the access forms on the specific datasets' pages:\n * Common Voice: https://huggingface.co/datasets/mozilla-foundation/common_voice_9_0\n * GigaSpeech: https://huggingface.co/datasets/speechcolab/gigaspeech\n * SPGISpeech: https://huggingface.co/datasets/kensho/spgispeech", "extra_gated_fields": {"I hereby confirm that I have registered on the original Common Voice page and agree to not attempt to determine the identity of speakers in the Common Voice dataset": "checkbox", "I hereby confirm that I have accepted the terms of usages on GigaSpeech page": "checkbox", "I hereby confirm that I have accepted the terms of usages on SPGISpeech page": "checkbox"}}
2022-10-25T11:34:26+00:00
[]
[ "en" ]
TAGS #task_categories-automatic-speech-recognition #annotations_creators-expert-generated #annotations_creators-crowdsourced #annotations_creators-machine-generated #language_creators-crowdsourced #language_creators-expert-generated #multilinguality-monolingual #size_categories-100K<n<1M #size_categories-1M<n<10M #source_datasets-original #source_datasets-extended|librispeech_asr #source_datasets-extended|common_voice #language-English #license-cc-by-4.0 #license-apache-2.0 #license-cc0-1.0 #license-cc-by-nc-3.0 #license-other #asr #benchmark #speech #esc #region-us
## ESC benchmark diagnostic dataset

## Dataset Summary

As part of the ESC benchmark, we provide a small, 8h diagnostic dataset of in-domain validation data with newly annotated transcriptions. The audio data is sampled from each of the ESC validation sets, giving a range of different domains and speaking styles. The transcriptions are annotated according to a consistent style guide with two formats: normalised and un-normalised. The dataset is structured in the same way as the ESC dataset, by grouping audio-transcription samples according to the dataset from which they were taken. We encourage participants to use this dataset when evaluating their systems to quickly assess performance on a range of different speech recognition conditions.

All eight datasets in ESC can be downloaded and prepared in just a single line of code through the Hugging Face Datasets library:



Datasets have two splits - 'clean' and 'other'. To access the clean diagnostic subset of AMI:




The datasets are fully prepared, such that the audio and transcription files can be used directly in training/evaluation scripts.

## Dataset Information

A data point can be accessed by indexing the dataset object loaded through 'load_dataset':



A typical data point comprises the path to the audio file and its transcription. Also included are the name of the dataset from which the sample derives and a unique identifier:

### Data Fields

- 'audio': a dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate.

- 'ortho_transcript': the orthographic transcription of the audio file.

- 'norm_transcript': the normalized transcription of the audio file.

- 'id': unique id of the data sample.

- 'dataset': string name of the dataset the sample belongs to.

### Data Preparation

#### Audio
The audio for all ESC datasets is segmented into sample lengths suitable for training ASR systems. The Hugging Face datasets library decodes audio files on the fly, reading the segments and converting them to Python arrays. Consequently, no further preparation of the audio is required for use in training/evaluation scripts.

Note that when accessing the audio column: 'dataset[0]["audio"]' the audio file is automatically decoded and resampled to 'dataset.features["audio"].sampling_rate'. Decoding and resampling of a large number of audio files might take a significant amount of time. Thus it is important to first query the sample index before the '"audio"' column, i.e. 'dataset[0]["audio"]' should always be preferred over 'dataset["audio"][0]'.

#### Transcriptions
The transcriptions corresponding to each audio file are provided in their 'error corrected' format. No transcription pre-processing is applied to the text, only necessary 'error correction' steps such as removing junk tokens (_&lt;unk>_) or converting symbolic punctuation to spelled out form (_&lt;comma>_ to _,_). As such, no further preparation of the transcriptions is required for use in training/evaluation scripts.

Transcriptions are provided for training and validation splits. The transcriptions are not provided for the test splits. The ESC benchmark requires you to generate predictions for the test sets and upload them to URL for scoring.

### Access
All eight of the datasets in ESC are publicly accessible, and licensing information is freely available. Three of the ESC datasets have specific terms of usage that must be agreed to before using the data. 
To do so, fill in the access forms on the specific datasets' pages:
* Common Voice: URL
* GigaSpeech: URL
* SPGISpeech: URL

### Diagnostic Dataset
ESC contains a small, 8h diagnostic dataset of in-domain validation data with newly annotated transcriptions. The audio data is sampled from each of the ESC validation sets, giving a range of different domains and speaking styles. The transcriptions are annotated according to a consistent style guide with two formats: normalised and un-normalised. The dataset is structured in the same way as the ESC dataset, by grouping audio-transcription samples according to the dataset from which they were taken. We encourage participants to use this dataset when evaluating their systems to quickly assess performance on a range of different speech recognition conditions. For more information, visit: esc-bench/esc-diagnostic-dataset.

## LibriSpeech

The LibriSpeech corpus is a standard large-scale corpus for assessing ASR systems. It consists of approximately 1,000 hours of narrated audiobooks from the LibriVox project. It is licensed under CC-BY-4.0.

Example Usage:



Train/validation splits:
- 'train' (combination of 'URL.100', 'URL.360' and 'URL.500')
- 'URL'
- 'URL'

Test splits:
- 'URL'
- 'URL'

Also available are subsets of the train split, which can be accessed by setting the 'subconfig' argument:


- 'clean.100': 100 hours of training data from the 'clean' subset
- 'clean.360': 360 hours of training data from the 'clean' subset
- 'other.500': 500 hours of training data from the 'other' subset

## Common Voice
Common Voice is a series of crowd-sourced, open-licensed speech datasets where speakers record text from Wikipedia in various languages. The English subset contains approximately 1,400 hours of audio data from speakers of various nationalities, accents and different recording conditions. It is licensed under CC0-1.0.

Example usage:



Training/validation splits:
- 'train'
- 'validation'

Test splits:
- 'test'

## VoxPopuli
VoxPopuli is a large-scale multilingual speech corpus consisting of political data sourced from 2009-2020 European Parliament event recordings. The English subset contains approximately 550 hours of speech largely from non-native English speakers. It is licensed under CC0.

Example usage:



Training/validation splits:
- 'train'
- 'validation'

Test splits:
- 'test'

## TED-LIUM
TED-LIUM consists of English-language TED Talk conference videos covering a range of different cultural, political, and academic topics. It contains approximately 450 hours of transcribed speech data. It is licensed under CC-BY-NC-ND 3.0.

Example usage:



Training/validation splits:
- 'train'
- 'validation'

Test splits:
- 'test'

## GigaSpeech
GigaSpeech is a multi-domain English speech recognition corpus created from audiobooks, podcasts and YouTube. We provide the large train set (2,500 hours) and the standard validation and test splits. It is licensed under apache-2.0.

Example usage:



Training/validation splits:
- 'train' ('l' subset of training data (2,500 h))
- 'validation'

Test splits:
- 'test'

Also available are subsets of the train split, which can be accessed by setting the 'subconfig' argument:

- 'xs': extra-small subset of training data (10 h)
- 's': small subset of training data (250 h)
- 'm': medium subset of training data (1,000 h)
- 'xl': extra-large subset of training data (10,000 h)

## SPGISpeech
SPGISpeech consists of company earnings calls that have been manually transcribed by S&P Global, Inc. according to a professional style guide. 
We provide the large train set (5,000 hours) and the standard validation and test splits. It is licensed under a Kensho user agreement. Loading the dataset requires authorization. Example usage: Training/validation splits: - 'train' ('l' subset of training data (~5,000 h)) - 'validation' Test splits: - 'test' Also available are subsets of the train split, which can be accessed by setting the 'subconfig' argument: - 's': small subset of training data (~200 h) - 'm': medium subset of training data (~1,000 h) ## Earnings-22 Earnings-22 is a 119-hour corpus of English-language earnings calls collected from global companies, with speakers of many different nationalities and accents. It is licensed under CC-BY-SA-4.0. Example usage: Training/validation splits: - 'train' - 'validation' Test splits: - 'test' ## AMI The AMI Meeting Corpus consists of 100 hours of meeting recordings from multiple recording devices synced to a common timeline. It is licensed under CC-BY-4.0. Example usage: Training/validation splits: - 'train' - 'validation' Test splits: - 'test'
[ "## ESC benchmark diagnostic dataset", "## Dataset Summary\n\nAs a part of ESC benchmark, we provide a small, 8h diagnostic dataset of in-domain validation data with newly annotated transcriptions. The audio data is sampled from each of the ESC validation sets, giving a range of different domains and speaking styles. The transcriptions are annotated according to a consistent style guide with two formats: normalised and un-normalised. The dataset is structured in the same way as the ESC dataset, by grouping audio-transcription samples according to the dataset from which they were taken. We encourage participants to use this dataset when evaluating their systems to quickly assess performance on a range of different speech recognition conditions.\n\nAll eight datasets in ESC can be downloaded and prepared in just a single line of code through the Hugging Face Datasets library:\n\n\n\nDatasets have two splits - 'clean' nd 'other'. To have clean diagnostic subset of AMI:\n\n\n\n\nThe datasets are full prepared, such that the audio and transcription files can be used directly in training/evaluation scripts.", "## Dataset Information\n\nA data point can be accessed by indexing the dataset object loaded through 'load_dataset':\n\n\n\nA typical data point comprises the path to the audio file and its transcription. Also included is information of the dataset from which the sample derives and a unique identifier name:", "### Data Fields\n\n- 'audio': a dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate.\n\n- 'ortho_transcript': the orthographic transcription of the audio file.\n\n- 'norm_transcript': the normalized transcription of the audio file.\n\n- 'id': unique id of the data sample.\n\n- 'dataset': string name of a dataset the sample belongs to.", "### Data Preparation", "#### Audio\nThe audio for all ESC datasets is segmented into sample lengths suitable for training ASR systems. The Hugging Face datasets library decodes audio files on the fly, reading the segments and converting them to a Python arrays. Consequently, no further preparation of the audio is required to be used in training/evaluation scripts.\n\nNote that when accessing the audio column: 'dataset[0][\"audio\"]' the audio file is automatically decoded and resampled to 'dataset.features[\"audio\"].sampling_rate'. Decoding and resampling of a large number of audio files might take a significant amount of time. Thus it is important to first query the sample index before the '\"audio\"' column, i.e. 'dataset[0][\"audio\"]' should always be preferred over 'dataset[\"audio\"][0]'.", "#### Transcriptions\nThe transcriptions corresponding to each audio file are provided in their 'error corrected' format. No transcription pre-processing is applied to the text, only necessary 'error correction' steps such as removing junk tokens (_&lt;unk>_) or converting symbolic punctuation to spelled out form (_&lt;comma>_ to _,_). As such, no further preparation of the transcriptions is required to be used in training/evaluation scripts.\n\nTranscriptions are provided for training and validation splits. The transcriptions are not provided for the test splits. The ESC benchmark requires you to generate predictions for the test sets and upload them to URL for scoring.", "### Access\nAll eight of the datasets in ESC are accessible and licensing is freely available. Three of the ESC datasets have specific terms of usage that must be agreed to before using the data. 
To do so, fill in the access forms on the specific datasets' pages:\n* Common Voice: URL\n* GigaSpeech: URL\n* SPGISpeech: URL", "### Diagnostic Dataset\nESC contains a small, 8h diagnostic dataset of in-domain validation data with newly annotated transcriptions. The audio data is sampled from each of the ESC validation sets, giving a range of different domains and speaking styles. The transcriptions are annotated according to a consistent style guide with two formats: normalised and un-normalised. The dataset is structured in the same way as the ESC dataset, by grouping audio-transcription samples according to the dataset from which they were taken. We encourage participants to use this dataset when evaluating their systems to quickly assess performance on a range of different speech recognition conditions. For more information, visit: esc-bench/esc-diagnostic-dataset.", "## LibriSpeech\n\nThe LibriSpeech corpus is a standard large-scale corpus for assessing ASR systems. It consists of approximately 1,000 hours of narrated audiobooks from the LibriVox project. It is licensed under CC-BY-4.0.\n\nExample Usage:\n\n\n\nTrain/validation splits:\n- 'train' (combination of 'URL.100', 'URL.360' and 'URL.500')\n- 'URL'\n- 'URL'\n\nTest splits:\n- 'URL'\n- 'URL'\n\nAlso available are subsets of the train split, which can be accessed by setting the 'subconfig' argument:\n\n\n- 'clean.100': 100 hours of training data from the 'clean' subset\n- 'clean.360': 360 hours of training data from the 'clean' subset\n- 'other.500': 500 hours of training data from the 'other' subset", "## Common Voice\nCommon Voice is a series of crowd-sourced open-licensed speech datasets where speakers record text from Wikipedia in various languages. The English subset of contains approximately 1,400 hours of audio data from speakers of various nationalities, accents and different recording conditions. It is licensed under CC0-1.0.\n\nExample usage:\n\n\n\nTraining/validation splits:\n- 'train'\n- 'validation'\n\nTest splits:\n- 'test'", "## VoxPopuli\nVoxPopuli s a large-scale multilingual speech corpus consisting of political data sourced from 2009-2020 European Parliament event recordings. The English subset contains approximately 550 hours of speech largely from non-native English speakers. It is licensed under CC0.\n\nExample usage:\n\n\n\nTraining/validation splits:\n- 'train'\n- 'validation'\n\nTest splits:\n- 'test'", "## TED-LIUM\nTED-LIUM consists of English-language TED Talk conference videos covering a range of different cultural, political, and academic topics. It contains approximately 450 hours of transcribed speech data. It is licensed under CC-BY-NC-ND 3.0.\n\nExample usage:\n\n\n\nTraining/validation splits:\n- 'train'\n- 'validation'\n\nTest splits:\n- 'test'", "## GigaSpeech\nGigaSpeech is a multi-domain English speech recognition corpus created from audiobooks, podcasts and YouTube. We provide the large train set (2,500 hours) and the standard validation and test splits. 
It is licensed under apache-2.0.\n\nExample usage:\n\n\n\nTraining/validation splits:\n- 'train' ('l' subset of training data (2,500 h))\n- 'validation'\n\nTest splits:\n- 'test'\n\nAlso available are subsets of the train split, which can be accessed by setting the 'subconfig' argument:\n\n- 'xs': extra-small subset of training data (10 h)\n- 's': small subset of training data (250 h)\n- 'm': medium subset of training data (1,000 h)\n- 'xl': extra-large subset of training data (10,000 h)", "## SPGISpeech\nSPGISpeech consists of company earnings calls that have been manually transcribed by S&P Global, Inc according to a professional style guide. We provide the large train set (5,000 hours) and the standard validation and test splits. It is licensed under a Kensho user agreement.\n\nLoading the dataset requires authorization.\n\nExample usage:\n\n\n\nTraining/validation splits:\n- 'train' ('l' subset of training data (~5,000 h))\n- 'validation'\n\nTest splits:\n- 'test'\n\nAlso available are subsets of the train split, which can be accessed by setting the 'subconfig' argument:\n\n- 's': small subset of training data (~200 h)\n- 'm': medium subset of training data (~1,000 h)", "## Earnings-22\nEarnings-22 is a 119-hour corpus of English-language earnings calls collected from global companies, with speakers of many different nationalities and accents. It is licensed under CC-BY-SA-4.0. \n\nExample usage:\n\n\n\nTraining/validation splits:\n- 'train'\n- 'validation'\n\nTest splits:\n- 'test'", "## AMI\nThe AMI Meeting Corpus consists of 100 hours of meeting recordings from multiple recording devices synced to a common timeline. It is licensed under CC-BY-4.0.\n\nExample usage:\n\n\n\nTraining/validation splits:\n- 'train'\n- 'validation'\n\nTest splits:\n- 'test'" ]
[ "TAGS\n#task_categories-automatic-speech-recognition #annotations_creators-expert-generated #annotations_creators-crowdsourced #annotations_creators-machine-generated #language_creators-crowdsourced #language_creators-expert-generated #multilinguality-monolingual #size_categories-100K<n<1M #size_categories-1M<n<10M #source_datasets-original #source_datasets-extended|librispeech_asr #source_datasets-extended|common_voice #language-English #license-cc-by-4.0 #license-apache-2.0 #license-cc0-1.0 #license-cc-by-nc-3.0 #license-other #asr #benchmark #speech #esc #region-us \n", "## ESC benchmark diagnostic dataset", "## Dataset Summary\n\nAs a part of ESC benchmark, we provide a small, 8h diagnostic dataset of in-domain validation data with newly annotated transcriptions. The audio data is sampled from each of the ESC validation sets, giving a range of different domains and speaking styles. The transcriptions are annotated according to a consistent style guide with two formats: normalised and un-normalised. The dataset is structured in the same way as the ESC dataset, by grouping audio-transcription samples according to the dataset from which they were taken. We encourage participants to use this dataset when evaluating their systems to quickly assess performance on a range of different speech recognition conditions.\n\nAll eight datasets in ESC can be downloaded and prepared in just a single line of code through the Hugging Face Datasets library:\n\n\n\nDatasets have two splits - 'clean' nd 'other'. To have clean diagnostic subset of AMI:\n\n\n\n\nThe datasets are full prepared, such that the audio and transcription files can be used directly in training/evaluation scripts.", "## Dataset Information\n\nA data point can be accessed by indexing the dataset object loaded through 'load_dataset':\n\n\n\nA typical data point comprises the path to the audio file and its transcription. Also included is information of the dataset from which the sample derives and a unique identifier name:", "### Data Fields\n\n- 'audio': a dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate.\n\n- 'ortho_transcript': the orthographic transcription of the audio file.\n\n- 'norm_transcript': the normalized transcription of the audio file.\n\n- 'id': unique id of the data sample.\n\n- 'dataset': string name of a dataset the sample belongs to.", "### Data Preparation", "#### Audio\nThe audio for all ESC datasets is segmented into sample lengths suitable for training ASR systems. The Hugging Face datasets library decodes audio files on the fly, reading the segments and converting them to a Python arrays. Consequently, no further preparation of the audio is required to be used in training/evaluation scripts.\n\nNote that when accessing the audio column: 'dataset[0][\"audio\"]' the audio file is automatically decoded and resampled to 'dataset.features[\"audio\"].sampling_rate'. Decoding and resampling of a large number of audio files might take a significant amount of time. Thus it is important to first query the sample index before the '\"audio\"' column, i.e. 'dataset[0][\"audio\"]' should always be preferred over 'dataset[\"audio\"][0]'.", "#### Transcriptions\nThe transcriptions corresponding to each audio file are provided in their 'error corrected' format. 
No transcription pre-processing is applied to the text, only necessary 'error correction' steps such as removing junk tokens (_&lt;unk>_) or converting symbolic punctuation to spelled out form (_&lt;comma>_ to _,_). As such, no further preparation of the transcriptions is required to be used in training/evaluation scripts.\n\nTranscriptions are provided for training and validation splits. The transcriptions are not provided for the test splits. The ESC benchmark requires you to generate predictions for the test sets and upload them to URL for scoring.", "### Access\nAll eight of the datasets in ESC are accessible and licensing is freely available. Three of the ESC datasets have specific terms of usage that must be agreed to before using the data. To do so, fill in the access forms on the specific datasets' pages:\n* Common Voice: URL\n* GigaSpeech: URL\n* SPGISpeech: URL", "### Diagnostic Dataset\nESC contains a small, 8h diagnostic dataset of in-domain validation data with newly annotated transcriptions. The audio data is sampled from each of the ESC validation sets, giving a range of different domains and speaking styles. The transcriptions are annotated according to a consistent style guide with two formats: normalised and un-normalised. The dataset is structured in the same way as the ESC dataset, by grouping audio-transcription samples according to the dataset from which they were taken. We encourage participants to use this dataset when evaluating their systems to quickly assess performance on a range of different speech recognition conditions. For more information, visit: esc-bench/esc-diagnostic-dataset.", "## LibriSpeech\n\nThe LibriSpeech corpus is a standard large-scale corpus for assessing ASR systems. It consists of approximately 1,000 hours of narrated audiobooks from the LibriVox project. It is licensed under CC-BY-4.0.\n\nExample Usage:\n\n\n\nTrain/validation splits:\n- 'train' (combination of 'URL.100', 'URL.360' and 'URL.500')\n- 'URL'\n- 'URL'\n\nTest splits:\n- 'URL'\n- 'URL'\n\nAlso available are subsets of the train split, which can be accessed by setting the 'subconfig' argument:\n\n\n- 'clean.100': 100 hours of training data from the 'clean' subset\n- 'clean.360': 360 hours of training data from the 'clean' subset\n- 'other.500': 500 hours of training data from the 'other' subset", "## Common Voice\nCommon Voice is a series of crowd-sourced open-licensed speech datasets where speakers record text from Wikipedia in various languages. The English subset of contains approximately 1,400 hours of audio data from speakers of various nationalities, accents and different recording conditions. It is licensed under CC0-1.0.\n\nExample usage:\n\n\n\nTraining/validation splits:\n- 'train'\n- 'validation'\n\nTest splits:\n- 'test'", "## VoxPopuli\nVoxPopuli s a large-scale multilingual speech corpus consisting of political data sourced from 2009-2020 European Parliament event recordings. The English subset contains approximately 550 hours of speech largely from non-native English speakers. It is licensed under CC0.\n\nExample usage:\n\n\n\nTraining/validation splits:\n- 'train'\n- 'validation'\n\nTest splits:\n- 'test'", "## TED-LIUM\nTED-LIUM consists of English-language TED Talk conference videos covering a range of different cultural, political, and academic topics. It contains approximately 450 hours of transcribed speech data. 
It is licensed under CC-BY-NC-ND 3.0.\n\nExample usage:\n\n\n\nTraining/validation splits:\n- 'train'\n- 'validation'\n\nTest splits:\n- 'test'", "## GigaSpeech\nGigaSpeech is a multi-domain English speech recognition corpus created from audiobooks, podcasts and YouTube. We provide the large train set (2,500 hours) and the standard validation and test splits. It is licensed under apache-2.0.\n\nExample usage:\n\n\n\nTraining/validation splits:\n- 'train' ('l' subset of training data (2,500 h))\n- 'validation'\n\nTest splits:\n- 'test'\n\nAlso available are subsets of the train split, which can be accessed by setting the 'subconfig' argument:\n\n- 'xs': extra-small subset of training data (10 h)\n- 's': small subset of training data (250 h)\n- 'm': medium subset of training data (1,000 h)\n- 'xl': extra-large subset of training data (10,000 h)", "## SPGISpeech\nSPGISpeech consists of company earnings calls that have been manually transcribed by S&P Global, Inc according to a professional style guide. We provide the large train set (5,000 hours) and the standard validation and test splits. It is licensed under a Kensho user agreement.\n\nLoading the dataset requires authorization.\n\nExample usage:\n\n\n\nTraining/validation splits:\n- 'train' ('l' subset of training data (~5,000 h))\n- 'validation'\n\nTest splits:\n- 'test'\n\nAlso available are subsets of the train split, which can be accessed by setting the 'subconfig' argument:\n\n- 's': small subset of training data (~200 h)\n- 'm': medium subset of training data (~1,000 h)", "## Earnings-22\nEarnings-22 is a 119-hour corpus of English-language earnings calls collected from global companies, with speakers of many different nationalities and accents. It is licensed under CC-BY-SA-4.0. \n\nExample usage:\n\n\n\nTraining/validation splits:\n- 'train'\n- 'validation'\n\nTest splits:\n- 'test'", "## AMI\nThe AMI Meeting Corpus consists of 100 hours of meeting recordings from multiple recording devices synced to a common timeline. It is licensed under CC-BY-4.0.\n\nExample usage:\n\n\n\nTraining/validation splits:\n- 'train'\n- 'validation'\n\nTest splits:\n- 'test'" ]
a8adb09a4c77f4a9de9ec8bd0c55fcc589ba6464
[XSS](�javascript:alert(document.domain&#41;)
orgbug/test
[ "license:apache-2.0", "region:us" ]
2022-10-22T15:19:48+00:00
{"license": "apache-2.0"}
2023-05-06T13:51:10+00:00
[]
[]
TAGS #license-apache-2.0 #region-us
XSS
[]
[ "TAGS\n#license-apache-2.0 #region-us \n" ]
b5aa6b7cef550455223dc0e4faacaf8e5621447e
# not demo alright ## Subheader This is so cool!
tiagoseca/raw_dre_corpus
[ "region:us" ]
2022-10-22T17:53:42+00:00
{}
2022-11-02T12:37:09+00:00
[]
[]
TAGS #region-us
# not demo alright ## Subheader This is so cool!
[ "# not demo\n\nalright", "## Subheader \n\nThis is so cool!" ]
[ "TAGS\n#region-us \n", "# not demo\n\nalright", "## Subheader \n\nThis is so cool!" ]
328ac75de85373f41365238b2c9cdf1163c4945c
# Dataset Card for "lyrics_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
nick-carroll1/lyrics_dataset
[ "region:us" ]
2022-10-22T18:59:04+00:00
{"dataset_info": {"features": [{"name": "Artist", "dtype": "string"}, {"name": "Song", "dtype": "string"}, {"name": "Lyrics", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 371464, "num_examples": 237}], "download_size": 166829, "dataset_size": 371464}}
2022-10-23T16:56:11+00:00
[]
[]
TAGS #region-us
# Dataset Card for "lyrics_dataset" More Information needed
[ "# Dataset Card for \"lyrics_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"lyrics_dataset\"\n\nMore Information needed" ]
c019b34c131cb6c4b5694f910961f72f6f147ba9
# Dataset Card for friends_data ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary The Friends dataset consists of speech-based dialogue from the Friends TV sitcom. It is extracted from the [SocialNLP EmotionX 2019 challenge](https://sites.google.com/view/emotionx2019/datasets). ### Supported Tasks and Leaderboards text-classification, sentiment-classification: The dataset is mainly used to predict a sentiment label given text input. ### Languages The utterances are in English. ## Dataset Structure ### Data Instances A data point containing text and the corresponding label. An example from the friends_dataset looks like this: { 'text': 'Well! Well! Well! Joey Tribbiani! So you came back huh?', 'label': 'surprise' } ### Data Fields The field includes a text column and a corresponding emotion label. ## Dataset Creation ### Curation Rationale The dataset contains 1000 English-language dialogues originally in JSON files. The JSON file contains an array of dialogue objects. Each dialogue object is an array of line objects, and each line object contains speaker, utterance, emotion, and annotation strings. { "speaker": "Chandler", "utterance": "My duties? All right.", "emotion": "surprise", "annotation": "2000030" } Utterance and emotion were extracted from the original files into a CSV file. The dataset was cleaned to remove non-neutral labels. This dataset was created to be used in fine-tuning an emotion sentiment classifier that can be useful to teach individuals with autism how to read facial expressions.
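
As a minimal usage sketch — assuming the dataset is hosted under this repository id and exposes the `text` and `label` columns shown above (the split name is also an assumption; adjust to the actual file layout):

```python
from collections import Counter

from datasets import load_dataset

# Repo id taken from this card; the split name is assumed.
friends = load_dataset("michellejieli/friends_dataset", split="train")

print(friends[0])                 # e.g. {'text': '...', 'label': 'surprise'}
print(Counter(friends["label"]))  # distribution of emotion labels
```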
michellejieli/friends_dataset
[ "language:en", "distilroberta", "sentiment", "emotion", "twitter", "reddit", "region:us" ]
2022-10-22T19:37:03+00:00
{"language": "en", "tags": ["distilroberta", "sentiment", "emotion", "twitter", "reddit"]}
2022-10-23T12:21:12+00:00
[]
[ "en" ]
TAGS #language-English #distilroberta #sentiment #emotion #twitter #reddit #region-us
# Dataset Card for friends_data ## Table of Contents - Table of Contents - Dataset Description - Dataset Summary - Supported Tasks and Leaderboards - Languages - Dataset Structure - Data Instances - Data Fields - Data Splits - Dataset Creation - Curation Rationale - Source Data - Annotations - Personal and Sensitive Information - Considerations for Using the Data - Social Impact of Dataset - Discussion of Biases - Other Known Limitations - Additional Information - Dataset Curators - Licensing Information - Citation Information - Contributions ## Dataset Description - Homepage: - Repository: - Paper: - Leaderboard: - Point of Contact: ### Dataset Summary The Friends dataset consists of speech-based dialogue from the Friends TV sitcom. It is extracted from the SocialNLP EmotionX 2019 challenge. ### Supported Tasks and Leaderboards text-classification, sentiment-classification: The dataset is mainly used to predict a sentiment label given text input. ### Languages The utterances are in English. ## Dataset Structure ### Data Instances A data point containing text and the corresponding label. An example from the friends_dataset looks like this: { 'text': 'Well! Well! Well! Joey Tribbiani! So you came back huh?', 'label': 'surprise' } ### Data Fields The field includes a text column and a corresponding emotion label. ## Dataset Creation ### Curation Rationale The dataset contains 1000 English-language dialogues originally in JSON files. The JSON file contains an array of dialogue objects. Each dialogue object is an array of line objects, and each line object contains speaker, utterance, emotion, and annotation strings. { "speaker": "Chandler", "utterance": "My duties? All right.", "emotion": "surprise", "annotation": "2000030" } Utterance and emotion were extracted from the original files into a CSV file. The dataset was cleaned to remove non-neutral labels. This dataset was created to be used in fine-tuning an emotion sentiment classifier that can be useful to teach individuals with autism how to read facial expressions.
[ "# Dataset Card for friends_data", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary\n\nThe Friends dataset consists of speech-based dialogue from the Friends TV sitcom. It is extracted from the SocialNLP EmotionX 2019 challenge.", "### Supported Tasks and Leaderboards\ntext-classification, sentiment-classification: The dataset is mainly used to predict a sentiment label given text input.", "### Languages\nThe utterances are in English.", "## Dataset Structure", "### Data Instances\n\nA data point containing text and the corresponding label.\n\nAn example from the friends_dataset looks like this:\n\n{\n 'text': 'Well! Well! Well! Joey Tribbiani! So you came back huh?',\n 'label': 'surprise'\n}", "### Data Fields\n\nThe field includes a text column and a corresponding emotion label.", "## Dataset Creation", "### Curation Rationale\n\nThe dataset contains 1000 English-language dialogues originally in JSON files. The JSON file contains an array of dialogue objects. Each dialogue object is an array of line objects, and each line object contains speaker, utterance, emotion, and annotation strings.\n {\n \"speaker\": \"Chandler\",\n \"utterance\": \"My duties? All right.\",\n \"emotion\": \"surprise\",\n \"annotation\": \"2000030\"\n }\n\nUtterance and emotion were extracted from the original files into a CSV file. The dataset was cleaned to remove non-neutral labels. This dataset was created to be used in fine-tuning an emotion sentiment classifier that can be useful to teach individuals with autism how to read facial expressions." ]
[ "TAGS\n#language-English #distilroberta #sentiment #emotion #twitter #reddit #region-us \n", "# Dataset Card for friends_data", "## Table of Contents\n- Table of Contents\n- Dataset Description\n - Dataset Summary\n - Supported Tasks and Leaderboards\n - Languages\n- Dataset Structure\n - Data Instances\n - Data Fields\n - Data Splits\n- Dataset Creation\n - Curation Rationale\n - Source Data\n - Annotations\n - Personal and Sensitive Information\n- Considerations for Using the Data\n - Social Impact of Dataset\n - Discussion of Biases\n - Other Known Limitations\n- Additional Information\n - Dataset Curators\n - Licensing Information\n - Citation Information\n - Contributions", "## Dataset Description\n\n- Homepage:\n- Repository:\n- Paper:\n- Leaderboard:\n- Point of Contact:", "### Dataset Summary\n\nThe Friends dataset consists of speech-based dialogue from the Friends TV sitcom. It is extracted from the SocialNLP EmotionX 2019 challenge.", "### Supported Tasks and Leaderboards\ntext-classification, sentiment-classification: The dataset is mainly used to predict a sentiment label given text input.", "### Languages\nThe utterances are in English.", "## Dataset Structure", "### Data Instances\n\nA data point containing text and the corresponding label.\n\nAn example from the friends_dataset looks like this:\n\n{\n 'text': 'Well! Well! Well! Joey Tribbiani! So you came back huh?',\n 'label': 'surprise'\n}", "### Data Fields\n\nThe field includes a text column and a corresponding emotion label.", "## Dataset Creation", "### Curation Rationale\n\nThe dataset contains 1000 English-language dialogues originally in JSON files. The JSON file contains an array of dialogue objects. Each dialogue object is an array of line objects, and each line object contains speaker, utterance, emotion, and annotation strings.\n {\n \"speaker\": \"Chandler\",\n \"utterance\": \"My duties? All right.\",\n \"emotion\": \"surprise\",\n \"annotation\": \"2000030\"\n }\n\nUtterance and emotion were extracted from the original files into a CSV file. The dataset was cleaned to remove non-neutral labels. This dataset was created to be used in fine-tuning an emotion sentiment classifier that can be useful to teach individuals with autism how to read facial expressions." ]
f8d7080403cdd436f76256cfa60ca1ae64c8617d
# Dataset Card for Dataset Name

## Dataset Description

- **Homepage:** https://www.tau-nlp.sites.tau.ac.il/compwebq
- **Repository:** https://github.com/alontalmor/WebAsKB
- **Paper:** https://arxiv.org/abs/1803.06643
- **Leaderboard:** https://www.tau-nlp.sites.tau.ac.il/compwebq-leaderboard
- **Point of Contact:** [email protected]

### Dataset Summary

**A dataset for answering complex questions that require reasoning over multiple web snippets**

ComplexWebQuestions is a new dataset that contains a large set of complex questions in natural language, and can be used in multiple ways:

 - By interacting with a search engine, which is the focus of our paper (Talmor and Berant, 2018);
 - As a reading comprehension task: we release 12,725,989 web snippets that are relevant for the questions, and were collected during the development of our model;
 - As a semantic parsing task: each question is paired with a SPARQL query that can be executed against Freebase to retrieve the answer.

### Supported Tasks and Leaderboards

[More Information Needed]

### Languages

- English

## Dataset Structure

QUESTION FILES

The dataset contains 34,689 examples divided into 27,734 train, 3,480 dev and 3,475 test, each containing:

```
"ID": The unique ID of the example;
"webqsp_ID": The original WebQuestionsSP ID from which the question was constructed;
"webqsp_question": The WebQuestionsSP question from which the question was constructed;
"machine_question": The artificial complex question, before paraphrasing;
"question": The natural language complex question;
"sparql": Freebase SPARQL query for the question. Note that the SPARQL was constructed for the machine question; the actual question after paraphrasing may differ from the SPARQL.
"compositionality_type": An estimation of the type of compositionality, one of {composition, conjunction, comparative, superlative}. The estimation has not been manually verified; the question after paraphrasing may differ from this estimation.
"answers": a list of answers, each containing
    answer: the actual answer;
    answer_id: the Freebase answer id;
    aliases: Freebase extracted aliases for the answer.
"created": creation time
```

NOTE: the test set does not contain the "answer" field. For test evaluation please send an email to [email protected].

WEB SNIPPET FILES

The snippet files consist of 12,725,989 snippets, each containing the fields below. PLEASE DON'T USE CHROME WHEN DOWNLOADING THESE FROM DROPBOX (THE UNZIP COULD FAIL)

"question_ID": the ID of the related question, containing at least 3 instances of the same ID (full question, split1, split2);
"question": The natural language complex question;
"web_query": Query sent to the search engine.
"split_source": 'noisy supervision split' or 'ptrnet split'; please train on examples containing "ptrnet split" when comparing to Split+Decomp from https://arxiv.org/abs/1807.09623
"split_type": 'full_question' or 'split_part1' or 'split_part2'; please use 'composition_answer' in questions of type composition with split_type "split_part1" when training a reading comprehension model on splits as in Split+Decomp from https://arxiv.org/abs/1807.09623 (in the rest of the cases use the original answer).
"web_snippets": ~100 web snippets per query. Each snippet includes Title, Snippet. They are ordered according to Google results. 
In total, there are:
- 10,035,571 training set snippets
- 1,350,950 dev set snippets
- 1,339,468 test set snippets

### Source Data

The original files can be found at this [dropbox link](https://www.dropbox.com/sh/7pkwkrfnwqhsnpo/AACuu4v3YNkhirzBOeeaHYala)

### Licensing Information

Not specified

### Citation Information

```
@inproceedings{talmor2018web,
  title={The Web as a Knowledge-Base for Answering Complex Questions},
  author={Talmor, Alon and Berant, Jonathan},
  booktitle={Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers)},
  pages={641--651},
  year={2018}
}
```

### Contributions

Thanks to [happen2me](https://github.com/happen2me) for contributing this dataset.
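
As a rough usage sketch — assuming the question files are exposed on the Hub under this repository id with the fields documented above (the config/split layout is an assumption, and recall that the test split has no answers):

```python
from datasets import load_dataset

# Repo id taken from this card; split names are assumed.
cwq = load_dataset("drt/complex_web_questions")

ex = cwq["train"][0]
print(ex["question"])               # natural language complex question
print(ex["compositionality_type"])  # composition / conjunction / comparative / superlative
print(ex["sparql"])                 # Freebase SPARQL for the machine question
print([a["answer"] for a in ex["answers"]])  # gold answers (train/dev only)
```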
drt/complex_web_questions
[ "license:apache-2.0", "arxiv:1803.06643", "arxiv:1807.09623", "region:us" ]
2022-10-22T21:14:27+00:00
{"license": "apache-2.0", "source": "https://github.com/KGQA/KGQA-datasets"}
2023-04-27T20:04:50+00:00
[ "1803.06643", "1807.09623" ]
[]
TAGS #license-apache-2.0 #arxiv-1803.06643 #arxiv-1807.09623 #region-us
# Dataset Card for Dataset Name

## Dataset Description

- Homepage: URL
- Repository: URL
- Paper: URL
- Leaderboard: URL
- Point of Contact: alontalmor@URL

### Dataset Summary

A dataset for answering complex questions that require reasoning over multiple web snippets

ComplexWebQuestions is a new dataset that contains a large set of complex questions in natural language, and can be used in multiple ways:

 - By interacting with a search engine, which is the focus of our paper (Talmor and Berant, 2018);
 - As a reading comprehension task: we release 12,725,989 web snippets that are relevant for the questions, and were collected during the development of our model;
 - As a semantic parsing task: each question is paired with a SPARQL query that can be executed against Freebase to retrieve the answer.

### Supported Tasks and Leaderboards

### Languages

- English

## Dataset Structure

QUESTION FILES

The dataset contains 34,689 examples divided into 27,734 train, 3,480 dev and 3,475 test, each containing:

NOTE: the test set does not contain the "answer" field. For test evaluation please send an email to alontalmor@URL.

WEB SNIPPET FILES

The snippet files consist of 12,725,989 snippets, each containing the fields below. PLEASE DON'T USE CHROME WHEN DOWNLOADING THESE FROM DROPBOX (THE UNZIP COULD FAIL)

"question_ID": the ID of the related question, containing at least 3 instances of the same ID (full question, split1, split2);
"question": The natural language complex question;
"web_query": Query sent to the search engine.
"split_source": 'noisy supervision split' or 'ptrnet split'; please train on examples containing "ptrnet split" when comparing to Split+Decomp from URL
"split_type": 'full_question' or 'split_part1' or 'split_part2'; please use 'composition_answer' in questions of type composition with split_type "split_part1" when training a reading comprehension model on splits as in Split+Decomp from URL (in the rest of the cases use the original answer).
"web_snippets": ~100 web snippets per query. Each snippet includes Title, Snippet. They are ordered according to Google results.

In total, there are:
- 10,035,571 training set snippets
- 1,350,950 dev set snippets
- 1,339,468 test set snippets

### Source Data

The original files can be found at this dropbox link

### Licensing Information

Not specified

### Contributions

Thanks to happen2me for contributing this dataset.
[ "# Dataset Card for Dataset Name", "## Dataset Description\n\n- Homepage: URL\n- Repository: URL\n- Paper: URL\n- Leaderboard: URL\n- Point of Contact: alontalmor@URL.", "### Dataset Summary\n\nA dataset for answering complex questions that require reasoning over multiple web snippets\n\nComplexWebQuestions is a new dataset that contains a large set of complex questions in natural language, and can be used in multiple ways:\n\n - By interacting with a search engine, which is the focus of our paper (Talmor and Berant, 2018);\n - As a reading comprehension task: we release 12,725,989 web snippets that are relevant for the questions, and were collected during the development of our model; \n - As a semantic parsing task: each question is paired with a SPARQL query that can be executed against Freebase to retrieve the answer.", "### Supported Tasks and Leaderboards", "### Languages\n\n- English", "## Dataset Structure\n\nQUESTION FILES\n\nThe dataset contains 34,689 examples divided into 27,734 train, 3,480 dev, 3,475 test.\neach containing:\n\n\n\nNOTE: test set does not contain “answer” field. For test evaluation please send email to \nalontalmor@URL.\n\n\nWEB SNIPPET FILES\n\n\nThe snippets files consist of 12,725,989 snippets each containing\nPLEASE DON”T USE CHROME WHEN DOWNLOADING THESE FROM DROPBOX (THE UNZIP COULD FAIL)\n\n\"question_ID”: the ID of related question, containing at least 3 instances of the same ID (full question, split1, split2); \n\"question\": The natural language complex question; \n\"web_query\": Query sent to the search engine. \n“split_source”: 'noisy supervision split' or ‘ptrnet split’, please train on examples containing “ptrnet split” when comparing to Split+Decomp from URL\n“split_type”: 'full_question' or ‘split_part1' or ‘split_part2’ please use ‘composition_answer’ in question of type composition and split_type: “split_part1” when training a reading comprehension model on splits as in Split+Decomp from URL (in the rest of the cases use the original answer).\n\"web_snippets\": ~100 web snippets per query. Each snippet includes Title,Snippet. They are ordered according to Google results.\n\nWith a total of\n10,035,571 training set snippets\n1,350,950 dev set snippets\n1,339,468 test set snippets", "### Source Data\n\nThe original files can be found at this dropbox link", "### Licensing Information\n\nNot specified", "### Contributions\n\nThanks for happen2me for contributing this dataset." ]
[ "TAGS\n#license-apache-2.0 #arxiv-1803.06643 #arxiv-1807.09623 #region-us \n", "# Dataset Card for Dataset Name", "## Dataset Description\n\n- Homepage: URL\n- Repository: URL\n- Paper: URL\n- Leaderboard: URL\n- Point of Contact: alontalmor@URL.", "### Dataset Summary\n\nA dataset for answering complex questions that require reasoning over multiple web snippets\n\nComplexWebQuestions is a new dataset that contains a large set of complex questions in natural language, and can be used in multiple ways:\n\n - By interacting with a search engine, which is the focus of our paper (Talmor and Berant, 2018);\n - As a reading comprehension task: we release 12,725,989 web snippets that are relevant for the questions, and were collected during the development of our model; \n - As a semantic parsing task: each question is paired with a SPARQL query that can be executed against Freebase to retrieve the answer.", "### Supported Tasks and Leaderboards", "### Languages\n\n- English", "## Dataset Structure\n\nQUESTION FILES\n\nThe dataset contains 34,689 examples divided into 27,734 train, 3,480 dev, 3,475 test.\neach containing:\n\n\n\nNOTE: test set does not contain “answer” field. For test evaluation please send email to \nalontalmor@URL.\n\n\nWEB SNIPPET FILES\n\n\nThe snippets files consist of 12,725,989 snippets each containing\nPLEASE DON”T USE CHROME WHEN DOWNLOADING THESE FROM DROPBOX (THE UNZIP COULD FAIL)\n\n\"question_ID”: the ID of related question, containing at least 3 instances of the same ID (full question, split1, split2); \n\"question\": The natural language complex question; \n\"web_query\": Query sent to the search engine. \n“split_source”: 'noisy supervision split' or ‘ptrnet split’, please train on examples containing “ptrnet split” when comparing to Split+Decomp from URL\n“split_type”: 'full_question' or ‘split_part1' or ‘split_part2’ please use ‘composition_answer’ in question of type composition and split_type: “split_part1” when training a reading comprehension model on splits as in Split+Decomp from URL (in the rest of the cases use the original answer).\n\"web_snippets\": ~100 web snippets per query. Each snippet includes Title,Snippet. They are ordered according to Google results.\n\nWith a total of\n10,035,571 training set snippets\n1,350,950 dev set snippets\n1,339,468 test set snippets", "### Source Data\n\nThe original files can be found at this dropbox link", "### Licensing Information\n\nNot specified", "### Contributions\n\nThanks for happen2me for contributing this dataset." ]
fa56884038f5566930d101134cb74fc8912a92ee
# Dataset Card for "processed_narrative_relationship_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
mfigurski80/processed_narrative_relationship_dataset
[ "region:us" ]
2022-10-22T23:10:43+00:00
{"dataset_info": {"features": [{"name": "subject", "dtype": "string"}, {"name": "object", "dtype": "string"}, {"name": "dialogue", "dtype": "string"}, {"name": "pair_examples", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 3410751.179531327, "num_examples": 15798}, {"name": "train", "num_bytes": 13642788.820468673, "num_examples": 63191}], "download_size": 9671733, "dataset_size": 17053540.0}}
2022-11-01T01:00:16+00:00
[]
[]
TAGS #region-us
# Dataset Card for "processed_narrative_relationship_dataset" More Information needed
[ "# Dataset Card for \"processed_narrative_relationship_dataset\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"processed_narrative_relationship_dataset\"\n\nMore Information needed" ]
a6895a95b21e1c435a01b40c6be3d7280a727f07
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: Aiyshwariya/bert-finetuned-squad * Dataset: squad * Config: plain_text * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@jsfs11](https://huggingface.co/jsfs11) for evaluating this model.
autoevaluate/autoeval-eval-squad-plain_text-be943f-1842563161
[ "autotrain", "evaluation", "region:us" ]
2022-10-23T01:39:30+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["squad"], "eval_info": {"task": "extractive_question_answering", "model": "Aiyshwariya/bert-finetuned-squad", "metrics": ["squad", "bertscore"], "dataset_name": "squad", "dataset_config": "plain_text", "dataset_split": "validation", "col_mapping": {"context": "context", "question": "question", "answers-text": "answers.text", "answers-answer_start": "answers.answer_start"}}}
2022-10-23T01:42:33+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Question Answering * Model: Aiyshwariya/bert-finetuned-squad * Dataset: squad * Config: plain_text * Split: validation To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @jsfs11 for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Question Answering\n* Model: Aiyshwariya/bert-finetuned-squad\n* Dataset: squad\n* Config: plain_text\n* Split: validation\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jsfs11 for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Question Answering\n* Model: Aiyshwariya/bert-finetuned-squad\n* Dataset: squad\n* Config: plain_text\n* Split: validation\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jsfs11 for evaluating this model." ]
e8e49851544cde36cf86caec6e1e653e4cb56d42
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: Neulvo/bert-finetuned-squad * Dataset: squad * Config: plain_text * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@jsfs11](https://huggingface.co/jsfs11) for evaluating this model.
autoevaluate/autoeval-eval-squad-plain_text-be943f-1842563162
[ "autotrain", "evaluation", "region:us" ]
2022-10-23T01:39:34+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["squad"], "eval_info": {"task": "extractive_question_answering", "model": "Neulvo/bert-finetuned-squad", "metrics": ["squad", "bertscore"], "dataset_name": "squad", "dataset_config": "plain_text", "dataset_split": "validation", "col_mapping": {"context": "context", "question": "question", "answers-text": "answers.text", "answers-answer_start": "answers.answer_start"}}}
2022-10-23T01:42:25+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Question Answering * Model: Neulvo/bert-finetuned-squad * Dataset: squad * Config: plain_text * Split: validation To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @jsfs11 for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Question Answering\n* Model: Neulvo/bert-finetuned-squad\n* Dataset: squad\n* Config: plain_text\n* Split: validation\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jsfs11 for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Question Answering\n* Model: Neulvo/bert-finetuned-squad\n* Dataset: squad\n* Config: plain_text\n* Split: validation\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jsfs11 for evaluating this model." ]
5da30b83882e79083ee59bd450c0ada0300a59d6
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: 21iridescent/RoBERTa-base-finetuned-squad2-lwt * Dataset: squad * Config: plain_text * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@jsfs11](https://huggingface.co/jsfs11) for evaluating this model.
autoevaluate/autoeval-eval-squad-plain_text-be943f-1842563163
[ "autotrain", "evaluation", "region:us" ]
2022-10-23T01:39:39+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["squad"], "eval_info": {"task": "extractive_question_answering", "model": "21iridescent/RoBERTa-base-finetuned-squad2-lwt", "metrics": ["squad", "bertscore"], "dataset_name": "squad", "dataset_config": "plain_text", "dataset_split": "validation", "col_mapping": {"context": "context", "question": "question", "answers-text": "answers.text", "answers-answer_start": "answers.answer_start"}}}
2022-10-23T01:42:15+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Question Answering * Model: 21iridescent/RoBERTa-base-finetuned-squad2-lwt * Dataset: squad * Config: plain_text * Split: validation To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @jsfs11 for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Question Answering\n* Model: 21iridescent/RoBERTa-base-finetuned-squad2-lwt\n* Dataset: squad\n* Config: plain_text\n* Split: validation\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jsfs11 for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Question Answering\n* Model: 21iridescent/RoBERTa-base-finetuned-squad2-lwt\n* Dataset: squad\n* Config: plain_text\n* Split: validation\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jsfs11 for evaluating this model." ]
884b7444f79ed8f90b22ab80ee2469eb65b697cf
# Dataset Card for VUA Metaphor Corpus

**Important note#1**: This is a slightly simplified but mostly complete parse of the corpus. What is missing are lemmas and some metadata that was not important at the time of writing the parser. See the section `Simplifications` for more information on this.

**Important note#2**: The dataset contains metadata - to ignore it and correctly remap the annotations, see the section `Discarding metadata`.

### Dataset Summary

VUA Metaphor Corpus (VUAMC) contains a selection of excerpts from BNC-Baby files that have been annotated for metaphor. There are four registers, each comprising about 50 000 words: academic texts, news texts, fiction, and conversations.
Words have been separately labelled as participating in multi-word expressions (about 1.5%) or as discarded for metaphor analysis (0.02%). Main categories include words that are related to metaphor (MRW), words that signal metaphor (MFlag), and words that are not related to metaphor. For metaphor-related words, subdivisions have been made between clear cases of metaphor versus borderline cases (WIDLII, When In Doubt, Leave It In). Another parameter of metaphor-related words makes a distinction between direct metaphor, indirect metaphor, and implicit metaphor.

### Supported Tasks and Leaderboards

Metaphor detection, metaphor type classification.

### Languages

English.

## Dataset Structure

### Data Instances

A sample instance from the dataset:
```
{
	'document_name': 'kcv-fragment42',
	'words': ['', 'I', 'think', 'we', 'should', 'have', 'different', 'holidays', '.'],
	'pos_tags': ['N/A', 'PNP', 'VVB', 'PNP', 'VM0', 'VHI', 'AJ0', 'NN2', 'PUN'],
	'met_type': [
		{'type': 'mrw/met', 'word_indices': [5]}
	],
	'meta': ['vocal/laugh', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A']
}
```

### Data Fields

The instances are ordered as they appear in the corpus.

- `document_name`: a string containing the name of the document in which the sentence appears;
- `words`: words in the sentence (`""` when the word represents metadata);
- `pos_tags`: POS tags of the words, encoded using the BNC basic tagset (`"N/A"` when the word does not have an associated POS tag);
- `met_type`: metaphors in the sentence, marked by their type and word indices;
- `meta`: selected metadata tags providing additional context to the sentence. Metadata may not correspond to a specific word. In this case, the metadata is represented with an empty string (`""`) in `words` and a `"N/A"` tag in `pos_tags`.

## Dataset Creation

For detailed information on the corpus, please check out the references in the `Citation Information` section or contact the dataset authors.

## Simplifications
The raw corpus is equipped with rich metadata and encoded in the TEI XML format. The textual part is fully parsed except for the lemmas, i.e. all the sentences in the raw corpus are present in the dataset.
However, parsing the metadata fully is unnecessarily tedious, so certain simplifications were made:
- paragraph information is not preserved as the dataset is parsed at sentence level;
- manual corrections (`<corr>`) of incorrectly written words are ignored, and the original, incorrect form of the words is used instead;
- `<ptr>` and `<anchor>` tags are ignored as I cannot figure out what they represent;
- the attributes `rendition` (in `<hi>` tags) and `new` (in `<shift>` tags) are not exposed.

## Discarding metadata

The dataset contains rich metadata, which is stored in the `meta` attribute.
To keep data aligned, empty words or `"N/A"`s are inserted into the other attributes. If you want to ignore the metadata and correct the metaphor type annotations, you can use code similar to the following snippet:
```python3
import datasets

data = datasets.load_dataset("matejklemen/vuamc")["train"]
data = data.to_pandas()

# Drop metadata placeholders and remap the metaphor word indices accordingly.
for idx_ex in range(data.shape[0]):
	curr_ex = data.iloc[idx_ex]

	idx_remap = {}
	for idx_word, word in enumerate(curr_ex["words"]):
		if len(word) != 0:
			idx_remap[idx_word] = len(idx_remap)

	# Note that lists are stored as np arrays by datasets, while we are storing new data in a list!
	# (unhandled for simplicity)
	words, pos_tags, met_type = curr_ex[["words", "pos_tags", "met_type"]].tolist()
	if len(idx_remap) != len(curr_ex["words"]):
		words = list(filter(lambda _word: len(_word) > 0, curr_ex["words"]))
		pos_tags = list(filter(lambda _pos: _pos != "N/A", curr_ex["pos_tags"]))

		met_type = []
		for met_info in curr_ex["met_type"]:
			met_type.append({
				"type": met_info["type"],
				"word_indices": list(map(lambda _i: idx_remap[_i], met_info["word_indices"]))
			})
```

## Additional Information

### Dataset Curators

Gerard Steen; et al. (please see http://hdl.handle.net/20.500.12024/2541 for the full list).

### Licensing Information

Available for non-commercial use on condition that the terms of the [BNC Licence](http://www.natcorp.ox.ac.uk/docs/licence.html) are observed and that this header is included in its entirety with any copy distributed.

### Citation Information

```
@book{steen2010method,
  title={A method for linguistic metaphor identification: From MIP to MIPVU},
  author={Steen, Gerard and Dorst, Lettie and Herrmann, J. and Kaal, Anna and Krennmayr, Tina and Pasma, Trijntje},
  volume={14},
  year={2010},
  publisher={John Benjamins Publishing}
}
```
```
@inproceedings{leong-etal-2020-report,
    title = "A Report on the 2020 {VUA} and {TOEFL} Metaphor Detection Shared Task",
    author = "Leong, Chee Wee (Ben) and Beigman Klebanov, Beata and Hamill, Chris and Stemle, Egon and Ubale, Rutuja and Chen, Xianyang",
    booktitle = "Proceedings of the Second Workshop on Figurative Language Processing",
    year = "2020",
    url = "https://aclanthology.org/2020.figlang-1.3",
    doi = "10.18653/v1/2020.figlang-1.3",
    pages = "18--29"
}
```

### Contributions

Thanks to [@matejklemen](https://github.com/matejklemen) for adding this dataset.
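For quick orientation, here is a short companion sketch that iterates over the fields documented above. It is only an illustration: it reuses the `train` split from the snippet, and otherwise relies solely on the field names described in this card.

```python
from collections import Counter

import datasets

data = datasets.load_dataset("matejklemen/vuamc")["train"]

# Tally metaphor types across the whole corpus.
type_counts = Counter(
    met["type"] for ex in data for met in ex["met_type"]
)
print(type_counts.most_common(5))

# Print the surface form of each annotated metaphor in one instance.
ex = data[0]
for met in ex["met_type"]:
    span = " ".join(ex["words"][i] for i in met["word_indices"])
    print(met["type"], "->", span)
```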
matejklemen/vuamc
[ "task_categories:text-classification", "task_categories:token-classification", "task_ids:multi-class-classification", "annotations_creators:expert-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:10K<n<100K", "size_categories:100K<n<1M", "language:en", "license:other", "metaphor-classification", "multiword-expression-detection", "vua20", "vua18", "mipvu", "region:us" ]
2022-10-23T11:13:08+00:00
{"annotations_creators": ["expert-generated"], "language_creators": ["found"], "language": ["en"], "license": ["other"], "multilinguality": ["monolingual"], "size_categories": ["10K<n<100K", "100K<n<1M"], "source_datasets": [], "task_categories": ["text-classification", "token-classification"], "task_ids": ["multi-class-classification"], "pretty_name": "VUA Metaphor Corpus", "tags": ["metaphor-classification", "multiword-expression-detection", "vua20", "vua18", "mipvu"]}
2022-10-26T07:50:42+00:00
[]
[ "en" ]
TAGS #task_categories-text-classification #task_categories-token-classification #task_ids-multi-class-classification #annotations_creators-expert-generated #language_creators-found #multilinguality-monolingual #size_categories-10K<n<100K #size_categories-100K<n<1M #language-English #license-other #metaphor-classification #multiword-expression-detection #vua20 #vua18 #mipvu #region-us
# Dataset Card for VUA Metaphor Corpus Important note#1: This is a slightly simplified but mostly complete parse of the corpus. What is missing are lemmas and some metadata that was not important at the time of writing the parser. See the section 'Simplifications' for more information on this. Important note#2: The dataset contains metadata - to ignore it and correctly remap the annotations, see the section 'Discarding metadata'. ### Dataset Summary VUA Metaphor Corpus (VUAMC) contains a selection of excerpts from BNC-Baby files that have been annotated for metaphor. There are four registers, each comprising about 50 000 words: academic texts, news texts, fiction, and conversations. Words have been separately labelled as participating in multi-word expressions (about 1.5%) or as discarded for metaphor analysis (0.02%). Main categories include words that are related to metaphor (MRW), words that signal metaphor (MFlag), and words that are not related to metaphor. For metaphor-related words, subdivisions have been made between clear cases of metaphor versus borderline cases (WIDLII, When In Doubt, Leave It In). Another parameter of metaphor-related words makes a distinction between direct metaphor, indirect metaphor, and implicit metaphor. ### Supported Tasks and Leaderboards Metaphor detection, metaphor type classification. ### Languages English. ## Dataset Structure ### Data Instances A sample instance from the dataset: ### Data Fields The instances are ordered as they appear in the corpus. - 'document_name': a string containing the name of the document in which the sentence appears; - 'words': words in the sentence ('""' when the word represents metadata); - 'pos_tags': POS tags of the words, encoded using the BNC basic tagset ('"N/A"' when the word does not have an associated POS tag); - 'met_type': metaphors in the sentence, marked by their type and word indices; - 'meta': selected metadata tags providing additional context to the sentence. Metadata may not correspond to a specific word. In this case, the metadata is represented with an empty string ('""') in 'words' and a '"N/A"' tag in 'pos_tags'. ## Dataset Creation For detailed information on the corpus, please check out the references in the 'Citation Information' section or contact the dataset authors. ## Simplifications The raw corpus is equipped with rich metadata and encoded in the TEI XML format. The textual part is fully parsed except for the lemmas, i.e. all the sentences in the raw corpus are present in the dataset. However, parsing the metadata fully is unnecessarily tedious, so certain simplifications were made: - paragraph information is not preserved as the dataset is parsed at sentence level; - manual corrections ('<corr>') of incorrectly written words are ignored, and the original, incorrect form of the words is used instead; - '<ptr>' and '<anchor>' tags are ignored as I cannot figure out what they represent; - the attributes 'rendition' (in '<hi>' tags) and 'new' (in '<shift>' tags) are not exposed. ## Discarding metadata The dataset contains rich metadata, which is stored in the 'meta' attribute. To keep data aligned, empty words or '"N/A"'s are inserted into the other attributes. If you want to ignore the metadata and correct the metaphor type annotations, you can use code similar to the following snippet: ## Additional Information ### Dataset Curators Gerard Steen; et al. (please see URL for the full list). ### Licensing Information Available for non-commercial use on condition that the terms of the BNC Licence are observed and that this header is included in its entirety with any copy distributed. ### Contributions Thanks to @matejklemen for adding this dataset.
[ "# Dataset Card for VUA Metaphor Corpus\n\nImportant note#1: This is a slightly simplified but mostly complete parse of the corpus. What is missing are lemmas and some metadata that was not important at the time of writing the parser. See the section 'Simplifications' for more information on this. \n\nImportant note#2: The dataset contains metadata - to ignore it and correctly remap the annotations, see the section 'Discarding metadata'.", "### Dataset Summary\n\nVUA Metaphor Corpus (VUAMC) contains a selection of excerpts from BNC-Baby files that have been annotated for metaphor. There are four registers, each comprising about 50 000 words: academic texts, news texts, fiction, and conversations.\nWords have been separately labelled as participating in multi-word expressions (about 1.5%) or as discarded for metaphor analysis (0.02%). Main categories include words that are related to metaphor (MRW), words that signal metaphor (MFlag), and words that are not related to metaphor. For metaphor-related words, subdivisions have been made between clear cases of metaphor versus borderline cases (WIDLII, When In Doubt, Leave It In). Another parameter of metaphor-related words makes a distinction between direct metaphor, indirect metaphor, and implicit metaphor.", "### Supported Tasks and Leaderboards\n\nMetaphor detection, metaphor type classification.", "### Languages\n\nEnglish.", "## Dataset Structure", "### Data Instances\n\nA sample instance from the dataset:", "### Data Fields\n\nThe instances are ordered as they appear in the corpus.\n\n- 'document_name': a string containing the name of the document in which the sentence appears; \n- 'words': words in the sentence ('\"\"' when the word represents metadata); \n- 'pos_tags': POS tags of the words, encoded using the BNC basic tagset ('\"N/A\"' when the word does not have an associated POS tag); \n- 'met_type': metaphors in the sentence, marked by their type and word indices; \n- 'meta': selected metadata tags providing additional context to the sentence. Metadata may not correspond to a specific word. In this case, the metadata is represented with an empty string ('\"\"') in 'words' and a '\"N/A\"' tag in 'pos_tags'.", "## Dataset Creation\n\nFor detailed information on the corpus, please check out the references in the 'Citation Information' section or contact the dataset authors.", "## Simplifications\nThe raw corpus is equipped with rich metadata and encoded in the TEI XML format. The textual part is fully parsed except for the lemmas, i.e. all the sentences in the raw corpus are present in the dataset. \nHowever, parsing the metadata fully is unnecessarily tedious, so certain simplifications were made: \n- paragraph information is not preserved as the dataset is parsed at sentence level; \n- manual corrections ('<corr>') of incorrectly written words are ignored, and the original, incorrect form of the words is used instead; \n- '<ptr>' and '<anchor>' tags are ignored as I cannot figure out what they represent; \n- the attributes 'rendition' (in '<hi>' tags) and 'new' (in '<shift>' tags) are not exposed.", "## Discarding metadata\n\nThe dataset contains rich metadata, which is stored in the 'meta' attribute. To keep data aligned, empty words or '\"N/A\"'s are inserted into the other attributes. If you want to ignore the metadata and correct the metaphor type annotations, you can use code similar to the following snippet:", "## Additional Information", "### Dataset Curators\n\nGerard Steen; et al. 
(please see URL for the full list).", "### Licensing Information\n\nAvailable for non-commercial use on condition that the terms of the BNC Licence are observed and that this header is included in its entirety with any copy distributed.", "### Contributions\n\nThanks to @matejklemen for adding this dataset." ]
[ "TAGS\n#task_categories-text-classification #task_categories-token-classification #task_ids-multi-class-classification #annotations_creators-expert-generated #language_creators-found #multilinguality-monolingual #size_categories-10K<n<100K #size_categories-100K<n<1M #language-English #license-other #metaphor-classification #multiword-expression-detection #vua20 #vua18 #mipvu #region-us \n", "# Dataset Card for VUA Metaphor Corpus\n\nImportant note#1: This is a slightly simplified but mostly complete parse of the corpus. What is missing are lemmas and some metadata that was not important at the time of writing the parser. See the section 'Simplifications' for more information on this. \n\nImportant note#2: The dataset contains metadata - to ignore it and correctly remap the annotations, see the section 'Discarding metadata'.", "### Dataset Summary\n\nVUA Metaphor Corpus (VUAMC) contains a selection of excerpts from BNC-Baby files that have been annotated for metaphor. There are four registers, each comprising about 50 000 words: academic texts, news texts, fiction, and conversations.\nWords have been separately labelled as participating in multi-word expressions (about 1.5%) or as discarded for metaphor analysis (0.02%). Main categories include words that are related to metaphor (MRW), words that signal metaphor (MFlag), and words that are not related to metaphor. For metaphor-related words, subdivisions have been made between clear cases of metaphor versus borderline cases (WIDLII, When In Doubt, Leave It In). Another parameter of metaphor-related words makes a distinction between direct metaphor, indirect metaphor, and implicit metaphor.", "### Supported Tasks and Leaderboards\n\nMetaphor detection, metaphor type classification.", "### Languages\n\nEnglish.", "## Dataset Structure", "### Data Instances\n\nA sample instance from the dataset:", "### Data Fields\n\nThe instances are ordered as they appear in the corpus.\n\n- 'document_name': a string containing the name of the document in which the sentence appears; \n- 'words': words in the sentence ('\"\"' when the word represents metadata); \n- 'pos_tags': POS tags of the words, encoded using the BNC basic tagset ('\"N/A\"' when the word does not have an associated POS tag); \n- 'met_type': metaphors in the sentence, marked by their type and word indices; \n- 'meta': selected metadata tags providing additional context to the sentence. Metadata may not correspond to a specific word. In this case, the metadata is represented with an empty string ('\"\"') in 'words' and a '\"N/A\"' tag in 'pos_tags'.", "## Dataset Creation\n\nFor detailed information on the corpus, please check out the references in the 'Citation Information' section or contact the dataset authors.", "## Simplifications\nThe raw corpus is equipped with rich metadata and encoded in the TEI XML format. The textual part is fully parsed except for the lemmas, i.e. all the sentences in the raw corpus are present in the dataset. 
\nHowever, parsing the metadata fully is unnecessarily tedious, so certain simplifications were made: \n- paragraph information is not preserved as the dataset is parsed at sentence level; \n- manual corrections ('<corr>') of incorrectly written words are ignored, and the original, incorrect form of the words is used instead; \n- '<ptr>' and '<anchor>' tags are ignored as I cannot figure out what they represent; \n- the attributes 'rendition' (in '<hi>' tags) and 'new' (in '<shift>' tags) are not exposed.", "## Discarding metadata\n\nThe dataset contains rich metadata, which is stored in the 'meta' attribute. To keep data aligned, empty words or '\"N/A\"'s are inserted into the other attributes. If you want to ignore the metadata and correct the metaphor type annotations, you can use code similar to the following snippet:", "## Additional Information", "### Dataset Curators\n\nGerard Steen; et al. (please see URL for the full list).", "### Licensing Information\n\nAvailable for non-commercial use on condition that the terms of the BNC Licence are observed and that this header is included in its entirety with any copy distributed.", "### Contributions\n\nThanks to @matejklemen for adding this dataset." ]
f521d71ad8871bfe07d1b7f809c38ed578d79f93
# Space Style Embedding / Textual Inversion
## Usage
To use this embedding you have to download the file as well as drop it into the "\stable-diffusion-webui\embeddings" folder

To use it in a prompt: ```"art by space_style"```

If it is too strong just add [] around it.

Trained until 15000 steps

I added a 7.5k steps trained version in the files as well. If you want to use that version, remove the ```"-7500"``` suffix from the file name and replace the 15k steps version in your folder

Have fun :)

## Example Pictures

<table>
  <tr>
    <td><img src=https://i.imgur.com/flz5Oxz.png width=100% height=100%/></td>
    <td><img src=https://i.imgur.com/5btpoXs.png width=100% height=100%/></td>
    <td><img src=https://i.imgur.com/PtySCd4.png width=100% height=100%/></td>
    <td><img src=https://i.imgur.com/NbSue9H.png width=100% height=100%/></td>
    <td><img src=https://i.imgur.com/QhjRezm.png width=100% height=100%/></td>
  </tr>
</table>

## License
This embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.
The CreativeML OpenRAIL License specifies:

1. You can't use the embedding to deliberately produce nor share illegal or harmful outputs or content
2. The authors claim no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license
3. You may re-distribute the weights and use the embedding commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully)
[Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)
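If you prefer working outside the webui, a textual-inversion embedding like this can in principle also be loaded with `diffusers`. The sketch below is only an illustration: the local file name, base model, and token are assumptions, not part of this card.

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16  # base model assumed
).to("cuda")

# File path and trigger token are assumptions -- point this at the file
# you downloaded and the trigger word from this card.
pipe.load_textual_inversion("./space_style.pt", token="space_style")

image = pipe("art by space_style, a vast nebula over snowy mountains").images[0]
image.save("space_style_example.png")
```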
Nerfgun3/space_style
[ "language:en", "license:creativeml-openrail-m", "stable-diffusion", "text-to-image", "region:us" ]
2022-10-23T17:10:11+00:00
{"language": ["en"], "license": "creativeml-openrail-m", "tags": ["stable-diffusion", "text-to-image"], "inference": false}
2022-10-24T18:39:57+00:00
[]
[ "en" ]
TAGS #language-English #license-creativeml-openrail-m #stable-diffusion #text-to-image #region-us
Space Style Embedding / Textual Inversion
=========================================

Usage
-----

To use this embedding you have to download the file as well as drop it into the "\stable-diffusion-webui\embeddings" folder

To use it in a prompt:

If it is too strong just add [] around it.

Trained until 15000 steps

I added a 7.5k steps trained version in the files as well. If you want to use that version, remove the suffix from the file name and replace the 15k steps version in your folder

Have fun :)

Example Pictures
----------------

License
-------

This embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.
The CreativeML OpenRAIL License specifies:

1. You can't use the embedding to deliberately produce nor share illegal or harmful outputs or content
2. The authors claim no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license
3. You may re-distribute the weights and use the embedding commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully)
Please read the full license here
[]
[ "TAGS\n#language-English #license-creativeml-openrail-m #stable-diffusion #text-to-image #region-us \n" ]
e75c0ba2a7b8754214c22b71ed4ab002e518d665
# Portuguese Legal Sentences
Collection of Legal Sentences from the Portuguese Supreme Court of Justice
This dataset is intended for MLM and TSDAE training.
### Contributions
[@rufimelo99](https://github.com/rufimelo99)
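For reference, here is a minimal TSDAE training sketch with `sentence-transformers`. The text column name and the base encoder are assumptions, not something this card specifies; adjust both to the actual schema and your preferred Portuguese model.

```python
from datasets import load_dataset
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.datasets import DenoisingAutoEncoderDataset

# Column name "text" is an assumption -- check the dataset's actual schema.
sentences = load_dataset("rufimelo/PortugueseLegalSentences-v1", split="train")["text"]

# Base model is an assumption; any Portuguese-capable encoder should work.
model = SentenceTransformer("neuralmind/bert-base-portuguese-cased")

# TSDAE: train a denoising autoencoder over the raw legal sentences.
train_data = DenoisingAutoEncoderDataset(sentences)
loader = DataLoader(train_data, batch_size=8, shuffle=True)
loss = losses.DenoisingAutoEncoderLoss(model, tie_encoder_decoder=True)

model.fit(train_objectives=[(loader, loss)], epochs=1, show_progress_bar=True)
```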
rufimelo/PortugueseLegalSentences-v1
[ "annotations_creators:no-annotation", "language_creators:found", "multilinguality:monolingual", "source_datasets:original", "language:pt", "license:apache-2.0", "region:us" ]
2022-10-23T18:59:44+00:00
{"annotations_creators": ["no-annotation"], "language_creators": ["found"], "language": ["pt"], "license": ["apache-2.0"], "multilinguality": ["monolingual"], "source_datasets": ["original"]}
2022-10-24T12:16:43+00:00
[]
[ "pt" ]
TAGS #annotations_creators-no-annotation #language_creators-found #multilinguality-monolingual #source_datasets-original #language-Portuguese #license-apache-2.0 #region-us
# Portuguese Legal Sentences
Collection of Legal Sentences from the Portuguese Supreme Court of Justice
This dataset is intended for MLM and TSDAE training.
### Contributions
@rufimelo99
[ "# Portuguese Legal Sentences\nCollection of Legal Sentences from the Portuguese Supreme Court of Justice\nThe goal of this dataset was to be used for MLM and TSDAE", "### Contributions\n@rufimelo99" ]
[ "TAGS\n#annotations_creators-no-annotation #language_creators-found #multilinguality-monolingual #source_datasets-original #language-Portuguese #license-apache-2.0 #region-us \n", "# Portuguese Legal Sentences\nCollection of Legal Sentences from the Portuguese Supreme Court of Justice\nThe goal of this dataset was to be used for MLM and TSDAE", "### Contributions\n@rufimelo99" ]
c32097a5d2fbede13730983eb51d2b5defc2df72
# Flower Style Embedding / Textual Inversion
<img alt="Showcase" src="https://huggingface.co/datasets/Nerfgun3/flower_style/resolve/main/flower_style_showcase.jpg"/>
## Usage
To use this embedding you have to download the file as well as drop it into the "\stable-diffusion-webui\embeddings" folder

To use it in a prompt: ```"art by flower_style"```

If it is too strong just add [] around it.

Trained until 15000 steps

I added a 7.5k steps trained version in the files as well. If you want to use that version, remove the ```"-7500"``` suffix from the file name and replace the 15k steps version in your folder

Have fun :)

## License
This embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.
The CreativeML OpenRAIL License specifies:

1. You can't use the embedding to deliberately produce nor share illegal or harmful outputs or content
2. The authors claim no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license
3. You may re-distribute the weights and use the embedding commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully)
[Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)
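As with other textual inversions, this embedding can also be loaded programmatically via `diffusers` rather than through the webui. A hedged sketch follows; the file name and base model are assumed, not taken from this card.

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16  # base model assumed
).to("cuda")

# Assumed local file name; the trigger token matches the prompt above.
pipe.load_textual_inversion("./flower_style.pt", token="flower_style")

image = pipe("art by flower_style, a portrait wreathed in blossoms").images[0]
image.save("flower_style_example.png")
```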
Nerfgun3/flower_style
[ "language:en", "license:creativeml-openrail-m", "stable-diffusion", "text-to-image", "image-to-image", "region:us" ]
2022-10-23T19:34:36+00:00
{"language": ["en"], "license": "creativeml-openrail-m", "thumbnail": "https://huggingface.co/datasets/Nerfgun3/flower_style/resolve/main/flower_style_showcase.jpg", "tags": ["stable-diffusion", "text-to-image", "image-to-image"], "inference": false}
2022-11-17T13:54:16+00:00
[]
[ "en" ]
TAGS #language-English #license-creativeml-openrail-m #stable-diffusion #text-to-image #image-to-image #region-us
# Flower Style Embedding / Textual Inversion

<img alt="Showcase" src="URL

## Usage
To use this embedding you have to download the file as well as drop it into the "\stable-diffusion-webui\embeddings" folder

To use it in a prompt:

If it is too strong just add [] around it.

Trained until 15000 steps

I added a 7.5k steps trained version in the files as well. If you want to use that version, remove the suffix from the file name and replace the 15k steps version in your folder

Have fun :)

## License

This embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.
The CreativeML OpenRAIL License specifies:

1. You can't use the embedding to deliberately produce nor share illegal or harmful outputs or content
2. The authors claim no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license
3. You may re-distribute the weights and use the embedding commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully)
Please read the full license here
[ "# Flower Style Embedding / Textual Inversion\n\n<img alt=\"Showcase\" src=\"URL", "## Usage\nTo use this embedding you have to download the file aswell as drop it into the \"\\stable-diffusion-webui\\embeddings\" folder\n\nTo use it in a prompt: \n\nIf it is to strong just add [] around it.\n\nTrained until 15000 steps\n\nI added a 7.5k steps trained ver in the files aswell. If you want to use that version, remove the from the file name and replace the 15k steps ver in your folder\n\nHave fun :)", "## License\n\nThis embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.\nThe CreativeML OpenRAIL License specifies: \n\n1. You can't use the embedding to deliberately produce nor share illegal or harmful outputs or content \n2. The authors claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license\n3. You may re-distribute the weights and use the embedding commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully)\nPlease read the full license here" ]
[ "TAGS\n#language-English #license-creativeml-openrail-m #stable-diffusion #text-to-image #image-to-image #region-us \n", "# Flower Style Embedding / Textual Inversion\n\n<img alt=\"Showcase\" src=\"URL", "## Usage\nTo use this embedding you have to download the file aswell as drop it into the \"\\stable-diffusion-webui\\embeddings\" folder\n\nTo use it in a prompt: \n\nIf it is to strong just add [] around it.\n\nTrained until 15000 steps\n\nI added a 7.5k steps trained ver in the files aswell. If you want to use that version, remove the from the file name and replace the 15k steps ver in your folder\n\nHave fun :)", "## License\n\nThis embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.\nThe CreativeML OpenRAIL License specifies: \n\n1. You can't use the embedding to deliberately produce nor share illegal or harmful outputs or content \n2. The authors claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license\n3. You may re-distribute the weights and use the embedding commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully)\nPlease read the full license here" ]
bbbeda405dd254bbc39be64fd07ca56e9c42722a
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-30b_eval * Dataset: jeffdshen/neqa0_8shot * Config: jeffdshen--neqa0_8shot * Split: train To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@jeffdshen](https://huggingface.co/jeffdshen) for evaluating this model.
autoevaluate/autoeval-eval-jeffdshen__neqa0_8shot-jeffdshen__neqa0_8shot-5a61bc-1852963397
[ "autotrain", "evaluation", "region:us" ]
2022-10-23T19:59:41+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["jeffdshen/neqa0_8shot"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-30b_eval", "metrics": [], "dataset_name": "jeffdshen/neqa0_8shot", "dataset_config": "jeffdshen--neqa0_8shot", "dataset_split": "train", "col_mapping": {"text": "prompt", "classes": "classes", "target": "answer_index"}}}
2022-10-24T01:24:00+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-30b_eval * Dataset: jeffdshen/neqa0_8shot * Config: jeffdshen--neqa0_8shot * Split: train To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @jeffdshen for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-30b_eval\n* Dataset: jeffdshen/neqa0_8shot\n* Config: jeffdshen--neqa0_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-30b_eval\n* Dataset: jeffdshen/neqa0_8shot\n* Config: jeffdshen--neqa0_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
628102b7e82b9a387a255a6e51170e64a7674645
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-1.3b_eval * Dataset: jeffdshen/neqa0_8shot * Config: jeffdshen--neqa0_8shot * Split: train To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@jeffdshen](https://huggingface.co/jeffdshen) for evaluating this model.
autoevaluate/autoeval-eval-jeffdshen__neqa0_8shot-jeffdshen__neqa0_8shot-5a61bc-1852963393
[ "autotrain", "evaluation", "region:us" ]
2022-10-23T19:59:41+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["jeffdshen/neqa0_8shot"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-1.3b_eval", "metrics": [], "dataset_name": "jeffdshen/neqa0_8shot", "dataset_config": "jeffdshen--neqa0_8shot", "dataset_split": "train", "col_mapping": {"text": "prompt", "classes": "classes", "target": "answer_index"}}}
2022-10-23T20:17:44+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-1.3b_eval * Dataset: jeffdshen/neqa0_8shot * Config: jeffdshen--neqa0_8shot * Split: train To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @jeffdshen for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-1.3b_eval\n* Dataset: jeffdshen/neqa0_8shot\n* Config: jeffdshen--neqa0_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-1.3b_eval\n* Dataset: jeffdshen/neqa0_8shot\n* Config: jeffdshen--neqa0_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
165ecd1b7528c0a28047f431599ec63ccc225ba5
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-350m_eval * Dataset: jeffdshen/neqa2_8shot * Config: jeffdshen--neqa2_8shot * Split: train To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@jeffdshen](https://huggingface.co/jeffdshen) for evaluating this model.
autoevaluate/autoeval-eval-jeffdshen__neqa2_8shot-jeffdshen__neqa2_8shot-959823-1853063400
[ "autotrain", "evaluation", "region:us" ]
2022-10-23T19:59:42+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["jeffdshen/neqa2_8shot"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-350m_eval", "metrics": [], "dataset_name": "jeffdshen/neqa2_8shot", "dataset_config": "jeffdshen--neqa2_8shot", "dataset_split": "train", "col_mapping": {"text": "prompt", "classes": "classes", "target": "answer_index"}}}
2022-10-23T20:05:23+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-350m_eval * Dataset: jeffdshen/neqa2_8shot * Config: jeffdshen--neqa2_8shot * Split: train To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @jeffdshen for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-350m_eval\n* Dataset: jeffdshen/neqa2_8shot\n* Config: jeffdshen--neqa2_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-350m_eval\n* Dataset: jeffdshen/neqa2_8shot\n* Config: jeffdshen--neqa2_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
e2501deb7ee46551f0d545d7cc9d08c205bddd94
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-125m_eval * Dataset: jeffdshen/neqa0_8shot * Config: jeffdshen--neqa0_8shot * Split: train To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@jeffdshen](https://huggingface.co/jeffdshen) for evaluating this model.
autoevaluate/autoeval-eval-jeffdshen__neqa0_8shot-jeffdshen__neqa0_8shot-5a61bc-1852963391
[ "autotrain", "evaluation", "region:us" ]
2022-10-23T19:59:43+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["jeffdshen/neqa0_8shot"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-125m_eval", "metrics": [], "dataset_name": "jeffdshen/neqa0_8shot", "dataset_config": "jeffdshen--neqa0_8shot", "dataset_split": "train", "col_mapping": {"text": "prompt", "classes": "classes", "target": "answer_index"}}}
2022-10-23T20:04:17+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-125m_eval * Dataset: jeffdshen/neqa0_8shot * Config: jeffdshen--neqa0_8shot * Split: train To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @jeffdshen for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-125m_eval\n* Dataset: jeffdshen/neqa0_8shot\n* Config: jeffdshen--neqa0_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-125m_eval\n* Dataset: jeffdshen/neqa0_8shot\n* Config: jeffdshen--neqa0_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
386f0520a81bc2e006e403d88b0e58a25b7edceb
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-350m_eval * Dataset: jeffdshen/neqa0_8shot * Config: jeffdshen--neqa0_8shot * Split: train To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@jeffdshen](https://huggingface.co/jeffdshen) for evaluating this model.
autoevaluate/autoeval-eval-jeffdshen__neqa0_8shot-jeffdshen__neqa0_8shot-5a61bc-1852963392
[ "autotrain", "evaluation", "region:us" ]
2022-10-23T19:59:43+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["jeffdshen/neqa0_8shot"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-350m_eval", "metrics": [], "dataset_name": "jeffdshen/neqa0_8shot", "dataset_config": "jeffdshen--neqa0_8shot", "dataset_split": "train", "col_mapping": {"text": "prompt", "classes": "classes", "target": "answer_index"}}}
2022-10-23T20:07:19+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-350m_eval * Dataset: jeffdshen/neqa0_8shot * Config: jeffdshen--neqa0_8shot * Split: train To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @jeffdshen for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-350m_eval\n* Dataset: jeffdshen/neqa0_8shot\n* Config: jeffdshen--neqa0_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-350m_eval\n* Dataset: jeffdshen/neqa0_8shot\n* Config: jeffdshen--neqa0_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
5493c393d6b927541a9bb351bfe46ce48a363ad2
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-2.7b_eval * Dataset: jeffdshen/neqa0_8shot * Config: jeffdshen--neqa0_8shot * Split: train To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@jeffdshen](https://huggingface.co/jeffdshen) for evaluating this model.
autoevaluate/autoeval-eval-jeffdshen__neqa0_8shot-jeffdshen__neqa0_8shot-5a61bc-1852963394
[ "autotrain", "evaluation", "region:us" ]
2022-10-23T19:59:43+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["jeffdshen/neqa0_8shot"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-2.7b_eval", "metrics": [], "dataset_name": "jeffdshen/neqa0_8shot", "dataset_config": "jeffdshen--neqa0_8shot", "dataset_split": "train", "col_mapping": {"text": "prompt", "classes": "classes", "target": "answer_index"}}}
2022-10-23T20:31:31+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-2.7b_eval * Dataset: jeffdshen/neqa0_8shot * Config: jeffdshen--neqa0_8shot * Split: train To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @jeffdshen for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-2.7b_eval\n* Dataset: jeffdshen/neqa0_8shot\n* Config: jeffdshen--neqa0_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-2.7b_eval\n* Dataset: jeffdshen/neqa0_8shot\n* Config: jeffdshen--neqa0_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
378354c50946fbf08d8a6563e5da4f69b05f57e1
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-6.7b_eval * Dataset: jeffdshen/neqa0_8shot * Config: jeffdshen--neqa0_8shot * Split: train To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@jeffdshen](https://huggingface.co/jeffdshen) for evaluating this model.
autoevaluate/autoeval-eval-jeffdshen__neqa0_8shot-jeffdshen__neqa0_8shot-5a61bc-1852963395
[ "autotrain", "evaluation", "region:us" ]
2022-10-23T19:59:43+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["jeffdshen/neqa0_8shot"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-6.7b_eval", "metrics": [], "dataset_name": "jeffdshen/neqa0_8shot", "dataset_config": "jeffdshen--neqa0_8shot", "dataset_split": "train", "col_mapping": {"text": "prompt", "classes": "classes", "target": "answer_index"}}}
2022-10-23T21:16:35+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-6.7b_eval * Dataset: jeffdshen/neqa0_8shot * Config: jeffdshen--neqa0_8shot * Split: train To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @jeffdshen for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-6.7b_eval\n* Dataset: jeffdshen/neqa0_8shot\n* Config: jeffdshen--neqa0_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-6.7b_eval\n* Dataset: jeffdshen/neqa0_8shot\n* Config: jeffdshen--neqa0_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
3beafd757977584c5a7b0426b2025d14a12b872d
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-13b_eval * Dataset: jeffdshen/neqa0_8shot * Config: jeffdshen--neqa0_8shot * Split: train To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@jeffdshen](https://huggingface.co/jeffdshen) for evaluating this model.
autoevaluate/autoeval-eval-jeffdshen__neqa0_8shot-jeffdshen__neqa0_8shot-5a61bc-1852963396
[ "autotrain", "evaluation", "region:us" ]
2022-10-23T19:59:45+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["jeffdshen/neqa0_8shot"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-13b_eval", "metrics": [], "dataset_name": "jeffdshen/neqa0_8shot", "dataset_config": "jeffdshen--neqa0_8shot", "dataset_split": "train", "col_mapping": {"text": "prompt", "classes": "classes", "target": "answer_index"}}}
2022-10-23T21:56:59+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-13b_eval * Dataset: jeffdshen/neqa0_8shot * Config: jeffdshen--neqa0_8shot * Split: train To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @jeffdshen for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-13b_eval\n* Dataset: jeffdshen/neqa0_8shot\n* Config: jeffdshen--neqa0_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-13b_eval\n* Dataset: jeffdshen/neqa0_8shot\n* Config: jeffdshen--neqa0_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
37caa5b64dbc5c3649fb79afa9d8ac337cacf4df
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-125m_eval * Dataset: jeffdshen/neqa2_8shot * Config: jeffdshen--neqa2_8shot * Split: train To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@jeffdshen](https://huggingface.co/jeffdshen) for evaluating this model.
autoevaluate/autoeval-eval-jeffdshen__neqa2_8shot-jeffdshen__neqa2_8shot-959823-1853063399
[ "autotrain", "evaluation", "region:us" ]
2022-10-23T19:59:46+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["jeffdshen/neqa2_8shot"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-125m_eval", "metrics": [], "dataset_name": "jeffdshen/neqa2_8shot", "dataset_config": "jeffdshen--neqa2_8shot", "dataset_split": "train", "col_mapping": {"text": "prompt", "classes": "classes", "target": "answer_index"}}}
2022-10-23T20:02:53+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-125m_eval * Dataset: jeffdshen/neqa2_8shot * Config: jeffdshen--neqa2_8shot * Split: train To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @jeffdshen for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-125m_eval\n* Dataset: jeffdshen/neqa2_8shot\n* Config: jeffdshen--neqa2_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-125m_eval\n* Dataset: jeffdshen/neqa2_8shot\n* Config: jeffdshen--neqa2_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
0d4bc186a5d5a1dc46d0e0206ed53c204f882a88
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-2.7b_eval * Dataset: jeffdshen/neqa2_8shot * Config: jeffdshen--neqa2_8shot * Split: train To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@jeffdshen](https://huggingface.co/jeffdshen) for evaluating this model.
autoevaluate/autoeval-eval-jeffdshen__neqa2_8shot-jeffdshen__neqa2_8shot-959823-1853063402
[ "autotrain", "evaluation", "region:us" ]
2022-10-23T19:59:49+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["jeffdshen/neqa2_8shot"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-2.7b_eval", "metrics": [], "dataset_name": "jeffdshen/neqa2_8shot", "dataset_config": "jeffdshen--neqa2_8shot", "dataset_split": "train", "col_mapping": {"text": "prompt", "classes": "classes", "target": "answer_index"}}}
2022-10-23T20:19:17+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-2.7b_eval * Dataset: jeffdshen/neqa2_8shot * Config: jeffdshen--neqa2_8shot * Split: train To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @jeffdshen for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-2.7b_eval\n* Dataset: jeffdshen/neqa2_8shot\n* Config: jeffdshen--neqa2_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-2.7b_eval\n* Dataset: jeffdshen/neqa2_8shot\n* Config: jeffdshen--neqa2_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
b8e140cc5b8866a23c246f84785adce295792c8f
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-1.3b_eval * Dataset: jeffdshen/neqa2_8shot * Config: jeffdshen--neqa2_8shot * Split: train To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@jeffdshen](https://huggingface.co/jeffdshen) for evaluating this model.
autoevaluate/autoeval-eval-jeffdshen__neqa2_8shot-jeffdshen__neqa2_8shot-959823-1853063401
[ "autotrain", "evaluation", "region:us" ]
2022-10-23T19:59:54+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["jeffdshen/neqa2_8shot"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-1.3b_eval", "metrics": [], "dataset_name": "jeffdshen/neqa2_8shot", "dataset_config": "jeffdshen--neqa2_8shot", "dataset_split": "train", "col_mapping": {"text": "prompt", "classes": "classes", "target": "answer_index"}}}
2022-10-23T20:14:09+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-1.3b_eval * Dataset: jeffdshen/neqa2_8shot * Config: jeffdshen--neqa2_8shot * Split: train To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @jeffdshen for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-1.3b_eval\n* Dataset: jeffdshen/neqa2_8shot\n* Config: jeffdshen--neqa2_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-1.3b_eval\n* Dataset: jeffdshen/neqa2_8shot\n* Config: jeffdshen--neqa2_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
728799a60277cd443045c7d19c40d4191162e20e
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-6.7b_eval * Dataset: jeffdshen/neqa2_8shot * Config: jeffdshen--neqa2_8shot * Split: train To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@jeffdshen](https://huggingface.co/jeffdshen) for evaluating this model.
autoevaluate/autoeval-eval-jeffdshen__neqa2_8shot-jeffdshen__neqa2_8shot-959823-1853063403
[ "autotrain", "evaluation", "region:us" ]
2022-10-23T19:59:55+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["jeffdshen/neqa2_8shot"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-6.7b_eval", "metrics": [], "dataset_name": "jeffdshen/neqa2_8shot", "dataset_config": "jeffdshen--neqa2_8shot", "dataset_split": "train", "col_mapping": {"text": "prompt", "classes": "classes", "target": "answer_index"}}}
2022-10-23T20:45:29+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-6.7b_eval * Dataset: jeffdshen/neqa2_8shot * Config: jeffdshen--neqa2_8shot * Split: train To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @jeffdshen for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-6.7b_eval\n* Dataset: jeffdshen/neqa2_8shot\n* Config: jeffdshen--neqa2_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-6.7b_eval\n* Dataset: jeffdshen/neqa2_8shot\n* Config: jeffdshen--neqa2_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
8772c16f195f7f98be77d04eee7b64f965607ffd
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-66b_eval * Dataset: jeffdshen/neqa0_8shot * Config: jeffdshen--neqa0_8shot * Split: train To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@jeffdshen](https://huggingface.co/jeffdshen) for evaluating this model.
autoevaluate/autoeval-eval-jeffdshen__neqa0_8shot-jeffdshen__neqa0_8shot-5a61bc-1852963398
[ "autotrain", "evaluation", "region:us" ]
2022-10-23T20:00:00+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["jeffdshen/neqa0_8shot"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-66b_eval", "metrics": [], "dataset_name": "jeffdshen/neqa0_8shot", "dataset_config": "jeffdshen--neqa0_8shot", "dataset_split": "train", "col_mapping": {"text": "prompt", "classes": "classes", "target": "answer_index"}}}
2022-10-24T07:46:56+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-66b_eval * Dataset: jeffdshen/neqa0_8shot * Config: jeffdshen--neqa0_8shot * Split: train To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @jeffdshen for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-66b_eval\n* Dataset: jeffdshen/neqa0_8shot\n* Config: jeffdshen--neqa0_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-66b_eval\n* Dataset: jeffdshen/neqa0_8shot\n* Config: jeffdshen--neqa0_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
ed6362992ac70b04bf6de9b9707127ed9a81913b
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-13b_eval * Dataset: jeffdshen/neqa2_8shot * Config: jeffdshen--neqa2_8shot * Split: train To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@jeffdshen](https://huggingface.co/jeffdshen) for evaluating this model.
autoevaluate/autoeval-eval-jeffdshen__neqa2_8shot-jeffdshen__neqa2_8shot-959823-1853063404
[ "autotrain", "evaluation", "region:us" ]
2022-10-23T20:00:02+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["jeffdshen/neqa2_8shot"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-13b_eval", "metrics": [], "dataset_name": "jeffdshen/neqa2_8shot", "dataset_config": "jeffdshen--neqa2_8shot", "dataset_split": "train", "col_mapping": {"text": "prompt", "classes": "classes", "target": "answer_index"}}}
2022-10-23T21:21:29+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-13b_eval * Dataset: jeffdshen/neqa2_8shot * Config: jeffdshen--neqa2_8shot * Split: train To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @jeffdshen for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-13b_eval\n* Dataset: jeffdshen/neqa2_8shot\n* Config: jeffdshen--neqa2_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-13b_eval\n* Dataset: jeffdshen/neqa2_8shot\n* Config: jeffdshen--neqa2_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
065a794edae01a21ecc4da42eba9271432d2c9de
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-30b_eval * Dataset: jeffdshen/neqa2_8shot * Config: jeffdshen--neqa2_8shot * Split: train To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@jeffdshen](https://huggingface.co/jeffdshen) for evaluating this model.
autoevaluate/autoeval-eval-jeffdshen__neqa2_8shot-jeffdshen__neqa2_8shot-959823-1853063405
[ "autotrain", "evaluation", "region:us" ]
2022-10-23T20:00:15+00:00
{"type": "predictions", "tags": ["autotrain", "evaluation"], "datasets": ["jeffdshen/neqa2_8shot"], "eval_info": {"task": "text_zero_shot_classification", "model": "inverse-scaling/opt-30b_eval", "metrics": [], "dataset_name": "jeffdshen/neqa2_8shot", "dataset_config": "jeffdshen--neqa2_8shot", "dataset_split": "train", "col_mapping": {"text": "prompt", "classes": "classes", "target": "answer_index"}}}
2022-10-23T23:35:42+00:00
[]
[]
TAGS #autotrain #evaluation #region-us
# Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by AutoTrain for the following task and dataset: * Task: Zero-Shot Text Classification * Model: inverse-scaling/opt-30b_eval * Dataset: jeffdshen/neqa2_8shot * Config: jeffdshen--neqa2_8shot * Split: train To run new evaluation jobs, visit Hugging Face's automatic model evaluator. ## Contributions Thanks to @jeffdshen for evaluating this model.
[ "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-30b_eval\n* Dataset: jeffdshen/neqa2_8shot\n* Config: jeffdshen--neqa2_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]
[ "TAGS\n#autotrain #evaluation #region-us \n", "# Dataset Card for AutoTrain Evaluator\n\nThis repository contains model predictions generated by AutoTrain for the following task and dataset:\n\n* Task: Zero-Shot Text Classification\n* Model: inverse-scaling/opt-30b_eval\n* Dataset: jeffdshen/neqa2_8shot\n* Config: jeffdshen--neqa2_8shot\n* Split: train\n\nTo run new evaluation jobs, visit Hugging Face's automatic model evaluator.", "## Contributions\n\nThanks to @jeffdshen for evaluating this model." ]