Column summary of the records below:

| column | dtype | min length | max length |
|---|---|---|---|
| sha | string | 40 | 40 |
| text | string | 1 | 13.4M |
| id | string | 2 | 117 |
| tags | sequence | 1 | 7.91k |
| created_at | string | 25 | 25 |
| metadata | string | 2 | 875k |
| last_modified | string | 25 | 25 |
| arxiv | sequence | 0 | 25 |
| languages | sequence | 0 | 7.91k |
| tags_str | string | 17 | 159k |
| text_str | string | 1 | 447k |
| text_lists | sequence | 0 | 352 |
| processed_texts | sequence | 1 | 353 |
b78a81013ee1cc60f406af82e5412bd2f2174d97
# Dataset Card for "metatree_BNG_cmc_" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_BNG_cmc_
[ "region:us" ]
2024-02-06T06:02:59+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1395720, "num_examples": 38770}, {"name": "validation", "num_bytes": 594936, "num_examples": 16526}], "download_size": 1235840, "dataset_size": 1990656}}
2024-02-06T06:03:01+00:00
[]
[]
8598a88ce0d02815a51fa075ee4ac982a169caec
## Cria-SFT-v1

Cria-SFT-v1 is a collection of the following datasets:

- [VMware/open-instruct](https://huggingface.co/datasets/VMware/open-instruct)
- [LDJnr/Capybara](https://huggingface.co/datasets/LDJnr/Capybara)
- [cognitivecomputations/ultrachat-uncensored](https://huggingface.co/datasets/cognitivecomputations/ultrachat-uncensored)
- [starfishmedical/webGPT_x_dolly](https://huggingface.co/datasets/starfishmedical/webGPT_x_dolly)
- [THUDM/webglm-qa](https://huggingface.co/datasets/THUDM/webglm-qa)

## Prompt Format

```
<|im_start|>system
{system_message}<|im_end|>
<|im_start|>user
{user_message}<|im_end|>
<|im_start|>assistant
{assistant_message}<|im_end|>
```
Isotonic/Cria-MultiDialogues
[ "region:us" ]
2024-02-06T06:03:06+00:00
{"dataset_info": {"features": [{"name": "text", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 595416313, "num_examples": 253859}], "download_size": 206563103, "dataset_size": 595416313}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2024-02-07T15:50:57+00:00
[]
[]
602e8b23aeb917f814101ac923f193e876eea69d
# Dataset Card for "metatree_BNG_colic_ORIG_" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_BNG_colic_ORIG_
[ "region:us" ]
2024-02-06T06:03:18+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 53209500, "num_examples": 700125}, {"name": "validation", "num_bytes": 22790500, "num_examples": 299875}], "download_size": 73797860, "dataset_size": 76000000}}
2024-02-06T06:03:24+00:00
[]
[]
ed7f4bb828d3f0d7478ae55289489b3dc357fc20
# Dataset Card for "metatree_BNG_colic_" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_BNG_colic_
[ "region:us" ]
2024-02-06T06:03:44+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 53223940, "num_examples": 700315}, {"name": "validation", "num_bytes": 22776060, "num_examples": 299685}], "download_size": 73796900, "dataset_size": 76000000}}
2024-02-06T06:03:49+00:00
[]
[]
dacb0aad9fe703809956d96ab1692004b6e62c23
# Dataset Card for "metatree_BNG_credit_a_" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_BNG_credit_a_
[ "region:us" ]
2024-02-06T06:04:08+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 47589256, "num_examples": 699842}, {"name": "validation", "num_bytes": 20410744, "num_examples": 300158}], "download_size": 55443058, "dataset_size": 68000000}}
2024-02-06T06:04:14+00:00
[]
[]
a7c90c8f87fc6cb4070cc8be7611de1938c0f1cc
# Dataset Card for "metatree_BNG_page_blocks_" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_BNG_page_blocks_
[ "region:us" ]
2024-02-06T06:04:29+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 20608800, "num_examples": 206088}, {"name": "validation", "num_bytes": 8915700, "num_examples": 89157}], "download_size": 29975608, "dataset_size": 29524500}}
2024-02-06T06:04:34+00:00
[]
[]
211ca2f7a703d78da3b63c34d911e6c9973445d6
# Dataset Card for "metatree_BNG_credit_g_" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_BNG_credit_g_
[ "region:us" ]
2024-02-06T06:04:54+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 53147028, "num_examples": 699303}, {"name": "validation", "num_bytes": 22852972, "num_examples": 300697}], "download_size": 40809668, "dataset_size": 76000000}}
2024-02-06T06:04:58+00:00
[]
[]
09bd543a45fb9a04d45fe08855cb940d5075edf4
# Dataset Card for "metatree_BNG_pendigits_" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_BNG_pendigits_
[ "region:us" ]
2024-02-06T06:05:17+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 103544056, "num_examples": 699622}, {"name": "validation", "num_bytes": 44455944, "num_examples": 300378}], "download_size": 145615949, "dataset_size": 148000000}}
2024-02-06T06:05:26+00:00
[]
[]
069d764df62de1c56a68eebd095ad0f99b1d1c94
# Dataset Card for "metatree_BNG_cylinder_bands_" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_BNG_cylinder_bands_
[ "region:us" ]
2024-02-06T06:05:45+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 114804428, "num_examples": 700027}, {"name": "validation", "num_bytes": 49195572, "num_examples": 299973}], "download_size": 143839997, "dataset_size": 164000000}}
2024-02-06T06:05:55+00:00
[]
[]
4f92118bef425153b5f8b0f3b48748dd6168c99d
# Dataset Card for "metatree_BNG_dermatology_" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_BNG_dermatology_
[ "region:us" ]
2024-02-06T06:06:09+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 19603864, "num_examples": 700138}, {"name": "validation", "num_bytes": 8396136, "num_examples": 299862}], "download_size": 15872024, "dataset_size": 28000000}}
2024-02-06T06:06:13+00:00
[]
[]
f5b3b16bb80b6936edc5bdfa4f2cbbe53a387204
# Dataset Card for "metatree_BNG_sonar_" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_BNG_sonar_
[ "region:us" ]
2024-02-06T06:06:36+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 349985000, "num_examples": 699970}, {"name": "validation", "num_bytes": 150015000, "num_examples": 300030}], "download_size": 568705383, "dataset_size": 500000000}}
2024-02-06T06:06:59+00:00
[]
[]
af08acfc78324f44745a1a50bb50f867d5d4e00f
# Dataset Card for "metatree_BNG_glass_" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_BNG_glass_
[ "region:us" ]
2024-02-06T06:07:13+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 8868064, "num_examples": 96392}, {"name": "validation", "num_bytes": 3807788, "num_examples": 41389}], "download_size": 11206380, "dataset_size": 12675852}}
2024-02-06T06:07:16+00:00
[]
[]
eca7b0d03cbc888949d6dc9d50417dc57703c40f
# Dataset Card for "metatree_BNG_heart_c_" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_BNG_heart_c_
[ "region:us" ]
2024-02-06T06:07:34+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 47567156, "num_examples": 699517}, {"name": "validation", "num_bytes": 20432844, "num_examples": 300483}], "download_size": 53796728, "dataset_size": 68000000}}
2024-02-06T06:07:39+00:00
[]
[]
e82f60a83f5a1e3876a4c2cea7e05f1af33f4fcc
# Dataset Card for "metatree_BNG_heart_statlog_" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_BNG_heart_statlog_
[ "region:us" ]
2024-02-06T06:08:01+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 86832736, "num_examples": 700264}, {"name": "validation", "num_bytes": 37167264, "num_examples": 299736}], "download_size": 65505966, "dataset_size": 124000000}}
2024-02-06T06:08:07+00:00
[]
[]
7bfaf5072a5d610576042199889492bae8f9850f
# Dataset Card for "metatree_BNG_vehicle_" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_BNG_vehicle_
[ "region:us" ]
2024-02-06T06:08:27+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 114807216, "num_examples": 700044}, {"name": "validation", "num_bytes": 49192784, "num_examples": 299956}], "download_size": 179344605, "dataset_size": 164000000}}
2024-02-06T06:08:37+00:00
[]
[]
3bed682a0ce1e56e9679e46bd5fe352f9467199a
# Dataset Card for "metatree_BNG_hepatitis_" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_BNG_hepatitis_
[ "region:us" ]
2024-02-06T06:08:54+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 47555800, "num_examples": 699350}, {"name": "validation", "num_bytes": 20444200, "num_examples": 300650}], "download_size": 62669300, "dataset_size": 68000000}}
2024-02-06T06:08:59+00:00
[]
[]
db5a3f4efaf72387a899368e2f74d8722162371a
# Dataset Card for "metatree_BNG_waveform_5000_" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_BNG_waveform_5000_
[ "region:us" ]
2024-02-06T06:09:22+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 237790220, "num_examples": 699383}, {"name": "validation", "num_bytes": 102209780, "num_examples": 300617}], "download_size": 405577557, "dataset_size": 340000000}}
2024-02-06T06:09:39+00:00
[]
[]
ef23a2cd685b640fbfea09dd1081c845a23779a8
# Dataset Card for "metatree_BNG_zoo_" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_BNG_zoo_
[ "region:us" ]
2024-02-06T06:09:55+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 19597172, "num_examples": 699899}, {"name": "validation", "num_bytes": 8402828, "num_examples": 300101}], "download_size": 8412533, "dataset_size": 28000000}}
2024-02-06T06:09:59+00:00
[]
[]
ca13c94ac849a9040e2edb0e9a493bfc280fa6d5
# Dataset Card for "metatree_vehicle_sensIT" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_vehicle_sensIT
[ "region:us" ]
2024-02-06T06:10:16+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float32"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 28973280, "num_examples": 68984}, {"name": "validation", "num_bytes": 12408480, "num_examples": 29544}], "download_size": 60104700, "dataset_size": 41381760}}
2024-02-06T06:10:21+00:00
[]
[]
ad7fb5e907a8c6a49d76ebeec99404638f439286
# Dataset Card for "metatree_UNIX_user_data" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_UNIX_user_data
[ "region:us" ]
2024-02-06T06:15:26+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 180040, "num_examples": 6430}, {"name": "validation", "num_bytes": 74760, "num_examples": 2670}], "download_size": 131300, "dataset_size": 254800}}
2024-02-06T06:15:29+00:00
[]
[]
cebfaec0d014db65905d22e00a1d10b79ee02c0b
# Dataset Card for "metatree_fri_c3_1000_25" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_fri_c3_1000_25
[ "region:us" ]
2024-02-06T06:15:42+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 155760, "num_examples": 708}, {"name": "validation", "num_bytes": 64240, "num_examples": 292}], "download_size": 254318, "dataset_size": 220000}}
2024-02-06T06:15:45+00:00
[]
[]
2a1e10a2327391d7f8bd1e37875e62331fafbaa6
# Dataset Card for "metatree_rmftsa_sleepdata" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_rmftsa_sleepdata
[ "region:us" ]
2024-02-06T06:15:57+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 26028, "num_examples": 723}, {"name": "validation", "num_bytes": 10836, "num_examples": 301}], "download_size": 11961, "dataset_size": 36864}}
2024-02-06T06:16:00+00:00
[]
[]
6133b7d2ebf9d6c4539dfad0a2b49fdaf621a0b5
# Dataset Card for "metatree_JapaneseVowels" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_JapaneseVowels
[ "region:us" ]
2024-02-06T06:16:14+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 920436, "num_examples": 6973}, {"name": "validation", "num_bytes": 394416, "num_examples": 2988}], "download_size": 1260000, "dataset_size": 1314852}}
2024-02-06T06:16:17+00:00
[]
[]
8ad9212108a96e1f5d10e4c9dc6eaa7e3ebf2e00
# Dataset Card for "metatree_fri_c4_1000_100" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_fri_c4_1000_100
[ "region:us" ]
2024-02-06T06:16:31+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 590400, "num_examples": 720}, {"name": "validation", "num_bytes": 229600, "num_examples": 280}], "download_size": 1008641, "dataset_size": 820000}}
2024-02-06T06:16:34+00:00
[]
[]
43092e27d28e29abc894446d297dd8d922c871fb
# Dataset Card for "metatree_abalone" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_abalone
[ "region:us" ]
2024-02-06T06:16:48+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 223516, "num_examples": 2941}, {"name": "validation", "num_bytes": 93936, "num_examples": 1236}], "download_size": 101819, "dataset_size": 317452}}
2024-02-06T06:16:50+00:00
[]
[]
f284147df8ed270c9e1a62ea35ed81d112bf1759
# Dataset Card for "metatree_fri_c4_1000_25" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_fri_c4_1000_25
[ "region:us" ]
2024-02-06T06:17:04+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 161920, "num_examples": 736}, {"name": "validation", "num_bytes": 58080, "num_examples": 264}], "download_size": 254491, "dataset_size": 220000}}
2024-02-06T06:17:07+00:00
[]
[]
9c3df3f102824542604b555dfa4af6423d8657c7
# Dataset Card for "metatree_bank8FM" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_bank8FM
[ "region:us" ]
2024-02-06T06:17:20+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 484344, "num_examples": 5766}, {"name": "validation", "num_bytes": 203784, "num_examples": 2426}], "download_size": 619632, "dataset_size": 688128}}
2024-02-06T06:17:23+00:00
[]
[]
614a45f333ef7533e1bdcdddf7c8b0b13d1bab5c
# Dataset Card for "metatree_analcatdata_supreme" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_analcatdata_supreme
[ "region:us" ]
2024-02-06T06:17:36+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 215308, "num_examples": 2833}, {"name": "validation", "num_bytes": 92644, "num_examples": 1219}], "download_size": 70134, "dataset_size": 307952}}
2024-02-06T06:17:39+00:00
[]
[]
fd30414d9cec4502d7004050b0707a7ba8cea1af
# Dataset Card for "metatree_ailerons" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_ailerons
[ "region:us" ]
2024-02-06T06:17:57+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 3283040, "num_examples": 9656}, {"name": "validation", "num_bytes": 1391960, "num_examples": 4094}], "download_size": 828635, "dataset_size": 4675000}}
2024-02-06T06:18:00+00:00
[]
[]
2771a97e01a912599cb6c6902c1ee8604b87c07c
# Dataset Card for "metatree_cpu_small" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_cpu_small
[ "region:us" ]
2024-02-06T06:18:13+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 655400, "num_examples": 5650}, {"name": "validation", "num_bytes": 294872, "num_examples": 2542}], "download_size": 703010, "dataset_size": 950272}}
2024-02-06T06:18:16+00:00
[]
[]
32e68cf553357d6f5309f6a37a3a818fb5d81080
# Dataset Card for "metatree_space_ga" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_space_ga
[ "region:us" ]
2024-02-06T06:18:29+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 148580, "num_examples": 2185}, {"name": "validation", "num_bytes": 62696, "num_examples": 922}], "download_size": 195171, "dataset_size": 211276}}
2024-02-06T06:18:32+00:00
[]
[]
dcf9114b059ff24afcf5cf288d76647c27f9d14e
# Dataset Card for "metatree_fri_c1_1000_5" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_fri_c1_1000_5
[ "region:us" ]
2024-02-06T06:18:44+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 42120, "num_examples": 702}, {"name": "validation", "num_bytes": 17880, "num_examples": 298}], "download_size": 56736, "dataset_size": 60000}}
2024-02-06T06:18:47+00:00
[]
[]
2c383156d685a83b4aa02c89323a8494edad92fa
# Dataset Card for "metatree_puma32H" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_puma32H
[ "region:us" ]
2024-02-06T06:19:01+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1580100, "num_examples": 5725}, {"name": "validation", "num_bytes": 680892, "num_examples": 2467}], "download_size": 2637619, "dataset_size": 2260992}}
2024-02-06T06:19:04+00:00
[]
[]
59d1ad7b671f0624e9bc1557c4dabb8aec10c1cd
# Dataset Card for "metatree_fri_c3_1000_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_fri_c3_1000_10
[ "region:us" ]
2024-02-06T06:19:17+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 71600, "num_examples": 716}, {"name": "validation", "num_bytes": 28400, "num_examples": 284}], "download_size": 105272, "dataset_size": 100000}}
2024-02-06T06:19:19+00:00
[]
[]
7ed0b01acf3e9a963bab9822fcff40bf00ad0f8a
# Dataset Card for "metatree_cpu_act" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_cpu_act
[ "region:us" ]
2024-02-06T06:19:34+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1075736, "num_examples": 5722}, {"name": "validation", "num_bytes": 464360, "num_examples": 2470}], "download_size": 888030, "dataset_size": 1540096}}
2024-02-06T06:19:37+00:00
[]
[]
cc77a585343681a59771cb512341e970c3440b7e
# Dataset Card for "metatree_fri_c4_1000_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_fri_c4_1000_10
[ "region:us" ]
2024-02-06T06:19:49+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 69700, "num_examples": 697}, {"name": "validation", "num_bytes": 30300, "num_examples": 303}], "download_size": 105275, "dataset_size": 100000}}
2024-02-06T06:19:52+00:00
[]
[]
fcbdd4e5a6ce1d95afeeec54276aba9be49b6116
# Dataset Card for "metatree_quake" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_quake
[ "region:us" ]
2024-02-06T06:20:05+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 67276, "num_examples": 1529}, {"name": "validation", "num_bytes": 28556, "num_examples": 649}], "download_size": 59604, "dataset_size": 95832}}
2024-02-06T06:20:07+00:00
[]
[]
216b78bd2a36774060082fbde837af23f08298d9
# Dataset Card for "metatree_fri_c4_1000_50" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_fri_c4_1000_50
[ "region:us" ]
2024-02-06T06:20:21+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 296100, "num_examples": 705}, {"name": "validation", "num_bytes": 123900, "num_examples": 295}], "download_size": 504225, "dataset_size": 420000}}
2024-02-06T06:20:24+00:00
[]
[]
8455098f3bd4a399760fb27f907738e86b96edae
# Dataset Card for "metatree_fri_c0_1000_5" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_fri_c0_1000_5
[ "region:us" ]
2024-02-06T06:20:36+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 41400, "num_examples": 690}, {"name": "validation", "num_bytes": 18600, "num_examples": 310}], "download_size": 56712, "dataset_size": 60000}}
2024-02-06T06:20:39+00:00
[]
[]
5768551352db8548b96dea8c0957bca69dc35b90
# Dataset Card for "metatree_delta_ailerons" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_delta_ailerons
[ "region:us" ]
2024-02-06T06:20:52+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 297780, "num_examples": 4963}, {"name": "validation", "num_bytes": 129960, "num_examples": 2166}], "download_size": 103663, "dataset_size": 427740}}
2024-02-06T06:20:55+00:00
[]
[]
b107bafee55125e0919be86a2b9a4f4b18ba8432
# Dataset Card for "metatree_fri_c3_1000_50" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_fri_c3_1000_50
[ "region:us" ]
2024-02-06T06:21:08+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 304920, "num_examples": 726}, {"name": "validation", "num_bytes": 115080, "num_examples": 274}], "download_size": 504483, "dataset_size": 420000}}
2024-02-06T06:21:12+00:00
[]
[]
52f4cb8ecc12e225e655ce27f59bc0c020f2036f
# Dataset Card for "metatree_kin8nm" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_kin8nm
[ "region:us" ]
2024-02-06T06:21:24+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 485940, "num_examples": 5785}, {"name": "validation", "num_bytes": 202188, "num_examples": 2407}], "download_size": 684949, "dataset_size": 688128}}
2024-02-06T06:21:27+00:00
[]
[]
2c9322df00e6a575604202bcd42d7fdff94c1119
# Dataset Card for "metatree_fri_c3_1000_5" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_fri_c3_1000_5
[ "region:us" ]
2024-02-06T06:21:39+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 42180, "num_examples": 703}, {"name": "validation", "num_bytes": 17820, "num_examples": 297}], "download_size": 56734, "dataset_size": 60000}}
2024-02-06T06:21:42+00:00
[]
[]
7e687a1e9387485cd900f989ecb1456e7c27240b
# Dataset Card for "metatree_puma8NH" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_puma8NH
[ "region:us" ]
2024-02-06T06:21:55+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 489048, "num_examples": 5822}, {"name": "validation", "num_bytes": 199080, "num_examples": 2370}], "download_size": 685137, "dataset_size": 688128}}
2024-02-06T06:21:58+00:00
[]
[]
4f1fbd66ce8fbdd43da6f9fdd9d44afdf9068384
# Dataset Card for "metatree_delta_elevators" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_delta_elevators
[ "region:us" ]
2024-02-06T06:22:11+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 453764, "num_examples": 6673}, {"name": "validation", "num_bytes": 193392, "num_examples": 2844}], "download_size": 216802, "dataset_size": 647156}}
2024-02-06T06:22:13+00:00
[]
[]
48b1971674eb7fe0f6ab5ca926bc6daa7a50e025
# Dataset Card for "metatree_houses" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_houses
[ "region:us" ]
2024-02-06T06:22:28+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1202208, "num_examples": 14312}, {"name": "validation", "num_bytes": 531552, "num_examples": 6328}], "download_size": 1223611, "dataset_size": 1733760}}
2024-02-06T06:22:31+00:00
[]
[]
feb110e8795159865ca8e6f242629ddb53bad093
# Dataset Card for "metatree_bank32nh" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_bank32nh
[ "region:us" ]
2024-02-06T06:22:45+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1561608, "num_examples": 5658}, {"name": "validation", "num_bytes": 699384, "num_examples": 2534}], "download_size": 2373798, "dataset_size": 2260992}}
2024-02-06T06:22:48+00:00
[]
[]
1e725d5f4c7468003b39ddd801231b907628e7e7
# Dataset Card for "metatree_fri_c1_1000_50" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_fri_c1_1000_50
[ "region:us" ]
2024-02-06T06:23:00+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 294000, "num_examples": 700}, {"name": "validation", "num_bytes": 126000, "num_examples": 300}], "download_size": 504299, "dataset_size": 420000}}
2024-02-06T06:23:03+00:00
[]
[]
7b0d8c861bc2caf621c23e90755b00d7a11b1dfe
# Dataset Card for "metatree_house_8L" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_house_8L
[ "region:us" ]
2024-02-06T06:23:17+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1335348, "num_examples": 15897}, {"name": "validation", "num_bytes": 578508, "num_examples": 6887}], "download_size": 1444273, "dataset_size": 1913856}}
2024-02-06T06:23:20+00:00
[]
[]
324676c71aff18b46d5b946384f55256b2bc08ed
# Dataset Card for "metatree_fri_c0_1000_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_fri_c0_1000_10
[ "region:us" ]
2024-02-06T06:23:32+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 71100, "num_examples": 711}, {"name": "validation", "num_bytes": 28900, "num_examples": 289}], "download_size": 105321, "dataset_size": 100000}}
2024-02-06T06:23:35+00:00
[]
[]
ef28d41f42b7321d8c5c0c641eeb2037064cc86b
# Dataset Card for "metatree_elevators" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_elevators
[ "region:us" ]
2024-02-06T06:23:48+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 1892560, "num_examples": 11540}, {"name": "validation", "num_bytes": 829676, "num_examples": 5059}], "download_size": 848031, "dataset_size": 2722236}}
2024-02-06T06:23:51+00:00
[]
[]
0708481fe789943eed7d59fa0725a9161b481977
# Dataset Card for "metatree_wind" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_wind
[ "region:us" ]
2024-02-06T06:24:04+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 610104, "num_examples": 4622}, {"name": "validation", "num_bytes": 257664, "num_examples": 1952}], "download_size": 180088, "dataset_size": 867768}}
2024-02-06T06:24:07+00:00
[]
[]
7fb965d332219492fea9e379cb3585ba38feb986
# Dataset Card for "metatree_fri_c0_1000_25" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_fri_c0_1000_25
[ "region:us" ]
2024-02-06T06:24:19+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 157080, "num_examples": 714}, {"name": "validation", "num_bytes": 62920, "num_examples": 286}], "download_size": 254313, "dataset_size": 220000}}
2024-02-06T06:24:22+00:00
[]
[]
7f53492a31b8d717b0d8d90fe3f1ce9d255f58b0
# Dataset Card for "metatree_fri_c2_1000_50" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_fri_c2_1000_50
[ "region:us" ]
2024-02-06T06:24:35+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 299460, "num_examples": 713}, {"name": "validation", "num_bytes": 120540, "num_examples": 287}], "download_size": 504473, "dataset_size": 420000}}
2024-02-06T06:24:37+00:00
[]
[]
7865d59bf2a2174a101e82b947aaaf6c2d5f2ee8
# Dataset Card for "metatree_pollen" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_pollen
[ "region:us" ]
2024-02-06T06:24:50+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 161340, "num_examples": 2689}, {"name": "validation", "num_bytes": 69540, "num_examples": 1159}], "download_size": 177984, "dataset_size": 230880}}
2024-02-06T06:24:53+00:00
[]
[]
dc0d3b616a20a35dcf59d83b5b0bec779ef37d44
# Dataset Card for "metatree_mv" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_mv
[ "region:us" ]
2024-02-06T06:25:07+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 2151256, "num_examples": 28306}, {"name": "validation", "num_bytes": 947112, "num_examples": 12462}], "download_size": 2823534, "dataset_size": 3098368}}
2024-02-06T06:25:10+00:00
[]
[]
fc6178a7a36a799faf9d14e8149fee5445a53089
# Dataset Card for "metatree_fried" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_fried
[ "region:us" ]
2024-02-06T06:25:24+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 2846400, "num_examples": 28464}, {"name": "validation", "num_bytes": 1230400, "num_examples": 12304}], "download_size": 962241, "dataset_size": 4076800}}
2024-02-06T06:25:27+00:00
[]
[]
bdab9a4ad95b506d254d5398cf9d10411d635121
# Dataset Card for "metatree_fri_c2_1000_25" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_fri_c2_1000_25
[ "region:us" ]
2024-02-06T06:25:39+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 156640, "num_examples": 712}, {"name": "validation", "num_bytes": 63360, "num_examples": 288}], "download_size": 254296, "dataset_size": 220000}}
2024-02-06T06:25:42+00:00
[]
[]
08eae08c126e09b69aafa4f7361127f53b4d951d
# Dataset Card for "metatree_fri_c0_1000_50" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_fri_c0_1000_50
[ "region:us" ]
2024-02-06T06:25:55+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 287280, "num_examples": 684}, {"name": "validation", "num_bytes": 132720, "num_examples": 316}], "download_size": 504121, "dataset_size": 420000}}
2024-02-06T06:25:58+00:00
[]
[]
63788fb78e80c155aef7a4f273042f72b27467b3
# Dataset Card for "metatree_fri_c1_1000_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_fri_c1_1000_10
[ "region:us" ]
2024-02-06T06:26:11+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 69300, "num_examples": 693}, {"name": "validation", "num_bytes": 30700, "num_examples": 307}], "download_size": 105285, "dataset_size": 100000}}
2024-02-06T06:26:13+00:00
[]
[]
4ab820debd2ddb1ec6ca0e15c3c3973c01de755b
# Dataset Card for "metatree_fri_c2_1000_5" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_fri_c2_1000_5
[ "region:us" ]
2024-02-06T06:26:26+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 43440, "num_examples": 724}, {"name": "validation", "num_bytes": 16560, "num_examples": 276}], "download_size": 56761, "dataset_size": 60000}}
2024-02-06T06:26:28+00:00
[]
[]
845e192f5146b009bfd8dc368642e508b92e96c1
# Dataset Card for "metatree_fri_c2_1000_10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_fri_c2_1000_10
[ "region:us" ]
2024-02-06T06:26:41+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 71800, "num_examples": 718}, {"name": "validation", "num_bytes": 28200, "num_examples": 282}], "download_size": 105292, "dataset_size": 100000}}
2024-02-06T06:26:43+00:00
[]
[]
TAGS #region-us
# Dataset Card for "metatree_fri_c2_1000_10" More Information needed
[ "# Dataset Card for \"metatree_fri_c2_1000_10\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"metatree_fri_c2_1000_10\"\n\nMore Information needed" ]
a86004f7ab2b7486ba931fb9c1119ac501d1d12e
# Dataset Card for "metatree_fri_c1_1000_25" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_fri_c1_1000_25
[ "region:us" ]
2024-02-06T06:26:56+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 157520, "num_examples": 716}, {"name": "validation", "num_bytes": 62480, "num_examples": 284}], "download_size": 254322, "dataset_size": 220000}}
2024-02-06T06:26:58+00:00
[]
[]
TAGS #region-us
# Dataset Card for "metatree_fri_c1_1000_25" More Information needed
[ "# Dataset Card for \"metatree_fri_c1_1000_25\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"metatree_fri_c1_1000_25\"\n\nMore Information needed" ]
08ba81dfa58cfd66f3d5625d04db0be2089960d2
# Dataset Card for "metatree_visualizing_soil" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_visualizing_soil
[ "region:us" ]
2024-02-06T06:27:11+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 266816, "num_examples": 6064}, {"name": "validation", "num_bytes": 113388, "num_examples": 2577}], "download_size": 226448, "dataset_size": 380204}}
2024-02-06T06:27:14+00:00
[]
[]
TAGS #region-us
# Dataset Card for "metatree_visualizing_soil" More Information needed
[ "# Dataset Card for \"metatree_visualizing_soil\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"metatree_visualizing_soil\"\n\nMore Information needed" ]
ee4653e0c6b0df165271a6832481d65ba7ff78d6
# Dataset Card for "metatree_socmob" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_socmob
[ "region:us" ]
2024-02-06T06:27:27+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 22736, "num_examples": 812}, {"name": "validation", "num_bytes": 9632, "num_examples": 344}], "download_size": 13298, "dataset_size": 32368}}
2024-02-06T06:27:29+00:00
[]
[]
TAGS #region-us
# Dataset Card for "metatree_socmob" More Information needed
[ "# Dataset Card for \"metatree_socmob\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"metatree_socmob\"\n\nMore Information needed" ]
6399d57cd78375f4bac4ec6541796f360244f43a
# Dataset Card for "metatree_mozilla4" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_mozilla4
[ "region:us" ]
2024-02-06T06:27:42+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 659640, "num_examples": 10994}, {"name": "validation", "num_bytes": 273060, "num_examples": 4551}], "download_size": 618390, "dataset_size": 932700}}
2024-02-06T06:27:45+00:00
[]
[]
TAGS #region-us
# Dataset Card for "metatree_mozilla4" More Information needed
[ "# Dataset Card for \"metatree_mozilla4\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"metatree_mozilla4\"\n\nMore Information needed" ]
ecf40015750fdfe2c8ec15d78784caad6aac0d58
# Dataset Card for "metatree_pc3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_pc3
[ "region:us" ]
2024-02-06T06:27:59+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 343492, "num_examples": 1087}, {"name": "validation", "num_bytes": 150416, "num_examples": 476}], "download_size": 280604, "dataset_size": 493908}}
2024-02-06T06:28:01+00:00
[]
[]
TAGS #region-us
# Dataset Card for "metatree_pc3" More Information needed
[ "# Dataset Card for \"metatree_pc3\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"metatree_pc3\"\n\nMore Information needed" ]
009949363eec32a2249fbf8085b76f8031e20e11
# Dataset Card for "metatree_pc1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
yzhuang/metatree_pc1
[ "region:us" ]
2024-02-06T06:28:15+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "int64"}, {"name": "X", "sequence": "float64"}, {"name": "y", "dtype": "int64"}], "splits": [{"name": "train", "num_bytes": 147392, "num_examples": 784}, {"name": "validation", "num_bytes": 61100, "num_examples": 325}], "download_size": 72194, "dataset_size": 208492}}
2024-02-06T06:28:17+00:00
[]
[]
TAGS #region-us
# Dataset Card for "metatree_pc1" More Information needed
[ "# Dataset Card for \"metatree_pc1\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"metatree_pc1\"\n\nMore Information needed" ]
09e3dee2f265b420fd6010598aac0bd6f85ee0a9
From: Atipico1/mrqa_preprocessed_thres-0.95_by-dpr
Atipico1/mrqa_v2_unans
[ "region:us" ]
2024-02-06T06:41:15+00:00
{"dataset_info": {"features": [{"name": "subset", "dtype": "string"}, {"name": "qid", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answers", "sequence": "string"}, {"name": "masked_query", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "answer_sent", "dtype": "string"}, {"name": "answer_in_context", "sequence": "string"}, {"name": "query_embedding", "sequence": "float32"}, {"name": "Q_similar_context", "dtype": "string"}, {"name": "C_similar_context", "dtype": "string"}, {"name": "QC_similar_context", "dtype": "string"}, {"name": "random_context", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 1271622097.5756857, "num_examples": 203281}], "download_size": 1124157534, "dataset_size": 1271622097.5756857}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2024-02-06T06:58:41+00:00
[]
[]
TAGS #region-us
From: Atipico1/mrqa_preprocessed_thres-0.95_by-dpr
[]
[ "TAGS\n#region-us \n" ]
8059d5d3cbbce4221990c759a8e2c0e2c2daaf8f
# Dataset Card for pokemon-images-dataset ### Dataset Summary A collection of images featuring Pokémon characters. ## Dataset Creation ### Context I collected this dataset for my school project. The project is to train a GAN to generate new Pokémon. I had a difficult time finding a training dataset that is complete and clean, so I gathered this collection of images and published it here, hoping that it will help others who need a similar dataset. You can find my project on my [Github][1]. My latest code to generate Pokémon is at [Github][4]. ### Content 819 transparent Pokémon images in PNG format, size 256x256. * Update August 10, 2020: 819 white-background images in JPG format. ### Acknowledgements I collected the images mostly from this website: [https://veekun.com/dex/downloads][2]. The banner image is taken from [https://viking011.deviantart.com/art/Pokemon-Poster-436455502][3]. ### Inspiration Since I failed to generate new Pokémon with clarity (I could only generate the shape), I hope others will manage it with this dataset. If you do, please share it! [1]: https://github.com/kvpratama/gan/tree/master/pokemon [2]: https://veekun.com/dex/downloads [3]: https://viking011.deviantart.com/art/Pokemon-Poster-436455502 [4]: https://github.com/kvpratama/gan/tree/master/pokemon_dcgan
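As a usage note not found in the original card: a minimal sketch of loading the images and normalizing one of them the way a GAN training pipeline typically would. The single `image` feature is taken from this record's metadata; the RGB conversion and the [-1, 1] scaling are illustrative choices, not part of the dataset documentation.

```python
from datasets import load_dataset
import numpy as np

# 819 images with a single "image" feature (per the repo metadata).
ds = load_dataset("kvpratama/pokemon-images-dataset", split="train")

# The card states the images are 256x256; convert one to an array scaled to [-1, 1],
# a common normalization for GAN training (an assumption, not from the card).
img = ds[0]["image"].convert("RGB")
x = np.asarray(img, dtype=np.float32) / 127.5 - 1.0
print(len(ds), x.shape)  # expected: 819 (256, 256, 3)
```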
kvpratama/pokemon-images-dataset
[ "task_categories:image-to-image", "size_categories:n<1K", "language:en", "license:apache-2.0", "region:us" ]
2024-02-06T06:57:46+00:00
{"language": ["en"], "license": "apache-2.0", "size_categories": ["n<1K"], "task_categories": ["image-to-image"], "dataset_info": {"features": [{"name": "image", "dtype": "image"}], "splits": [{"name": "train", "num_bytes": 41347049, "num_examples": 819}], "download_size": 41350027, "dataset_size": 41347049}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2024-02-06T07:35:55+00:00
[]
[ "en" ]
TAGS #task_categories-image-to-image #size_categories-n<1K #language-English #license-apache-2.0 #region-us
# Dataset Card for pokemon-images-dataset ### Dataset Summary A collection of images featuring Pokémon characters. ## Dataset Creation ### Context I collected this dataset for my school project. The project is to train a GAN to generate new Pokémon. I had a difficult time finding a training dataset that is complete and clean, so I gathered this collection of images and published it here, hoping that it will help others who need a similar dataset. You can find my project on my [Github][1]. My latest code to generate Pokémon is at [Github][4]. ### Content 819 transparent Pokémon images in PNG format, size 256x256. * Update August 10, 2020: 819 white-background images in JPG format. ### Acknowledgements I collected the images mostly from this website [URL The banner image is taken from [URL ### Inspiration Since I failed to generate new Pokémon with clarity (I could only generate the shape), I hope others will manage it with this dataset. If you do, please share it! [1]: URL [2]: URL [3]: URL [4]: URL
[ "# Dataset Card for pokemon-images-dataset", "### Dataset Summary\n\nA collection of images featuring Pokémon characters.", "## Dataset Creation", "### Context\n\nI collected this dataset for my school project. The project is to train GAN to generate new Pokemon. I had a difficult time finding a training dataset that is complete and clean. So I gather this collection of images and publish it here hoping that it will help others who need a similar dataset.\n\nYou can find my project on my [Github][1]\nMy latest code to generate pokemon [Github][4]", "### Content\n\n819 transparent Pokemon images in png format size 256x256.\n\n* Update August 10, 2020\n819 white background in jpg format", "### Acknowledgements\n\nI collected the image mostly from this website [URL\n\nBanner image is taken from [URL", "### Inspiration\n\nSince I failed to generate new Pokemon with clarity (I can only generate the shape) I wish there will be others that could do it with this dataset. If you managed to, please share it!\n\n\n [1]: URL\n [2]: URL\n [3]: URL\n [4]: URL" ]
[ "TAGS\n#task_categories-image-to-image #size_categories-n<1K #language-English #license-apache-2.0 #region-us \n", "# Dataset Card for pokemon-images-dataset", "### Dataset Summary\n\nA collection of images featuring Pokémon characters.", "## Dataset Creation", "### Context\n\nI collected this dataset for my school project. The project is to train GAN to generate new Pokemon. I had a difficult time finding a training dataset that is complete and clean. So I gather this collection of images and publish it here hoping that it will help others who need a similar dataset.\n\nYou can find my project on my [Github][1]\nMy latest code to generate pokemon [Github][4]", "### Content\n\n819 transparent Pokemon images in png format size 256x256.\n\n* Update August 10, 2020\n819 white background in jpg format", "### Acknowledgements\n\nI collected the image mostly from this website [URL\n\nBanner image is taken from [URL", "### Inspiration\n\nSince I failed to generate new Pokemon with clarity (I can only generate the shape) I wish there will be others that could do it with this dataset. If you managed to, please share it!\n\n\n [1]: URL\n [2]: URL\n [3]: URL\n [4]: URL" ]
7a37e770f07b5e22ae68786271320e60e3d3a321
1 million OpenAI Embeddings - 3072 dimensions. Created: February 2024. Text used for Embedding: title (string) + text (string). Embedding Model: text-embedding-3-large. ## Credits: This dataset was generated from the first 1M entries of https://huggingface.co/datasets/BeIR/dbpedia-entity
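As an illustrative usage sketch (not part of the original card): embedding a query with the same text-embedding-3-large model and scoring it against the precomputed vectors by cosine similarity. The OpenAI client call and the brute-force NumPy search over a small slice are assumptions about how one might consume the data; a vector store such as pgvector would be the more typical route for the full 1M rows.

```python
import numpy as np
from datasets import load_dataset
from openai import OpenAI

# Load a small slice of the precomputed 3072-dim vectors (the full set has 1M rows).
ds = load_dataset("Supabase/dbpedia-openai-3-large-1M", split="train[:10000]")
matrix = np.asarray(ds["embedding"], dtype=np.float32)      # shape: (10000, 3072)
matrix /= np.linalg.norm(matrix, axis=1, keepdims=True)     # normalize once for cosine similarity

# Embed the query with the same model used to build the dataset.
client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
resp = client.embeddings.create(model="text-embedding-3-large", input="ancient Roman emperors")
q = np.asarray(resp.data[0].embedding, dtype=np.float32)
q /= np.linalg.norm(q)

scores = matrix @ q                                          # cosine similarity against every row
for i in np.argsort(-scores)[:5]:
    print(round(float(scores[i]), 3), ds[int(i)]["title"])
```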
Supabase/dbpedia-openai-3-large-1M
[ "size_categories:1M<n<10M", "language:en", "license:mit", "region:us" ]
2024-02-06T07:44:48+00:00
{"language": ["en"], "license": "mit", "size_categories": ["1M<n<10M"], "pretty_name": "OpenAI text-embedding-3-large with 1M DBPedia Entities", "dataset_info": {"features": [{"name": "_id", "dtype": "string"}, {"name": "title", "dtype": "string"}, {"name": "text", "dtype": "string"}, {"name": "embedding", "sequence": "float32"}], "splits": [{"name": "train", "num_bytes": 17782586772, "num_examples": 1000000}], "download_size": 17782586772, "dataset_size": 1000000}}
2024-02-06T09:53:03+00:00
[]
[ "en" ]
TAGS #size_categories-1M<n<10M #language-English #license-mit #region-us
1 million OpenAI Embeddings - 3072 dimensions. Created: February 2024. Text used for Embedding: title (string) + text (string). Embedding Model: text-embedding-3-large. ## Credits: This dataset was generated from the first 1M entries of URL
[ "## Credits:\n\nThis dataset was generated from the first 1M entries of URL" ]
[ "TAGS\n#size_categories-1M<n<10M #language-English #license-mit #region-us \n", "## Credits:\n\nThis dataset was generated from the first 1M entries of URL" ]
8c10f0cea8a754a232b0f4bceacd6ba2a9bacdbc
# Dataset Card for "high_vs_random_min_len_100" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
kristmh/high_vs_random_min_len_100
[ "region:us" ]
2024-02-06T07:47:18+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}, {"split": "train", "path": "data/train-*"}, {"split": "validate", "path": "data/validate-*"}]}], "dataset_info": {"features": [{"name": "text_clean", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 34061883, "num_examples": 34904}, {"name": "train", "num_bytes": 266004361, "num_examples": 279232}, {"name": "validate", "num_bytes": 33378905, "num_examples": 34904}], "download_size": 163590570, "dataset_size": 333445149}}
2024-02-06T07:48:24+00:00
[]
[]
TAGS #region-us
# Dataset Card for "high_vs_random_min_len_100" More Information needed
[ "# Dataset Card for \"high_vs_random_min_len_100\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"high_vs_random_min_len_100\"\n\nMore Information needed" ]
63fe6811b0c6f2ca2a64bcaf004ee7f9de4228b4
# Dataset Card for "high_vs_random_min_len_1000" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
kristmh/high_vs_random_min_len_1000
[ "region:us" ]
2024-02-06T07:48:42+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data/test-*"}, {"split": "train", "path": "data/train-*"}, {"split": "validate", "path": "data/validate-*"}]}], "dataset_info": {"features": [{"name": "text_clean", "dtype": "string"}, {"name": "label", "dtype": "int64"}, {"name": "__index_level_0__", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 19282841, "num_examples": 7642}, {"name": "train", "num_bytes": 157361909, "num_examples": 61136}, {"name": "validate", "num_bytes": 18779565, "num_examples": 7642}], "download_size": 85467675, "dataset_size": 195424315}}
2024-02-06T07:49:16+00:00
[]
[]
TAGS #region-us
# Dataset Card for "high_vs_random_min_len_1000" More Information needed
[ "# Dataset Card for \"high_vs_random_min_len_1000\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"high_vs_random_min_len_1000\"\n\nMore Information needed" ]
5f5057080e9ffe954deab41320a2d871c48306ac
# Dataset Card for "target-locations-0.2split" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
McSpicyWithMilo/target-locations-0.2split
[ "region:us" ]
2024-02-06T08:28:21+00:00
{"dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "target_location", "dtype": "string"}, {"name": "instruction_type", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 7719.8, "num_examples": 80}, {"name": "test", "num_bytes": 1929.95, "num_examples": 20}], "download_size": 11045, "dataset_size": 9649.75}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]}
2024-02-06T08:28:29+00:00
[]
[]
TAGS #region-us
# Dataset Card for "target-locations-0.2split" More Information needed
[ "# Dataset Card for \"target-locations-0.2split\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"target-locations-0.2split\"\n\nMore Information needed" ]
067250da359f64e6447b90d1daf1a9b98f810883
# Dataset Card for "target-locations-0.3split" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
McSpicyWithMilo/target-locations-0.3split
[ "region:us" ]
2024-02-06T08:28:50+00:00
{"dataset_info": {"features": [{"name": "instruction", "dtype": "string"}, {"name": "target_location", "dtype": "string"}, {"name": "instruction_type", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 6754.825, "num_examples": 70}, {"name": "test", "num_bytes": 2894.925, "num_examples": 30}], "download_size": 11251, "dataset_size": 9649.75}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]}]}
2024-02-06T08:28:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "target-locations-0.3split" More Information needed
[ "# Dataset Card for \"target-locations-0.3split\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"target-locations-0.3split\"\n\nMore Information needed" ]
86a3b0e6f2266281d481bcb46a0bda5b511cffb0
# 介绍(Introduction) **LV-Eval**是一个具备5个长度等级(16k、32k、64k、128k和256k)、最大文本测试长度达到256k的长文本评测基准。**LV-Eval**的平均文本长度达到102,380字,最小/最大文本长度为11,896/387,406字。**LV-Eval**主要有两类评测任务——单跳QA和多跳QA,共包含11个涵盖中英文的评测数据子集。**LV-Eval**设计时引入3个关键技术:干扰事实插入(**C**onfusiong **F**acts **I**nsertion,CFI)提高挑战性,关键词和短语替换(**K**eyword and **P**hrase **R**eplacement,KPR)减少信息泄漏,以及基于关键词召回的评测指标(**A**nswer **K**eywords,AK,指代结合答案关键词和字词黑名单的评价指标)提高评测数值客观性。我们希望*LV*-Eval为未来长文本大语言模型的研究发展提供有价值的性能参考。 **LV-Eval**有以下关键特性: * **超长文本长度**: **LV-Eval**由5个长度等级构成,分别是16k、32k、64k、128k以及256k。同一数据集在不同长度等级下具有相同的问答对集合,只是构成各长度等级的上下文长度不同。我们的目的是保持问答对一致的情况下,充分测试模型在不同长度等级上下文中的性能表现,更可控地评估模型的长文本能力。 * **结合混淆和干扰信息来提升评测难度**: 构建测试数据的过程中,我们将问答相关文档和无关文档混合拼接起来构成测试文档。该构建方式在扩展文本长度的同时,可有效评测模型从冗长混淆文本中提取关键信息的能力。此外,我们还使用GPT-4生成多个干扰信息,并在人工检查后随机插入到测试文档中,以评测模型在有相似事实描述的干扰下保持准确推理的能力。 * **替换数据中的关键信息以减少信息泄漏**: 为了解决长文本能力评测中由于信息泄漏而引起的指标虚高问题,我们采用关键词和短语替换的方式处理数据的上下文以及问答对,替换后的信息不再是公共知识,也在很大程度上与数据源的原始信息不同。所有的替换词和短语标注都由人类标注员完成。这样一来, **LV-Eval**能够严格要求被测模型根据数据中实际提供的上下文信息来回答问题,而非通过“背题”或者预训练阶段的常识记忆的方式来回答问题。 * **基于关键词召回的指标可更客观公正地评测模型性能**: 目前已有的评测指标(如F1分、ROUGH等)存在受回答格式和无关字词干扰的问题,容易导致评测结果虚高。为解决这个问题,我们人工标注了答案关键词和字词黑名单。答案关键词是从原始答案中提取的最具回答信息量的词汇或短语,而字词黑名单主要包含一些无信息量的代词、助词,比如“的”、“和”、“了”等。评测指标的计算被设计为两阶段过程,以F1分数为例:第一阶段先计算模型回答对答案关键词的召回分数,如果分数低于预设阈值,则直接计0分;如果召回分数高于阈值,则进一步计算模型回答与完整答案的F1分数——首先将字词黑名单中的词从回答和答案中过滤掉,再正常进行F1分数计算。这样一来,评测指标可使得模型得分更加客观公正。 如果您想了解更多关于**LV-Eval**的细节,我们建议您参阅[GitHub代码库](https://github.com/infinigence/LVEval)以及[论文](https://arxiv.org/abs/2402.05136)。 **LV-Eval** is a challenging long-context benchmark with five length levels (16k, 32k, 64k, 128k, and 256k) reaching up to 256k words. The average number of words is 102,380, and the Min/Max number of words is 11,896/387,406. **LV-Eval** features two main tasks, single-hop QA and multi-hop QA, comprising 11 bilingual datasets. The design of **LV-Eval** has incorporated three key techniques, namely confusing facts insertion (CFI), keyword and phrase replacement (KPR), and keyword-recall-based metrics (AK, short for metics with Answer Keywords and word blacklist) design, which jointly provide a challenging, mitigated-knowledge-leakege, and more accurate evaluation of the long-context capability of LLMs. We anticipate that **LV-Eval** will serve as a valuable resource for supporting future research on long-context LLMs. The Key Characteristics of **LV-Eval** include: * **Sufficiently long context length to evaluate state-of-the-art models**: **LV-Eval** comprises 5 length levels with word counts of 16k, 32k, 64k, 128k, and 256k. Test instances across these levels share the same set of question-answer (QA) pairs, and only differ in the context content and length. Testing on the same QA pairs with different context lengths facilitates a controllable evaluation of models' long-context ability. * **Incorporation of distraction and confusion to increase difficulty**: When constructing the context for each test instance, we mix up distracting documents and supporting documents. This approach evaluates the model's ability in pinpointing key information in a large bunch of distracting texts. In addition, we insert confusing facts generated by GPT-4 and revised by human annotators into the context. This assesses the model's capability to accurately reason in the presence of interference. * **Keyword and phrase replacement to mitigate knowledge leakage**: To mitigate the biased evaluation of long-context ability caused by knowledge leakage, we apply keyword and phrase replacement in the context and QA pairs. 
The replacement rules are annotated by human annotators. In this way, **LV-Eval** requires LLMs to rely on their understanding of the long context to answer questions rather than relying on memorization or common-sense knowledge. * **Keyword-recall-based metric for more objective scoring**: Existing *N*-gram metrics such as the F1 score are sensitive to the format variations and non-informative words in the answer, which results in inaccurate scores. To address this, we manually annotate answer keywords and a blacklist of unrelated words. The answer keywords are the critical words or sentences extracted from original ground-truth (GT) answers, while the word blacklist contains common and non-informative words such as 'the', 'a', 'of', and so on. The metric calculation follows a two-stage procedure: the first stage calculates the recall of answer keywords; if the recall exceeds a certain threshold, the second stage will remove all the blacklisted words and then calculate the F1 score between the prediction and the GT answer. This metric design can get scores with higher objectivity. If you want to learn more about **LV-Eval**, we recommend you to refer to the [GitHub repository](https://github.com/infinigence/LVEval) and the [paper](https://arxiv.org/abs/2402.05136). # How to use it? #### Quick Start Our dataset evaluates the long-text capabilities of the large language models from multiple perspectives. Each subset has different length divisions, so please add a length limit when loading the dataset. ``` data = load_dataset("Infinigence/LVEval", "hotpotwikiqa_mixup_16k", split='test') ``` #### Loading Data ```python from datasets import load_dataset DATASET_NAMES = [ "hotpotwikiqa_mixup", "loogle_SD_mixup", "loogle_CR_mixup", "loogle_MIR_mixup", \ "multifieldqa_en_mixup", "multifieldqa_zh_mixup", "factrecall_en", "factrecall_zh", \ "cmrc_mixup", "lic_mixup", "dureader_mixup" ] DATASET_LENGTH_LEVEL = [ '16k', '32k', '64k', '128k', '256k' ] def get_dataset_names(dataset_names, length_levels): datasets = [] for name in dataset_names: for length in length_levels: datasets.append(f"{name}_{length}") return datasets for dataset in get_dataset_names(DATASET_NAMES, DATASET_LENGTH_LEVEL): data = load_dataset("Infinigence/LVEval", dataset, split='test') ``` If you want to download the data for **hotpotwikiqa_mixup**, you can visit [this link](https://huggingface.co/datasets/Infinigence/LVEval/resolve/main/hotpotwikiqa_mixup.zip). If you need other subsets of data, simply change the zip file name in the link above. #### Data Format All data in **LV-Eval** follows the following format. For certain datasets ("loogle_SD_mixup," "loogle_CR_mixup," "loogle_MIR_mixup"), there is an additional key called "answer_keywords". This key indicates the most crucial word or sentence in the answer. During the evaluation of predicted values, if the match between the prediction and the "answer_keywords" falls below a certain threshold, it directly returns 0. Otherwise, it compares the "answers" list with the predicted value. For some datasets ("factrecall_en," "factrecall_zh," "cmrc_mixup"), there is an extra key called "confusing_facts". This key represents confounding elements added to increase the benchmark difficulty and has been randomly placed within long texts. For certain datasets ("hotpotwikiqa_mixup," "multifieldqa_en_mixup," "multifieldqa_zh_mixup," "lic_mixup"), both "answer_keywords" and "confusing_facts" are present. 
```json { "input": "The input/command for the task, usually short, such as questions in QA, queries in Few-shot tasks, etc", "context": "The documents input into the long-text task.", "answers": "A List of all true answers", "length": "Total length of the first three items (counted in characters for Chinese and words for English)", "dataset": "The name of the dataset to which this piece of data belongs", "language": "The language of this piece of data", "answer_keywords": "The key words or sentences manually filtered from the answers.", "confusing_facts": "This key represents confounding elements added to increase the benchmark difficulty and has been randomly placed within long texts. This helps make the test instances more challenging." } ``` #### Evaluation This repository provides data download for LV-Eval. If you wish to use this dataset for automated evaluation, please refer to our [github](https://github.com/infinigence/LVEval). # Task statistics | Task | Datasets | CFI | \#KPR | AK | Language | \#QA pairs | \#Contexts | |:-------------:|:-----------------------:|:----------:|-------|:----------:|:--------:|:----------:|:------------:| | Single-hop QA | loogle\_SD\_mixup | | | &#10004; | en | 160 | 800 | | | cmrc\_mixup | | 786 | | zh | 200 | 1,000 | | | multifieldqa\_en\_mixup | &#10004; | 476 | &#10004; | en | 101 | 505 | | | multifieldqa\_zh\_mixup | &#10004; | 424 | &#10004; | zh | 133 | 665 | | | factrecall\_en | &#10004; | 3 | &#10004; | en | 1 | 200*5 | | | factrecall\_zh | &#10004; | 3 | &#10004; | zh | 1 | 200*5 | | Multi-hop QA | dureader\_mixup | | | | zh | 176 | 880 | | | loogle\_CR\_mixup | | | &#10004; | en | 99 | 495 | | | loogle\_MR\_mixup | | | &#10004; | en | 139 | 695 | | | hotpotwikiqa\_mixup | &#10004; | 232 | &#10004; | en | 124 | 620 | | | lic\_mixup | &#10004; | | &#10004; | zh | 197 | 985 | The abbreviations for **CFI, KPR, AK** represent for confusing fact insertion, keyword and phrase replacement, and answer keywords, respectively. The confusing fact insertion has already been inserted into the context and will be displayed in the jsonl file as **"confusing_facts"**. The answer keywords will be shown in the form of **"answer_keywords"** in the jsonl file. # Task construction ### Multi-hop QA In a multi-hop QA task, the reasoning process to derive the answer need to gather multiple pieces of information from various locations in the context. - **lic-mixup** is originated from the [Long-instruction-en2zh](https://huggingface.co/datasets/yuyijiong/Long-instruction-en2zh) dataset on Hugging Face. The original Long-instruction-en2zh contains 8,000+ high-quality Chinese multi-doc QA data translated from English. We selected 197 QA pairs and their corresponding documents as supporting data, while the remaining documents serve as distracting data for context mixing. - **hotpotwikiqa-mixup** is originated from two Wikipedia-based multi-hop QA datasets: [HotpotQA](https://huggingface.co/datasets/hotpot_qa) and [2WikiMultihopQA](https://huggingface.co/datasets/voidful/2WikiMultihopQA). HotpotQA contains 112,779 2-hop questions that are written by native speakers according to two given paragraphs as the context. 2WikiMultihopQA contains 192,606 5-hop questions that are synthesized using manually designed templates to prevent shortcut solutions. We select 124 samples from the two datasets. 
- **loogle-MR-mixup** and **loogle-CR-mixup** originate from [LooGLE](https://huggingface.co/datasets/bigainlco/LooGLE)'s Long-dependency QA task, specifically the *Multiple information Retrieval* and *Comprehension and Reasoning* subtasks. The *Multiple information Retrieval* task requires aggregation of the evidence that can be directly located in original sentences, while the *Comprehension and Reasoning* task contains implicit evidence within the context, it requires multi-step reasoning to get the correct answers. We select 139 and 99 questions for **loogle-MR-mixup** and **loogle-CR-mixup**, respectively. - **dureader-mixup** is built from the [DuReader](https://github.com/baidu/DuReader) dataset. We first randomly select 200 instances and then manually remove 24 samples whose answers are longer than 360 words. ### Single-hop QA In a single-hop QA task, only a single evidence in the context is needed to derive the answer. - **loogle-SD-mixup** contains 160 unique QA pairs and 800 documents originated from the short-dependency QA task in [LooGLE](https://huggingface.co/datasets/bigainlco/LooGLE). - **cmrc-mixup** is derived from the [CMRC 2018 Public Datasets](https://github.com/ymcui/cmrc2018), designed for Chinese machine reading comprehension. It contains ~20k questions annotated on Wikipedia paragraphs by human experts. We manually pick 200 QA pairs and their corresponding documents as supporting QA pairs and paragraphs. - **multifieldqa-en-mixup** and **multifieldqa-zh-mixup** are built from the MultiFieldQA datasets in [LongBench](https://huggingface.co/datasets/THUDM/LongBench). We manually remove questions that can be answered using common-sense knowledge without referring to the context, and eventually get 101 and 133 unique QA pairs for **multifieldqa-en-mixup** and **multifieldqa-zh-mixup**, respectively. - **factrecall-en** and **factrecall-zh** are two synthetic datasets designed to assess the LLMs' ability to identify a small piece of evidence (“fact”) located at various locations within a very lengthy context. We write one English fact-question-answer pair for **factrecall-en** and one Chinese fact-question-answer pair for **factrecall-zh**. Distracting documents are sourced from *PG-19* dataset (English) and the book of *Dream of the Red Chamber* (Chinese) to create five contexts of different length levels. For each context, we generate 200 documents by inserting the fact at 200 evenly spaced positions within the context. # License In **LV-Eval**, the cmrc-mixup and lic-mixup datasets follow `CC-BY-SA-4.0` license, and the other datasets follow `MIT` license. # Citation ``` @misc{yuan2024lveval, title={LV-Eval: A Balanced Long-Context Benchmark with 5 Length Levels Up to 256K}, author={Tao Yuan and Xuefei Ning and Dong Zhou and Zhijie Yang and Shiyao Li and Minghui Zhuang and Zheyue Tan and Zhuyu Yao and Dahua Lin and Boxun Li and Guohao Dai and Shengen Yan and Yu Wang}, year={2024}, eprint={2402.05136}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
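To make the two-stage, keyword-recall-based scoring described in this card concrete, here is a minimal sketch in Python. The whitespace tokenization, the 0.5 recall threshold, and the blacklist handling are placeholder assumptions (the released evaluator on the LV-Eval GitHub repository is the authoritative implementation, and it presumably treats Chinese text differently), so treat this only as an illustration of the procedure, not as the official metric.

```python
def token_f1(pred_tokens, gold_tokens):
    # Token-level F1 between a prediction and one ground-truth answer.
    common = set(pred_tokens) & set(gold_tokens)
    num_same = sum(min(pred_tokens.count(t), gold_tokens.count(t)) for t in common)
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(gold_tokens)
    return 2 * precision * recall / (precision + recall)


def two_stage_score(prediction, answer, answer_keywords, blacklist, threshold=0.5):
    # Stage 1: recall of the annotated answer keywords in the prediction.
    pred_tokens = prediction.lower().split()
    keyword_tokens = answer_keywords.lower().split()
    if keyword_tokens:
        hits = sum(1 for t in keyword_tokens if t in pred_tokens)
        if hits / len(keyword_tokens) < threshold:
            return 0.0  # key information is missing, so the answer scores zero outright

    # Stage 2: blacklist-filtered F1 between the prediction and the full GT answer.
    pred_filtered = [t for t in pred_tokens if t not in blacklist]
    gold_filtered = [t for t in answer.lower().split() if t not in blacklist]
    if not pred_filtered or not gold_filtered:
        return 0.0
    return token_f1(pred_filtered, gold_filtered)
```

For instance, `two_stage_score("It was held in Paris in 1900.", "Paris", "Paris", {"the", "a", "of", "in"})` returns a nonzero F1 because the keyword "Paris" is recalled, whereas a prediction that never mentions Paris is cut off at stage 1 and scores 0.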
Infinigence/LVEval
[ "language:en", "language:zh", "license:mit", "arxiv:2402.05136", "region:us" ]
2024-02-06T08:40:39+00:00
{"language": ["en", "zh"], "license": "mit", "viewer": true}
2024-02-10T08:17:11+00:00
[ "2402.05136" ]
[ "en", "zh" ]
TAGS #language-English #language-Chinese #license-mit #arxiv-2402.05136 #region-us
介绍(Introduction) ================ LV-Eval是一个具备5个长度等级(16k、32k、64k、128k和256k)、最大文本测试长度达到256k的长文本评测基准。LV-Eval的平均文本长度达到102,380字,最小/最大文本长度为11,896/387,406字。LV-Eval主要有两类评测任务——单跳QA和多跳QA,共包含11个涵盖中英文的评测数据子集。LV-Eval设计时引入3个关键技术:干扰事实插入(Confusiong Facts Insertion,CFI)提高挑战性,关键词和短语替换(Keyword and Phrase Replacement,KPR)减少信息泄漏,以及基于关键词召回的评测指标(Answer Keywords,AK,指代结合答案关键词和字词黑名单的评价指标)提高评测数值客观性。我们希望*LV*-Eval为未来长文本大语言模型的研究发展提供有价值的性能参考。 LV-Eval有以下关键特性: * 超长文本长度: LV-Eval由5个长度等级构成,分别是16k、32k、64k、128k以及256k。同一数据集在不同长度等级下具有相同的问答对集合,只是构成各长度等级的上下文长度不同。我们的目的是保持问答对一致的情况下,充分测试模型在不同长度等级上下文中的性能表现,更可控地评估模型的长文本能力。 * 结合混淆和干扰信息来提升评测难度: 构建测试数据的过程中,我们将问答相关文档和无关文档混合拼接起来构成测试文档。该构建方式在扩展文本长度的同时,可有效评测模型从冗长混淆文本中提取关键信息的能力。此外,我们还使用GPT-4生成多个干扰信息,并在人工检查后随机插入到测试文档中,以评测模型在有相似事实描述的干扰下保持准确推理的能力。 * 替换数据中的关键信息以减少信息泄漏: 为了解决长文本能力评测中由于信息泄漏而引起的指标虚高问题,我们采用关键词和短语替换的方式处理数据的上下文以及问答对,替换后的信息不再是公共知识,也在很大程度上与数据源的原始信息不同。所有的替换词和短语标注都由人类标注员完成。这样一来, LV-Eval能够严格要求被测模型根据数据中实际提供的上下文信息来回答问题,而非通过“背题”或者预训练阶段的常识记忆的方式来回答问题。 * 基于关键词召回的指标可更客观公正地评测模型性能: 目前已有的评测指标(如F1分、ROUGH等)存在受回答格式和无关字词干扰的问题,容易导致评测结果虚高。为解决这个问题,我们人工标注了答案关键词和字词黑名单。答案关键词是从原始答案中提取的最具回答信息量的词汇或短语,而字词黑名单主要包含一些无信息量的代词、助词,比如“的”、“和”、“了”等。评测指标的计算被设计为两阶段过程,以F1分数为例:第一阶段先计算模型回答对答案关键词的召回分数,如果分数低于预设阈值,则直接计0分;如果召回分数高于阈值,则进一步计算模型回答与完整答案的F1分数——首先将字词黑名单中的词从回答和答案中过滤掉,再正常进行F1分数计算。这样一来,评测指标可使得模型得分更加客观公正。 如果您想了解更多关于LV-Eval的细节,我们建议您参阅GitHub代码库以及论文。 LV-Eval is a challenging long-context benchmark with five length levels (16k, 32k, 64k, 128k, and 256k) reaching up to 256k words. The average number of words is 102,380, and the Min/Max number of words is 11,896/387,406. LV-Eval features two main tasks, single-hop QA and multi-hop QA, comprising 11 bilingual datasets. The design of LV-Eval has incorporated three key techniques, namely confusing facts insertion (CFI), keyword and phrase replacement (KPR), and keyword-recall-based metrics (AK, short for metics with Answer Keywords and word blacklist) design, which jointly provide a challenging, mitigated-knowledge-leakege, and more accurate evaluation of the long-context capability of LLMs. We anticipate that LV-Eval will serve as a valuable resource for supporting future research on long-context LLMs. The Key Characteristics of LV-Eval include: * Sufficiently long context length to evaluate state-of-the-art models: LV-Eval comprises 5 length levels with word counts of 16k, 32k, 64k, 128k, and 256k. Test instances across these levels share the same set of question-answer (QA) pairs, and only differ in the context content and length. Testing on the same QA pairs with different context lengths facilitates a controllable evaluation of models' long-context ability. * Incorporation of distraction and confusion to increase difficulty: When constructing the context for each test instance, we mix up distracting documents and supporting documents. This approach evaluates the model's ability in pinpointing key information in a large bunch of distracting texts. In addition, we insert confusing facts generated by GPT-4 and revised by human annotators into the context. This assesses the model's capability to accurately reason in the presence of interference. * Keyword and phrase replacement to mitigate knowledge leakage: To mitigate the biased evaluation of long-context ability caused by knowledge leakage, we apply keyword and phrase replacement in the context and QA pairs. The replacement rules are annotated by human annotators. In this way, LV-Eval requires LLMs to rely on their understanding of the long context to answer questions rather than relying on memorization or common-sense knowledge. 
* Keyword-recall-based metric for more objective scoring: Existing *N*-gram metrics such as the F1 score are sensitive to the format variations and non-informative words in the answer, which results in inaccurate scores. To address this, we manually annotate answer keywords and a blacklist of unrelated words. The answer keywords are the critical words or sentences extracted from original ground-truth (GT) answers, while the word blacklist contains common and non-informative words such as 'the', 'a', 'of', and so on. The metric calculation follows a two-stage procedure: the first stage calculates the recall of answer keywords; if the recall exceeds a certain threshold, the second stage will remove all the blacklisted words and then calculate the F1 score between the prediction and the GT answer. This metric design can get scores with higher objectivity. If you want to learn more about LV-Eval, we recommend you to refer to the GitHub repository and the paper. How to use it? ============== #### Quick Start Our dataset evaluates the long-text capabilities of the large language models from multiple perspectives. Each subset has different length divisions, so please add a length limit when loading the dataset. #### Loading Data If you want to download the data for hotpotwikiqa\_mixup, you can visit this link. If you need other subsets of data, simply change the zip file name in the link above. #### Data Format All data in LV-Eval follows the following format. For certain datasets ("loogle\_SD\_mixup," "loogle\_CR\_mixup," "loogle\_MIR\_mixup"), there is an additional key called "answer\_keywords". This key indicates the most crucial word or sentence in the answer. During the evaluation of predicted values, if the match between the prediction and the "answer\_keywords" falls below a certain threshold, it directly returns 0. Otherwise, it compares the "answers" list with the predicted value. For some datasets ("factrecall\_en," "factrecall\_zh," "cmrc\_mixup"), there is an extra key called "confusing\_facts". This key represents confounding elements added to increase the benchmark difficulty and has been randomly placed within long texts. For certain datasets ("hotpotwikiqa\_mixup," "multifieldqa\_en\_mixup," "multifieldqa\_zh\_mixup," "lic\_mixup"), both "answer\_keywords" and "confusing\_facts" are present. #### Evaluation This repository provides data download for LV-Eval. If you wish to use this dataset for automated evaluation, please refer to our github. Task statistics =============== The abbreviations for CFI, KPR, AK represent for confusing fact insertion, keyword and phrase replacement, and answer keywords, respectively. The confusing fact insertion has already been inserted into the context and will be displayed in the jsonl file as "confusing\_facts". The answer keywords will be shown in the form of "answer\_keywords" in the jsonl file. Task construction ================= ### Multi-hop QA In a multi-hop QA task, the reasoning process to derive the answer need to gather multiple pieces of information from various locations in the context. * lic-mixup is originated from the Long-instruction-en2zh dataset on Hugging Face. The original Long-instruction-en2zh contains 8,000+ high-quality Chinese multi-doc QA data translated from English. We selected 197 QA pairs and their corresponding documents as supporting data, while the remaining documents serve as distracting data for context mixing. 
* hotpotwikiqa-mixup is originated from two Wikipedia-based multi-hop QA datasets: HotpotQA and 2WikiMultihopQA. HotpotQA contains 112,779 2-hop questions that are written by native speakers according to two given paragraphs as the context. 2WikiMultihopQA contains 192,606 5-hop questions that are synthesized using manually designed templates to prevent shortcut solutions. We select 124 samples from the two datasets. * loogle-MR-mixup and loogle-CR-mixup originate from LooGLE's Long-dependency QA task, specifically the *Multiple information Retrieval* and *Comprehension and Reasoning* subtasks. The *Multiple information Retrieval* task requires aggregation of the evidence that can be directly located in original sentences, while the *Comprehension and Reasoning* task contains implicit evidence within the context, it requires multi-step reasoning to get the correct answers. We select 139 and 99 questions for loogle-MR-mixup and loogle-CR-mixup, respectively. * dureader-mixup is built from the DuReader dataset. We first randomly select 200 instances and then manually remove 24 samples whose answers are longer than 360 words. ### Single-hop QA In a single-hop QA task, only a single evidence in the context is needed to derive the answer. * loogle-SD-mixup contains 160 unique QA pairs and 800 documents originated from the short-dependency QA task in LooGLE. * cmrc-mixup is derived from the CMRC 2018 Public Datasets, designed for Chinese machine reading comprehension. It contains ~20k questions annotated on Wikipedia paragraphs by human experts. We manually pick 200 QA pairs and their corresponding documents as supporting QA pairs and paragraphs. * multifieldqa-en-mixup and multifieldqa-zh-mixup are built from the MultiFieldQA datasets in LongBench. We manually remove questions that can be answered using common-sense knowledge without referring to the context, and eventually get 101 and 133 unique QA pairs for multifieldqa-en-mixup and multifieldqa-zh-mixup, respectively. * factrecall-en and factrecall-zh are two synthetic datasets designed to assess the LLMs' ability to identify a small piece of evidence (“fact”) located at various locations within a very lengthy context. We write one English fact-question-answer pair for factrecall-en and one Chinese fact-question-answer pair for factrecall-zh. Distracting documents are sourced from *PG-19* dataset (English) and the book of *Dream of the Red Chamber* (Chinese) to create five contexts of different length levels. For each context, we generate 200 documents by inserting the fact at 200 evenly spaced positions within the context. License ======= In LV-Eval, the cmrc-mixup and lic-mixup datasets follow 'CC-BY-SA-4.0' license, and the other datasets follow 'MIT' license.
[ "#### Quick Start\n\n\nOur dataset evaluates the long-text capabilities of the large language models from multiple perspectives. Each subset has different length divisions, so please add a length limit when loading the dataset.", "#### Loading Data\n\n\nIf you want to download the data for hotpotwikiqa\\_mixup, you can visit this link.\nIf you need other subsets of data, simply change the zip file name in the link above.", "#### Data Format\n\n\nAll data in LV-Eval follows the following format. For certain datasets (\"loogle\\_SD\\_mixup,\" \"loogle\\_CR\\_mixup,\" \"loogle\\_MIR\\_mixup\"), there is an additional key called \"answer\\_keywords\". This key indicates the most crucial word or sentence in the answer. During the evaluation of predicted values, if the match between the prediction and the \"answer\\_keywords\" falls below a certain threshold, it directly returns 0. Otherwise, it compares the \"answers\" list with the predicted value.\n\n\nFor some datasets (\"factrecall\\_en,\" \"factrecall\\_zh,\" \"cmrc\\_mixup\"), there is an extra key called \"confusing\\_facts\". This key represents confounding elements added to increase the benchmark difficulty and has been randomly placed within long texts.\n\n\nFor certain datasets (\"hotpotwikiqa\\_mixup,\" \"multifieldqa\\_en\\_mixup,\" \"multifieldqa\\_zh\\_mixup,\" \"lic\\_mixup\"), both \"answer\\_keywords\" and \"confusing\\_facts\" are present.", "#### Evaluation\n\n\nThis repository provides data download for LV-Eval. If you wish to use this dataset for automated evaluation, please refer to our github.\n\n\nTask statistics\n===============\n\n\n\nThe abbreviations for CFI, KPR, AK represent for confusing fact insertion, keyword and phrase replacement, and answer keywords, respectively. The confusing fact insertion has already been inserted into the context and will be displayed in the jsonl file as \"confusing\\_facts\". The answer keywords will be shown in the form of \"answer\\_keywords\" in the jsonl file.\n\n\nTask construction\n=================", "### Multi-hop QA\n\n\nIn a multi-hop QA task, the reasoning process to derive the answer need to gather multiple pieces of information from various locations in the context.\n\n\n* lic-mixup is originated from the Long-instruction-en2zh dataset on Hugging Face. The original Long-instruction-en2zh contains 8,000+ high-quality Chinese multi-doc QA data translated from English. We selected 197 QA pairs and their corresponding documents as supporting data, while the remaining documents serve as distracting data for context mixing.\n* hotpotwikiqa-mixup is originated from two Wikipedia-based multi-hop QA datasets: HotpotQA and 2WikiMultihopQA. HotpotQA contains 112,779 2-hop questions that are written by native speakers according to two given paragraphs as the context. 2WikiMultihopQA contains 192,606 5-hop questions that are synthesized using manually designed templates to prevent shortcut solutions. We select 124 samples from the two datasets.\n* loogle-MR-mixup and loogle-CR-mixup originate from LooGLE's Long-dependency QA task, specifically the *Multiple information Retrieval* and *Comprehension and Reasoning* subtasks. The *Multiple information Retrieval* task requires aggregation of the evidence that can be directly located in original sentences, while the *Comprehension and Reasoning* task contains implicit evidence within the context, it requires multi-step reasoning to get the correct answers. 
We select 139 and 99 questions for loogle-MR-mixup and loogle-CR-mixup, respectively.\n* dureader-mixup is built from the DuReader dataset. We first randomly select 200 instances and then manually remove 24 samples whose answers are longer than 360 words.", "### Single-hop QA\n\n\nIn a single-hop QA task, only a single evidence in the context is needed to derive the answer.\n\n\n* loogle-SD-mixup contains 160 unique QA pairs and 800 documents originated from the short-dependency QA task in LooGLE.\n* cmrc-mixup is derived from the CMRC 2018 Public Datasets, designed for Chinese machine reading comprehension. It contains ~20k questions annotated on Wikipedia paragraphs by human experts. We manually pick 200 QA pairs and their corresponding documents as supporting QA pairs and paragraphs.\n* multifieldqa-en-mixup and multifieldqa-zh-mixup are built from the MultiFieldQA datasets in LongBench. We manually remove questions that can be answered using common-sense knowledge without referring to the context, and eventually get 101 and 133 unique QA pairs for multifieldqa-en-mixup and multifieldqa-zh-mixup, respectively.\n* factrecall-en and factrecall-zh are two synthetic datasets designed to assess the LLMs' ability to identify a small piece of evidence (“fact”) located at various locations within a very lengthy context. We write one English fact-question-answer pair for factrecall-en and one Chinese fact-question-answer pair for factrecall-zh. Distracting documents are sourced from *PG-19* dataset (English) and the book of *Dream of the Red Chamber* (Chinese) to create five contexts of different length levels. For each context, we generate 200 documents by inserting the fact at 200 evenly spaced positions within the context.\n\n\nLicense\n=======\n\n\nIn LV-Eval, the cmrc-mixup and lic-mixup datasets follow 'CC-BY-SA-4.0' license, and the other datasets follow 'MIT' license." ]
[ "TAGS\n#language-English #language-Chinese #license-mit #arxiv-2402.05136 #region-us \n", "#### Quick Start\n\n\nOur dataset evaluates the long-text capabilities of the large language models from multiple perspectives. Each subset has different length divisions, so please add a length limit when loading the dataset.", "#### Loading Data\n\n\nIf you want to download the data for hotpotwikiqa\\_mixup, you can visit this link.\nIf you need other subsets of data, simply change the zip file name in the link above.", "#### Data Format\n\n\nAll data in LV-Eval follows the following format. For certain datasets (\"loogle\\_SD\\_mixup,\" \"loogle\\_CR\\_mixup,\" \"loogle\\_MIR\\_mixup\"), there is an additional key called \"answer\\_keywords\". This key indicates the most crucial word or sentence in the answer. During the evaluation of predicted values, if the match between the prediction and the \"answer\\_keywords\" falls below a certain threshold, it directly returns 0. Otherwise, it compares the \"answers\" list with the predicted value.\n\n\nFor some datasets (\"factrecall\\_en,\" \"factrecall\\_zh,\" \"cmrc\\_mixup\"), there is an extra key called \"confusing\\_facts\". This key represents confounding elements added to increase the benchmark difficulty and has been randomly placed within long texts.\n\n\nFor certain datasets (\"hotpotwikiqa\\_mixup,\" \"multifieldqa\\_en\\_mixup,\" \"multifieldqa\\_zh\\_mixup,\" \"lic\\_mixup\"), both \"answer\\_keywords\" and \"confusing\\_facts\" are present.", "#### Evaluation\n\n\nThis repository provides data download for LV-Eval. If you wish to use this dataset for automated evaluation, please refer to our github.\n\n\nTask statistics\n===============\n\n\n\nThe abbreviations for CFI, KPR, AK represent for confusing fact insertion, keyword and phrase replacement, and answer keywords, respectively. The confusing fact insertion has already been inserted into the context and will be displayed in the jsonl file as \"confusing\\_facts\". The answer keywords will be shown in the form of \"answer\\_keywords\" in the jsonl file.\n\n\nTask construction\n=================", "### Multi-hop QA\n\n\nIn a multi-hop QA task, the reasoning process to derive the answer need to gather multiple pieces of information from various locations in the context.\n\n\n* lic-mixup is originated from the Long-instruction-en2zh dataset on Hugging Face. The original Long-instruction-en2zh contains 8,000+ high-quality Chinese multi-doc QA data translated from English. We selected 197 QA pairs and their corresponding documents as supporting data, while the remaining documents serve as distracting data for context mixing.\n* hotpotwikiqa-mixup is originated from two Wikipedia-based multi-hop QA datasets: HotpotQA and 2WikiMultihopQA. HotpotQA contains 112,779 2-hop questions that are written by native speakers according to two given paragraphs as the context. 2WikiMultihopQA contains 192,606 5-hop questions that are synthesized using manually designed templates to prevent shortcut solutions. We select 124 samples from the two datasets.\n* loogle-MR-mixup and loogle-CR-mixup originate from LooGLE's Long-dependency QA task, specifically the *Multiple information Retrieval* and *Comprehension and Reasoning* subtasks. 
The *Multiple information Retrieval* task requires aggregation of the evidence that can be directly located in original sentences, while the *Comprehension and Reasoning* task contains implicit evidence within the context, it requires multi-step reasoning to get the correct answers. We select 139 and 99 questions for loogle-MR-mixup and loogle-CR-mixup, respectively.\n* dureader-mixup is built from the DuReader dataset. We first randomly select 200 instances and then manually remove 24 samples whose answers are longer than 360 words.", "### Single-hop QA\n\n\nIn a single-hop QA task, only a single evidence in the context is needed to derive the answer.\n\n\n* loogle-SD-mixup contains 160 unique QA pairs and 800 documents originated from the short-dependency QA task in LooGLE.\n* cmrc-mixup is derived from the CMRC 2018 Public Datasets, designed for Chinese machine reading comprehension. It contains ~20k questions annotated on Wikipedia paragraphs by human experts. We manually pick 200 QA pairs and their corresponding documents as supporting QA pairs and paragraphs.\n* multifieldqa-en-mixup and multifieldqa-zh-mixup are built from the MultiFieldQA datasets in LongBench. We manually remove questions that can be answered using common-sense knowledge without referring to the context, and eventually get 101 and 133 unique QA pairs for multifieldqa-en-mixup and multifieldqa-zh-mixup, respectively.\n* factrecall-en and factrecall-zh are two synthetic datasets designed to assess the LLMs' ability to identify a small piece of evidence (“fact”) located at various locations within a very lengthy context. We write one English fact-question-answer pair for factrecall-en and one Chinese fact-question-answer pair for factrecall-zh. Distracting documents are sourced from *PG-19* dataset (English) and the book of *Dream of the Red Chamber* (Chinese) to create five contexts of different length levels. For each context, we generate 200 documents by inserting the fact at 200 evenly spaced positions within the context.\n\n\nLicense\n=======\n\n\nIn LV-Eval, the cmrc-mixup and lic-mixup datasets follow 'CC-BY-SA-4.0' license, and the other datasets follow 'MIT' license." ]
9a0bbdb06ccd8bcab8398dfef475ead0913e6973
![Aya Header](https://huggingface.co/datasets/CohereForAI/aya_dataset/resolve/main/aya_header.png) # Dataset Summary `Aya Evaluation Suite` contains a total of 26,750 open-ended conversation-style prompts to evaluate multilingual open-ended generation quality.\ To strike a balance between language coverage and the quality that comes with human curation, we create an evaluation suite that includes: 1) human-curated examples in 7 languages (`tur, eng, yor, arb, zho, por, tel`) → `aya-human-annotated`. 2) machine-translations of handpicked examples into 101 languages → `dolly-machine-translated`. 3) human-post-edited translations into 6 languages (`hin, srp, rus, fra, arb, spa`) → `dolly-human-edited`. --- - **Curated by:** Contributors of [Aya Open Science Intiative](https://aya.for.ai/), professional annotators, and synthetic generation - **Language(s):** 101 languages - **License:** [Apache 2.0](https://opensource.org/license/apache-2-0) - **Aya Datasets Family:** | Name | Explanation | |------|--------------| | [aya_dataset](https://huggingface.co/datasets/CohereForAI/aya_dataset) | Human-annotated multilingual instruction finetuning dataset, comprising over 204K instances across 65 languages. | | [aya_collection](https://huggingface.co/datasets/CohereForAI/aya_collection) | Created by applying instruction-style templates from fluent speakers to 44 datasets, including translations of 19 instruction-style datasets into 101 languages, providing 513M instances for various tasks.| | [aya_evaluation_suite](https://huggingface.co/datasets/CohereForAI/aya_evaluation_suite) | A diverse evaluation set for multilingual open-ended generation, featuring 250 culturally grounded prompts in 7 languages, 200 translated prompts in 24 languages, and human-edited versions selected for cross-cultural relevance from English Dolly in 6 languages.| # Dataset The `Aya Evaluation Suite` includes the following subsets: 1. **aya-human-annotated**: 250 original human-written prompts in 7 languages each. 2. **dolly-machine-translated**: 200 human-selected prompts from [databricks-dolly-15k](https://www.databricks.com/blog/2023/04/12/dolly-first-open-commercially-viable-instruction-tuned-llm) , automatically translated with the [NLLB model](https://ai.meta.com/research/no-language-left-behind/) from English into 101 languages (114 dialects in total). 3. **dolly-human-edited**: 200 dolly-machine-translated prompts post-edited by fluent speakers for 6 languages. ## Load with Datasets To load this dataset consisting of prompt-completions with `datasets`, you just need to install Datasets as `pip install datasets --upgrade` and then use the following code: ```python from datasets import load_dataset aya_eval = load_dataset("CohereForAI/aya_evaluation_suite", "dataset") ``` ## Data Fields - `id`: Unique id of the data point. - `inputs`: Prompt or input to the language model. - `targets`: Completion or output of the language model. (Not applicable for `dolly-human-edited`) - `language`: The language of the `prompt` and `completion.` - `script`: The writing system of the language. - `source_id`: Corresponding original row index from the [databricks-dolly-15k](https://huggingface.co/datasets/databricks/databricks-dolly-15k) dataset (Field applicable only for subsets `dolly-machine-translated` & `dolly-human-edited`) ## Data Instances Example data instances from the `Aya Evaluation Suite` subsets are listed in the toggled sections below. 
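Before the example instances below, a brief usage sketch that is not part of the original card: loading one subset and grouping prompts by the `language` field just documented. The config string `aya_human_annotated` is an assumption derived from the subset name above (the card's own snippet passes `"dataset"`), and the split name is not stated in the card, so the sketch simply takes whichever split is present.

```python
from datasets import load_dataset

# Config name assumed from the subset name above; check the repo for the exact config strings.
dd = load_dataset("CohereForAI/aya_evaluation_suite", "aya_human_annotated")
split = list(dd.keys())[0]   # split name is not stated in the card, so take whatever exists
rows = dd[split]

# Group prompts by language using the documented fields (inputs, targets, language, script).
by_language = {}
for row in rows:
    by_language.setdefault(row["language"], []).append(row["inputs"])

for lang, prompts in sorted(by_language.items()):
    print(lang, len(prompts), prompts[0][:60])
```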
<details> <summary> <b>aya-human-annotated</b> </summary> ```json { "id": 42, "inputs": "What day is known as Star Wars Day?", "targets": "May 4th (May the 4th be with you!)", "language": "eng", "script": "Latn", } ``` </details> <b>Dolly-machine-translated and dolly-human-edited</b> - These two subsets are parallel datasets (data instances can be mapped using their `id` column). - Note that in the `dolly-machine-translated` subset, we also include the original English subset (`id 1-200`), which is translated into 101 languages. Furthermore, the field `id` can be used to match the translations of the same data instance across languages. - The `source_id` field contains the corresponding original row index from the [databricks-dolly-15k](https://huggingface.co/datasets/databricks/databricks-dolly-15k) dataset. <details> <summary> <b>dolly-machine-translated</b> </summary> ```json { "id": 2, "inputs": "How to escape from a helicopter trapped in water ?", "targets": "If you are ever trapped inside a helicopter while submerged in water, it’s best to try and remain calm until the cabin is completely underwater. It’s better to wait for pressure to be equalized, before you try to open the door or break the glass to escape.", "language": "eng", "script": "Latn", "source_id": 6060, } ``` </details> <details> <summary> <b>dolly-human-edited</b> </summary> ```json { "id": 2, "inputs": "Comment peut-on s'échapper d'un hélicoptère piégé dans l'eau ?", "targets": "-", "language": "fra", "script": "Latn", "source_id": 6060, } ``` </details> ## Statistics The toggled table below lists the breakdown of languages in each subset. ### Languages <details> <summary> <b>aya-human-annotated</b> </summary> | ISO Code | Language | Resources | |----------|----------|---------------| | `tel` | Telugu | Low | | `yor` | Yorùbá | Low | | `arb` | Arabic | High | | `tur` | Turkish | High | | `por` | Portuguese | High | | `zho` | Chinese (Simplified) | High | | `eng` | English | High | </details> <details> <summary> <b>dolly-machine-translated</b> </summary> | ISO Code | Language | Resources | |----------|----------|-----------| | `ace` | Achinese | Low | | `afr` | Afrikaans | Mid | | `amh` | Amharic | Low | | `ara` (`arb`, `acm`, `acq`, `aeb`, `ajp`, `apc`, `ars`, `ary` & `arz`) | Arabic (Standard, Gelet Iraqi, Ta'izzi-Adeni, Tunisian, South Levantine, North Levantine, Najdi, Moroccan & Egyptian) | High | | `aze` (`azb` & `azj`) | Azerbaijani (South & North) | Low | | `bel` | Belarusian | Mid | | `ben` | Bengali | Mid | | `bjn` | Banjar | Low | | `bul` | Bulgarian | Mid | | `cat` | Catalan | High | | `ceb` | Cebuano | Mid | | `ces` | Czech | High | | `cym` | Welsh | Low | | `dan` | Danish | Mid | | `deu` | German | High | | `ell` | Greek | Mid | | `eng` | English | High | | `epo` | Esperanto | Low | | `est` | Estonian | Mid | | `eus` | Basque | High | | `fin` | Finnish | High | | `fra` | French | High | | `gla` | Scottish Gaelic | Low | | `gle` | Irish | Low | | `glg` | Galician | Mid | | `guj` | Gujarati | Low | | `hat` | Haitian Creole | Low | | `hau` | Hausa | Low | | `heb` | Hebrew | Mid | | `hin` | Hindi | High | | `hun` | Hungarian | High | | `hye` | Armenian | Low | | `ibo` | Igbo | Low | | `ind` | Indonesian | Mid | | `isl` | Icelandic | Low | | `ita` | Italian | High | | `jav` | Javanese | Low | | `jpn` | Japanese | High | | `kan` | Kannada | Low | | `kas` | Kashmiri | Low | | `kat` | Georgian | Mid | | `kau` (`knc`) | Kanuri (Central) | Low | | `kaz` | Kazakh | Mid | | `khm` | Khmer | Low | | `kir` | 
Kyrgyz | Low | | `kor` | Korean | High | | `kur` (`ckb` & `kmr`) | Kurdish (Central & Northern) | Low | | `lao` | Lao | Low | | `lav` (`lvs`) | Latvian (Standard) | Mid | | `lit` | Lithuanian | Mid | | `ltz` | Luxembourgish | Low | | `mal` | Malayalam | Low | | `mar` | Marathi | Low | | `min` | Minangkabau | Low | | `mkd` | Macedonian | Low | | `mlg` (`plt`) | Malagasy (Plateau) | Low | | `mlt` | Maltese | Low | | `mni` | Manipuri | Low | | `mon` (`khk`) | Mongolian (Khalkha) | Low | | `mri` | Maori | Low | | `msa` (`zsm`) | Malay (Standard) | Mid | | `mya` | Burmese | Low | | `nep` (`npi`) | Nepali | Low | | `nld` | Dutch | High | | `nor` (`nno` & `nob`) | Norwegian (Nynorsk & Bokmål) | Low | | `nso` | Northern Sotho | Low | | `pes` | Persian | High | | `pol` | Polish | High | | `por` | Portuguese | High | | `pus` (`pbt`) | Pashto (Southern) | Low | | `ron` | Romanian | Mid | | `rus` | Russian | High | | `sin` | Sinhala | Low | | `slk` | Slovak | Mid | | `slv` | Slovenian | Mid | | `smo` | Samoan | Low | | `sna` | Shona | Low | | `snd` | Sindhi | Low | | `som` | Somali | Low | | `sot` | Southern Sotho | Low | | `spa` | Spanish | High | | `sqi` (`als`) | Albanian (Tosk) | Low | | `srp` | Serbian | High | | `sun` | Sundanese | Low | | `swa` (`swh`) | Swahili (Coastal) | Low | | `swe` | Swedish | High | | `tam` | Tamil | Mid | | `taq` | Tamasheq | Low | | `tel` | Telugu | Low | | `tgk` | Tajik | Low | | `tha` | Thai | Mid | | `tur` | Turkish | High | | `ukr` | Ukrainian | Mid | | `urd` | Urdu | Mid | | `uzb` (`uzn`) | Uzbek (Nothern) | Mid | | `vie` | Vietnamese | High | | `xho` | Xhosa | Low | | `yid` (`ydd`) | Yiddish (Eastern) | Low | | `yor` | Yoruba | Low | | `zho` (+ `yue`) | Chinese (Simplified & Cantonese) | High | | `zul` | Zulu | Low | </details> <details> <summary> <b>dolly-human-edited</b> </summary> | ISO Code | Language | Resources | |----------|----------|-----------| | `arb` | Arabic | High | | `fra` | French | High | | `hin` | Hindi | High | | `rus` | Russian | High | | `spa` | Spanish | High | | `srp` | Serbian | High | </details> <br> # Motivations & Intentions - **Curation Rationale:** This evaluation suite is tailored to test the generation quality of multilingual models, with the aim of balancing language coverage and human-sourced quality. It covers prompts originally written in each language, as well as English-centric translated, and manually curated or edited prompts for a linguistically broad, but rich testbed. The list of languages was initially established from mT5 and aligned with the annotators’ language list and the NLLB translation model. # Known Limitations - **Translation Quality:** Note that the expressiveness of the `dolly-machine-translated` subset is limited by the quality of the translation model and may adversely impact an estimate of ability in languages where translations are not adequate. If this subset is used for testing, we recommend it be paired and reported with the professionally post-edited `dolly-human-edited` subset or the `aya-human-annotated` set, which, while covering only 7 languages, is entirely created by proficient target language speakers. --- # Additional Information ## Provenance - **Methods Used:** combination of original annotations by volunteers, automatic translation, and post-editing of translations by professional annotators. 
- **Methodology Details:** - *Source:* Original annotations from Aya dataset along with translations and post-edits of Dolly dataset - *Platform:* [Aya Annotation Platform](https://aya.for.ai/) - *Dates of Collection:* May 2023 - Dec 2023 ## Dataset Version and Maintenance - **Maintenance Status:** Actively Maintained - **Version Details:** - *Current version:* 1.0 - *Last Update:* 02/2024 - *First Release:* 02/2024 - **Maintenance Plan:** No updates planned. ## Authorship - **Publishing Organization:** [Cohere For AI](https://cohere.com/research) - **Industry Type:** Not-for-profit - Tech - **Contact Details:** https://aya.for.ai/ ## Licensing Information This dataset can be used for any purpose, whether academic or commercial, under the terms of the [Apache 2.0](https://opensource.org/license/apache-2-0) License. ## Citation Information ```bibtex @misc{singh2024aya, title={Aya Dataset: An Open-Access Collection for Multilingual Instruction Tuning}, author={Shivalika Singh and Freddie Vargus and Daniel Dsouza and Börje F. Karlsson and Abinaya Mahendiran and Wei-Yin Ko and Herumb Shandilya and Jay Patel and Deividas Mataciunas and Laura OMahony and Mike Zhang and Ramith Hettiarachchi and Joseph Wilson and Marina Machado and Luisa Souza Moura and Dominik Krzemiński and Hakimeh Fadaei and Irem Ergün and Ifeoma Okoh and Aisha Alaagib and Oshan Mudannayake and Zaid Alyafeai and Vu Minh Chien and Sebastian Ruder and Surya Guthikonda and Emad A. Alghamdi and Sebastian Gehrmann and Niklas Muennighoff and Max Bartolo and Julia Kreutzer and Ahmet Üstün and Marzieh Fadaee and Sara Hooker}, year={2024}, eprint={2402.06619}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
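As a usage sketch (assuming the `aya_human_annotated` config with its single `test` split, as listed in this dataset's configuration metadata, and the fields described under Data Fields), one way to load a subset and inspect a record is:

```python
from datasets import load_dataset

# Load one subset of the evaluation suite; each config ships a single "test" split.
aya_human = load_dataset("CohereForAI/aya_evaluation_suite", "aya_human_annotated", split="test")

example = aya_human[0]
print(example["language"], example["script"])  # language code and writing system
print("Prompt:", example["inputs"])
print("Completion:", example["targets"])
```

For the `dolly_machine_translated` and `dolly_human_edited` configs, the extra `source_id` field points back to the original databricks-dolly-15k row, and the shared `id` field can be used to align translations of the same prompt across languages.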
CohereForAI/aya_evaluation_suite
[ "task_categories:text-generation", "language_creators:crowdsourced", "language_creators:expert-generated", "language_creators:machine-generated", "multilinguality:multilingual", "size_categories:10K<n<100K", "source_datasets:original", "source_datasets:extended", "language:afr", "language:sqi", "language:amh", "language:ara", "language:aze", "language:bel", "language:ben", "language:bul", "language:cat", "language:ceb", "language:ces", "language:kur", "language:cym", "language:dan", "language:deu", "language:ell", "language:eng", "language:epo", "language:est", "language:eus", "language:fin", "language:fra", "language:gla", "language:gle", "language:glg", "language:guj", "language:hat", "language:hau", "language:heb", "language:hin", "language:hun", "language:hye", "language:ibo", "language:ind", "language:isl", "language:ita", "language:jav", "language:jpn", "language:kan", "language:kat", "language:kaz", "language:mon", "language:khm", "language:kir", "language:kor", "language:lao", "language:lit", "language:ltz", "language:lav", "language:mal", "language:mar", "language:mkd", "language:mlt", "language:mri", "language:mya", "language:nld", "language:nor", "language:nep", "language:sot", "language:pus", "language:pes", "language:mlg", "language:pol", "language:por", "language:ron", "language:rus", "language:sin", "language:slk", "language:slv", "language:smo", "language:sna", "language:snd", "language:som", "language:spa", "language:srp", "language:sun", "language:swe", "language:swa", "language:tam", "language:tel", "language:tgk", "language:tha", "language:tur", "language:ukr", "language:urd", "language:uzb", "language:vie", "language:xho", "language:yid", "language:yor", "language:zho", "language:msa", "language:zul", "language:ace", "language:bjn", "language:kas", "language:kau", "language:min", "language:mni", "language:taq", "language:nso", "license:apache-2.0", "arxiv:2402.06619", "region:us" ]
2024-02-06T08:54:09+00:00
{"language_creators": ["crowdsourced", "expert-generated", "machine-generated"], "language": ["afr", "sqi", "amh", "ara", "aze", "bel", "ben", "bul", "cat", "ceb", "ces", "kur", "cym", "dan", "deu", "ell", "eng", "epo", "est", "eus", "fin", "fra", "gla", "gle", "glg", "guj", "hat", "hau", "heb", "hin", "hun", "hye", "ibo", "ind", "isl", "ita", "jav", "jpn", "kan", "kat", "kaz", "mon", "khm", "kir", "kor", "lao", "lit", "ltz", "lav", "mal", "mar", "mkd", "mlt", "mri", "mya", "nld", "nor", "nep", "sot", "pus", "pes", "mlg", "pol", "por", "ron", "rus", "sin", "slk", "slv", "smo", "sna", "snd", "som", "spa", "srp", "sun", "swe", "swa", "tam", "tel", "tgk", "tha", "tur", "ukr", "urd", "uzb", "vie", "xho", "yid", "yor", "zho", "msa", "zul", "ace", "bjn", "kas", "kau", "min", "mni", "taq", "nso"], "license": "apache-2.0", "multilinguality": ["multilingual"], "size_categories": ["10K<n<100K"], "source_datasets": ["original", "extended"], "task_categories": ["text-generation"], "pretty_name": "Aya Evaluation Suite", "dataset_info": [{"config_name": "aya_human_annotated", "features": [{"name": "id", "dtype": "int64"}, {"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "script", "dtype": "string"}], "splits": [{"name": "test", "num_bytes": 1624958, "num_examples": 1750}], "download_size": 974483, "dataset_size": 1624958}, {"config_name": "dolly_human_edited", "features": [{"name": "id", "dtype": "int64"}, {"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "script", "dtype": "string"}, {"name": "source_id", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 1219111, "num_examples": 1200}], "download_size": 602117, "dataset_size": 1219111}, {"config_name": "dolly_machine_translated", "features": [{"name": "id", "dtype": "int64"}, {"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "language", "dtype": "string"}, {"name": "script", "dtype": "string"}, {"name": "source_id", "dtype": "int64"}], "splits": [{"name": "test", "num_bytes": 39679355, "num_examples": 23800}], "download_size": 20100505, "dataset_size": 39679355}], "configs": [{"config_name": "aya_human_annotated", "data_files": [{"split": "test", "path": "aya_human_annotated/test-*"}]}, {"config_name": "dolly_human_edited", "data_files": [{"split": "test", "path": "dolly_human_edited/test-*"}]}, {"config_name": "dolly_machine_translated", "data_files": [{"split": "test", "path": "dolly_machine_translated/test-*"}]}]}
2024-02-13T10:36:26+00:00
[ "2402.06619" ]
[ "afr", "sqi", "amh", "ara", "aze", "bel", "ben", "bul", "cat", "ceb", "ces", "kur", "cym", "dan", "deu", "ell", "eng", "epo", "est", "eus", "fin", "fra", "gla", "gle", "glg", "guj", "hat", "hau", "heb", "hin", "hun", "hye", "ibo", "ind", "isl", "ita", "jav", "jpn", "kan", "kat", "kaz", "mon", "khm", "kir", "kor", "lao", "lit", "ltz", "lav", "mal", "mar", "mkd", "mlt", "mri", "mya", "nld", "nor", "nep", "sot", "pus", "pes", "mlg", "pol", "por", "ron", "rus", "sin", "slk", "slv", "smo", "sna", "snd", "som", "spa", "srp", "sun", "swe", "swa", "tam", "tel", "tgk", "tha", "tur", "ukr", "urd", "uzb", "vie", "xho", "yid", "yor", "zho", "msa", "zul", "ace", "bjn", "kas", "kau", "min", "mni", "taq", "nso" ]
TAGS #task_categories-text-generation #language_creators-crowdsourced #language_creators-expert-generated #language_creators-machine-generated #multilinguality-multilingual #size_categories-10K<n<100K #source_datasets-original #source_datasets-extended #language-Afrikaans #language-Albanian #language-Amharic #language-Arabic #language-Azerbaijani #language-Belarusian #language-Bengali #language-Bulgarian #language-Catalan #language-Cebuano #language-Czech #language-Kurdish #language-Welsh #language-Danish #language-German #language-Modern Greek (1453-) #language-English #language-Esperanto #language-Estonian #language-Basque #language-Finnish #language-French #language-Scottish Gaelic #language-Irish #language-Galician #language-Gujarati #language-Haitian #language-Hausa #language-Hebrew #language-Hindi #language-Hungarian #language-Armenian #language-Igbo #language-Indonesian #language-Icelandic #language-Italian #language-Javanese #language-Japanese #language-Kannada #language-Georgian #language-Kazakh #language-Mongolian #language-Khmer #language-Kirghiz #language-Korean #language-Lao #language-Lithuanian #language-Luxembourgish #language-Latvian #language-Malayalam #language-Marathi #language-Macedonian #language-Maltese #language-Maori #language-Burmese #language-Dutch #language-Norwegian #language-Nepali (macrolanguage) #language-Southern Sotho #language-Pushto #language-Iranian Persian #language-Malagasy #language-Polish #language-Portuguese #language-Romanian #language-Russian #language-Sinhala #language-Slovak #language-Slovenian #language-Samoan #language-Shona #language-Sindhi #language-Somali #language-Spanish #language-Serbian #language-Sundanese #language-Swedish #language-Swahili (macrolanguage) #language-Tamil #language-Telugu #language-Tajik #language-Thai #language-Turkish #language-Ukrainian #language-Urdu #language-Uzbek #language-Vietnamese #language-Xhosa #language-Yiddish #language-Yoruba #language-Chinese #language-Malay (macrolanguage) #language-Zulu #language-Achinese #language-Banjar #language-Kashmiri #language-Kanuri #language-Minangkabau #language-Manipuri #language-Tamasheq #language-Pedi #license-apache-2.0 #arxiv-2402.06619 #region-us
!Aya Header Dataset Summary =============== 'Aya Evaluation Suite' contains a total of 26,750 open-ended conversation-style prompts to evaluate multilingual open-ended generation quality. To strike a balance between language coverage and the quality that comes with human curation, we create an evaluation suite that includes: 1. human-curated examples in 7 languages ('tur, eng, yor, arb, zho, por, tel') → 'aya-human-annotated'. 2. machine-translations of handpicked examples into 101 languages → 'dolly-machine-translated'. 3. human-post-edited translations into 6 languages ('hin, srp, rus, fra, arb, spa') → 'dolly-human-edited'. --- * Curated by: Contributors of Aya Open Science Intiative, professional annotators, and synthetic generation * Language(s): 101 languages * License: Apache 2.0 * Aya Datasets Family: Dataset ======= The 'Aya Evaluation Suite' includes the following subsets: 1. aya-human-annotated: 250 original human-written prompts in 7 languages each. 2. dolly-machine-translated: 200 human-selected prompts from databricks-dolly-15k , automatically translated with the NLLB model from English into 101 languages (114 dialects in total). 3. dolly-human-edited: 200 dolly-machine-translated prompts post-edited by fluent speakers for 6 languages. Load with Datasets ------------------ To load this dataset consisting of prompt-completions with 'datasets', you just need to install Datasets as 'pip install datasets --upgrade' and then use the following code: Data Fields ----------- * 'id': Unique id of the data point. * 'inputs': Prompt or input to the language model. * 'targets': Completion or output of the language model. (Not applicable for 'dolly-human-edited') * 'language': The language of the 'prompt' and 'completion.' * 'script': The writing system of the language. * 'source\_id': Corresponding original row index from the databricks-dolly-15k dataset (Field applicable only for subsets 'dolly-machine-translated' & 'dolly-human-edited') Data Instances -------------- Example data instances from the 'Aya Evaluation Suite' subsets are listed in the toggled sections below. **aya-human-annotated** **Dolly-machine-translated and dolly-human-edited** * These two subsets are parallel datasets (data instances can be mapped using their 'id' column). * Note that in the 'dolly-machine-translated' subset, we also include the original English subset ('id 1-200'), which is translated into 101 languages. Furthermore, the field 'id' can be used to match the translations of the same data instance across languages. * The 'source\_id' field contains the corresponding original row index from the databricks-dolly-15k dataset. **dolly-machine-translated** **dolly-human-edited** Statistics ---------- The toggled table below lists the breakdown of languages in each subset. 
### Languages **aya-human-annotated** ISO Code: 'tel', Language: Telugu, Resources: Low ISO Code: 'yor', Language: Yorùbá, Resources: Low ISO Code: 'arb', Language: Arabic, Resources: High ISO Code: 'tur', Language: Turkish, Resources: High ISO Code: 'por', Language: Portuguese, Resources: High ISO Code: 'zho', Language: Chinese (Simplified), Resources: High ISO Code: 'eng', Language: English, Resources: High **dolly-machine-translated** ISO Code: 'ace', Language: Achinese, Resources: Low ISO Code: 'afr', Language: Afrikaans, Resources: Mid ISO Code: 'amh', Language: Amharic, Resources: Low ISO Code: 'ara' ('arb', 'acm', 'acq', 'aeb', 'ajp', 'apc', 'ars', 'ary' & 'arz'), Language: Arabic (Standard, Gelet Iraqi, Ta'izzi-Adeni, Tunisian, South Levantine, North Levantine, Najdi, Moroccan & Egyptian), Resources: High ISO Code: 'aze' ('azb' & 'azj'), Language: Azerbaijani (South & North), Resources: Low ISO Code: 'bel', Language: Belarusian, Resources: Mid ISO Code: 'ben', Language: Bengali, Resources: Mid ISO Code: 'bjn', Language: Banjar, Resources: Low ISO Code: 'bul', Language: Bulgarian, Resources: Mid ISO Code: 'cat', Language: Catalan, Resources: High ISO Code: 'ceb', Language: Cebuano, Resources: Mid ISO Code: 'ces', Language: Czech, Resources: High ISO Code: 'cym', Language: Welsh, Resources: Low ISO Code: 'dan', Language: Danish, Resources: Mid ISO Code: 'deu', Language: German, Resources: High ISO Code: 'ell', Language: Greek, Resources: Mid ISO Code: 'eng', Language: English, Resources: High ISO Code: 'epo', Language: Esperanto, Resources: Low ISO Code: 'est', Language: Estonian, Resources: Mid ISO Code: 'eus', Language: Basque, Resources: High ISO Code: 'fin', Language: Finnish, Resources: High ISO Code: 'fra', Language: French, Resources: High ISO Code: 'gla', Language: Scottish Gaelic, Resources: Low ISO Code: 'gle', Language: Irish, Resources: Low ISO Code: 'glg', Language: Galician, Resources: Mid ISO Code: 'guj', Language: Gujarati, Resources: Low ISO Code: 'hat', Language: Haitian Creole, Resources: Low ISO Code: 'hau', Language: Hausa, Resources: Low ISO Code: 'heb', Language: Hebrew, Resources: Mid ISO Code: 'hin', Language: Hindi, Resources: High ISO Code: 'hun', Language: Hungarian, Resources: High ISO Code: 'hye', Language: Armenian, Resources: Low ISO Code: 'ibo', Language: Igbo, Resources: Low ISO Code: 'ind', Language: Indonesian, Resources: Mid ISO Code: 'isl', Language: Icelandic, Resources: Low ISO Code: 'ita', Language: Italian, Resources: High ISO Code: 'jav', Language: Javanese, Resources: Low ISO Code: 'jpn', Language: Japanese, Resources: High ISO Code: 'kan', Language: Kannada, Resources: Low ISO Code: 'kas', Language: Kashmiri, Resources: Low ISO Code: 'kat', Language: Georgian, Resources: Mid ISO Code: 'kau' ('knc'), Language: Kanuri (Central), Resources: Low ISO Code: 'kaz', Language: Kazakh, Resources: Mid ISO Code: 'khm', Language: Khmer, Resources: Low ISO Code: 'kir', Language: Kyrgyz, Resources: Low ISO Code: 'kor', Language: Korean, Resources: High ISO Code: 'kur' ('ckb' & 'kmr'), Language: Kurdish (Central & Northern), Resources: Low ISO Code: 'lao', Language: Lao, Resources: Low ISO Code: 'lav' ('lvs'), Language: Latvian (Standard), Resources: Mid ISO Code: 'lit', Language: Lithuanian, Resources: Mid ISO Code: 'ltz', Language: Luxembourgish, Resources: Low ISO Code: 'mal', Language: Malayalam, Resources: Low ISO Code: 'mar', Language: Marathi, Resources: Low ISO Code: 'min', Language: Minangkabau, Resources: Low ISO Code: 'mkd', Language: 
Macedonian, Resources: Low ISO Code: 'mlg' ('plt'), Language: Malagasy (Plateau), Resources: Low ISO Code: 'mlt', Language: Maltese, Resources: Low ISO Code: 'mni', Language: Manipuri, Resources: Low ISO Code: 'mon' ('khk'), Language: Mongolian (Khalkha), Resources: Low ISO Code: 'mri', Language: Maori, Resources: Low ISO Code: 'msa' ('zsm'), Language: Malay (Standard), Resources: Mid ISO Code: 'mya', Language: Burmese, Resources: Low ISO Code: 'nep' ('npi'), Language: Nepali, Resources: Low ISO Code: 'nld', Language: Dutch, Resources: High ISO Code: 'nor' ('nno' & 'nob'), Language: Norwegian (Nynorsk & Bokmål), Resources: Low ISO Code: 'nso', Language: Northern Sotho, Resources: Low ISO Code: 'pes', Language: Persian, Resources: High ISO Code: 'pol', Language: Polish, Resources: High ISO Code: 'por', Language: Portuguese, Resources: High ISO Code: 'pus' ('pbt'), Language: Pashto (Southern), Resources: Low ISO Code: 'ron', Language: Romanian, Resources: Mid ISO Code: 'rus', Language: Russian, Resources: High ISO Code: 'sin', Language: Sinhala, Resources: Low ISO Code: 'slk', Language: Slovak, Resources: Mid ISO Code: 'slv', Language: Slovenian, Resources: Mid ISO Code: 'smo', Language: Samoan, Resources: Low ISO Code: 'sna', Language: Shona, Resources: Low ISO Code: 'snd', Language: Sindhi, Resources: Low ISO Code: 'som', Language: Somali, Resources: Low ISO Code: 'sot', Language: Southern Sotho, Resources: Low ISO Code: 'spa', Language: Spanish, Resources: High ISO Code: 'sqi' ('als'), Language: Albanian (Tosk), Resources: Low ISO Code: 'srp', Language: Serbian, Resources: High ISO Code: 'sun', Language: Sundanese, Resources: Low ISO Code: 'swa' ('swh'), Language: Swahili (Coastal), Resources: Low ISO Code: 'swe', Language: Swedish, Resources: High ISO Code: 'tam', Language: Tamil, Resources: Mid ISO Code: 'taq', Language: Tamasheq, Resources: Low ISO Code: 'tel', Language: Telugu, Resources: Low ISO Code: 'tgk', Language: Tajik, Resources: Low ISO Code: 'tha', Language: Thai, Resources: Mid ISO Code: 'tur', Language: Turkish, Resources: High ISO Code: 'ukr', Language: Ukrainian, Resources: Mid ISO Code: 'urd', Language: Urdu, Resources: Mid ISO Code: 'uzb' ('uzn'), Language: Uzbek (Nothern), Resources: Mid ISO Code: 'vie', Language: Vietnamese, Resources: High ISO Code: 'xho', Language: Xhosa, Resources: Low ISO Code: 'yid' ('ydd'), Language: Yiddish (Eastern), Resources: Low ISO Code: 'yor', Language: Yoruba, Resources: Low ISO Code: 'zho' (+ 'yue'), Language: Chinese (Simplified & Cantonese), Resources: High ISO Code: 'zul', Language: Zulu, Resources: Low **dolly-human-edited** ISO Code: 'arb', Language: Arabic, Resources: High ISO Code: 'fra', Language: French, Resources: High ISO Code: 'hin', Language: Hindi, Resources: High ISO Code: 'rus', Language: Russian, Resources: High ISO Code: 'spa', Language: Spanish, Resources: High ISO Code: 'srp', Language: Serbian, Resources: High Motivations & Intentions ======================== * Curation Rationale: This evaluation suite is tailored to test the generation quality of multilingual models, with the aim of balancing language coverage and human-sourced quality. It covers prompts originally written in each language, as well as English-centric translated, and manually curated or edited prompts for a linguistically broad, but rich testbed. The list of languages was initially established from mT5 and aligned with the annotators’ language list and the NLLB translation model. 
Known Limitations ================= * Translation Quality: Note that the expressiveness of the 'dolly-machine-translated' subset is limited by the quality of the translation model and may adversely impact an estimate of ability in languages where translations are not adequate. If this subset is used for testing, we recommend it be paired and reported with the professionally post-edited 'dolly-human-edited' subset or the 'aya-human-annotated' set, which, while covering only 7 languages, is entirely created by proficient target language speakers. --- Additional Information ====================== Provenance ---------- * Methods Used: combination of original annotations by volunteers, automatic translation, and post-editing of translations by professional annotators. * Methodology Details: + *Source:* Original annotations from Aya dataset along with translations and post-edits of Dolly dataset + *Platform:* Aya Annotation Platform + *Dates of Collection:* May 2023 - Dec 2023 Dataset Version and Maintenance ------------------------------- * Maintenance Status: Actively Maintained * Version Details: + *Current version:* 1.0 + *Last Update:* 02/2024 + *First Release:* 02/2024 * Maintenance Plan: No updates planned. Authorship ---------- * Publishing Organization: Cohere For AI * Industry Type: Not-for-profit - Tech * Contact Details: URL Licensing Information --------------------- This dataset can be used for any purpose, whether academic or commercial, under the terms of the Apache 2.0 License.
[ "### Languages\n\n\n\n **aya-human-annotated** \nISO Code: 'tel', Language: Telugu, Resources: Low\nISO Code: 'yor', Language: Yorùbá, Resources: Low\nISO Code: 'arb', Language: Arabic, Resources: High\nISO Code: 'tur', Language: Turkish, Resources: High\nISO Code: 'por', Language: Portuguese, Resources: High\nISO Code: 'zho', Language: Chinese (Simplified), Resources: High\nISO Code: 'eng', Language: English, Resources: High\n\n\n\n\n **dolly-machine-translated** \nISO Code: 'ace', Language: Achinese, Resources: Low\nISO Code: 'afr', Language: Afrikaans, Resources: Mid\nISO Code: 'amh', Language: Amharic, Resources: Low\nISO Code: 'ara' ('arb', 'acm', 'acq', 'aeb', 'ajp', 'apc', 'ars', 'ary' & 'arz'), Language: Arabic (Standard, Gelet Iraqi, Ta'izzi-Adeni, Tunisian, South Levantine, North Levantine, Najdi, Moroccan & Egyptian), Resources: High\nISO Code: 'aze' ('azb' & 'azj'), Language: Azerbaijani (South & North), Resources: Low\nISO Code: 'bel', Language: Belarusian, Resources: Mid\nISO Code: 'ben', Language: Bengali, Resources: Mid\nISO Code: 'bjn', Language: Banjar, Resources: Low\nISO Code: 'bul', Language: Bulgarian, Resources: Mid\nISO Code: 'cat', Language: Catalan, Resources: High\nISO Code: 'ceb', Language: Cebuano, Resources: Mid\nISO Code: 'ces', Language: Czech, Resources: High\nISO Code: 'cym', Language: Welsh, Resources: Low\nISO Code: 'dan', Language: Danish, Resources: Mid\nISO Code: 'deu', Language: German, Resources: High\nISO Code: 'ell', Language: Greek, Resources: Mid\nISO Code: 'eng', Language: English, Resources: High\nISO Code: 'epo', Language: Esperanto, Resources: Low\nISO Code: 'est', Language: Estonian, Resources: Mid\nISO Code: 'eus', Language: Basque, Resources: High\nISO Code: 'fin', Language: Finnish, Resources: High\nISO Code: 'fra', Language: French, Resources: High\nISO Code: 'gla', Language: Scottish Gaelic, Resources: Low\nISO Code: 'gle', Language: Irish, Resources: Low\nISO Code: 'glg', Language: Galician, Resources: Mid\nISO Code: 'guj', Language: Gujarati, Resources: Low\nISO Code: 'hat', Language: Haitian Creole, Resources: Low\nISO Code: 'hau', Language: Hausa, Resources: Low\nISO Code: 'heb', Language: Hebrew, Resources: Mid\nISO Code: 'hin', Language: Hindi, Resources: High\nISO Code: 'hun', Language: Hungarian, Resources: High\nISO Code: 'hye', Language: Armenian, Resources: Low\nISO Code: 'ibo', Language: Igbo, Resources: Low\nISO Code: 'ind', Language: Indonesian, Resources: Mid\nISO Code: 'isl', Language: Icelandic, Resources: Low\nISO Code: 'ita', Language: Italian, Resources: High\nISO Code: 'jav', Language: Javanese, Resources: Low\nISO Code: 'jpn', Language: Japanese, Resources: High\nISO Code: 'kan', Language: Kannada, Resources: Low\nISO Code: 'kas', Language: Kashmiri, Resources: Low\nISO Code: 'kat', Language: Georgian, Resources: Mid\nISO Code: 'kau' ('knc'), Language: Kanuri (Central), Resources: Low\nISO Code: 'kaz', Language: Kazakh, Resources: Mid\nISO Code: 'khm', Language: Khmer, Resources: Low\nISO Code: 'kir', Language: Kyrgyz, Resources: Low\nISO Code: 'kor', Language: Korean, Resources: High\nISO Code: 'kur' ('ckb' & 'kmr'), Language: Kurdish (Central & Northern), Resources: Low\nISO Code: 'lao', Language: Lao, Resources: Low\nISO Code: 'lav' ('lvs'), Language: Latvian (Standard), Resources: Mid\nISO Code: 'lit', Language: Lithuanian, Resources: Mid\nISO Code: 'ltz', Language: Luxembourgish, Resources: Low\nISO Code: 'mal', Language: Malayalam, Resources: Low\nISO Code: 'mar', Language: Marathi, Resources: Low\nISO 
Code: 'min', Language: Minangkabau, Resources: Low\nISO Code: 'mkd', Language: Macedonian, Resources: Low\nISO Code: 'mlg' ('plt'), Language: Malagasy (Plateau), Resources: Low\nISO Code: 'mlt', Language: Maltese, Resources: Low\nISO Code: 'mni', Language: Manipuri, Resources: Low\nISO Code: 'mon' ('khk'), Language: Mongolian (Khalkha), Resources: Low\nISO Code: 'mri', Language: Maori, Resources: Low\nISO Code: 'msa' ('zsm'), Language: Malay (Standard), Resources: Mid\nISO Code: 'mya', Language: Burmese, Resources: Low\nISO Code: 'nep' ('npi'), Language: Nepali, Resources: Low\nISO Code: 'nld', Language: Dutch, Resources: High\nISO Code: 'nor' ('nno' & 'nob'), Language: Norwegian (Nynorsk & Bokmål), Resources: Low\nISO Code: 'nso', Language: Northern Sotho, Resources: Low\nISO Code: 'pes', Language: Persian, Resources: High\nISO Code: 'pol', Language: Polish, Resources: High\nISO Code: 'por', Language: Portuguese, Resources: High\nISO Code: 'pus' ('pbt'), Language: Pashto (Southern), Resources: Low\nISO Code: 'ron', Language: Romanian, Resources: Mid\nISO Code: 'rus', Language: Russian, Resources: High\nISO Code: 'sin', Language: Sinhala, Resources: Low\nISO Code: 'slk', Language: Slovak, Resources: Mid\nISO Code: 'slv', Language: Slovenian, Resources: Mid\nISO Code: 'smo', Language: Samoan, Resources: Low\nISO Code: 'sna', Language: Shona, Resources: Low\nISO Code: 'snd', Language: Sindhi, Resources: Low\nISO Code: 'som', Language: Somali, Resources: Low\nISO Code: 'sot', Language: Southern Sotho, Resources: Low\nISO Code: 'spa', Language: Spanish, Resources: High\nISO Code: 'sqi' ('als'), Language: Albanian (Tosk), Resources: Low\nISO Code: 'srp', Language: Serbian, Resources: High\nISO Code: 'sun', Language: Sundanese, Resources: Low\nISO Code: 'swa' ('swh'), Language: Swahili (Coastal), Resources: Low\nISO Code: 'swe', Language: Swedish, Resources: High\nISO Code: 'tam', Language: Tamil, Resources: Mid\nISO Code: 'taq', Language: Tamasheq, Resources: Low\nISO Code: 'tel', Language: Telugu, Resources: Low\nISO Code: 'tgk', Language: Tajik, Resources: Low\nISO Code: 'tha', Language: Thai, Resources: Mid\nISO Code: 'tur', Language: Turkish, Resources: High\nISO Code: 'ukr', Language: Ukrainian, Resources: Mid\nISO Code: 'urd', Language: Urdu, Resources: Mid\nISO Code: 'uzb' ('uzn'), Language: Uzbek (Nothern), Resources: Mid\nISO Code: 'vie', Language: Vietnamese, Resources: High\nISO Code: 'xho', Language: Xhosa, Resources: Low\nISO Code: 'yid' ('ydd'), Language: Yiddish (Eastern), Resources: Low\nISO Code: 'yor', Language: Yoruba, Resources: Low\nISO Code: 'zho' (+ 'yue'), Language: Chinese (Simplified & Cantonese), Resources: High\nISO Code: 'zul', Language: Zulu, Resources: Low\n\n\n\n\n **dolly-human-edited** \nISO Code: 'arb', Language: Arabic, Resources: High\nISO Code: 'fra', Language: French, Resources: High\nISO Code: 'hin', Language: Hindi, Resources: High\nISO Code: 'rus', Language: Russian, Resources: High\nISO Code: 'spa', Language: Spanish, Resources: High\nISO Code: 'srp', Language: Serbian, Resources: High\n\n\n\n \n\nMotivations & Intentions\n========================\n\n\n* Curation Rationale: This evaluation suite is tailored to test the generation quality of multilingual models, with the aim of balancing language coverage and human-sourced quality.\nIt covers prompts originally written in each language, as well as English-centric translated, and manually curated or edited prompts for a linguistically broad, but rich testbed.\nThe list of languages was initially 
established from mT5 and aligned with the annotators’ language list and the NLLB translation model.\n\n\nKnown Limitations\n=================\n\n\n* Translation Quality: Note that the expressiveness of the 'dolly-machine-translated' subset is limited by the quality of the translation model and may adversely impact an estimate of ability in languages where translations are not adequate. If this subset is used for testing, we recommend it be paired and reported with the professionally post-edited 'dolly-human-edited' subset or the 'aya-human-annotated' set, which, while covering only 7 languages, is entirely created by proficient target language speakers.\n\n\n\n\n---\n\n\nAdditional Information\n======================\n\n\nProvenance\n----------\n\n\n* Methods Used: combination of original annotations by volunteers, automatic translation, and post-editing of translations by professional annotators.\n* Methodology Details:\n\t+ *Source:* Original annotations from Aya dataset along with translations and post-edits of Dolly dataset\n\t+ *Platform:* Aya Annotation Platform\n\t+ *Dates of Collection:* May 2023 - Dec 2023\n\n\nDataset Version and Maintenance\n-------------------------------\n\n\n* Maintenance Status: Actively Maintained\n* Version Details:\n\t+ *Current version:* 1.0\n\t+ *Last Update:* 02/2024\n\t+ *First Release:* 02/2024\n* Maintenance Plan: No updates planned.\n\n\nAuthorship\n----------\n\n\n* Publishing Organization: Cohere For AI\n* Industry Type: Not-for-profit - Tech\n* Contact Details: URL\n\n\nLicensing Information\n---------------------\n\n\nThis dataset can be used for any purpose, whether academic or commercial, under the terms of the Apache 2.0 License." ]
[ "TAGS\n#task_categories-text-generation #language_creators-crowdsourced #language_creators-expert-generated #language_creators-machine-generated #multilinguality-multilingual #size_categories-10K<n<100K #source_datasets-original #source_datasets-extended #language-Afrikaans #language-Albanian #language-Amharic #language-Arabic #language-Azerbaijani #language-Belarusian #language-Bengali #language-Bulgarian #language-Catalan #language-Cebuano #language-Czech #language-Kurdish #language-Welsh #language-Danish #language-German #language-Modern Greek (1453-) #language-English #language-Esperanto #language-Estonian #language-Basque #language-Finnish #language-French #language-Scottish Gaelic #language-Irish #language-Galician #language-Gujarati #language-Haitian #language-Hausa #language-Hebrew #language-Hindi #language-Hungarian #language-Armenian #language-Igbo #language-Indonesian #language-Icelandic #language-Italian #language-Javanese #language-Japanese #language-Kannada #language-Georgian #language-Kazakh #language-Mongolian #language-Khmer #language-Kirghiz #language-Korean #language-Lao #language-Lithuanian #language-Luxembourgish #language-Latvian #language-Malayalam #language-Marathi #language-Macedonian #language-Maltese #language-Maori #language-Burmese #language-Dutch #language-Norwegian #language-Nepali (macrolanguage) #language-Southern Sotho #language-Pushto #language-Iranian Persian #language-Malagasy #language-Polish #language-Portuguese #language-Romanian #language-Russian #language-Sinhala #language-Slovak #language-Slovenian #language-Samoan #language-Shona #language-Sindhi #language-Somali #language-Spanish #language-Serbian #language-Sundanese #language-Swedish #language-Swahili (macrolanguage) #language-Tamil #language-Telugu #language-Tajik #language-Thai #language-Turkish #language-Ukrainian #language-Urdu #language-Uzbek #language-Vietnamese #language-Xhosa #language-Yiddish #language-Yoruba #language-Chinese #language-Malay (macrolanguage) #language-Zulu #language-Achinese #language-Banjar #language-Kashmiri #language-Kanuri #language-Minangkabau #language-Manipuri #language-Tamasheq #language-Pedi #license-apache-2.0 #arxiv-2402.06619 #region-us \n", "### Languages\n\n\n\n **aya-human-annotated** \nISO Code: 'tel', Language: Telugu, Resources: Low\nISO Code: 'yor', Language: Yorùbá, Resources: Low\nISO Code: 'arb', Language: Arabic, Resources: High\nISO Code: 'tur', Language: Turkish, Resources: High\nISO Code: 'por', Language: Portuguese, Resources: High\nISO Code: 'zho', Language: Chinese (Simplified), Resources: High\nISO Code: 'eng', Language: English, Resources: High\n\n\n\n\n **dolly-machine-translated** \nISO Code: 'ace', Language: Achinese, Resources: Low\nISO Code: 'afr', Language: Afrikaans, Resources: Mid\nISO Code: 'amh', Language: Amharic, Resources: Low\nISO Code: 'ara' ('arb', 'acm', 'acq', 'aeb', 'ajp', 'apc', 'ars', 'ary' & 'arz'), Language: Arabic (Standard, Gelet Iraqi, Ta'izzi-Adeni, Tunisian, South Levantine, North Levantine, Najdi, Moroccan & Egyptian), Resources: High\nISO Code: 'aze' ('azb' & 'azj'), Language: Azerbaijani (South & North), Resources: Low\nISO Code: 'bel', Language: Belarusian, Resources: Mid\nISO Code: 'ben', Language: Bengali, Resources: Mid\nISO Code: 'bjn', Language: Banjar, Resources: Low\nISO Code: 'bul', Language: Bulgarian, Resources: Mid\nISO Code: 'cat', Language: Catalan, Resources: High\nISO Code: 'ceb', Language: Cebuano, Resources: Mid\nISO Code: 'ces', Language: Czech, Resources: High\nISO Code: 'cym', 
Language: Welsh, Resources: Low\nISO Code: 'dan', Language: Danish, Resources: Mid\nISO Code: 'deu', Language: German, Resources: High\nISO Code: 'ell', Language: Greek, Resources: Mid\nISO Code: 'eng', Language: English, Resources: High\nISO Code: 'epo', Language: Esperanto, Resources: Low\nISO Code: 'est', Language: Estonian, Resources: Mid\nISO Code: 'eus', Language: Basque, Resources: High\nISO Code: 'fin', Language: Finnish, Resources: High\nISO Code: 'fra', Language: French, Resources: High\nISO Code: 'gla', Language: Scottish Gaelic, Resources: Low\nISO Code: 'gle', Language: Irish, Resources: Low\nISO Code: 'glg', Language: Galician, Resources: Mid\nISO Code: 'guj', Language: Gujarati, Resources: Low\nISO Code: 'hat', Language: Haitian Creole, Resources: Low\nISO Code: 'hau', Language: Hausa, Resources: Low\nISO Code: 'heb', Language: Hebrew, Resources: Mid\nISO Code: 'hin', Language: Hindi, Resources: High\nISO Code: 'hun', Language: Hungarian, Resources: High\nISO Code: 'hye', Language: Armenian, Resources: Low\nISO Code: 'ibo', Language: Igbo, Resources: Low\nISO Code: 'ind', Language: Indonesian, Resources: Mid\nISO Code: 'isl', Language: Icelandic, Resources: Low\nISO Code: 'ita', Language: Italian, Resources: High\nISO Code: 'jav', Language: Javanese, Resources: Low\nISO Code: 'jpn', Language: Japanese, Resources: High\nISO Code: 'kan', Language: Kannada, Resources: Low\nISO Code: 'kas', Language: Kashmiri, Resources: Low\nISO Code: 'kat', Language: Georgian, Resources: Mid\nISO Code: 'kau' ('knc'), Language: Kanuri (Central), Resources: Low\nISO Code: 'kaz', Language: Kazakh, Resources: Mid\nISO Code: 'khm', Language: Khmer, Resources: Low\nISO Code: 'kir', Language: Kyrgyz, Resources: Low\nISO Code: 'kor', Language: Korean, Resources: High\nISO Code: 'kur' ('ckb' & 'kmr'), Language: Kurdish (Central & Northern), Resources: Low\nISO Code: 'lao', Language: Lao, Resources: Low\nISO Code: 'lav' ('lvs'), Language: Latvian (Standard), Resources: Mid\nISO Code: 'lit', Language: Lithuanian, Resources: Mid\nISO Code: 'ltz', Language: Luxembourgish, Resources: Low\nISO Code: 'mal', Language: Malayalam, Resources: Low\nISO Code: 'mar', Language: Marathi, Resources: Low\nISO Code: 'min', Language: Minangkabau, Resources: Low\nISO Code: 'mkd', Language: Macedonian, Resources: Low\nISO Code: 'mlg' ('plt'), Language: Malagasy (Plateau), Resources: Low\nISO Code: 'mlt', Language: Maltese, Resources: Low\nISO Code: 'mni', Language: Manipuri, Resources: Low\nISO Code: 'mon' ('khk'), Language: Mongolian (Khalkha), Resources: Low\nISO Code: 'mri', Language: Maori, Resources: Low\nISO Code: 'msa' ('zsm'), Language: Malay (Standard), Resources: Mid\nISO Code: 'mya', Language: Burmese, Resources: Low\nISO Code: 'nep' ('npi'), Language: Nepali, Resources: Low\nISO Code: 'nld', Language: Dutch, Resources: High\nISO Code: 'nor' ('nno' & 'nob'), Language: Norwegian (Nynorsk & Bokmål), Resources: Low\nISO Code: 'nso', Language: Northern Sotho, Resources: Low\nISO Code: 'pes', Language: Persian, Resources: High\nISO Code: 'pol', Language: Polish, Resources: High\nISO Code: 'por', Language: Portuguese, Resources: High\nISO Code: 'pus' ('pbt'), Language: Pashto (Southern), Resources: Low\nISO Code: 'ron', Language: Romanian, Resources: Mid\nISO Code: 'rus', Language: Russian, Resources: High\nISO Code: 'sin', Language: Sinhala, Resources: Low\nISO Code: 'slk', Language: Slovak, Resources: Mid\nISO Code: 'slv', Language: Slovenian, Resources: Mid\nISO Code: 'smo', Language: Samoan, Resources: Low\nISO 
Code: 'sna', Language: Shona, Resources: Low\nISO Code: 'snd', Language: Sindhi, Resources: Low\nISO Code: 'som', Language: Somali, Resources: Low\nISO Code: 'sot', Language: Southern Sotho, Resources: Low\nISO Code: 'spa', Language: Spanish, Resources: High\nISO Code: 'sqi' ('als'), Language: Albanian (Tosk), Resources: Low\nISO Code: 'srp', Language: Serbian, Resources: High\nISO Code: 'sun', Language: Sundanese, Resources: Low\nISO Code: 'swa' ('swh'), Language: Swahili (Coastal), Resources: Low\nISO Code: 'swe', Language: Swedish, Resources: High\nISO Code: 'tam', Language: Tamil, Resources: Mid\nISO Code: 'taq', Language: Tamasheq, Resources: Low\nISO Code: 'tel', Language: Telugu, Resources: Low\nISO Code: 'tgk', Language: Tajik, Resources: Low\nISO Code: 'tha', Language: Thai, Resources: Mid\nISO Code: 'tur', Language: Turkish, Resources: High\nISO Code: 'ukr', Language: Ukrainian, Resources: Mid\nISO Code: 'urd', Language: Urdu, Resources: Mid\nISO Code: 'uzb' ('uzn'), Language: Uzbek (Nothern), Resources: Mid\nISO Code: 'vie', Language: Vietnamese, Resources: High\nISO Code: 'xho', Language: Xhosa, Resources: Low\nISO Code: 'yid' ('ydd'), Language: Yiddish (Eastern), Resources: Low\nISO Code: 'yor', Language: Yoruba, Resources: Low\nISO Code: 'zho' (+ 'yue'), Language: Chinese (Simplified & Cantonese), Resources: High\nISO Code: 'zul', Language: Zulu, Resources: Low\n\n\n\n\n **dolly-human-edited** \nISO Code: 'arb', Language: Arabic, Resources: High\nISO Code: 'fra', Language: French, Resources: High\nISO Code: 'hin', Language: Hindi, Resources: High\nISO Code: 'rus', Language: Russian, Resources: High\nISO Code: 'spa', Language: Spanish, Resources: High\nISO Code: 'srp', Language: Serbian, Resources: High\n\n\n\n \n\nMotivations & Intentions\n========================\n\n\n* Curation Rationale: This evaluation suite is tailored to test the generation quality of multilingual models, with the aim of balancing language coverage and human-sourced quality.\nIt covers prompts originally written in each language, as well as English-centric translated, and manually curated or edited prompts for a linguistically broad, but rich testbed.\nThe list of languages was initially established from mT5 and aligned with the annotators’ language list and the NLLB translation model.\n\n\nKnown Limitations\n=================\n\n\n* Translation Quality: Note that the expressiveness of the 'dolly-machine-translated' subset is limited by the quality of the translation model and may adversely impact an estimate of ability in languages where translations are not adequate. 
If this subset is used for testing, we recommend it be paired and reported with the professionally post-edited 'dolly-human-edited' subset or the 'aya-human-annotated' set, which, while covering only 7 languages, is entirely created by proficient target language speakers.\n\n\n\n\n---\n\n\nAdditional Information\n======================\n\n\nProvenance\n----------\n\n\n* Methods Used: combination of original annotations by volunteers, automatic translation, and post-editing of translations by professional annotators.\n* Methodology Details:\n\t+ *Source:* Original annotations from Aya dataset along with translations and post-edits of Dolly dataset\n\t+ *Platform:* Aya Annotation Platform\n\t+ *Dates of Collection:* May 2023 - Dec 2023\n\n\nDataset Version and Maintenance\n-------------------------------\n\n\n* Maintenance Status: Actively Maintained\n* Version Details:\n\t+ *Current version:* 1.0\n\t+ *Last Update:* 02/2024\n\t+ *First Release:* 02/2024\n* Maintenance Plan: No updates planned.\n\n\nAuthorship\n----------\n\n\n* Publishing Organization: Cohere For AI\n* Industry Type: Not-for-profit - Tech\n* Contact Details: URL\n\n\nLicensing Information\n---------------------\n\n\nThis dataset can be used for any purpose, whether academic or commercial, under the terms of the Apache 2.0 License." ]
7e42533e7c93a6308f11cb11477d3b12738d266a
# Dataset Card for "HeSum" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
biunlp/HeSum
[ "region:us" ]
2024-02-06T09:00:20+00:00
{"dataset_info": {"features": [{"name": "summary", "dtype": "string"}, {"name": "article", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 98933510, "num_examples": 8000}, {"name": "validation", "num_bytes": 12217867, "num_examples": 1000}, {"name": "test", "num_bytes": 13227741, "num_examples": 1000}], "download_size": 63278508, "dataset_size": 124379118}}
2024-02-06T09:42:28+00:00
[]
[]
TAGS #region-us
# Dataset Card for "HeSum" More Information needed
[ "# Dataset Card for \"HeSum\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"HeSum\"\n\nMore Information needed" ]
1cb4624754de607dc88ea2a13c3896abe5ec86c1
# Dataset Card for "NLLB3.3B_XQuAD_TH_sent_span" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
sadanalog/NLLB3.3B_XQuAD_TH_sent_span
[ "region:us" ]
2024-02-06T09:15:53+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "context", "dtype": "string"}, {"name": "question", "dtype": "string"}, {"name": "answers", "struct": [{"name": "text", "sequence": "string"}]}], "splits": [{"name": "train", "num_bytes": 2570720, "num_examples": 1190}], "download_size": 503670, "dataset_size": 2570720}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2024-02-06T09:16:04+00:00
[]
[]
TAGS #region-us
# Dataset Card for "NLLB3.3B_XQuAD_TH_sent_span" More Information needed
[ "# Dataset Card for \"NLLB3.3B_XQuAD_TH_sent_span\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"NLLB3.3B_XQuAD_TH_sent_span\"\n\nMore Information needed" ]
d36dffd64c9d99869c71665c67503306c0317dc9
# Dataset Card for "lmind_hotpot_train5000_eval5000_v1_ic_qa" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/lmind_hotpot_train5000_eval5000_v1_ic_qa
[ "region:us" ]
2024-02-06T10:16:40+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train_qa", "path": "data/train_qa-*"}, {"split": "train_recite_qa", "path": "data/train_recite_qa-*"}, {"split": "train_ic_qa", "path": "data/train_ic_qa-*"}, {"split": "eval_qa", "path": "data/eval_qa-*"}, {"split": "eval_recite_qa", "path": "data/eval_recite_qa-*"}, {"split": "eval_ic_qa", "path": "data/eval_ic_qa-*"}, {"split": "all_docs", "path": "data/all_docs-*"}, {"split": "all_docs_eval", "path": "data/all_docs_eval-*"}, {"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}, {"name": "answers", "struct": [{"name": "answer_start", "sequence": "null"}, {"name": "text", "sequence": "string"}]}], "splits": [{"name": "train_qa", "num_bytes": 864508, "num_examples": 5000}, {"name": "train_recite_qa", "num_bytes": 5350190, "num_examples": 5000}, {"name": "train_ic_qa", "num_bytes": 5345190, "num_examples": 5000}, {"name": "eval_qa", "num_bytes": 813536, "num_examples": 5000}, {"name": "eval_recite_qa", "num_bytes": 5394796, "num_examples": 5000}, {"name": "eval_ic_qa", "num_bytes": 5345190, "num_examples": 5000}, {"name": "all_docs", "num_bytes": 8524332, "num_examples": 18224}, {"name": "all_docs_eval", "num_bytes": 8523131, "num_examples": 18224}, {"name": "train", "num_bytes": 5345190, "num_examples": 5000}, {"name": "validation", "num_bytes": 5345190, "num_examples": 5000}], "download_size": 30643792, "dataset_size": 50851253}}
2024-02-06T10:17:22+00:00
[]
[]
TAGS #region-us
# Dataset Card for "lmind_hotpot_train5000_eval5000_v1_ic_qa" More Information needed
[ "# Dataset Card for \"lmind_hotpot_train5000_eval5000_v1_ic_qa\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"lmind_hotpot_train5000_eval5000_v1_ic_qa\"\n\nMore Information needed" ]
3435f1682ee3213e7f67a10c4dd78961b5ec7f4f
# Dataset Card for farright-test This dataset has been created with [Argilla](https://docs.argilla.io). As shown in the sections below, this dataset can be loaded into Argilla as explained in [Load with Argilla](#load-with-argilla), or used directly with the `datasets` library in [Load with `datasets`](#load-with-datasets). ## Dataset Description - **Homepage:** https://argilla.io - **Repository:** https://github.com/argilla-io/argilla - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This dataset contains: * A dataset configuration file conforming to the Argilla dataset format named `argilla.yaml`. This configuration file will be used to configure the dataset when using the `FeedbackDataset.from_huggingface` method in Argilla. * Dataset records in a format compatible with HuggingFace `datasets`. These records will be loaded automatically when using `FeedbackDataset.from_huggingface` and can be loaded independently using the `datasets` library via `load_dataset`. * The [annotation guidelines](#annotation-guidelines) that have been used for building and curating the dataset, if they've been defined in Argilla. ### Load with Argilla To load with Argilla, you'll just need to install Argilla as `pip install argilla --upgrade` and then use the following code: ```python import argilla as rg ds = rg.FeedbackDataset.from_huggingface("FrederikMH/farright-test") ``` ### Load with `datasets` To load this dataset with `datasets`, you'll just need to install `datasets` as `pip install datasets --upgrade` and then use the following code: ```python from datasets import load_dataset ds = load_dataset("FrederikMH/farright-test") ``` ### Supported Tasks and Leaderboards This dataset can contain [multiple fields, questions and responses](https://docs.argilla.io/en/latest/conceptual_guides/data_model.html#feedback-dataset) so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the [Dataset Structure section](#dataset-structure). There are no leaderboards associated with this dataset. ### Languages [More Information Needed] ## Dataset Structure ### Data in Argilla The dataset is created in Argilla with: **fields**, **questions**, **suggestions**, **metadata**, **vectors**, and **guidelines**. The **fields** are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. | Field Name | Title | Type | Required | Markdown | | ---------- | ----- | ---- | -------- | -------- | | text | Text | text | True | False | The **questions** are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label_selection, multi_label_selection, or ranking. | Question Name | Title | Type | Required | Description | Values/Labels | | ------------- | ----- | ---- | -------- | ----------- | ------------- | | sentiment | Sentiment | label_selection | True | N/A | ['positive', 'neutral', 'negative'] | | mixed-emotion | Mixed-emotion | multi_label_selection | True | N/A | ['joy', 'anger', 'sadness', 'fear', 'surprise', 'love'] | The **suggestions** are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending "-suggestion" and "-suggestion-metadata" to those, containing the value/s of the suggestion and its metadata, respectively. 
So on, the possible values are the same as in the table above, but the column name is appended with "-suggestion" and the metadata is appended with "-suggestion-metadata". The **metadata** is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the `metadata_properties` defined in the dataset configuration file in `argilla.yaml`. | Metadata Name | Title | Type | Values | Visible for Annotators | | ------------- | ----- | ---- | ------ | ---------------------- | The **guidelines**, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the [annotation guidelines](#annotation-guidelines) section. ### Data Instances An example of a dataset instance in Argilla looks as follows: ```json { "external_id": null, "fields": { "text": "i didnt feel humiliated" }, "metadata": {}, "responses": [ { "status": "submitted", "user_id": "3314b00d-2477-4606-b8f7-5cc2c52b2e28", "values": { "mixed-emotion": { "value": [ "fear" ] }, "sentiment": { "value": "neutral" } } } ], "suggestions": [], "vectors": {} } ``` While the same record in HuggingFace `datasets` looks as follows: ```json { "external_id": null, "metadata": "{}", "mixed-emotion": [ { "status": "submitted", "user_id": "3314b00d-2477-4606-b8f7-5cc2c52b2e28", "value": [ "fear" ] } ], "mixed-emotion-suggestion": null, "mixed-emotion-suggestion-metadata": { "agent": null, "score": null, "type": null }, "sentiment": [ { "status": "submitted", "user_id": "3314b00d-2477-4606-b8f7-5cc2c52b2e28", "value": "neutral" } ], "sentiment-suggestion": null, "sentiment-suggestion-metadata": { "agent": null, "score": null, "type": null }, "text": "i didnt feel humiliated" } ``` ### Data Fields Among the dataset fields, we differentiate between the following: * **Fields:** These are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. * **text** is of type `text`. * **Questions:** These are the questions that will be asked to the annotators. They can be of different types, such as `RatingQuestion`, `TextQuestion`, `LabelQuestion`, `MultiLabelQuestion`, and `RankingQuestion`. * **sentiment** is of type `label_selection` with the following allowed values ['positive', 'neutral', 'negative']. * **mixed-emotion** is of type `multi_label_selection` with the following allowed values ['joy', 'anger', 'sadness', 'fear', 'surprise', 'love']. * **Suggestions:** As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable. * (optional) **sentiment-suggestion** is of type `label_selection` with the following allowed values ['positive', 'neutral', 'negative']. * (optional) **mixed-emotion-suggestion** is of type `multi_label_selection` with the following allowed values ['joy', 'anger', 'sadness', 'fear', 'surprise', 'love']. 
Additionally, we also have two more fields that are optional and are the following: * **metadata:** This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the `metadata_properties` defined in the dataset configuration file in `argilla.yaml`. * **external_id:** This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file. ### Data Splits The dataset contains a single split, which is `train`. ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation guidelines Emotion is a dataset of English Twitter messages with six basic emotions: anger, fear, joy, love, sadness, and surprise. #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
FrederikMH/farright-test
[ "size_categories:n<1K", "rlfh", "argilla", "human-feedback", "region:us" ]
2024-02-06T10:18:22+00:00
{"size_categories": "n<1K", "tags": ["rlfh", "argilla", "human-feedback"]}
2024-02-06T10:18:24+00:00
[]
[]
TAGS #size_categories-n<1K #rlfh #argilla #human-feedback #region-us
Dataset Card for farright-test ============================== This dataset has been created with Argilla. As shown in the sections below, this dataset can be loaded into Argilla as explained in Load with Argilla, or used directly with the 'datasets' library in Load with 'datasets'. Dataset Description ------------------- * Homepage: URL * Repository: URL * Paper: * Leaderboard: * Point of Contact: ### Dataset Summary This dataset contains: * A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\_huggingface' method in Argilla. * Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\_huggingface' and can be loaded independently using the 'datasets' library via 'load\_dataset'. * The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla. ### Load with Argilla To load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code: ### Load with 'datasets' To load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code: ### Supported Tasks and Leaderboards This dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section. There are no leaderboards associated with this dataset. ### Languages Dataset Structure ----------------- ### Data in Argilla The dataset is created in Argilla with: fields, questions, suggestions, metadata, vectors, and guidelines. The fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. The questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\_selection, multi\_label\_selection, or ranking. The suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending "-suggestion" and "-suggestion-metadata" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with "-suggestion" and the metadata is appended with "-suggestion-metadata". The metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\_properties' defined in the dataset configuration file in 'URL'. The guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section. 
### Data Instances An example of a dataset instance in Argilla looks as follows: While the same record in HuggingFace 'datasets' looks as follows: ### Data Fields Among the dataset fields, we differentiate between the following: * Fields: These are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions. + text is of type 'text'. * Questions: These are the questions that will be asked to the annotators. They can be of different types, such as 'RatingQuestion', 'TextQuestion', 'LabelQuestion', 'MultiLabelQuestion', and 'RankingQuestion'. + sentiment is of type 'label\_selection' with the following allowed values ['positive', 'neutral', 'negative']. + mixed-emotion is of type 'multi\_label\_selection' with the following allowed values ['joy', 'anger', 'sadness', 'fear', 'surprise', 'love']. * Suggestions: As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable. + (optional) sentiment-suggestion is of type 'label\_selection' with the following allowed values ['positive', 'neutral', 'negative']. + (optional) mixed-emotion-suggestion is of type 'multi\_label\_selection' with the following allowed values ['joy', 'anger', 'sadness', 'fear', 'surprise', 'love']. Additionally, we also have two more fields that are optional and are the following: * metadata: This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\_properties' defined in the dataset configuration file in 'URL'. * external\_id: This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file. ### Data Splits The dataset contains a single split, which is 'train'. Dataset Creation ---------------- ### Curation Rationale ### Source Data #### Initial Data Collection and Normalization #### Who are the source language producers? ### Annotations #### Annotation guidelines Emotion is a dataset of English Twitter messages with six basic emotions: anger, fear, joy, love, sadness, and surprise. #### Annotation process #### Who are the annotators? ### Personal and Sensitive Information Considerations for Using the Data --------------------------------- ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations Additional Information ---------------------- ### Dataset Curators ### Licensing Information ### Contributions
[ "### Dataset Summary\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.\n* The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla.", "### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:", "### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:", "### Supported Tasks and Leaderboards\n\n\nThis dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section.\n\n\nThere are no leaderboards associated with this dataset.", "### Languages\n\n\nDataset Structure\n-----------------", "### Data in Argilla\n\n\nThe dataset is created in Argilla with: fields, questions, suggestions, metadata, vectors, and guidelines.\n\n\nThe fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\nThe questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\\_selection, multi\\_label\\_selection, or ranking.\n\n\n\nThe suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending \"-suggestion\" and \"-suggestion-metadata\" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with \"-suggestion\" and the metadata is appended with \"-suggestion-metadata\".\n\n\nThe metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n\n\n\nThe guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. Find those in the annotation guidelines section.", "### Data Instances\n\n\nAn example of a dataset instance in Argilla looks as follows:\n\n\nWhile the same record in HuggingFace 'datasets' looks as follows:", "### Data Fields\n\n\nAmong the dataset fields, we differentiate between the following:\n\n\n* Fields: These are the dataset records themselves, for the moment just text fields are supported. 
These are the ones that will be used to provide responses to the questions.\n\n\n\t+ text is of type 'text'.\n* Questions: These are the questions that will be asked to the annotators. They can be of different types, such as 'RatingQuestion', 'TextQuestion', 'LabelQuestion', 'MultiLabelQuestion', and 'RankingQuestion'.\n\n\n\t+ sentiment is of type 'label\\_selection' with the following allowed values ['positive', 'neutral', 'negative'].\n\t+ mixed-emotion is of type 'multi\\_label\\_selection' with the following allowed values ['joy', 'anger', 'sadness', 'fear', 'surprise', 'love'].\n* Suggestions: As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable.\n\n\n\t+ (optional) sentiment-suggestion is of type 'label\\_selection' with the following allowed values ['positive', 'neutral', 'negative'].\n\t+ (optional) mixed-emotion-suggestion is of type 'multi\\_label\\_selection' with the following allowed values ['joy', 'anger', 'sadness', 'fear', 'surprise', 'love'].\n\n\nAdditionally, we also have two more fields that are optional and are the following:\n\n\n* metadata: This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n* external\\_id: This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file.", "### Data Splits\n\n\nThe dataset contains a single split, which is 'train'.\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation guidelines\n\n\nEmotion is a dataset of English Twitter messages with six basic emotions: anger, fear, joy, love, sadness, and surprise.", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
[ "TAGS\n#size_categories-n<1K #rlfh #argilla #human-feedback #region-us \n", "### Dataset Summary\n\n\nThis dataset contains:\n\n\n* A dataset configuration file conforming to the Argilla dataset format named 'URL'. This configuration file will be used to configure the dataset when using the 'FeedbackDataset.from\\_huggingface' method in Argilla.\n* Dataset records in a format compatible with HuggingFace 'datasets'. These records will be loaded automatically when using 'FeedbackDataset.from\\_huggingface' and can be loaded independently using the 'datasets' library via 'load\\_dataset'.\n* The annotation guidelines that have been used for building and curating the dataset, if they've been defined in Argilla.", "### Load with Argilla\n\n\nTo load with Argilla, you'll just need to install Argilla as 'pip install argilla --upgrade' and then use the following code:", "### Load with 'datasets'\n\n\nTo load this dataset with 'datasets', you'll just need to install 'datasets' as 'pip install datasets --upgrade' and then use the following code:", "### Supported Tasks and Leaderboards\n\n\nThis dataset can contain multiple fields, questions and responses so it can be used for different NLP tasks, depending on the configuration. The dataset structure is described in the Dataset Structure section.\n\n\nThere are no leaderboards associated with this dataset.", "### Languages\n\n\nDataset Structure\n-----------------", "### Data in Argilla\n\n\nThe dataset is created in Argilla with: fields, questions, suggestions, metadata, vectors, and guidelines.\n\n\nThe fields are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\nThe questions are the questions that will be asked to the annotators. They can be of different types, such as rating, text, label\\_selection, multi\\_label\\_selection, or ranking.\n\n\n\nThe suggestions are human or machine generated recommendations for each question to assist the annotator during the annotation process, so those are always linked to the existing questions, and named appending \"-suggestion\" and \"-suggestion-metadata\" to those, containing the value/s of the suggestion and its metadata, respectively. So on, the possible values are the same as in the table above, but the column name is appended with \"-suggestion\" and the metadata is appended with \"-suggestion-metadata\".\n\n\nThe metadata is a dictionary that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n\n\n\nThe guidelines, are optional as well, and are just a plain string that can be used to provide instructions to the annotators. 
Find those in the annotation guidelines section.", "### Data Instances\n\n\nAn example of a dataset instance in Argilla looks as follows:\n\n\nWhile the same record in HuggingFace 'datasets' looks as follows:", "### Data Fields\n\n\nAmong the dataset fields, we differentiate between the following:\n\n\n* Fields: These are the dataset records themselves, for the moment just text fields are supported. These are the ones that will be used to provide responses to the questions.\n\n\n\t+ text is of type 'text'.\n* Questions: These are the questions that will be asked to the annotators. They can be of different types, such as 'RatingQuestion', 'TextQuestion', 'LabelQuestion', 'MultiLabelQuestion', and 'RankingQuestion'.\n\n\n\t+ sentiment is of type 'label\\_selection' with the following allowed values ['positive', 'neutral', 'negative'].\n\t+ mixed-emotion is of type 'multi\\_label\\_selection' with the following allowed values ['joy', 'anger', 'sadness', 'fear', 'surprise', 'love'].\n* Suggestions: As of Argilla 1.13.0, the suggestions have been included to provide the annotators with suggestions to ease or assist during the annotation process. Suggestions are linked to the existing questions, are always optional, and contain not just the suggestion itself, but also the metadata linked to it, if applicable.\n\n\n\t+ (optional) sentiment-suggestion is of type 'label\\_selection' with the following allowed values ['positive', 'neutral', 'negative'].\n\t+ (optional) mixed-emotion-suggestion is of type 'multi\\_label\\_selection' with the following allowed values ['joy', 'anger', 'sadness', 'fear', 'surprise', 'love'].\n\n\nAdditionally, we also have two more fields that are optional and are the following:\n\n\n* metadata: This is an optional field that can be used to provide additional information about the dataset record. This can be useful to provide additional context to the annotators, or to provide additional information about the dataset record itself. For example, you can use this to provide a link to the original source of the dataset record, or to provide additional information about the dataset record itself, such as the author, the date, or the source. The metadata is always optional, and can be potentially linked to the 'metadata\\_properties' defined in the dataset configuration file in 'URL'.\n* external\\_id: This is an optional field that can be used to provide an external ID for the dataset record. This can be useful if you want to link the dataset record to an external resource, such as a database or a file.", "### Data Splits\n\n\nThe dataset contains a single split, which is 'train'.\n\n\nDataset Creation\n----------------", "### Curation Rationale", "### Source Data", "#### Initial Data Collection and Normalization", "#### Who are the source language producers?", "### Annotations", "#### Annotation guidelines\n\n\nEmotion is a dataset of English Twitter messages with six basic emotions: anger, fear, joy, love, sadness, and surprise.", "#### Annotation process", "#### Who are the annotators?", "### Personal and Sensitive Information\n\n\nConsiderations for Using the Data\n---------------------------------", "### Social Impact of Dataset", "### Discussion of Biases", "### Other Known Limitations\n\n\nAdditional Information\n----------------------", "### Dataset Curators", "### Licensing Information", "### Contributions" ]
2e89be2150e2310d8d17d33c7450efa921b6c387
# The Croatian Parliamentary Spoken Dataset ParlaSpeech-HR 2.0 http://hdl.handle.net/11356/1914 The ParlaSpeech-HR dataset is built from the transcripts of parliamentary proceedings available in the Croatian part of the ParlaMint corpus, and the parliamentary recordings available from the Croatian Parliament's YouTube channel. The corpus consists of audio segments that correspond to specific sentences in the transcripts. The transcript contains word-level alignments to the recordings, each instance consisting of character and millisecond start and end offsets, allowing for simple further segmentation of long sentences into shorter segments for ASR and other memory-sensitive applications. Sequences longer than 30 seconds have already been removed from this dataset, which should allow for a simple usage on most modern GPUs. Each segment has an identifier reference to the ParlaMint 4.0 corpus (http://hdl.handle.net/11356/1859) via the utterance ID and character offsets. While in the original dataset all the speaker information from the ParlaMint corpus is available via the `speaker_info` attribute, in the HuggingFace version only a subset of metadata is available, namely: the date, the name of the speaker, their gender, year of birth, party affiliation at that point in time, status of the party at that point in time (coalition or opposition), and party orientation (left, right, centre etc.). Different to the original dataset, this version has also a `text_normalised` attribute, which contains the text with parliamentary comments (`[[Applause]]` and similar) removed. If you use the dataset, please cite the following paper: ``` @inproceedings{ljubesic-etal-2022-parlaspeech, title = "{P}arla{S}peech-{HR} - a Freely Available {ASR} Dataset for {C}roatian Bootstrapped from the {P}arla{M}int Corpus", author = "Ljube{\v{s}}i{\'c}, Nikola and Kor{\v{z}}inek, Danijel and Rupnik, Peter and Jazbec, Ivo-Pavao", editor = "Fi{\v{s}}er, Darja and Eskevich, Maria and Lenardi{\v{c}}, Jakob and de Jong, Franciska", booktitle = "Proceedings of the Workshop ParlaCLARIN III within the 13th Language Resources and Evaluation Conference", month = jun, year = "2022", address = "Marseille, France", publisher = "European Language Resources Association", url = "https://aclanthology.org/2022.parlaclarin-1.16", pages = "111--116", } ```
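Because each record carries both the full sentence and its word-level alignment, sentence-level clips can be cut into shorter sub-segments without external tooling. The sketch below is illustrative only: the field names follow the dataset schema, streaming is used to avoid downloading the full archive, and the `time_s`/`time_e` offsets (stored as floats) are assumed to be seconds.

```python
from datasets import load_dataset

# Stream the split rather than materialising ~180 GB of audio locally.
ds = load_dataset("classla/ParlaSpeech-HR", split="train", streaming=True)

example = next(iter(ds))
audio = example["audio"]            # {"array": ..., "sampling_rate": 16000}
sr = audio["sampling_rate"]

# Cut the first five aligned words into a shorter training segment using
# the word-level time and character offsets.
words = example["words"][:5]
t0, t1 = words[0]["time_s"], words[-1]["time_e"]
sub_audio = audio["array"][int(t0 * sr):int(t1 * sr)]
sub_text = example["text"][words[0]["char_s"]:words[-1]["char_e"]]
```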
classla/ParlaSpeech-HR
[ "region:us" ]
2024-02-06T10:46:25+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "text", "dtype": "string"}, {"name": "text_normalised", "dtype": "string"}, {"name": "words", "list": [{"name": "char_e", "dtype": "int64"}, {"name": "char_s", "dtype": "int64"}, {"name": "time_e", "dtype": "float64"}, {"name": "time_s", "dtype": "float64"}]}, {"name": "audio_length", "dtype": "float64"}, {"name": "date", "dtype": "string"}, {"name": "speaker_name", "dtype": "string"}, {"name": "speaker_gender", "dtype": "string"}, {"name": "speaker_birth", "dtype": "string"}, {"name": "speaker_party", "dtype": "string"}, {"name": "party_orientation", "dtype": "string"}, {"name": "party_status", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 162874686121.866, "num_examples": 867581}], "download_size": 179092718936, "dataset_size": 162874686121.866}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2024-02-06T19:16:47+00:00
[]
[]
TAGS #region-us
# The Croatian Parliamentary Spoken Dataset ParlaSpeech-HR 2.0 URL The ParlaSpeech-HR dataset is built from the transcripts of parliamentary proceedings available in the Croatian part of the ParlaMint corpus, and the parliamentary recordings available from the Croatian Parliament's YouTube channel. The corpus consists of audio segments that correspond to specific sentences in the transcripts. The transcript contains word-level alignments to the recordings, each instance consisting of character and millisecond start and end offsets, allowing for simple further segmentation of long sentences into shorter segments for ASR and other memory-sensitive applications. Sequences longer than 30 seconds have already been removed from this dataset, which should allow for a simple usage on most modern GPUs. Each segment has an identifier reference to the ParlaMint 4.0 corpus (URL via the utterance ID and character offsets. While in the original dataset all the speaker information from the ParlaMint corpus is available via the 'speaker_info' attribute, in the HuggingFace version only a subset of metadata is available, namely: the date, the name of the speaker, their gender, year of birth, party affiliation at that point in time, status of the party at that point in time (coalition or opposition), and party orientation (left, right, centre etc.). Different to the original dataset, this version has also a 'text_normalised' attribute, which contains the text with parliamentary comments ('[[Applause]]' and similar) removed. If you use the dataset, please cite the following paper:
[ "# The Croatian Parliamentary Spoken Dataset ParlaSpeech-HR 2.0\n\nURL\n\nThe ParlaSpeech-HR dataset is built from the transcripts of parliamentary proceedings available in the Croatian part of the ParlaMint corpus, and the parliamentary recordings available from the Croatian Parliament's YouTube channel.\n\nThe corpus consists of audio segments that correspond to specific sentences in the transcripts. The transcript contains word-level alignments to the recordings, each instance consisting of character and millisecond start and end offsets, allowing for simple further segmentation of long sentences into shorter segments for ASR and other memory-sensitive applications. Sequences longer than 30 seconds have already been removed from this dataset, which should allow for a simple usage on most modern GPUs.\n\nEach segment has an identifier reference to the ParlaMint 4.0 corpus (URL via the utterance ID and character offsets.\n\nWhile in the original dataset all the speaker information from the ParlaMint corpus is available via the 'speaker_info' attribute, in the HuggingFace version only a subset of metadata is available, namely: the date, the name of the speaker, their gender, year of birth, party affiliation at that point in time, status of the party at that point in time (coalition or opposition), and party orientation (left, right, centre etc.).\n\nDifferent to the original dataset, this version has also a 'text_normalised' attribute, which contains the text with parliamentary comments ('[[Applause]]' and similar) removed.\n\nIf you use the dataset, please cite the following paper:" ]
[ "TAGS\n#region-us \n", "# The Croatian Parliamentary Spoken Dataset ParlaSpeech-HR 2.0\n\nURL\n\nThe ParlaSpeech-HR dataset is built from the transcripts of parliamentary proceedings available in the Croatian part of the ParlaMint corpus, and the parliamentary recordings available from the Croatian Parliament's YouTube channel.\n\nThe corpus consists of audio segments that correspond to specific sentences in the transcripts. The transcript contains word-level alignments to the recordings, each instance consisting of character and millisecond start and end offsets, allowing for simple further segmentation of long sentences into shorter segments for ASR and other memory-sensitive applications. Sequences longer than 30 seconds have already been removed from this dataset, which should allow for a simple usage on most modern GPUs.\n\nEach segment has an identifier reference to the ParlaMint 4.0 corpus (URL via the utterance ID and character offsets.\n\nWhile in the original dataset all the speaker information from the ParlaMint corpus is available via the 'speaker_info' attribute, in the HuggingFace version only a subset of metadata is available, namely: the date, the name of the speaker, their gender, year of birth, party affiliation at that point in time, status of the party at that point in time (coalition or opposition), and party orientation (left, right, centre etc.).\n\nDifferent to the original dataset, this version has also a 'text_normalised' attribute, which contains the text with parliamentary comments ('[[Applause]]' and similar) removed.\n\nIf you use the dataset, please cite the following paper:" ]
cd2469ccebefa5d7263b233f427e79f59ef8fd86
# The Polish Parliamentary Spoken Dataset ParlaSpeech-PL 1.0 http://hdl.handle.net/11356/1686 The ParlaSpeech-PL dataset is built from the transcripts of parliamentary proceedings available in the Polish part of the ParlaMint corpus, and the parliamentary recordings available from the Polish Parliament's YouTube channel. The corpus consists of audio segments that correspond to specific sentences in the transcripts. The transcript contains word-level alignments to the recordings, each instance consisting of character and millisecond start and end offsets, allowing for simple further segmentation of long sentences into shorter segments for ASR and other memory-sensitive applications. Sequences longer than 30 seconds have already been removed from this dataset, which should allow for a simple usage on most modern GPUs. Each segment has an identifier reference to the ParlaMint 4.0 corpus (http://hdl.handle.net/11356/1859) via the utterance ID and character offsets. While in the original dataset all the speaker information from the ParlaMint corpus is available via the `speaker_info` attribute, in the HuggingFace version only a subset of metadata is available, namely: the date, the name of the speaker, their gender, year of birth, party affiliation at that point in time, status of the party at that point in time (coalition or opposition), and party orientation (left, right, centre etc.). Different to the original dataset, this version has also a `text_normalised` attribute, which contains the text with parliamentary comments (`[[Applause]]` and similar) removed. If you use the dataset, please cite the following paper: ``` @inproceedings{ljubesic-etal-2022-parlaspeech, title = "{P}arla{S}peech-{HR} - a Freely Available {ASR} Dataset for {C}roatian Bootstrapped from the {P}arla{M}int Corpus", author = "Ljube{\v{s}}i{\'c}, Nikola and Kor{\v{z}}inek, Danijel and Rupnik, Peter and Jazbec, Ivo-Pavao", editor = "Fi{\v{s}}er, Darja and Eskevich, Maria and Lenardi{\v{c}}, Jakob and de Jong, Franciska", booktitle = "Proceedings of the Workshop ParlaCLARIN III within the 13th Language Resources and Evaluation Conference", month = jun, year = "2022", address = "Marseille, France", publisher = "European Language Resources Association", url = "https://aclanthology.org/2022.parlaclarin-1.16", pages = "111--116", } ```
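The per-segment speaker metadata makes it easy to carve out targeted subsets before any audio processing. A minimal, illustrative sketch — the literal label values (e.g. for `party_status` or `speaker_gender`) are assumptions and should be checked against a few records first:

```python
from datasets import load_dataset

ds = load_dataset("classla/ParlaSpeech-PL", split="train", streaming=True)

# Example: short clips (< 10 s) from opposition speakers; the label value
# "Opposition" is a guess -- inspect the data before relying on it.
subset = ds.filter(
    lambda ex: ex["party_status"] == "Opposition" and ex["audio_length"] < 10.0
)

for ex in subset.take(3):
    print(ex["speaker_name"], ex["date"], ex["text_normalised"])
```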
classla/ParlaSpeech-PL
[ "region:us" ]
2024-02-06T10:49:13+00:00
{"dataset_info": {"features": [{"name": "id", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "text", "dtype": "string"}, {"name": "text_normalised", "dtype": "string"}, {"name": "words", "list": [{"name": "char_e", "dtype": "int64"}, {"name": "char_s", "dtype": "int64"}, {"name": "time_e", "dtype": "float64"}, {"name": "time_s", "dtype": "float64"}]}, {"name": "audio_length", "dtype": "float64"}, {"name": "date", "dtype": "string"}, {"name": "speaker_name", "dtype": "string"}, {"name": "speaker_gender", "dtype": "string"}, {"name": "speaker_birth", "dtype": "string"}, {"name": "speaker_party", "dtype": "string"}, {"name": "party_orientation", "dtype": "string"}, {"name": "party_status", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 61274022869.885, "num_examples": 530773}], "download_size": 60791222740, "dataset_size": 61274022869.885}, "configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "data/train-*"}]}]}
2024-02-09T07:51:35+00:00
[]
[]
TAGS #region-us
# The Polish Parliamentary Spoken Dataset ParlaSpeech-PL 1.0 URL The ParlaSpeech-PL dataset is built from the transcripts of parliamentary proceedings available in the Polish part of the ParlaMint corpus, and the parliamentary recordings available from the Polish Parliament's YouTube channel. The corpus consists of audio segments that correspond to specific sentences in the transcripts. The transcript contains word-level alignments to the recordings, each instance consisting of character and millisecond start and end offsets, allowing for simple further segmentation of long sentences into shorter segments for ASR and other memory-sensitive applications. Sequences longer than 30 seconds have already been removed from this dataset, which should allow for a simple usage on most modern GPUs. Each segment has an identifier reference to the ParlaMint 4.0 corpus (URL via the utterance ID and character offsets. While in the original dataset all the speaker information from the ParlaMint corpus is available via the 'speaker_info' attribute, in the HuggingFace version only a subset of metadata is available, namely: the date, the name of the speaker, their gender, year of birth, party affiliation at that point in time, status of the party at that point in time (coalition or opposition), and party orientation (left, right, centre etc.). Different to the original dataset, this version has also a 'text_normalised' attribute, which contains the text with parliamentary comments ('[[Applause]]' and similar) removed. If you use the dataset, please cite the following paper:
[ "# The Polish Parliamentary Spoken Dataset ParlaSpeech-PL 1.0\n\nURL\n\nThe ParlaSpeech-PL dataset is built from the transcripts of parliamentary proceedings available in the Polish part of the ParlaMint corpus, and the parliamentary recordings available from the Polish Parliament's YouTube channel.\n\nThe corpus consists of audio segments that correspond to specific sentences in the transcripts. The transcript contains word-level alignments to the recordings, each instance consisting of character and millisecond start and end offsets, allowing for simple further segmentation of long sentences into shorter segments for ASR and other memory-sensitive applications. Sequences longer than 30 seconds have already been removed from this dataset, which should allow for a simple usage on most modern GPUs.\n\nEach segment has an identifier reference to the ParlaMint 4.0 corpus (URL via the utterance ID and character offsets.\n\nWhile in the original dataset all the speaker information from the ParlaMint corpus is available via the 'speaker_info' attribute, in the HuggingFace version only a subset of metadata is available, namely: the date, the name of the speaker, their gender, year of birth, party affiliation at that point in time, status of the party at that point in time (coalition or opposition), and party orientation (left, right, centre etc.).\n\nDifferent to the original dataset, this version has also a 'text_normalised' attribute, which contains the text with parliamentary comments ('[[Applause]]' and similar) removed.\n\nIf you use the dataset, please cite the following paper:" ]
[ "TAGS\n#region-us \n", "# The Polish Parliamentary Spoken Dataset ParlaSpeech-PL 1.0\n\nURL\n\nThe ParlaSpeech-PL dataset is built from the transcripts of parliamentary proceedings available in the Polish part of the ParlaMint corpus, and the parliamentary recordings available from the Polish Parliament's YouTube channel.\n\nThe corpus consists of audio segments that correspond to specific sentences in the transcripts. The transcript contains word-level alignments to the recordings, each instance consisting of character and millisecond start and end offsets, allowing for simple further segmentation of long sentences into shorter segments for ASR and other memory-sensitive applications. Sequences longer than 30 seconds have already been removed from this dataset, which should allow for a simple usage on most modern GPUs.\n\nEach segment has an identifier reference to the ParlaMint 4.0 corpus (URL via the utterance ID and character offsets.\n\nWhile in the original dataset all the speaker information from the ParlaMint corpus is available via the 'speaker_info' attribute, in the HuggingFace version only a subset of metadata is available, namely: the date, the name of the speaker, their gender, year of birth, party affiliation at that point in time, status of the party at that point in time (coalition or opposition), and party orientation (left, right, centre etc.).\n\nDifferent to the original dataset, this version has also a 'text_normalised' attribute, which contains the text with parliamentary comments ('[[Applause]]' and similar) removed.\n\nIf you use the dataset, please cite the following paper:" ]
9790e2a2f72dc1f31bda13b7b5a7fd59b8076c89
<s>[INST] Give me a detailed answer on following: What is azentio? [/INST] Thanks for the question. The detailed information on azentio is as follows:- Azentio Software Private Limited is a prominent provider of core software solutions to customers in the Banking, Financial Services, and Insurance (BFSI) sector. Their comprehensive Enterprise Resource Planning (ERP) solutions cater to clients across the Middle East, Africa, and the Asia Pacific regions. With a global reach, Azentio boasts a substantial customer base, serving over 800 clients in more than 65 countries. The company operates with a strong presence, maintaining offices in 12 countries and a dedicated team exceeding 2000 employees. Headquartered in Singapore, Azentio emphasizes innovation and a commitment to delivering best-in-class customer service to its clients worldwide. Azentio's core software suite includes flagship products such as Kastle™ (Universal Banking Solution), Premia Astra™ (Core Insurance Solution), AMLOCK™ (Financial Crime Detection and Management Solution), MFund Plus™ (Wealth & Asset Management Solution), and Orion™ (Enterprise Resource Planning Solution). These products demonstrate the company's deep domain expertise in banking, insurance, asset management, and ERP verticals. The company's mission revolves around collaborating with global customers to transform their business operations. Azentio seeks to achieve this transformation through continuous innovation and a dedication to providing top-notch customer service. As a result, Azentio positions itself as a reliable partner for organizations looking to enhance and optimize their business processes in the dynamic landscape of the BFSI sector. For more detailed information, you can visit Azentio's official website at https://www.azentio.com.</s>
Akshit2000/NewDataHackathon
[ "region:us" ]
2024-02-06T10:52:42+00:00
{}
2024-02-06T10:59:57+00:00
[]
[]
TAGS #region-us
<s>[INST] Give me a detailed answer on following: What is azentio? [/INST] Thanks for the question. The detailed information on azentio is as follows:- Azentio Software Private Limited is a prominent provider of core software solutions to customers in the Banking, Financial Services, and Insurance (BFSI) sector. Their comprehensive Enterprise Resource Planning (ERP) solutions cater to clients across the Middle East, Africa, and the Asia Pacific regions. With a global reach, Azentio boasts a substantial customer base, serving over 800 clients in more than 65 countries. The company operates with a strong presence, maintaining offices in 12 countries and a dedicated team exceeding 2000 employees. Headquartered in Singapore, Azentio emphasizes innovation and a commitment to delivering best-in-class customer service to its clients worldwide. Azentio's core software suite includes flagship products such as Kastle™ (Universal Banking Solution), Premia Astra™ (Core Insurance Solution), AMLOCK™ (Financial Crime Detection and Management Solution), MFund Plus™ (Wealth & Asset Management Solution), and Orion™ (Enterprise Resource Planning Solution). These products demonstrate the company's deep domain expertise in banking, insurance, asset management, and ERP verticals. The company's mission revolves around collaborating with global customers to transform their business operations. Azentio seeks to achieve this transformation through continuous innovation and a dedication to providing top-notch customer service. As a result, Azentio positions itself as a reliable partner for organizations looking to enhance and optimize their business processes in the dynamic landscape of the BFSI sector. For more detailed information, you can visit Azentio's official website at URL.</s>
[]
[ "TAGS\n#region-us \n" ]
c255ee12c757abc11426b538d2c5e06581ce4605
## Dataset Card for Cantonese Alpaca

![Cantonese Alpaca](https://github.com/hon9kon9ize/hon9kon9ize.github.io/blob/main/public/images/alpaca_with_tank.jpg?raw=true)

- repository: (https://github.com/hon9kon9ize/yue-alpaca)

## Dataset Description

This dataset contains Cantonese instruction-following data generated by Gemini Pro using [Stanford's Alpaca](https://github.com/tatsu-lab/stanford_alpaca) prompts for fine-tuning LLMs.

Attention: This dataset is generated by Gemini Pro and has not undergone rigorous verification. The content may contain errors. Please keep this in mind when using it.

## Licensing Information

The dataset is available under the [Creative Commons NonCommercial (CC BY-NC 4.0)](https://creativecommons.org/licenses/by-nc/4.0/legalcode).

## Citation Information

```
@misc{alpaca,
  author = {Rohan Taori and Ishaan Gulrajani and Tianyi Zhang and Yann Dubois and Xuechen Li and Carlos Guestrin and Percy Liang and Tatsunori B. Hashimoto },
  title = {Stanford Alpaca: An Instruction-following LLaMA model},
  year = {2023},
  publisher = {GitHub},
  journal = {GitHub repository},
  howpublished = {\url{https://github.com/tatsu-lab/stanford_alpaca}},
}
```
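For supervised fine-tuning, each record can be rendered into the usual Alpaca prompt template. A hedged sketch — the column names (`instruction`, `input`, `output`) and the `train` split follow the standard Alpaca layout and are assumptions here, so check `ds.column_names` before relying on them:

```python
from datasets import load_dataset

ds = load_dataset("hon9kon9ize/yue-alpaca", split="train")

def to_prompt(example):
    # Standard Alpaca-style template; column names are assumed, not verified.
    if example.get("input"):
        text = (f"### Instruction:\n{example['instruction']}\n\n"
                f"### Input:\n{example['input']}\n\n"
                f"### Response:\n{example['output']}")
    else:
        text = (f"### Instruction:\n{example['instruction']}\n\n"
                f"### Response:\n{example['output']}")
    return {"text": text}

sft_ds = ds.map(to_prompt)
```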
hon9kon9ize/yue-alpaca
[ "size_categories:1K<n<10K", "language:yue", "license:cc-by-nc-4.0", "sft", "alpaca", "region:us" ]
2024-02-06T11:07:45+00:00
{"language": "yue", "license": "cc-by-nc-4.0", "size_categories": ["1K<n<10K"], "tags": ["sft", "alpaca"]}
2024-02-13T11:56:07+00:00
[]
[ "yue" ]
TAGS #size_categories-1K<n<10K #language-Yue Chinese #license-cc-by-nc-4.0 #sft #alpaca #region-us
## Dataset Card for Cantonese Alpaca

!Cantonese Alpaca

- repository: (URL

## Dataset Description

This dataset contains Cantonese instruction-following data generated by Gemini Pro using Stanford's Alpaca prompts for fine-tuning LLMs.

Attention: This dataset is generated by Gemini Pro and has not undergone rigorous verification. The content may contain errors. Please keep this in mind when using it.

## Licensing Information

The dataset is available under the Creative Commons NonCommercial (CC BY-NC 4.0).
[ "## Dataset Card for Cantonese Alpaca\n\n!Cantonese Alpaca\n\n- repository: (URL", "## Dataset Description\n\nThis dataset contains Cantonese instruction-following data generated by Gemini Pro using Stanford's Alpaca prompts for fine-tuning LLMs.\n\nAttention: This dataset is generated by Gemini Pro and has not undergone rigorous verification. The content may contain errors. Please keep this in mind when using it.", "## Licensing Information\n\nThe dataset is available under the Creative Commons NonCommercial (CC BY-NC 4.0)." ]
[ "TAGS\n#size_categories-1K<n<10K #language-Yue Chinese #license-cc-by-nc-4.0 #sft #alpaca #region-us \n", "## Dataset Card for Cantonese Alpaca\n\n!Cantonese Alpaca\n\n- repository: (URL", "## Dataset Description\n\nThis dataset contains Cantonese instruction-following data generated by Gemini Pro using Stanford's Alpaca prompts for fine-tuning LLMs.\n\nAttention: This dataset is generated by Gemini Pro and has not undergone rigorous verification. The content may contain errors. Please keep this in mind when using it.", "## Licensing Information\n\nThe dataset is available under the Creative Commons NonCommercial (CC BY-NC 4.0)." ]
dd2fc0a36ac76264ecc1cdfe11a3672e85e0cf73
This dataset includes labeled sprite animations with 9 frames. More info: https://github.com/PawKanarek/spraix
pawkanarek/spraix_1024_9frames
[ "size_categories:n<1K", "license:gpl-3.0", "art", "region:us" ]
2024-02-06T12:58:54+00:00
{"license": "gpl-3.0", "size_categories": ["n<1K"], "pretty_name": "96 images to train sprite animations", "tags": ["art"]}
2024-02-06T13:03:39+00:00
[]
[]
TAGS #size_categories-n<1K #license-gpl-3.0 #art #region-us
This dataset includes labeled sprite animations with 9 frames. More info: URL
[]
[ "TAGS\n#size_categories-n<1K #license-gpl-3.0 #art #region-us \n" ]
f6ea40757cd7006f9703bdf6adaadff388e22a20
# Commit Message Quality dataset This is the dataset for commit message quality classification, used during processing of [Commit Message Generation dataset](https://huggingface.co/datasets/JetBrains-Research/lca-commit-message-generation) from 🏟️ [Long Code Arena benchmark](https://huggingface.co/spaces/JetBrains-Research/long-code-arena). This is a cleaned and relabeled version of the [dataset](https://zenodo.org/records/7042943#.YxG_ROzMLdo) from 📜 ["Commit Message Matters: Investigating Impact and Evolution of Commit Message Quality", ICSE'23](https://ieeexplore.ieee.org/abstract/document/10172825). We drop "Neither Why nor What" examples, clean all the external references (URLs, issues/PR references) from messages and manually label each sample with the goal of training a binary commit message quality classifier for data filtering in mind. ## How-to Load the data via [`load_dataset`](https://huggingface.co/docs/datasets/v2.14.3/en/package_reference/loading_methods#datasets.load_dataset): ``` from datasets import load_dataset dataset = load_dataset("saridormi/commit-message-quality", split="test") ``` Note that all the data we have is considered to be in the test split. ## Dataset Structure Each example has the following fields: | **Field** | **Description** | |:---------------------|:---------------------------------------------------------------------------| | `url` | Link to commit on GitHub. | | `original_message` | Commit message as it was in the original dataset. | | `message` | Commit message cleaned from external references. | | `original_label` | Commit message label as it was in the original dataset (`Why and What`/`No Why`/`No What`). | | `is_good` | Whether the commit message serves as a good example of a *high quality* commit message (boolean). | | `is_bad` | Whether the commit message serves as a good example of a *low quality* commit message (boolean). | | `binary_label` | Commit message label: `1` for *high quality* messages, `0` for *low quality* messages, `null` for messages not recommended to consider for classifier training. | Data point example: ``` {"url":"https://github.com/spring-projects/spring-boot/commit/7080500db9ecf1cf78ad23503280c713bb6e8649", "original_message":"Upgrade to Commons Lang3 3.6 \n \n Closes gh-9661", "message":"Upgrade to Commons Lang3 3.6", "original_label":"Why and What", "is_good": False, "is_bad": True, "binary_label":0.0, } ```
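Given the field semantics above, the intended filtering workflow is short: drop rows whose `binary_label` is null (not recommended for classifier training) and split the remainder into high- and low-quality examples.

```python
from datasets import load_dataset

ds = load_dataset("saridormi/commit-message-quality", split="test")

# Rows with a null binary_label are not recommended for classifier training.
labeled = ds.filter(lambda ex: ex["binary_label"] is not None)

high_quality = labeled.filter(lambda ex: ex["binary_label"] == 1)
low_quality = labeled.filter(lambda ex: ex["binary_label"] == 0)
print(len(high_quality), len(low_quality))
```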
saridormi/commit-message-quality
[ "task_categories:text-classification", "language:code", "language:en", "license:other", "code", "commit_message_generation", "region:us" ]
2024-02-06T13:08:55+00:00
{"language": ["code", "en"], "license": "other", "task_categories": ["text-classification"], "tags": ["code", "commit_message_generation"], "configs": [{"config_name": "default", "data_files": [{"split": "test", "path": "data.jsonl"}]}]}
2024-02-06T15:18:02+00:00
[]
[ "code", "en" ]
TAGS #task_categories-text-classification #language-code #language-English #license-other #code #commit_message_generation #region-us
Commit Message Quality dataset ============================== This is the dataset for commit message quality classification, used during processing of Commit Message Generation dataset from ️ Long Code Arena benchmark. This is a cleaned and relabeled version of the dataset from "Commit Message Matters: Investigating Impact and Evolution of Commit Message Quality", ICSE'23. We drop "Neither Why nor What" examples, clean all the external references (URLs, issues/PR references) from messages and manually label each sample with the goal of training a binary commit message quality classifier for data filtering in mind. How-to ------ Load the data via 'load\_dataset': Note that all the data we have is considered to be in the test split. Dataset Structure ----------------- Each example has the following fields: Data point example:
[]
[ "TAGS\n#task_categories-text-classification #language-code #language-English #license-other #code #commit_message_generation #region-us \n" ]
02e280686c740ec3ae43524b0279ae32e3823abc
# Korean Wikipedia Morphological Analysis Dataset

- Dummy data intended for testing Korean morphological analysis.
- Created by analyzing Korean Wikipedia text with the Mecab morphological analyzer.
- Wikipedia data source: https://github.com/lovit/kowikitext
- Korean Mecab download page: https://bitbucket.org/eunjeon/mecab-ko-dic/src/master/

## License
[CC-BY-SA 3.0](https://www.creativecommons.org/licenses/by-sa/3.0/), the license under which the [kowiki](https://ko.wikipedia.org/wiki/%EC%9C%84%ED%82%A4%EB%B0%B1%EA%B3%BC:%EC%A0%80%EC%9E%91%EA%B6%8C) dump dataset is released
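A minimal loading sketch (the split names follow the dataset configuration; the per-record field layout produced by Mecab is not documented here, so inspect a record before building on it):

```python
from datasets import load_dataset

# Splits per the dataset config: train, train.sample, dev, test
ds = load_dataset("kimmeoungjun/korean-wiki-pos")
print(ds)
print(ds["train"][0])  # inspect the Mecab token / POS-tag layout
```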
kimmeoungjun/korean-wiki-pos
[ "region:us" ]
2024-02-06T13:33:32+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train", "path": "train.json"}, {"split": "train.sample", "path": "train.sample.json"}, {"split": "dev", "path": "dev.json"}, {"split": "test", "path": "test.json"}]}]}
2024-02-06T14:06:12+00:00
[]
[]
TAGS #region-us
# Korean Wikipedia Morphological Analysis Dataset

- Dummy data intended for testing Korean morphological analysis.
- Created by analyzing Korean Wikipedia text with the Mecab morphological analyzer.
- Wikipedia data source: URL
- Korean Mecab download page: URL

## License
CC-BY-SA 3.0, the license under which the kowiki dump dataset is released
[ "# Korean Wikipedia Morphological Analysis Dataset\n\n- Dummy data intended for testing Korean morphological analysis.\n- Created by analyzing Korean Wikipedia text with the Mecab morphological analyzer.\n- Wikipedia data source: URL\n- Korean Mecab download page: URL", "## License\nCC-BY-SA 3.0, the license under which the kowiki dump dataset is released" ]
[ "TAGS\n#region-us \n", "# Korean Wikipedia Morphological Analysis Dataset\n\n- Dummy data intended for testing Korean morphological analysis.\n- Created by analyzing Korean Wikipedia text with the Mecab morphological analyzer.\n- Wikipedia data source: URL\n- Korean Mecab download page: URL", "## License\nCC-BY-SA 3.0, the license under which the kowiki dump dataset is released" ]
e98b5f1f59a74efc97d95626c4f5efed6a10ebf9
### Dataset Description **Paper**: [SHARE: A Lexicon of Harmful Expressions by Spanish Speakers](https://aclanthology.org/2022.lrec-1.139.pdf) **Point of Contact**: [email protected], [email protected] ### Source Data Telegram ### Licensing Information SHARE is released under the [Apache-2.0 License](http://www.apache.org/licenses/LICENSE-2.0). ### Citation Information ```bibtex @inproceedings{plaza-del-arco-etal-2022-share, title = "{SHARE}: A Lexicon of Harmful Expressions by {S}panish Speakers", author = "{Plaza-del-Arco}, Flor Miriam and Parras Portillo, Ana Belén and López-Úbeda, Pilar and Gil, Beatriz and Martín-Valdivia, María-Teresa", booktitle = "Proceedings of the Thirteenth Language Resources and Evaluation Conference", month = jun, year = "2022", address = "Marseille, France", publisher = "European Language Resources Association", url = "https://aclanthology.org/2022.lrec-1.139", pages = "1307--1316"}
SINAI/share
[ "size_categories:10K<n<100K", "language:es", "license:apache-2.0", "hate speech", "insults", "spanish", "offensive expressions", "offensive language", "region:us" ]
2024-02-06T13:54:44+00:00
{"language": ["es"], "license": "apache-2.0", "size_categories": ["10K<n<100K"], "pretty_name": "SHARE", "tags": ["hate speech", "insults", "spanish", "offensive expressions", "offensive language"]}
2024-02-06T14:16:50+00:00
[]
[ "es" ]
TAGS #size_categories-10K<n<100K #language-Spanish #license-apache-2.0 #hate speech #insults #spanish #offensive expressions #offensive language #region-us
### Dataset Description Paper: SHARE: A Lexicon of Harmful Expressions by Spanish Speakers Point of Contact: URL@URL, maite@URL ### Source Data Telegram ### Licensing Information SHARE is released under the Apache-2.0 License. '''bibtex @inproceedings{plaza-del-arco-etal-2022-share, title = "{SHARE}: A Lexicon of Harmful Expressions by {S}panish Speakers", author = "{Plaza-del-Arco}, Flor Miriam and Parras Portillo, Ana Belén and López-Úbeda, Pilar and Gil, Beatriz and Martín-Valdivia, María-Teresa", booktitle = "Proceedings of the Thirteenth Language Resources and Evaluation Conference", month = jun, year = "2022", address = "Marseille, France", publisher = "European Language Resources Association", url = "URL pages = "1307--1316"}
[ "### Dataset Description\n\nPaper: SHARE: A Lexicon of Harmful Expressions by Spanish Speakers\n\nPoint of Contact: URL@URL, maite@URL", "### Source Data\n\nTelegram", "### Licensing Information\n\nSHARE is released under the Apache-2.0 License. \n\n\n'''bibtex\n@inproceedings{plaza-del-arco-etal-2022-share,\n title = \"{SHARE}: A Lexicon of Harmful Expressions by {S}panish Speakers\",\n author = \"{Plaza-del-Arco}, Flor Miriam and\n Parras Portillo, Ana Belén and\n López-Úbeda, Pilar and\n Gil, Beatriz and\n Martín-Valdivia, María-Teresa\",\n booktitle = \"Proceedings of the Thirteenth Language Resources and Evaluation Conference\",\n month = jun,\n year = \"2022\",\n address = \"Marseille, France\",\n publisher = \"European Language Resources Association\",\n url = \"URL\n pages = \"1307--1316\"}" ]
[ "TAGS\n#size_categories-10K<n<100K #language-Spanish #license-apache-2.0 #hate speech #insults #spanish #offensive expressions #offensive language #region-us \n", "### Dataset Description\n\nPaper: SHARE: A Lexicon of Harmful Expressions by Spanish Speakers\n\nPoint of Contact: URL@URL, maite@URL", "### Source Data\n\nTelegram", "### Licensing Information\n\nSHARE is released under the Apache-2.0 License. \n\n\n'''bibtex\n@inproceedings{plaza-del-arco-etal-2022-share,\n title = \"{SHARE}: A Lexicon of Harmful Expressions by {S}panish Speakers\",\n author = \"{Plaza-del-Arco}, Flor Miriam and\n Parras Portillo, Ana Belén and\n López-Úbeda, Pilar and\n Gil, Beatriz and\n Martín-Valdivia, María-Teresa\",\n booktitle = \"Proceedings of the Thirteenth Language Resources and Evaluation Conference\",\n month = jun,\n year = \"2022\",\n address = \"Marseille, France\",\n publisher = \"European Language Resources Association\",\n url = \"URL\n pages = \"1307--1316\"}" ]
6c3ce2898aa4aa891d6b6e48f2d524761b4adc90
# Whatscooking.restaurants ## Overview This dataset provides detailed information about various restaurants, including their location, cuisine, ratings, and other attributes. It is particularly useful for applications in food and beverage industry analysis, recommendation systems, and geographical studies. ## Dataset Structure Each record in the dataset represents a single restaurant and contains the following fields: - `_id`: A unique identifier for the restaurant record. - `address`: An object containing the building number, coordinates, street, and zipcode of the restaurant. - `borough`: The borough in which the restaurant is located. - `cuisine`: The type of cuisine offered by the restaurant. - `name`: The name of the restaurant. - `restaurant_id`: A unique restaurant ID. - `location`: Geolocation data of the restaurant, in `Point` format. - `stars`: The star rating of the restaurant. - `review_count`: Number of reviews the restaurant has received. - `attributes`: Various attributes of the restaurant, such as `GoodForKids`, `RestaurantsDelivery`, `NoiseLevel`, etc. - `PriceRange`: The price range of the restaurant. - `OutdoorSeating`: Indicates whether the restaurant has outdoor seating. - `HappyHour`: Indicates whether the restaurant offers a happy hour. - `TakeOut`: Indicates whether the restaurant offers takeout services. - `DogsAllowed`: Indicates whether dogs are allowed in the restaurant. - `embedding`: A list of numerical values representing the embedding of the menu and attributes. ## Field Details ### Address Object - `building`: Building number. - `coord`: Array containing longitude and latitude. - `street`: Street name. - `zipcode`: Postal code. ### Location Object - `type`: Type of the geolocation data, typically `"Point"`. - `coordinates`: Array containing longitude and latitude. ### Attributes Object - This object contains several boolean and string fields representing various features and services of the restaurant, such as `GoodForKids`, `RestaurantsDelivery`, `NoiseLevel`, etc. ### Embedding Field - Generated by OpenAI `text-embedding-3-small` with 256 elements. This field consists of an array of floating point numbers. It represents a combined embedding of the restaurant's menu and attributes, useful for similarity searches and machine learning applications. ## Usage This dataset can be utilized for various purposes, including but not limited to: - Analysis of restaurant trends in different boroughs. - Development of recommendation systems based on cuisine, attributes, and location. - Geospatial analysis of restaurant distributions. ## Notes - The dataset is provided "as is" and is intended for informational purposes only. - Users are advised to consider the implications of the embedded data and its use in their applications. 
### Sample Document

```
{
  "_id": {
    "$oid": "6095a34a7c34416a90d3209e"
  },
  "address": {
    "building": "17",
    "coord": [
      -74.1350211,
      40.6369042
    ],
    "street": "Harrison Avenue",
    "zipcode": "10302"
  },
  "borough": "Staten Island",
  "cuisine": "American",
  "name": "Buddy'S Wonder Bar",
  "restaurant_id": "40367442",
  "location": {
    "type": "Point",
    "coordinates": [
      -74.1350211,
      40.6369042
    ]
  },
  "stars": 3.5,
  "review_count": 62,
  "attributes": {
    "BikeParking": "True",
    "RestaurantsReservations": "True",
    "RestaurantsTableService": "True",
    "RestaurantsAttire": "'casual'",
    "Alcohol": "'beer_and_wine'",
    "RestaurantsGoodForGroups": "True",
    "GoodForKids": "True",
    "BusinessParking": "{'garage': False, 'street': True, 'validated': False, 'lot': True, 'valet': False}",
    "WiFi": "u'free'",
    "HasTV": "True",
    "RestaurantsDelivery": "True",
    "WheelchairAccessible": "True",
    "NoiseLevel": "u'average'",
    "GoodForMeal": "{'dessert': False, 'latenight': False, 'lunch': True, 'dinner': True, 'brunch': False, 'breakfast': False}",
    "Ambience": "{'romantic': False, 'intimate': False, 'classy': False, 'hipster': False, 'divey': False, 'touristy': False, 'trendy': False, 'upscale': False, 'casual': True}"
  },
  "menu": [
    "Grilled cheese sandwich",
    "Baked potato",
    "Lasagna",
    "Mozzarella sticks",
    "Mac & cheese",
    "Chicken fingers",
    "Mashed potatoes",
    "Chicken pot pie",
    "Green salad",
    "Meatloaf",
    "Tomato soup",
    "Onion rings"
  ],
  "PriceRange": 2,
  "OutdoorSeating": true,
  "HappyHour": null,
  "TakeOut": true,
  "DogsAllowed": true,
  "embedding": [
    -0.11977468,
    -0.02157107,
    ...
  ]
}
```

## Ingest Data

The small script `ingest.py` can be used to load the data into your MongoDB Atlas cluster.

```
pip install pymongo
pip install datasets

## export MONGODB_ATLAS_URI=<your atlas uri>
```

The `ingest.py`:

```python
import os
from pymongo import MongoClient
import datasets
from datasets import load_dataset
from bson import json_util

uri = os.environ.get('MONGODB_ATLAS_URI')
client = MongoClient(uri)

db_name = 'whatscooking'
collection_name = 'restaurants'
restaurants_collection = client[db_name][collection_name]

dataset = load_dataset("AIatMongoDB/whatscooking.restaurants")

insert_data = []

for restaurant in dataset['train']:
    doc_restaurant = json_util.loads(json_util.dumps(restaurant))
    insert_data.append(doc_restaurant)

    if len(insert_data) == 1000:
        restaurants_collection.insert_many(insert_data)
        print("1000 records ingested")
        insert_data = []

if len(insert_data) > 0:
    restaurants_collection.insert_many(insert_data)
    insert_data = []

print("Data Ingested")
```

## Contact

For any queries or further information regarding this dataset, please open a discussion.
AIatMongoDB/whatscooking.restaurants
[ "language:en", "license:apache-2.0", "region:us" ]
2024-02-06T14:38:02+00:00
{"language": ["en"], "license": "apache-2.0"}
2024-02-11T10:04:51+00:00
[]
[ "en" ]
TAGS #language-English #license-apache-2.0 #region-us
# Whatscooking.restaurants ## Overview This dataset provides detailed information about various restaurants, including their location, cuisine, ratings, and other attributes. It is particularly useful for applications in food and beverage industry analysis, recommendation systems, and geographical studies. ## Dataset Structure Each record in the dataset represents a single restaurant and contains the following fields: - '_id': A unique identifier for the restaurant record. - 'address': An object containing the building number, coordinates, street, and zipcode of the restaurant. - 'borough': The borough in which the restaurant is located. - 'cuisine': The type of cuisine offered by the restaurant. - 'name': The name of the restaurant. - 'restaurant_id': A unique restaurant ID. - 'location': Geolocation data of the restaurant, in 'Point' format. - 'stars': The star rating of the restaurant. - 'review_count': Number of reviews the restaurant has received. - 'attributes': Various attributes of the restaurant, such as 'GoodForKids', 'RestaurantsDelivery', 'NoiseLevel', etc. - 'PriceRange': The price range of the restaurant. - 'OutdoorSeating': Indicates whether the restaurant has outdoor seating. - 'HappyHour': Indicates whether the restaurant offers a happy hour. - 'TakeOut': Indicates whether the restaurant offers takeout services. - 'DogsAllowed': Indicates whether dogs are allowed in the restaurant. - 'embedding': A list of numerical values representing the embedding of the menu and attributes. ## Field Details ### Address Object - 'building': Building number. - 'coord': Array containing longitude and latitude. - 'street': Street name. - 'zipcode': Postal code. ### Location Object - 'type': Type of the geolocation data, typically '"Point"'. - 'coordinates': Array containing longitude and latitude. ### Attributes Object - This object contains several boolean and string fields representing various features and services of the restaurant, such as 'GoodForKids', 'RestaurantsDelivery', 'NoiseLevel', etc. ### Embedding Field - Generated by OpenAI 'text-embedding-3-small' with 256 elements. This field consists of an array of floating point numbers. It represents a combined embedding of the restaurant's menu and attributes, useful for similarity searches and machine learning applications. ## Usage This dataset can be utilized for various purposes, including but not limited to: - Analysis of restaurant trends in different boroughs. - Development of recommendation systems based on cuisine, attributes, and location. - Geospatial analysis of restaurant distributions. ## Notes - The dataset is provided "as is" and is intended for informational purposes only. - Users are advised to consider the implications of the embedded data and its use in their applications. ### Sample Document ## Ingest Data The small script 'URL' can be used to load the data into your MongoDB Atlas cluster. The 'URL': ## Contact For any queries or further information regarding this dataset, please open a disucssion.
[ "# Whatscooking.restaurants", "## Overview\n\nThis dataset provides detailed information about various restaurants, including their location, cuisine, ratings, and other attributes. It is particularly useful for applications in food and beverage industry analysis, recommendation systems, and geographical studies.", "## Dataset Structure\n\nEach record in the dataset represents a single restaurant and contains the following fields:\n\n- '_id': A unique identifier for the restaurant record.\n- 'address': An object containing the building number, coordinates, street, and zipcode of the restaurant.\n- 'borough': The borough in which the restaurant is located.\n- 'cuisine': The type of cuisine offered by the restaurant.\n- 'name': The name of the restaurant.\n- 'restaurant_id': A unique restaurant ID.\n- 'location': Geolocation data of the restaurant, in 'Point' format.\n- 'stars': The star rating of the restaurant.\n- 'review_count': Number of reviews the restaurant has received.\n- 'attributes': Various attributes of the restaurant, such as 'GoodForKids', 'RestaurantsDelivery', 'NoiseLevel', etc.\n- 'PriceRange': The price range of the restaurant.\n- 'OutdoorSeating': Indicates whether the restaurant has outdoor seating.\n- 'HappyHour': Indicates whether the restaurant offers a happy hour.\n- 'TakeOut': Indicates whether the restaurant offers takeout services.\n- 'DogsAllowed': Indicates whether dogs are allowed in the restaurant.\n- 'embedding': A list of numerical values representing the embedding of the menu and attributes.", "## Field Details", "### Address Object\n\n- 'building': Building number.\n- 'coord': Array containing longitude and latitude.\n- 'street': Street name.\n- 'zipcode': Postal code.", "### Location Object\n\n- 'type': Type of the geolocation data, typically '\"Point\"'.\n- 'coordinates': Array containing longitude and latitude.", "### Attributes Object\n\n- This object contains several boolean and string fields representing various features and services of the restaurant, such as 'GoodForKids', 'RestaurantsDelivery', 'NoiseLevel', etc.", "### Embedding Field\n\n- Generated by OpenAI 'text-embedding-3-small' with 256 elements. This field consists of an array of floating point numbers. It represents a combined embedding of the restaurant's menu and attributes, useful for similarity searches and machine learning applications.", "## Usage\n\nThis dataset can be utilized for various purposes, including but not limited to:\n\n- Analysis of restaurant trends in different boroughs.\n- Development of recommendation systems based on cuisine, attributes, and location.\n- Geospatial analysis of restaurant distributions.", "## Notes\n\n- The dataset is provided \"as is\" and is intended for informational purposes only.\n- Users are advised to consider the implications of the embedded data and its use in their applications.", "### Sample Document", "## Ingest Data\n\nThe small script 'URL' can be used to load the data into your MongoDB Atlas cluster. \n\n\nThe 'URL':", "## Contact\n\nFor any queries or further information regarding this dataset, please open a disucssion." ]
[ "TAGS\n#language-English #license-apache-2.0 #region-us \n", "# Whatscooking.restaurants", "## Overview\n\nThis dataset provides detailed information about various restaurants, including their location, cuisine, ratings, and other attributes. It is particularly useful for applications in food and beverage industry analysis, recommendation systems, and geographical studies.", "## Dataset Structure\n\nEach record in the dataset represents a single restaurant and contains the following fields:\n\n- '_id': A unique identifier for the restaurant record.\n- 'address': An object containing the building number, coordinates, street, and zipcode of the restaurant.\n- 'borough': The borough in which the restaurant is located.\n- 'cuisine': The type of cuisine offered by the restaurant.\n- 'name': The name of the restaurant.\n- 'restaurant_id': A unique restaurant ID.\n- 'location': Geolocation data of the restaurant, in 'Point' format.\n- 'stars': The star rating of the restaurant.\n- 'review_count': Number of reviews the restaurant has received.\n- 'attributes': Various attributes of the restaurant, such as 'GoodForKids', 'RestaurantsDelivery', 'NoiseLevel', etc.\n- 'PriceRange': The price range of the restaurant.\n- 'OutdoorSeating': Indicates whether the restaurant has outdoor seating.\n- 'HappyHour': Indicates whether the restaurant offers a happy hour.\n- 'TakeOut': Indicates whether the restaurant offers takeout services.\n- 'DogsAllowed': Indicates whether dogs are allowed in the restaurant.\n- 'embedding': A list of numerical values representing the embedding of the menu and attributes.", "## Field Details", "### Address Object\n\n- 'building': Building number.\n- 'coord': Array containing longitude and latitude.\n- 'street': Street name.\n- 'zipcode': Postal code.", "### Location Object\n\n- 'type': Type of the geolocation data, typically '\"Point\"'.\n- 'coordinates': Array containing longitude and latitude.", "### Attributes Object\n\n- This object contains several boolean and string fields representing various features and services of the restaurant, such as 'GoodForKids', 'RestaurantsDelivery', 'NoiseLevel', etc.", "### Embedding Field\n\n- Generated by OpenAI 'text-embedding-3-small' with 256 elements. This field consists of an array of floating point numbers. It represents a combined embedding of the restaurant's menu and attributes, useful for similarity searches and machine learning applications.", "## Usage\n\nThis dataset can be utilized for various purposes, including but not limited to:\n\n- Analysis of restaurant trends in different boroughs.\n- Development of recommendation systems based on cuisine, attributes, and location.\n- Geospatial analysis of restaurant distributions.", "## Notes\n\n- The dataset is provided \"as is\" and is intended for informational purposes only.\n- Users are advised to consider the implications of the embedded data and its use in their applications.", "### Sample Document", "## Ingest Data\n\nThe small script 'URL' can be used to load the data into your MongoDB Atlas cluster. \n\n\nThe 'URL':", "## Contact\n\nFor any queries or further information regarding this dataset, please open a disucssion." ]
2a9bded23035a0a1d641fe7bc305bfdc2fec89c3
# Dataset Card for Evaluation run of ibivibiv/orthorus-125b-moe-v2 <!-- Provide a quick summary of the dataset. --> Dataset automatically created during the evaluation run of model [ibivibiv/orthorus-125b-moe-v2](https://huggingface.co/ibivibiv/orthorus-125b-moe-v2) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). The dataset is composed of 63 configuration, each one coresponding to one of the evaluated task. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results. An additional configuration "results" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)). To load the details from a run, you can for instance do the following: ```python from datasets import load_dataset data = load_dataset("open-llm-leaderboard/details_ibivibiv__orthorus-125b-moe-v2", "harness_winogrande_5", split="train") ``` ## Latest results These are the [latest results from run 2024-02-06T15:03:25.094734](https://huggingface.co/datasets/open-llm-leaderboard/details_ibivibiv__orthorus-125b-moe-v2/blob/main/results_2024-02-06T15-03-25.094734.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the "latest" split for each eval): ```python { "all": { "acc": 0.22895924430194997, "acc_stderr": 0.029775044140996448, "acc_norm": 0.22892897024882422, "acc_norm_stderr": 0.03055773036734348, "mc1": 0.23745410036719705, "mc1_stderr": 0.014896277441041845, "mc2": 0.48491700399143933, "mc2_stderr": 0.016301526988248338 }, "harness|arc:challenge|25": { "acc": 0.22184300341296928, "acc_stderr": 0.012141659068147884, "acc_norm": 0.2627986348122867, "acc_norm_stderr": 0.012862523175351333 }, "harness|hellaswag|10": { "acc": 0.2591117307309301, "acc_stderr": 0.004372516060164754, "acc_norm": 0.2517426807408883, "acc_norm_stderr": 0.004331271717773866 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.22, "acc_stderr": 0.04163331998932268, "acc_norm": 0.22, "acc_norm_stderr": 0.04163331998932268 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.2740740740740741, "acc_stderr": 0.038532548365520024, "acc_norm": 0.2740740740740741, "acc_norm_stderr": 0.038532548365520024 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.17763157894736842, "acc_stderr": 0.031103182383123398, "acc_norm": 0.17763157894736842, "acc_norm_stderr": 0.031103182383123398 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.3, "acc_stderr": 0.046056618647183814, "acc_norm": 0.3, "acc_norm_stderr": 0.046056618647183814 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.2528301886792453, "acc_stderr": 0.026749899771241238, "acc_norm": 0.2528301886792453, "acc_norm_stderr": 0.026749899771241238 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.2569444444444444, "acc_stderr": 0.03653946969442099, "acc_norm": 0.2569444444444444, "acc_norm_stderr": 0.03653946969442099 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.18, "acc_stderr": 0.038612291966536955, "acc_norm": 0.18, "acc_norm_stderr": 0.038612291966536955 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.26, "acc_stderr": 0.0440844002276808, "acc_norm": 0.26, "acc_norm_stderr": 
0.0440844002276808 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.21, "acc_stderr": 0.040936018074033256, "acc_norm": 0.21, "acc_norm_stderr": 0.040936018074033256 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.20809248554913296, "acc_stderr": 0.030952890217749874, "acc_norm": 0.20809248554913296, "acc_norm_stderr": 0.030952890217749874 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.21568627450980393, "acc_stderr": 0.04092563958237654, "acc_norm": 0.21568627450980393, "acc_norm_stderr": 0.04092563958237654 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.28, "acc_stderr": 0.045126085985421276, "acc_norm": 0.28, "acc_norm_stderr": 0.045126085985421276 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.26382978723404255, "acc_stderr": 0.028809989854102973, "acc_norm": 0.26382978723404255, "acc_norm_stderr": 0.028809989854102973 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.23684210526315788, "acc_stderr": 0.039994238792813365, "acc_norm": 0.23684210526315788, "acc_norm_stderr": 0.039994238792813365 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.2413793103448276, "acc_stderr": 0.03565998174135302, "acc_norm": 0.2413793103448276, "acc_norm_stderr": 0.03565998174135302 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.21164021164021163, "acc_stderr": 0.021037331505262883, "acc_norm": 0.21164021164021163, "acc_norm_stderr": 0.021037331505262883 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.15873015873015872, "acc_stderr": 0.03268454013011744, "acc_norm": 0.15873015873015872, "acc_norm_stderr": 0.03268454013011744 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.18, "acc_stderr": 0.038612291966536934, "acc_norm": 0.18, "acc_norm_stderr": 0.038612291966536934 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.1774193548387097, "acc_stderr": 0.02173254068932927, "acc_norm": 0.1774193548387097, "acc_norm_stderr": 0.02173254068932927 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.15270935960591134, "acc_stderr": 0.02530890453938063, "acc_norm": 0.15270935960591134, "acc_norm_stderr": 0.02530890453938063 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.25, "acc_stderr": 0.04351941398892446, "acc_norm": 0.25, "acc_norm_stderr": 0.04351941398892446 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.21818181818181817, "acc_stderr": 0.03225078108306289, "acc_norm": 0.21818181818181817, "acc_norm_stderr": 0.03225078108306289 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.17676767676767677, "acc_stderr": 0.027178752639044915, "acc_norm": 0.17676767676767677, "acc_norm_stderr": 0.027178752639044915 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.20207253886010362, "acc_stderr": 0.02897908979429673, "acc_norm": 0.20207253886010362, "acc_norm_stderr": 0.02897908979429673 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.24615384615384617, "acc_stderr": 0.02184086699042309, "acc_norm": 0.24615384615384617, "acc_norm_stderr": 0.02184086699042309 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.2111111111111111, "acc_stderr": 0.024882116857655075, "acc_norm": 0.2111111111111111, "acc_norm_stderr": 0.024882116857655075 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.21008403361344538, "acc_stderr": 0.026461398717471874, "acc_norm": 0.21008403361344538, "acc_norm_stderr": 0.026461398717471874 }, "harness|hendrycksTest-high_school_physics|5": { 
"acc": 0.1986754966887417, "acc_stderr": 0.03257847384436776, "acc_norm": 0.1986754966887417, "acc_norm_stderr": 0.03257847384436776 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.1926605504587156, "acc_stderr": 0.016909276884936094, "acc_norm": 0.1926605504587156, "acc_norm_stderr": 0.016909276884936094 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.1527777777777778, "acc_stderr": 0.024536326026134224, "acc_norm": 0.1527777777777778, "acc_norm_stderr": 0.024536326026134224 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.25, "acc_stderr": 0.03039153369274154, "acc_norm": 0.25, "acc_norm_stderr": 0.03039153369274154 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.270042194092827, "acc_stderr": 0.028900721906293426, "acc_norm": 0.270042194092827, "acc_norm_stderr": 0.028900721906293426 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.2645739910313901, "acc_stderr": 0.02960510321703834, "acc_norm": 0.2645739910313901, "acc_norm_stderr": 0.02960510321703834 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.183206106870229, "acc_stderr": 0.03392770926494733, "acc_norm": 0.183206106870229, "acc_norm_stderr": 0.03392770926494733 }, "harness|hendrycksTest-international_law|5": { "acc": 0.2396694214876033, "acc_stderr": 0.03896878985070417, "acc_norm": 0.2396694214876033, "acc_norm_stderr": 0.03896878985070417 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.2962962962962963, "acc_stderr": 0.044143436668549335, "acc_norm": 0.2962962962962963, "acc_norm_stderr": 0.044143436668549335 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.22085889570552147, "acc_stderr": 0.032591773927421776, "acc_norm": 0.22085889570552147, "acc_norm_stderr": 0.032591773927421776 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.3125, "acc_stderr": 0.043994650575715215, "acc_norm": 0.3125, "acc_norm_stderr": 0.043994650575715215 }, "harness|hendrycksTest-management|5": { "acc": 0.21359223300970873, "acc_stderr": 0.04058042015646034, "acc_norm": 0.21359223300970873, "acc_norm_stderr": 0.04058042015646034 }, "harness|hendrycksTest-marketing|5": { "acc": 0.2777777777777778, "acc_stderr": 0.02934311479809447, "acc_norm": 0.2777777777777778, "acc_norm_stderr": 0.02934311479809447 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.28, "acc_stderr": 0.04512608598542127, "acc_norm": 0.28, "acc_norm_stderr": 0.04512608598542127 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.2388250319284802, "acc_stderr": 0.015246803197398675, "acc_norm": 0.2388250319284802, "acc_norm_stderr": 0.015246803197398675 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.24855491329479767, "acc_stderr": 0.023267528432100174, "acc_norm": 0.24855491329479767, "acc_norm_stderr": 0.023267528432100174 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.23798882681564246, "acc_stderr": 0.014242630070574915, "acc_norm": 0.23798882681564246, "acc_norm_stderr": 0.014242630070574915 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.22549019607843138, "acc_stderr": 0.023929155517351284, "acc_norm": 0.22549019607843138, "acc_norm_stderr": 0.023929155517351284 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.1832797427652733, "acc_stderr": 0.021974198848265805, "acc_norm": 0.1832797427652733, "acc_norm_stderr": 0.021974198848265805 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.21604938271604937, "acc_stderr": 0.022899162918445806, "acc_norm": 0.21604938271604937, "acc_norm_stderr": 0.022899162918445806 }, 
"harness|hendrycksTest-professional_accounting|5": { "acc": 0.23404255319148937, "acc_stderr": 0.025257861359432417, "acc_norm": 0.23404255319148937, "acc_norm_stderr": 0.025257861359432417 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.2457627118644068, "acc_stderr": 0.010996156635142692, "acc_norm": 0.2457627118644068, "acc_norm_stderr": 0.010996156635142692 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.18382352941176472, "acc_stderr": 0.023529242185193106, "acc_norm": 0.18382352941176472, "acc_norm_stderr": 0.023529242185193106 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.25163398692810457, "acc_stderr": 0.01755581809132226, "acc_norm": 0.25163398692810457, "acc_norm_stderr": 0.01755581809132226 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.20909090909090908, "acc_stderr": 0.03895091015724136, "acc_norm": 0.20909090909090908, "acc_norm_stderr": 0.03895091015724136 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.18775510204081633, "acc_stderr": 0.02500025603954621, "acc_norm": 0.18775510204081633, "acc_norm_stderr": 0.02500025603954621 }, "harness|hendrycksTest-sociology|5": { "acc": 0.24378109452736318, "acc_stderr": 0.03036049015401465, "acc_norm": 0.24378109452736318, "acc_norm_stderr": 0.03036049015401465 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.25, "acc_stderr": 0.04351941398892446, "acc_norm": 0.25, "acc_norm_stderr": 0.04351941398892446 }, "harness|hendrycksTest-virology|5": { "acc": 0.2891566265060241, "acc_stderr": 0.03529486801511115, "acc_norm": 0.2891566265060241, "acc_norm_stderr": 0.03529486801511115 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.2222222222222222, "acc_stderr": 0.03188578017686399, "acc_norm": 0.2222222222222222, "acc_norm_stderr": 0.03188578017686399 }, "harness|truthfulqa:mc|0": { "mc1": 0.23745410036719705, "mc1_stderr": 0.014896277441041845, "mc2": 0.48491700399143933, "mc2_stderr": 0.016301526988248338 }, "harness|winogrande|5": { "acc": 0.49329123914759276, "acc_stderr": 0.014051220692330349 }, "harness|gsm8k|5": { "acc": 0.0, "acc_stderr": 0.0 } } ``` ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). 
--> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
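As noted above, the aggregated metrics live in the `results` configuration, with the `train` split always pointing at the latest run. A minimal sketch for reading them is shown below; the per-row schema is not documented here, so inspect the columns before relying on any particular field.

```python
# Illustrative only: reads the aggregated results configuration described in this card.
# The "results" config name and the "train" split behaviour (always the latest run)
# come from the card text above; the per-row schema is an assumption to verify.
from datasets import load_dataset

results = load_dataset(
    "open-llm-leaderboard/details_ibivibiv__orthorus-125b-moe-v2",
    "results",
    split="train",
)

print(results)      # inspect the available columns first
print(results[0])   # latest aggregated results record
```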
open-llm-leaderboard/details_ibivibiv__orthorus-125b-moe-v2
[ "region:us" ]
2024-02-06T15:05:35+00:00
{"pretty_name": "Evaluation run of ibivibiv/orthorus-125b-moe-v2", "dataset_summary": "Dataset automatically created during the evaluation run of model [ibivibiv/orthorus-125b-moe-v2](https://huggingface.co/ibivibiv/orthorus-125b-moe-v2) on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)).\n\nTo load the details from a run, you can for instance do the following:\n```python\nfrom datasets import load_dataset\ndata = load_dataset(\"open-llm-leaderboard/details_ibivibiv__orthorus-125b-moe-v2\",\n\t\"harness_winogrande_5\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the [latest results from run 2024-02-06T15:03:25.094734](https://huggingface.co/datasets/open-llm-leaderboard/details_ibivibiv__orthorus-125b-moe-v2/blob/main/results_2024-02-06T15-03-25.094734.json)(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):\n\n```python\n{\n \"all\": {\n \"acc\": 0.22895924430194997,\n \"acc_stderr\": 0.029775044140996448,\n \"acc_norm\": 0.22892897024882422,\n \"acc_norm_stderr\": 0.03055773036734348,\n \"mc1\": 0.23745410036719705,\n \"mc1_stderr\": 0.014896277441041845,\n \"mc2\": 0.48491700399143933,\n \"mc2_stderr\": 0.016301526988248338\n },\n \"harness|arc:challenge|25\": {\n \"acc\": 0.22184300341296928,\n \"acc_stderr\": 0.012141659068147884,\n \"acc_norm\": 0.2627986348122867,\n \"acc_norm_stderr\": 0.012862523175351333\n },\n \"harness|hellaswag|10\": {\n \"acc\": 0.2591117307309301,\n \"acc_stderr\": 0.004372516060164754,\n \"acc_norm\": 0.2517426807408883,\n \"acc_norm_stderr\": 0.004331271717773866\n },\n \"harness|hendrycksTest-abstract_algebra|5\": {\n \"acc\": 0.22,\n \"acc_stderr\": 0.04163331998932268,\n \"acc_norm\": 0.22,\n \"acc_norm_stderr\": 0.04163331998932268\n },\n \"harness|hendrycksTest-anatomy|5\": {\n \"acc\": 0.2740740740740741,\n \"acc_stderr\": 0.038532548365520024,\n \"acc_norm\": 0.2740740740740741,\n \"acc_norm_stderr\": 0.038532548365520024\n },\n \"harness|hendrycksTest-astronomy|5\": {\n \"acc\": 0.17763157894736842,\n \"acc_stderr\": 0.031103182383123398,\n \"acc_norm\": 0.17763157894736842,\n \"acc_norm_stderr\": 0.031103182383123398\n },\n \"harness|hendrycksTest-business_ethics|5\": {\n \"acc\": 0.3,\n \"acc_stderr\": 0.046056618647183814,\n \"acc_norm\": 0.3,\n \"acc_norm_stderr\": 0.046056618647183814\n },\n \"harness|hendrycksTest-clinical_knowledge|5\": {\n \"acc\": 0.2528301886792453,\n \"acc_stderr\": 0.026749899771241238,\n \"acc_norm\": 0.2528301886792453,\n \"acc_norm_stderr\": 0.026749899771241238\n },\n \"harness|hendrycksTest-college_biology|5\": {\n \"acc\": 0.2569444444444444,\n \"acc_stderr\": 0.03653946969442099,\n \"acc_norm\": 0.2569444444444444,\n \"acc_norm_stderr\": 0.03653946969442099\n },\n \"harness|hendrycksTest-college_chemistry|5\": {\n \"acc\": 
0.18,\n \"acc_stderr\": 0.038612291966536955,\n \"acc_norm\": 0.18,\n \"acc_norm_stderr\": 0.038612291966536955\n },\n \"harness|hendrycksTest-college_computer_science|5\": {\n \"acc\": 0.26,\n \"acc_stderr\": 0.0440844002276808,\n \"acc_norm\": 0.26,\n \"acc_norm_stderr\": 0.0440844002276808\n },\n \"harness|hendrycksTest-college_mathematics|5\": {\n \"acc\": 0.21,\n \"acc_stderr\": 0.040936018074033256,\n \"acc_norm\": 0.21,\n \"acc_norm_stderr\": 0.040936018074033256\n },\n \"harness|hendrycksTest-college_medicine|5\": {\n \"acc\": 0.20809248554913296,\n \"acc_stderr\": 0.030952890217749874,\n \"acc_norm\": 0.20809248554913296,\n \"acc_norm_stderr\": 0.030952890217749874\n },\n \"harness|hendrycksTest-college_physics|5\": {\n \"acc\": 0.21568627450980393,\n \"acc_stderr\": 0.04092563958237654,\n \"acc_norm\": 0.21568627450980393,\n \"acc_norm_stderr\": 0.04092563958237654\n },\n \"harness|hendrycksTest-computer_security|5\": {\n \"acc\": 0.28,\n \"acc_stderr\": 0.045126085985421276,\n \"acc_norm\": 0.28,\n \"acc_norm_stderr\": 0.045126085985421276\n },\n \"harness|hendrycksTest-conceptual_physics|5\": {\n \"acc\": 0.26382978723404255,\n \"acc_stderr\": 0.028809989854102973,\n \"acc_norm\": 0.26382978723404255,\n \"acc_norm_stderr\": 0.028809989854102973\n },\n \"harness|hendrycksTest-econometrics|5\": {\n \"acc\": 0.23684210526315788,\n \"acc_stderr\": 0.039994238792813365,\n \"acc_norm\": 0.23684210526315788,\n \"acc_norm_stderr\": 0.039994238792813365\n },\n \"harness|hendrycksTest-electrical_engineering|5\": {\n \"acc\": 0.2413793103448276,\n \"acc_stderr\": 0.03565998174135302,\n \"acc_norm\": 0.2413793103448276,\n \"acc_norm_stderr\": 0.03565998174135302\n },\n \"harness|hendrycksTest-elementary_mathematics|5\": {\n \"acc\": 0.21164021164021163,\n \"acc_stderr\": 0.021037331505262883,\n \"acc_norm\": 0.21164021164021163,\n \"acc_norm_stderr\": 0.021037331505262883\n },\n \"harness|hendrycksTest-formal_logic|5\": {\n \"acc\": 0.15873015873015872,\n \"acc_stderr\": 0.03268454013011744,\n \"acc_norm\": 0.15873015873015872,\n \"acc_norm_stderr\": 0.03268454013011744\n },\n \"harness|hendrycksTest-global_facts|5\": {\n \"acc\": 0.18,\n \"acc_stderr\": 0.038612291966536934,\n \"acc_norm\": 0.18,\n \"acc_norm_stderr\": 0.038612291966536934\n },\n \"harness|hendrycksTest-high_school_biology|5\": {\n \"acc\": 0.1774193548387097,\n \"acc_stderr\": 0.02173254068932927,\n \"acc_norm\": 0.1774193548387097,\n \"acc_norm_stderr\": 0.02173254068932927\n },\n \"harness|hendrycksTest-high_school_chemistry|5\": {\n \"acc\": 0.15270935960591134,\n \"acc_stderr\": 0.02530890453938063,\n \"acc_norm\": 0.15270935960591134,\n \"acc_norm_stderr\": 0.02530890453938063\n },\n \"harness|hendrycksTest-high_school_computer_science|5\": {\n \"acc\": 0.25,\n \"acc_stderr\": 0.04351941398892446,\n \"acc_norm\": 0.25,\n \"acc_norm_stderr\": 0.04351941398892446\n },\n \"harness|hendrycksTest-high_school_european_history|5\": {\n \"acc\": 0.21818181818181817,\n \"acc_stderr\": 0.03225078108306289,\n \"acc_norm\": 0.21818181818181817,\n \"acc_norm_stderr\": 0.03225078108306289\n },\n \"harness|hendrycksTest-high_school_geography|5\": {\n \"acc\": 0.17676767676767677,\n \"acc_stderr\": 0.027178752639044915,\n \"acc_norm\": 0.17676767676767677,\n \"acc_norm_stderr\": 0.027178752639044915\n },\n \"harness|hendrycksTest-high_school_government_and_politics|5\": {\n \"acc\": 0.20207253886010362,\n \"acc_stderr\": 0.02897908979429673,\n \"acc_norm\": 0.20207253886010362,\n \"acc_norm_stderr\": 0.02897908979429673\n },\n 
\"harness|hendrycksTest-high_school_macroeconomics|5\": {\n \"acc\": 0.24615384615384617,\n \"acc_stderr\": 0.02184086699042309,\n \"acc_norm\": 0.24615384615384617,\n \"acc_norm_stderr\": 0.02184086699042309\n },\n \"harness|hendrycksTest-high_school_mathematics|5\": {\n \"acc\": 0.2111111111111111,\n \"acc_stderr\": 0.024882116857655075,\n \"acc_norm\": 0.2111111111111111,\n \"acc_norm_stderr\": 0.024882116857655075\n },\n \"harness|hendrycksTest-high_school_microeconomics|5\": {\n \"acc\": 0.21008403361344538,\n \"acc_stderr\": 0.026461398717471874,\n \"acc_norm\": 0.21008403361344538,\n \"acc_norm_stderr\": 0.026461398717471874\n },\n \"harness|hendrycksTest-high_school_physics|5\": {\n \"acc\": 0.1986754966887417,\n \"acc_stderr\": 0.03257847384436776,\n \"acc_norm\": 0.1986754966887417,\n \"acc_norm_stderr\": 0.03257847384436776\n },\n \"harness|hendrycksTest-high_school_psychology|5\": {\n \"acc\": 0.1926605504587156,\n \"acc_stderr\": 0.016909276884936094,\n \"acc_norm\": 0.1926605504587156,\n \"acc_norm_stderr\": 0.016909276884936094\n },\n \"harness|hendrycksTest-high_school_statistics|5\": {\n \"acc\": 0.1527777777777778,\n \"acc_stderr\": 0.024536326026134224,\n \"acc_norm\": 0.1527777777777778,\n \"acc_norm_stderr\": 0.024536326026134224\n },\n \"harness|hendrycksTest-high_school_us_history|5\": {\n \"acc\": 0.25,\n \"acc_stderr\": 0.03039153369274154,\n \"acc_norm\": 0.25,\n \"acc_norm_stderr\": 0.03039153369274154\n },\n \"harness|hendrycksTest-high_school_world_history|5\": {\n \"acc\": 0.270042194092827,\n \"acc_stderr\": 0.028900721906293426,\n \"acc_norm\": 0.270042194092827,\n \"acc_norm_stderr\": 0.028900721906293426\n },\n \"harness|hendrycksTest-human_aging|5\": {\n \"acc\": 0.2645739910313901,\n \"acc_stderr\": 0.02960510321703834,\n \"acc_norm\": 0.2645739910313901,\n \"acc_norm_stderr\": 0.02960510321703834\n },\n \"harness|hendrycksTest-human_sexuality|5\": {\n \"acc\": 0.183206106870229,\n \"acc_stderr\": 0.03392770926494733,\n \"acc_norm\": 0.183206106870229,\n \"acc_norm_stderr\": 0.03392770926494733\n },\n \"harness|hendrycksTest-international_law|5\": {\n \"acc\": 0.2396694214876033,\n \"acc_stderr\": 0.03896878985070417,\n \"acc_norm\": 0.2396694214876033,\n \"acc_norm_stderr\": 0.03896878985070417\n },\n \"harness|hendrycksTest-jurisprudence|5\": {\n \"acc\": 0.2962962962962963,\n \"acc_stderr\": 0.044143436668549335,\n \"acc_norm\": 0.2962962962962963,\n \"acc_norm_stderr\": 0.044143436668549335\n },\n \"harness|hendrycksTest-logical_fallacies|5\": {\n \"acc\": 0.22085889570552147,\n \"acc_stderr\": 0.032591773927421776,\n \"acc_norm\": 0.22085889570552147,\n \"acc_norm_stderr\": 0.032591773927421776\n },\n \"harness|hendrycksTest-machine_learning|5\": {\n \"acc\": 0.3125,\n \"acc_stderr\": 0.043994650575715215,\n \"acc_norm\": 0.3125,\n \"acc_norm_stderr\": 0.043994650575715215\n },\n \"harness|hendrycksTest-management|5\": {\n \"acc\": 0.21359223300970873,\n \"acc_stderr\": 0.04058042015646034,\n \"acc_norm\": 0.21359223300970873,\n \"acc_norm_stderr\": 0.04058042015646034\n },\n \"harness|hendrycksTest-marketing|5\": {\n \"acc\": 0.2777777777777778,\n \"acc_stderr\": 0.02934311479809447,\n \"acc_norm\": 0.2777777777777778,\n \"acc_norm_stderr\": 0.02934311479809447\n },\n \"harness|hendrycksTest-medical_genetics|5\": {\n \"acc\": 0.28,\n \"acc_stderr\": 0.04512608598542127,\n \"acc_norm\": 0.28,\n \"acc_norm_stderr\": 0.04512608598542127\n },\n \"harness|hendrycksTest-miscellaneous|5\": {\n \"acc\": 0.2388250319284802,\n \"acc_stderr\": 
0.015246803197398675,\n \"acc_norm\": 0.2388250319284802,\n \"acc_norm_stderr\": 0.015246803197398675\n },\n \"harness|hendrycksTest-moral_disputes|5\": {\n \"acc\": 0.24855491329479767,\n \"acc_stderr\": 0.023267528432100174,\n \"acc_norm\": 0.24855491329479767,\n \"acc_norm_stderr\": 0.023267528432100174\n },\n \"harness|hendrycksTest-moral_scenarios|5\": {\n \"acc\": 0.23798882681564246,\n \"acc_stderr\": 0.014242630070574915,\n \"acc_norm\": 0.23798882681564246,\n \"acc_norm_stderr\": 0.014242630070574915\n },\n \"harness|hendrycksTest-nutrition|5\": {\n \"acc\": 0.22549019607843138,\n \"acc_stderr\": 0.023929155517351284,\n \"acc_norm\": 0.22549019607843138,\n \"acc_norm_stderr\": 0.023929155517351284\n },\n \"harness|hendrycksTest-philosophy|5\": {\n \"acc\": 0.1832797427652733,\n \"acc_stderr\": 0.021974198848265805,\n \"acc_norm\": 0.1832797427652733,\n \"acc_norm_stderr\": 0.021974198848265805\n },\n \"harness|hendrycksTest-prehistory|5\": {\n \"acc\": 0.21604938271604937,\n \"acc_stderr\": 0.022899162918445806,\n \"acc_norm\": 0.21604938271604937,\n \"acc_norm_stderr\": 0.022899162918445806\n },\n \"harness|hendrycksTest-professional_accounting|5\": {\n \"acc\": 0.23404255319148937,\n \"acc_stderr\": 0.025257861359432417,\n \"acc_norm\": 0.23404255319148937,\n \"acc_norm_stderr\": 0.025257861359432417\n },\n \"harness|hendrycksTest-professional_law|5\": {\n \"acc\": 0.2457627118644068,\n \"acc_stderr\": 0.010996156635142692,\n \"acc_norm\": 0.2457627118644068,\n \"acc_norm_stderr\": 0.010996156635142692\n },\n \"harness|hendrycksTest-professional_medicine|5\": {\n \"acc\": 0.18382352941176472,\n \"acc_stderr\": 0.023529242185193106,\n \"acc_norm\": 0.18382352941176472,\n \"acc_norm_stderr\": 0.023529242185193106\n },\n \"harness|hendrycksTest-professional_psychology|5\": {\n \"acc\": 0.25163398692810457,\n \"acc_stderr\": 0.01755581809132226,\n \"acc_norm\": 0.25163398692810457,\n \"acc_norm_stderr\": 0.01755581809132226\n },\n \"harness|hendrycksTest-public_relations|5\": {\n \"acc\": 0.20909090909090908,\n \"acc_stderr\": 0.03895091015724136,\n \"acc_norm\": 0.20909090909090908,\n \"acc_norm_stderr\": 0.03895091015724136\n },\n \"harness|hendrycksTest-security_studies|5\": {\n \"acc\": 0.18775510204081633,\n \"acc_stderr\": 0.02500025603954621,\n \"acc_norm\": 0.18775510204081633,\n \"acc_norm_stderr\": 0.02500025603954621\n },\n \"harness|hendrycksTest-sociology|5\": {\n \"acc\": 0.24378109452736318,\n \"acc_stderr\": 0.03036049015401465,\n \"acc_norm\": 0.24378109452736318,\n \"acc_norm_stderr\": 0.03036049015401465\n },\n \"harness|hendrycksTest-us_foreign_policy|5\": {\n \"acc\": 0.25,\n \"acc_stderr\": 0.04351941398892446,\n \"acc_norm\": 0.25,\n \"acc_norm_stderr\": 0.04351941398892446\n },\n \"harness|hendrycksTest-virology|5\": {\n \"acc\": 0.2891566265060241,\n \"acc_stderr\": 0.03529486801511115,\n \"acc_norm\": 0.2891566265060241,\n \"acc_norm_stderr\": 0.03529486801511115\n },\n \"harness|hendrycksTest-world_religions|5\": {\n \"acc\": 0.2222222222222222,\n \"acc_stderr\": 0.03188578017686399,\n \"acc_norm\": 0.2222222222222222,\n \"acc_norm_stderr\": 0.03188578017686399\n },\n \"harness|truthfulqa:mc|0\": {\n \"mc1\": 0.23745410036719705,\n \"mc1_stderr\": 0.014896277441041845,\n \"mc2\": 0.48491700399143933,\n \"mc2_stderr\": 0.016301526988248338\n },\n \"harness|winogrande|5\": {\n \"acc\": 0.49329123914759276,\n \"acc_stderr\": 0.014051220692330349\n },\n \"harness|gsm8k|5\": {\n \"acc\": 0.0,\n \"acc_stderr\": 0.0\n }\n}\n```", "repo_url": 
"https://huggingface.co/ibivibiv/orthorus-125b-moe-v2", "leaderboard_url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard", "point_of_contact": "[email protected]", "configs": [{"config_name": "harness_arc_challenge_25", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|arc:challenge|25_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|arc:challenge|25_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_gsm8k_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|gsm8k|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|gsm8k|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hellaswag_10", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hellaswag|10_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hellaswag|10_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-anatomy|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-astronomy|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-college_biology|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-college_computer_science|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-college_physics|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-computer_security|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-econometrics|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-global_facts|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2024-02-06T15-03-25.094734.parquet", 
"**/details_harness|hendrycksTest-high_school_mathematics|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-human_aging|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-international_law|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-management|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-marketing|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-nutrition|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-philosophy|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-prehistory|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-professional_law|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-public_relations|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-security_studies|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-sociology|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-virology|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-world_religions|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-anatomy|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-astronomy|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-business_ethics|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-clinical_knowledge|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-college_biology|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-college_chemistry|5_2024-02-06T15-03-25.094734.parquet", 
"**/details_harness|hendrycksTest-college_computer_science|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-college_mathematics|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-college_medicine|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-college_physics|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-computer_security|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-conceptual_physics|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-econometrics|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-electrical_engineering|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-elementary_mathematics|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-formal_logic|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-global_facts|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_biology|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_chemistry|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_computer_science|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_european_history|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_geography|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_government_and_politics|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_macroeconomics|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_mathematics|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_microeconomics|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_physics|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_psychology|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_statistics|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_us_history|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-high_school_world_history|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-human_aging|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-human_sexuality|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-international_law|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-jurisprudence|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-logical_fallacies|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-machine_learning|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-management|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-marketing|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-medical_genetics|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-miscellaneous|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-moral_disputes|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-moral_scenarios|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-nutrition|5_2024-02-06T15-03-25.094734.parquet", 
"**/details_harness|hendrycksTest-philosophy|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-prehistory|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-professional_accounting|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-professional_law|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-professional_medicine|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-professional_psychology|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-public_relations|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-security_studies|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-sociology|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-us_foreign_policy|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-virology|5_2024-02-06T15-03-25.094734.parquet", "**/details_harness|hendrycksTest-world_religions|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_abstract_algebra_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-abstract_algebra|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_anatomy_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-anatomy|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-anatomy|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_astronomy_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-astronomy|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-astronomy|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_business_ethics_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-business_ethics|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_clinical_knowledge_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-clinical_knowledge|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_biology_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-college_biology|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_biology|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_chemistry_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_chemistry|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_computer_science_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": 
["**/details_harness|hendrycksTest-college_computer_science|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_computer_science|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_mathematics_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_mathematics|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_medicine_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_medicine|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_college_physics_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-college_physics|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-college_physics|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_computer_security_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-computer_security|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-computer_security|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_conceptual_physics_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-conceptual_physics|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_econometrics_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-econometrics|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-econometrics|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_electrical_engineering_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-electrical_engineering|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_elementary_mathematics_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-elementary_mathematics|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_formal_logic_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-formal_logic|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_global_facts_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-global_facts|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": 
["**/details_harness|hendrycksTest-global_facts|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_biology_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_biology|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_chemistry_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_chemistry|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_computer_science_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_computer_science|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_european_history_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_european_history|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_geography_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_geography|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_government_and_politics_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_government_and_politics|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_macroeconomics_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_macroeconomics|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_mathematics_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_mathematics|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_microeconomics_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_microeconomics|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_physics_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": 
["**/details_harness|hendrycksTest-high_school_physics|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_physics|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_psychology_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_psychology|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_statistics_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_statistics|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_us_history_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_us_history|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_high_school_world_history_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-high_school_world_history|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_aging_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-human_aging|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_aging|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_human_sexuality_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-human_sexuality|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_international_law_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-international_law|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-international_law|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_jurisprudence_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-jurisprudence|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_logical_fallacies_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-logical_fallacies|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_machine_learning_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2024-02-06T15-03-25.094734.parquet"]}, 
{"split": "latest", "path": ["**/details_harness|hendrycksTest-machine_learning|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_management_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-management|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-management|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_marketing_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-marketing|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-marketing|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_medical_genetics_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-medical_genetics|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_miscellaneous_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-miscellaneous|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_disputes_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_disputes|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_moral_scenarios_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-moral_scenarios|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_nutrition_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-nutrition|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-nutrition|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_philosophy_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-philosophy|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-philosophy|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_prehistory_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-prehistory|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-prehistory|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_accounting_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_accounting|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_law_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": 
["**/details_harness|hendrycksTest-professional_law|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_law|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_medicine_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_medicine|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_professional_psychology_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-professional_psychology|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_public_relations_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-public_relations|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-public_relations|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_security_studies_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-security_studies|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-security_studies|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_sociology_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-sociology|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-sociology|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_us_foreign_policy_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-us_foreign_policy|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_virology_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-virology|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-virology|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_hendrycksTest_world_religions_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|hendrycksTest-world_religions|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|hendrycksTest-world_religions|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_truthfulqa_mc_0", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|truthfulqa:mc|0_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|truthfulqa:mc|0_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "harness_winogrande_5", "data_files": [{"split": "2024_02_06T15_03_25.094734", "path": ["**/details_harness|winogrande|5_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["**/details_harness|winogrande|5_2024-02-06T15-03-25.094734.parquet"]}]}, {"config_name": "results", "data_files": [{"split": 
"2024_02_06T15_03_25.094734", "path": ["results_2024-02-06T15-03-25.094734.parquet"]}, {"split": "latest", "path": ["results_2024-02-06T15-03-25.094734.parquet"]}]}]}
2024-02-06T15:06:02+00:00
[]
[]
TAGS #region-us
# Dataset Card for Evaluation run of ibivibiv/orthorus-125b-moe-v2 Dataset automatically created during the evaluation run of model ibivibiv/orthorus-125b-moe-v2 on the Open LLM Leaderboard. The dataset is composed of 63 configurations, each one corresponding to one of the evaluated tasks. The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split always points to the latest results. An additional configuration "results" stores all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard). To load the details from a run, you can for instance do the following: ## Latest results These are the latest results from run 2024-02-06T15:03:25.094734 (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval): ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
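For instance, assuming the run details are published under the leaderboard's usual `details_<org>__<model>` repository name (the repo id below is therefore an assumption; the config and split names appear in this card's metadata):

```python
from datasets import load_dataset

# Assumed repo id, following the Open LLM Leaderboard "details_<org>__<model>" convention
data = load_dataset(
    "open-llm-leaderboard/details_ibivibiv__orthorus-125b-moe-v2",
    "harness_winogrande_5",  # one of the 63 task configurations listed in the metadata
    split="latest",          # the "latest" split points to the newest evaluation run
)
print(data)
```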
[ "# Dataset Card for Evaluation run of ibivibiv/orthorus-125b-moe-v2\n\n\n\nDataset automatically created during the evaluation run of model ibivibiv/orthorus-125b-moe-v2 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2024-02-06T15:03:25.094734(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#region-us \n", "# Dataset Card for Evaluation run of ibivibiv/orthorus-125b-moe-v2\n\n\n\nDataset automatically created during the evaluation run of model ibivibiv/orthorus-125b-moe-v2 on the Open LLM Leaderboard.\n\nThe dataset is composed of 63 configuration, each one coresponding to one of the evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The \"train\" split is always pointing to the latest results.\n\nAn additional configuration \"results\" store all the aggregated results of the run (and is used to compute and display the aggregated metrics on the Open LLM Leaderboard).\n\nTo load the details from a run, you can for instance do the following:", "## Latest results\n\nThese are the latest results from run 2024-02-06T15:03:25.094734(note that their might be results for other tasks in the repos if successive evals didn't cover the same tasks. You find each in the results and the \"latest\" split for each eval):", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
6cd55c253577da94e4b7f8c9e316eb4dd99d5c59
# Dataset Card for "voxpopuli" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
qmeeus/voxpopuli
[ "region:us" ]
2024-02-06T15:17:31+00:00
{"dataset_info": [{"config_name": "de", "features": [{"name": "audio_id", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "text", "dtype": "string"}, {"name": "language", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 61603981153.568, "num_examples": 108473}, {"name": "validation", "num_bytes": 1149586917.507, "num_examples": 2109}], "download_size": 52060225655, "dataset_size": 62753568071.075}, {"config_name": "es", "features": [{"name": "audio_id", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "text", "dtype": "string"}, {"name": "language", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 36533665201.936, "num_examples": 50922}, {"name": "validation", "num_bytes": 1173444834.383, "num_examples": 1631}], "download_size": 1005381345, "dataset_size": 37707110036.319}, {"config_name": "fr", "features": [{"name": "audio_id", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "text", "dtype": "string"}, {"name": "language", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 48346650213.26, "num_examples": 73561}, {"name": "validation", "num_bytes": 1149779276.605, "num_examples": 1727}], "download_size": 17314564262, "dataset_size": 49496429489.865005}, {"config_name": "nl", "features": [{"name": "audio_id", "dtype": "string"}, {"name": "audio", "dtype": {"audio": {"sampling_rate": 16000}}}, {"name": "text", "dtype": "string"}, {"name": "language", "dtype": "string"}], "splits": [{"name": "train", "num_bytes": 10436544940.608, "num_examples": 20968}, {"name": "validation", "num_bytes": 636925883.64, "num_examples": 1230}], "download_size": 9404833804, "dataset_size": 11073470824.248}], "configs": [{"config_name": "de", "data_files": [{"split": "train", "path": "de/train-*"}, {"split": "validation", "path": "de/validation-*"}]}, {"config_name": "es", "data_files": [{"split": "train", "path": "es/train-*"}, {"split": "validation", "path": "es/validation-*"}]}, {"config_name": "fr", "data_files": [{"split": "train", "path": "fr/train-*"}, {"split": "validation", "path": "fr/validation-*"}]}, {"config_name": "nl", "data_files": [{"split": "train", "path": "nl/train-*"}, {"split": "validation", "path": "nl/validation-*"}]}]}
2024-02-06T23:13:46+00:00
[]
[]
TAGS #region-us
# Dataset Card for "voxpopuli" More Information needed
[ "# Dataset Card for \"voxpopuli\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"voxpopuli\"\n\nMore Information needed" ]
e27d2e9ab94537e023fb4dc9868f15d0bec8f70a
# Text to Image Dataset for Pixel Art style ## Dataset Description The dataset contains 100 examples of images representing different topics, all with the same style.
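To load the dataset, a minimal sketch (the repo id comes from this card; the split name and column layout are undocumented here and are assumptions to verify):

```python
from datasets import load_dataset

# "train" is an assumed split name; print the features before relying on column names
ds = load_dataset("premai-io/sd-ml-assignment", split="train")
print(ds)      # expected ~100 rows, each pairing an image with its text prompt
print(ds[0])
```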
premai-io/sd-ml-assignment
[ "task_categories:text-to-image", "license:mit", "region:us" ]
2024-02-06T15:34:55+00:00
{"license": "mit", "task_categories": ["text-to-image"]}
2024-02-06T21:52:22+00:00
[]
[]
TAGS #task_categories-text-to-image #license-mit #region-us
# Text to Image Dataset for Pixel Art style ## Dataset Description The dataset contains 100 examples of Images representing different topics all with the same style.
[ "# Text to Image Dataset for Pixel Art style", "## Dataset Description\n\nThe dataset contains 100 examples of Images representing different topics all with the same style." ]
[ "TAGS\n#task_categories-text-to-image #license-mit #region-us \n", "# Text to Image Dataset for Pixel Art style", "## Dataset Description\n\nThe dataset contains 100 examples of Images representing different topics all with the same style." ]
a688bd4a1ddb2ac4d4649122a3001c007c11e31b
# Dataset Card for Dataset Name <!-- Provide a quick summary of the dataset. --> ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. 
## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
wesslen/ner-reddit-cooking
[ "prodigy", "region:us" ]
2024-02-06T15:51:49+00:00
{"tags": ["prodigy"]}
2024-02-06T15:51:49+00:00
[]
[]
TAGS #prodigy #region-us
# Dataset Card for Dataset Name ## Dataset Details ### Dataset Description - Curated by: - Funded by [optional]: - Shared by [optional]: - Language(s) (NLP): - License: ### Dataset Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Out-of-Scope Use ## Dataset Structure ## Dataset Creation ### Curation Rationale ### Source Data #### Data Collection and Processing #### Who are the source data producers? ### Annotations [optional] #### Annotation process #### Who are the annotators? #### Personal and Sensitive Information ## Bias, Risks, and Limitations ### Recommendations Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Dataset Card Authors [optional] ## Dataset Card Contact
[ "# Dataset Card for Dataset Name", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
[ "TAGS\n#prodigy #region-us \n", "# Dataset Card for Dataset Name", "## Dataset Details", "### Dataset Description\n\n\n\n\n\n- Curated by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Language(s) (NLP): \n- License:", "### Dataset Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Out-of-Scope Use", "## Dataset Structure", "## Dataset Creation", "### Curation Rationale", "### Source Data", "#### Data Collection and Processing", "#### Who are the source data producers?", "### Annotations [optional]", "#### Annotation process", "#### Who are the annotators?", "#### Personal and Sensitive Information", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Dataset Card Authors [optional]", "## Dataset Card Contact" ]
5bda3a45b2f70310a90bfc4dd32733f67f105dce
## Dataset Information

| # Nodes | # Edges | # Features |
|:-------:|:-------:|:----------:|
| 7,600 | 33,544 | 931 |

## Usage

```python
import dgl
from huggingface_hub import hf_hub_download

hf_hub_download(repo_id="SauravMaheshkar/pareto-actor", filename="processed/actor.bin", local_dir="./data/", repo_type="dataset")

dataset, _ = dgl.load_graphs("./data/processed/actor.bin")
```

Thank you [@severo](https://huggingface.co/severo) for helping me [figure out the usage](https://discuss.huggingface.co/t/can-i-use-a-pickle-file-with-the-data-files-argument-with-datasets/72189/2?u=sauravmaheshkar).

Pre-processed as per the official codebase of https://arxiv.org/abs/2210.02016

## Citations

```
@inproceedings{ju2023multi,
  title={Multi-task Self-supervised Graph Neural Networks Enable Stronger Task Generalization},
  author={Ju, Mingxuan and Zhao, Tong and Wen, Qianlong and Yu, Wenhao and Shah, Neil and Ye, Yanfang and Zhang, Chuxu},
  booktitle={International Conference on Learning Representations},
  year={2023}
}
```

```
@inproceedings{Tang2009SocialIA,
  title={Social influence analysis in large-scale networks},
  author={Jie Tang and Jimeng Sun and Chi Wang and Zi Yang},
  booktitle={Knowledge Discovery and Data Mining},
  year={2009},
  url={https://api.semanticscholar.org/CorpusID:4931148}
}
```
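To sanity-check the graph loaded in the Usage snippet above, a brief inspection sketch (the node-tensor keys stored on the graph are not documented here, so only generic DGL inspection calls are used):

```python
import dgl

graphs, _ = dgl.load_graphs("./data/processed/actor.bin")
g = graphs[0]  # the .bin is expected to hold a single homogeneous graph

print(g.num_nodes(), g.num_edges())  # should match the 7,600 nodes / 33,544 edges above
print(list(g.ndata.keys()))          # node tensors (features/labels) saved with the graph
```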
SauravMaheshkar/pareto-actor
[ "task_categories:graph-ml", "size_categories:1K<n<10K", "license:cc", "art", "arxiv:2210.02016", "region:us" ]
2024-02-06T15:58:42+00:00
{"license": "cc", "size_categories": ["1K<n<10K"], "task_categories": ["graph-ml"], "tags": ["art"]}
2024-02-12T21:37:49+00:00
[ "2210.02016" ]
[]
TAGS #task_categories-graph-ml #size_categories-1K<n<10K #license-cc #art #arxiv-2210.02016 #region-us
Dataset Information ------------------- Usage ----- Thank you @severo for helping me figure out the usage. Pre-processed as per the official codebase of URL s
[]
[ "TAGS\n#task_categories-graph-ml #size_categories-1K<n<10K #license-cc #art #arxiv-2210.02016 #region-us \n" ]
15411286052d857d17ebc99538307c059fcab72b
# Dataset Card for "lmind_nq_train6000_eval6489_v1_qa" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/lmind_nq_train6000_eval6489_v1_qa
[ "region:us" ]
2024-02-06T16:15:25+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train_qa", "path": "data/train_qa-*"}, {"split": "train_ic_qa", "path": "data/train_ic_qa-*"}, {"split": "train_recite_qa", "path": "data/train_recite_qa-*"}, {"split": "eval_qa", "path": "data/eval_qa-*"}, {"split": "eval_ic_qa", "path": "data/eval_ic_qa-*"}, {"split": "eval_recite_qa", "path": "data/eval_recite_qa-*"}, {"split": "all_docs", "path": "data/all_docs-*"}, {"split": "all_docs_eval", "path": "data/all_docs_eval-*"}, {"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "answers", "struct": [{"name": "answer_start", "sequence": "null"}, {"name": "text", "sequence": "string"}]}, {"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train_qa", "num_bytes": 697367, "num_examples": 6000}, {"name": "train_ic_qa", "num_bytes": 4540536, "num_examples": 6000}, {"name": "train_recite_qa", "num_bytes": 4546536, "num_examples": 6000}, {"name": "eval_qa", "num_bytes": 752802, "num_examples": 6489}, {"name": "eval_ic_qa", "num_bytes": 4906186, "num_examples": 6489}, {"name": "eval_recite_qa", "num_bytes": 4912675, "num_examples": 6489}, {"name": "all_docs", "num_bytes": 7126313, "num_examples": 10925}, {"name": "all_docs_eval", "num_bytes": 7125701, "num_examples": 10925}, {"name": "train", "num_bytes": 697367, "num_examples": 6000}, {"name": "validation", "num_bytes": 752802, "num_examples": 6489}], "download_size": 22440510, "dataset_size": 36058285}}
2024-02-06T16:15:57+00:00
[]
[]
TAGS #region-us
# Dataset Card for "lmind_nq_train6000_eval6489_v1_qa" More Information needed
[ "# Dataset Card for \"lmind_nq_train6000_eval6489_v1_qa\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"lmind_nq_train6000_eval6489_v1_qa\"\n\nMore Information needed" ]
65179da7aa0b4f76edfbf0d420baae8eccc3890f
# Dataset Card for "lmind_nq_train6000_eval6489_v1_doc" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/lmind_nq_train6000_eval6489_v1_doc
[ "region:us" ]
2024-02-06T16:15:58+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train_qa", "path": "data/train_qa-*"}, {"split": "train_ic_qa", "path": "data/train_ic_qa-*"}, {"split": "train_recite_qa", "path": "data/train_recite_qa-*"}, {"split": "eval_qa", "path": "data/eval_qa-*"}, {"split": "eval_ic_qa", "path": "data/eval_ic_qa-*"}, {"split": "eval_recite_qa", "path": "data/eval_recite_qa-*"}, {"split": "all_docs", "path": "data/all_docs-*"}, {"split": "all_docs_eval", "path": "data/all_docs_eval-*"}, {"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "answers", "struct": [{"name": "answer_start", "sequence": "null"}, {"name": "text", "sequence": "string"}]}, {"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train_qa", "num_bytes": 697367, "num_examples": 6000}, {"name": "train_ic_qa", "num_bytes": 4540536, "num_examples": 6000}, {"name": "train_recite_qa", "num_bytes": 4546536, "num_examples": 6000}, {"name": "eval_qa", "num_bytes": 752802, "num_examples": 6489}, {"name": "eval_ic_qa", "num_bytes": 4906186, "num_examples": 6489}, {"name": "eval_recite_qa", "num_bytes": 4912675, "num_examples": 6489}, {"name": "all_docs", "num_bytes": 7126313, "num_examples": 10925}, {"name": "all_docs_eval", "num_bytes": 7125701, "num_examples": 10925}, {"name": "train", "num_bytes": 7126313, "num_examples": 10925}, {"name": "validation", "num_bytes": 7126313, "num_examples": 10925}], "download_size": 30529604, "dataset_size": 48860742}}
2024-02-06T16:16:26+00:00
[]
[]
TAGS #region-us
# Dataset Card for "lmind_nq_train6000_eval6489_v1_doc" More Information needed
[ "# Dataset Card for \"lmind_nq_train6000_eval6489_v1_doc\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"lmind_nq_train6000_eval6489_v1_doc\"\n\nMore Information needed" ]
f04430f63b94822e6b9f7342f9985d1506ee303a
# Dataset Card for "lmind_nq_train6000_eval6489_v1_docidx" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/lmind_nq_train6000_eval6489_v1_docidx
[ "region:us" ]
2024-02-06T16:16:26+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train_qa", "path": "data/train_qa-*"}, {"split": "train_ic_qa", "path": "data/train_ic_qa-*"}, {"split": "train_recite_qa", "path": "data/train_recite_qa-*"}, {"split": "eval_qa", "path": "data/eval_qa-*"}, {"split": "eval_ic_qa", "path": "data/eval_ic_qa-*"}, {"split": "eval_recite_qa", "path": "data/eval_recite_qa-*"}, {"split": "all_docs", "path": "data/all_docs-*"}, {"split": "all_docs_eval", "path": "data/all_docs_eval-*"}, {"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "answers", "struct": [{"name": "answer_start", "sequence": "null"}, {"name": "text", "sequence": "string"}]}, {"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train_qa", "num_bytes": 697367, "num_examples": 6000}, {"name": "train_ic_qa", "num_bytes": 4540536, "num_examples": 6000}, {"name": "train_recite_qa", "num_bytes": 4546536, "num_examples": 6000}, {"name": "eval_qa", "num_bytes": 752802, "num_examples": 6489}, {"name": "eval_ic_qa", "num_bytes": 4906186, "num_examples": 6489}, {"name": "eval_recite_qa", "num_bytes": 4912675, "num_examples": 6489}, {"name": "all_docs", "num_bytes": 7126313, "num_examples": 10925}, {"name": "all_docs_eval", "num_bytes": 7125701, "num_examples": 10925}, {"name": "train", "num_bytes": 7126313, "num_examples": 10925}, {"name": "validation", "num_bytes": 7125701, "num_examples": 10925}], "download_size": 30617969, "dataset_size": 48860130}}
2024-02-06T16:16:54+00:00
[]
[]
TAGS #region-us
# Dataset Card for "lmind_nq_train6000_eval6489_v1_docidx" More Information needed
[ "# Dataset Card for \"lmind_nq_train6000_eval6489_v1_docidx\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"lmind_nq_train6000_eval6489_v1_docidx\"\n\nMore Information needed" ]
e60e552c4526c6b7fc104c66216d8a5573e20a0f
# Dataset Card for "lmind_nq_train6000_eval6489_v1_doc_qa" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/lmind_nq_train6000_eval6489_v1_doc_qa
[ "region:us" ]
2024-02-06T16:16:55+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train_qa", "path": "data/train_qa-*"}, {"split": "train_ic_qa", "path": "data/train_ic_qa-*"}, {"split": "train_recite_qa", "path": "data/train_recite_qa-*"}, {"split": "eval_qa", "path": "data/eval_qa-*"}, {"split": "eval_ic_qa", "path": "data/eval_ic_qa-*"}, {"split": "eval_recite_qa", "path": "data/eval_recite_qa-*"}, {"split": "all_docs", "path": "data/all_docs-*"}, {"split": "all_docs_eval", "path": "data/all_docs_eval-*"}, {"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "answers", "struct": [{"name": "answer_start", "sequence": "null"}, {"name": "text", "sequence": "string"}]}, {"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train_qa", "num_bytes": 697367, "num_examples": 6000}, {"name": "train_ic_qa", "num_bytes": 4540536, "num_examples": 6000}, {"name": "train_recite_qa", "num_bytes": 4546536, "num_examples": 6000}, {"name": "eval_qa", "num_bytes": 752802, "num_examples": 6489}, {"name": "eval_ic_qa", "num_bytes": 4906186, "num_examples": 6489}, {"name": "eval_recite_qa", "num_bytes": 4912675, "num_examples": 6489}, {"name": "all_docs", "num_bytes": 7126313, "num_examples": 10925}, {"name": "all_docs_eval", "num_bytes": 7125701, "num_examples": 10925}, {"name": "train", "num_bytes": 7823680, "num_examples": 16925}, {"name": "validation", "num_bytes": 752802, "num_examples": 6489}], "download_size": 26914575, "dataset_size": 43184598}}
2024-02-06T16:17:21+00:00
[]
[]
TAGS #region-us
# Dataset Card for "lmind_nq_train6000_eval6489_v1_doc_qa" More Information needed
[ "# Dataset Card for \"lmind_nq_train6000_eval6489_v1_doc_qa\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"lmind_nq_train6000_eval6489_v1_doc_qa\"\n\nMore Information needed" ]
44972f8dc279d8b2d425de3ea6c38a92cb58976e
# Dataset Card for "lmind_nq_train6000_eval6489_v1_recite_qa" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
tyzhu/lmind_nq_train6000_eval6489_v1_recite_qa
[ "region:us" ]
2024-02-06T16:17:21+00:00
{"configs": [{"config_name": "default", "data_files": [{"split": "train_qa", "path": "data/train_qa-*"}, {"split": "train_ic_qa", "path": "data/train_ic_qa-*"}, {"split": "train_recite_qa", "path": "data/train_recite_qa-*"}, {"split": "eval_qa", "path": "data/eval_qa-*"}, {"split": "eval_ic_qa", "path": "data/eval_ic_qa-*"}, {"split": "eval_recite_qa", "path": "data/eval_recite_qa-*"}, {"split": "all_docs", "path": "data/all_docs-*"}, {"split": "all_docs_eval", "path": "data/all_docs_eval-*"}, {"split": "train", "path": "data/train-*"}, {"split": "validation", "path": "data/validation-*"}]}], "dataset_info": {"features": [{"name": "answers", "struct": [{"name": "answer_start", "sequence": "null"}, {"name": "text", "sequence": "string"}]}, {"name": "inputs", "dtype": "string"}, {"name": "targets", "dtype": "string"}], "splits": [{"name": "train_qa", "num_bytes": 697367, "num_examples": 6000}, {"name": "train_ic_qa", "num_bytes": 4540536, "num_examples": 6000}, {"name": "train_recite_qa", "num_bytes": 4546536, "num_examples": 6000}, {"name": "eval_qa", "num_bytes": 752802, "num_examples": 6489}, {"name": "eval_ic_qa", "num_bytes": 4906186, "num_examples": 6489}, {"name": "eval_recite_qa", "num_bytes": 4912675, "num_examples": 6489}, {"name": "all_docs", "num_bytes": 7126313, "num_examples": 10925}, {"name": "all_docs_eval", "num_bytes": 7125701, "num_examples": 10925}, {"name": "train", "num_bytes": 11672849, "num_examples": 16925}, {"name": "validation", "num_bytes": 4912675, "num_examples": 6489}], "download_size": 31822578, "dataset_size": 51193640}}
2024-02-06T16:17:48+00:00
[]
[]
TAGS #region-us
# Dataset Card for "lmind_nq_train6000_eval6489_v1_recite_qa" More Information needed
[ "# Dataset Card for \"lmind_nq_train6000_eval6489_v1_recite_qa\"\n\nMore Information needed" ]
[ "TAGS\n#region-us \n", "# Dataset Card for \"lmind_nq_train6000_eval6489_v1_recite_qa\"\n\nMore Information needed" ]