# Loading script for the "physicsgen" dataset (eight parquet-backed variants on the Hub).
import os
import datasets
import pyarrow.parquet as pq
from huggingface_hub import hf_hub_download
# Define configurations for each flavor.
# Every variant stores its parquet files under a directory named after the config,
# so data_dir is always identical to the config name.
_CONFIG_DESCRIPTIONS = {
    "sound_baseline": "Physical dataset: baseline variant",
    "sound_reflection": "Physical dataset: reflection variant",
    "sound_diffraction": "Physical dataset: diffraction variant",
    "sound_combined": "Physical dataset: combined variant",
    "lens_p1": "Distortion dataset variant",
    "lens_p2": "Distortion dataset variant",
    "ball_roll": "Double image dataset variant",
    "ball_bounce": "Double image dataset variant",
}
BUILDER_CONFIGS = [
    datasets.BuilderConfig(name=name, description=description, data_dir=name)
    for name, description in _CONFIG_DESCRIPTIONS.items()
]
class MyPhysicalDataset(datasets.GeneratorBasedBuilder):
    """Builder that exposes the physicsgen parquet variants as HF dataset splits.

    Each config maps to one directory on the Hub repo containing
    train/test/eval parquet files; the feature schema depends on which
    of the three config families (sound, lens, ball) is selected.
    """

    BUILDER_CONFIGS = BUILDER_CONFIGS
    VERSION = datasets.Version("1.1.0")

    # Config families that share one feature schema each.
    _SOUND_CONFIGS = ("sound_baseline", "sound_reflection", "sound_diffraction", "sound_combined")
    _LENS_CONFIGS = ("lens_p1", "lens_p2")
    _BALL_CONFIGS = ("ball_roll", "ball_bounce")

    @staticmethod
    def _sound_features():
        """Schema for the sound_* configs."""
        return datasets.Features({
            "lat": datasets.Value("float"),
            "long": datasets.Value("float"),
            "db": datasets.Value("string"),
            "soundmap": datasets.Image(),  # Expects a dict: {"bytes": ...}
            "osm": datasets.Image(),
            "temperature": datasets.Value("int32"),
            "humidity": datasets.Value("int32"),
            "yaw": datasets.Value("float"),
            "sample_id": datasets.Value("int32"),
            "soundmap_512": datasets.Image(),
        })

    @staticmethod
    def _lens_features():
        """Schema for the lens_* configs (camera/distortion parameters only)."""
        return datasets.Features({
            "label_path": datasets.Value("string"),
            "fx": datasets.Value("float"),
            "k1": datasets.Value("float"),
            "k2": datasets.Value("float"),
            "k3": datasets.Value("float"),
            "p1": datasets.Value("float"),
            "p2": datasets.Value("float"),
            "cx": datasets.Value("float"),
        })

    @staticmethod
    def _ball_features():
        """Schema for the ball_* configs."""
        return datasets.Features({
            "ImgName": datasets.Value("string"),
            "StartHeight": datasets.Value("int32"),
            "GroundIncli": datasets.Value("float"),
            "InputTime": datasets.Value("int32"),
            "TargetTime": datasets.Value("int32"),
            "input_image": datasets.Image(),  # Expects {"bytes": ...}
            "target_image": datasets.Image(),
        })

    def _info(self):
        """Pick the feature schema matching the active config family."""
        name = self.config.name
        if name in self._SOUND_CONFIGS:
            features = self._sound_features()
        elif name in self._LENS_CONFIGS:
            features = self._lens_features()
        elif name in self._BALL_CONFIGS:
            features = self._ball_features()
        else:
            raise ValueError(f"Unknown config name: {self.config.name}")
        return datasets.DatasetInfo(
            description="Multiple variant physical tasks dataset stored as parquet files.",
            features=features,
        )

    def _split_generators(self, dl_manager):
        """Fetch the three per-split parquet files from the Hub and declare splits.

        Uses hf_hub_download directly rather than dl_manager, since the files
        live in the dataset repo itself.
        """
        repo_id = "mspitzna/physicsgen"
        local_paths = {}
        for stem in ("train", "test", "eval"):
            local_paths[stem] = hf_hub_download(
                repo_id=repo_id,
                filename=f"{self.config.data_dir}/{stem}.parquet",
                repo_type="dataset",
            )
        # The repo's "eval" file backs the VALIDATION split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"parquet_file": local_paths["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"parquet_file": local_paths["test"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"parquet_file": local_paths["eval"]},
            ),
        ]

    def _generate_examples(self, parquet_file):
        """Yield (index, row) pairs from one parquet file.

        Raw image bytes are wrapped as {"bytes": ...} so datasets.Image
        can decode them; lens_* configs carry no image columns.
        """
        rows = pq.read_table(parquet_file).to_pylist()
        if self.config.name in self._SOUND_CONFIGS:
            image_keys = ("soundmap", "osm", "soundmap_512")
        elif self.config.name in self._BALL_CONFIGS:
            image_keys = ("input_image", "target_image")
        else:
            image_keys = ()
        for idx, row in enumerate(rows):
            for key in image_keys:
                value = row.get(key)
                # Only wrap when the column actually holds raw bytes.
                if isinstance(value, bytes):
                    row[key] = {"bytes": value}
            yield idx, row
|