import json
import os
import datasets
_CITATION = """\
@InProceedings{...},
title = {Your Dataset Title},
author={Your Name},
year={2025}
}
"""
_DESCRIPTION = """\
Dataset containing multi-view images with camera poses, depth maps, and masks for NeRF training.
"""
_LICENSE = "MIT"
class RefRefConfig(datasets.BuilderConfig):
"""BuilderConfig for RefRef dataset."""
def __init__(self, scene=None, **kwargs):
"""BuilderConfig for RefRef dataset.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super().__init__(**kwargs)
self.scene = scene
class RefRef(datasets.GeneratorBasedBuilder):
"""A dataset loader for NeRF-style data with camera poses, depth maps, and masks."""
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIG_CLASS = RefRefConfig
BUILDER_CONFIGS = [
RefRefConfig(
name="single-non-convex",
description="Single non-convex scene configuration for RefRef dataset.",
),
RefRefConfig(
name="multiple-non-convex",
description="Multiple non-convex scene configuration for RefRef dataset.",
),
RefRefConfig(
name="single-convex",
description="Single convex scene configuration for RefRef dataset.",
)
]
def _info(self):
features = datasets.Features({
"image": datasets.Image(),
"depth": datasets.Image(),
"mask": datasets.Image(),
"transform_matrix": datasets.Sequence(
datasets.Sequence(datasets.Value("float64"), length=4),
length=4
),
"rotation": datasets.Value("float32")
})
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage="",
license=_LICENSE,
citation=_CITATION
)
    def _split_generators(self, dl_manager):
        # One split per background category; each split points at the remote scene
        # directory holding the transforms_{train,val,test}.json files.
        bg_names = {"textured_cube_scene": "cubeBg", "textured_sphere_scene": "sphereBg",
                    "environment_map_scene": "envMapBg"}
        mat_names = {"single-convex": "singleMatConvex", "single-non-convex": "singleMatNonConvex",
                     "multiple-non-convex": "multiMatNonConvex"}
        scene_dirs = {"textured_sphere_scene": f"{self.config.scene}_sphere",
                      "environment_map_scene": f"{self.config.scene}_hdr",
                      "textured_cube_scene": self.config.scene}
        base_url = "https://huggingface.co/datasets/yinyue27/RefRef_dataset/resolve/main/image_data"
        splits = []
        for cat in ["textured_sphere_scene", "textured_cube_scene", "environment_map_scene"]:
            split_name = f"{bg_names[cat]}_{mat_names[self.config.name]}_{self.config.scene}"
            splits.append(datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "filepaths": os.path.join(f"{base_url}/{cat}/{self.config.name}/", scene_dirs[cat]),
                    "split": split_name,
                },
            ))
        return splits
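    # For reference, _generate_examples below expects each transforms_{split}.json to
    # roughly follow the NeRF-synthetic layout sketched here. The field names are taken
    # from the keys read below; the concrete values are illustrative assumptions only:
    #
    #   {
    #     "frames": [
    #       {
    #         "file_path": "./train/r_0",                # ".png" is appended when loading
    #         "depth_file_path": "./train/r_0_depth.png",
    #         "mask_file_path": "./train/r_0_mask.png",
    #         "transform_matrix": [[1.0, 0.0, 0.0, 0.0], ...],  # 4x4 camera-to-world pose
    #         "rotation": 0.0                            # optional, defaults to 0.0
    #       }
    #     ]
    #   }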
    def _generate_examples(self, filepaths, split):
        # `filepaths` is the scene directory; the frames of its transforms_{train,val,test}.json
        # files are merged into the single split named by `split`.
        for subset in ["train", "val", "test"]:  # do not shadow the `split` argument
            transforms_path = os.path.join(filepaths, f"transforms_{subset}.json")
            try:
                with open(transforms_path, "r", encoding="utf-8") as f:
                    data = json.load(f)
            except (OSError, json.JSONDecodeError):
                print(f"Error reading {transforms_path}")
                continue
            base_dir = os.path.dirname(transforms_path)
            scene_name = os.path.basename(base_dir)
            for frame_idx, frame in enumerate(data.get("frames", [])):
                # Keys must be unique across the merged subsets, so include `subset`.
                yield f"{scene_name}_{subset}_{frame_idx}", {
                    "image": os.path.join(base_dir, frame["file_path"] + ".png"),
                    "depth": os.path.join(base_dir, frame["depth_file_path"]),
                    "mask": os.path.join(base_dir, frame["mask_file_path"]),
                    "transform_matrix": frame["transform_matrix"],
                    "rotation": frame.get("rotation", 0.0),
                }
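# Usage sketch (assumes this script is loaded from the yinyue27/RefRef_dataset repo on the
# Hugging Face Hub, as the URLs above suggest, and that a scene named "ball" exists for the
# chosen config; the scene name is a hypothetical placeholder, not guaranteed by this file):
#
#   from datasets import load_dataset
#
#   ds = load_dataset(
#       "yinyue27/RefRef_dataset",
#       name="single-convex",        # one of the BUILDER_CONFIGS above
#       scene="ball",                # hypothetical scene name, forwarded to RefRefConfig
#       trust_remote_code=True,
#   )
#   sample = ds["cubeBg_singleMatConvex_ball"][0]
#   image, pose = sample["image"], sample["transform_matrix"]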