import json
import os
import datasets
_CITATION = """\
@InProceedings{...},
title = {Your Dataset Title},
author={Your Name},
year={2023}
}
"""
_DESCRIPTION = """\
Dataset containing multi-view images with camera poses, depth maps, and masks for NeRF training.
"""
_LICENSE = "MIT"
class RefRef_test(datasets.GeneratorBasedBuilder):
"""A dataset loader for NeRF-style data with camera poses, depth maps, and masks."""
VERSION = datasets.Version("1.0.0")
    # One BuilderConfig per scene; the config name selects the matching
    # subdirectory of the dataset repository.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="ball",
            version=VERSION,
            description="Multi-view captures of the 'ball' scene",
        ),
        datasets.BuilderConfig(
            name="ampoule",
            version=VERSION,
            description="Multi-view captures of the 'ampoule' scene",
        ),
    ]
def _info(self):
features = datasets.Features({
"image": datasets.Image(),
"depth": datasets.Image(),
"mask": datasets.Image(),
"transform_matrix": datasets.Sequence(
datasets.Sequence(datasets.Value("float64"), length=4),
length=4
),
"rotation": datasets.Value("float32")
})
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage="",
license=_LICENSE,
citation=_CITATION
)
    def _split_generators(self, dl_manager):
        base_url = f"https://huggingface.co/datasets/eztao/RefRef_test/resolve/main/{self.config.name}"
        # Let dl_manager resolve the remote JSON files: it returns local
        # cached paths (or streamable paths in streaming mode) that open()
        # can read. Joining the URL with os.path.join and opening it
        # directly, as before, would fail because open() cannot fetch URLs.
        filepaths = dl_manager.download(
            {split: f"{base_url}/transforms_{split}.json" for split in ["train", "val", "test"]}
        )
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "filepath": filepaths[split],
                    "base_url": base_url,
                    "split": split,
                },
            )
            for split in ["train", "val", "test"]
        ]
    def _generate_examples(self, filepath, base_url, split):
        with open(filepath, "r", encoding="utf-8") as f:
            try:
                data = json.load(f)
            except json.JSONDecodeError as e:
                # A malformed transforms file is unrecoverable for this split,
                # so fail loudly instead of continuing with undefined data.
                raise ValueError(f"Invalid JSON in {filepath}: {e}") from e

        def resolve(path):
            # Frame paths are stored relative to the scene directory,
            # e.g. "./train/r_0"; resolve them against the remote base URL.
            rel = path[2:] if path.startswith("./") else path
            return f"{base_url}/{rel}"

        for frame_idx, frame in enumerate(data.get("frames", [])):
            yield f"{self.config.name}_{split}_{frame_idx}", {
                # "file_path" follows the NeRF convention of omitting the
                # image extension; depth and mask paths already include one.
                "image": resolve(frame["file_path"] + ".png"),
                "depth": resolve(frame["depth_file_path"]),
                "mask": resolve(frame["mask_file_path"]),
                "transform_matrix": frame["transform_matrix"],
                "rotation": frame.get("rotation", 0.0),
            }
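# Usage sketch (an assumption: this script is the loading script of the
# "eztao/RefRef_test" repository on the Hugging Face Hub; streaming mode is
# used so the remote image paths yielded above can be resolved lazily, and
# newer versions of `datasets` may additionally require
# trust_remote_code=True for script-based datasets):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("eztao/RefRef_test", "ball", split="train", streaming=True)
#     sample = next(iter(ds))
#     print(sample["transform_matrix"], sample["rotation"])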