import os
import pandas as pd
import nibabel as nib
import datasets
from datasets import (
    GeneratorBasedBuilder,
    SplitGenerator,
    Split,
    DatasetInfo,
    Features,
    Value,
)
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
Please cite these papers when using this dataset:
- CartiMorph: A framework for automated knee articular cartilage morphometrics
- Quantifying Knee Cartilage Shape and Lesion: From Image to Metrics

@article{YAO2024103035,
  title = {CartiMorph: A framework for automated knee articular cartilage morphometrics},
  author = {Yongcheng Yao and Junru Zhong and Liping Zhang and Sheheryar Khan and Weitian Chen},
  journal = {Medical Image Analysis},
  volume = {91},
  pages = {103035},
  year = {2024},
  issn = {1361-8415},
  doi = {10.1016/j.media.2023.103035}
}

@InProceedings{10.1007/978-3-031-82007-6_16,
  author = "Yao, Yongcheng and Chen, Weitian",
  editor = "Wu, Shandong and Shabestari, Behrouz and Xing, Lei",
  title = "Quantifying Knee Cartilage Shape and Lesion: From Image to Metrics",
  booktitle = "Applications of Medical Artificial Intelligence",
  year = "2025",
  publisher = "Springer Nature Switzerland",
  address = "Cham",
  pages = "162--172"
}
"""
_DESCRIPTION = """\
This is the official release of the OAIZIB-CM dataset, consisting of knee MRI
scans and corresponding segmentation masks stored as NIfTI files.
(https://huggingface.co/datasets/YongchengYAO/OAIZIB-CM/blob/load_dataset-support/README.md)
"""
_HOME_PAGE = "https://huggingface.co/datasets/YongchengYAO/OAIZIB-CM"
_LICENSE = "CC BY-NC 4.0"


class OAIZIBCMDataset(GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    @staticmethod
    def load_nifti(example):
        """Map function to load NIfTI images and masks on demand."""
        img_nib = nib.load(example["image_path"])
        image = img_nib.get_fdata().astype("float32")
        mask_nib = nib.load(example["mask_path"])
        mask = mask_nib.get_fdata().astype("float32")
        example["image"] = image
        example["mask"] = mask
        return example
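
    # Usage note (a sketch, not part of the loading pipeline): load_nifti is not
    # called by the builder itself; it is intended to be applied by the user
    # after loading, e.g. with a datasets version that still supports
    # script-based loading:
    #
    #   ds = load_dataset("YongchengYAO/OAIZIB-CM", trust_remote_code=True)
    #   ds = ds.map(OAIZIBCMDataset.load_nifti)  # adds "image" and "mask" arrays
    #
    # The "image" and "mask" columns are created by the map call; they are not
    # part of the Features schema declared in _info below.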

    def _info(self):
        # Define dataset information including feature schema
        return DatasetInfo(
            description=_DESCRIPTION,
            features=Features(
                {
                    "image_path": Value("string"),
                    "mask_path": Value("string"),
                }
            ),
            citation=_CITATION,
            homepage=_HOME_PAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        # Download dataset metadata and data files
        train_csv_url = "train.csv"
        test_csv_url = "test.csv"
        csv_paths = dl_manager.download({"train": train_csv_url, "test": test_csv_url})
        logger.info(f"Downloaded CSV paths: {csv_paths}")
        # Download and extract the main dataset archive
        data_root_dir = dl_manager.download_and_extract("data/OAIZIB-CM.zip")
        data_dir = os.path.join(data_root_dir, "OAIZIB-CM")
        logger.info(f"Data directory set to {data_dir}")
        # Load split metadata
        train_df = pd.read_csv(csv_paths["train"])
        test_df = pd.read_csv(csv_paths["test"])
        # Define split generators for the training and test sets
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={"df": train_df, "split": "train", "data_dir": data_dir},
            ),
            SplitGenerator(
                name=Split.TEST,
                gen_kwargs={"df": test_df, "split": "test", "data_dir": data_dir},
            ),
        ]
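
    # Expected layout of the extracted archive, inferred from the paths used in
    # _generate_examples below:
    #
    #   OAIZIB-CM/
    #   ├── imagesTr/   # training images
    #   ├── labelsTr/   # training masks
    #   ├── imagesTs/   # test images
    #   └── labelsTs/   # test masks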

    def _generate_examples(self, df, split, data_dir):
        # Set up directory paths based on the split
        if split == "train":
            img_dir = os.path.join(data_dir, "imagesTr")
            mask_dir = os.path.join(data_dir, "labelsTr")
        elif split == "test":
            img_dir = os.path.join(data_dir, "imagesTs")
            mask_dir = os.path.join(data_dir, "labelsTs")
        else:
            raise ValueError(f"Unknown split: {split}")
        # Log directories and ensure they exist
        logger.info(f"Looking for {split} images in: {img_dir}")
        logger.info(f"Looking for {split} masks in: {mask_dir}")
        os.makedirs(img_dir, exist_ok=True)
        os.makedirs(mask_dir, exist_ok=True)
        # Process and yield examples
        count = 0
        for idx, row in df.iterrows():
            img_file = row["image"]
            mask_file = row["mask"]
            img_path = os.path.join(img_dir, img_file)
            mask_path = os.path.join(mask_dir, mask_file)
            # Only yield paths; do not load volumes into memory here
            yield idx, {
                "image_path": img_path,
                "mask_path": mask_path,
            }
            count += 1
        logger.info(f"Successfully yielded {count} examples for {split} split")
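
# Example end-to-end usage (a sketch; assumes this script and the data files
# live in the YongchengYAO/OAIZIB-CM repo on the Hugging Face Hub, and a
# datasets version that still supports script-based loading):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("YongchengYAO/OAIZIB-CM", trust_remote_code=True)
#   print(ds)  # DatasetDict with "train" and "test" splits of path pairs
#
#   # Load the NIfTI volumes on demand
#   ds = ds.map(OAIZIBCMDataset.load_nifti)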