OAIZIB-CM / OAIZIB-CM.py
YongchengYAO's picture
Update OAIZIB-CM.py
4927426 verified
raw
history blame
4.76 kB
import os
import pandas as pd
import nibabel as nib
import datasets
from datasets import (
GeneratorBasedBuilder,
SplitGenerator,
Split,
DatasetInfo,
Features,
Value,
)
# Module-level logger wired into the `datasets` logging hierarchy.
logger = datasets.logging.get_logger(__name__)

# BibTeX citations for the two papers backing this dataset release.
_CITATION = """\
Please cite these papers when using this dataset:
- CartiMorph: A framework for automated knee articular cartilage morphometrics
- Quantifying Knee Cartilage Shape and Lesion: From Image to Metrics
@article{YAO2024103035,
title = {CartiMorph: A framework for automated knee articular cartilage morphometrics},
journal = {Medical Image Analysis},
author = {Yongcheng Yao and Junru Zhong and Liping Zhang and Sheheryar Khan and Weitian Chen},
volume = {91},
pages = {103035},
year = {2024},
issn = {1361-8415},
doi = {https://doi.org/10.1016/j.media.2023.103035}
}
@InProceedings{10.1007/978-3-031-82007-6_16,
author="Yao, Yongcheng
and Chen, Weitian",
editor="Wu, Shandong
and Shabestari, Behrouz
and Xing, Lei",
title="Quantifying Knee Cartilage Shape and Lesion: From Image to Metrics",
booktitle="Applications of Medical Artificial Intelligence",
year="2025",
publisher="Springer Nature Switzerland",
address="Cham",
pages="162--172"
}
"""

# Short description shown on the Hugging Face Hub dataset card.
_DESCRIPTION = """\
This is the official release of the OAIZIB-CM dataset.
(https://huggingface.co/datasets/YongchengYAO/OAIZIB-CM/blob/load_dataset-support/README.md)
"""

# Dataset homepage and license identifier reported via DatasetInfo.
_HOME_PAGE = "https://huggingface.co/datasets/YongchengYAO/OAIZIB-CM"
_LICENSE = "CC BY-NC 4.0"
class OAIZIBCMDataset(GeneratorBasedBuilder):
    """Dataset builder for OAIZIB-CM knee MRI images and cartilage masks.

    Examples carry only file paths ("image_path", "mask_path"); voxel data
    is loaded lazily via :meth:`load_nifti`, so iterating the dataset stays
    cheap in memory.
    """

    VERSION = datasets.Version("1.0.0")

    @staticmethod
    def load_nifti(example):
        """Map function that loads NIfTI volumes for a single example.

        Args:
            example: dict containing "image_path" and "mask_path".

        Returns:
            The same dict with "image" and "mask" added as float32 arrays.
        """
        img_nib = nib.load(example["image_path"])
        example["image"] = img_nib.get_fdata().astype("float32")
        mask_nib = nib.load(example["mask_path"])
        example["mask"] = mask_nib.get_fdata().astype("float32")
        return example

    def _info(self):
        """Return dataset metadata; features are path strings only."""
        return DatasetInfo(
            description=_DESCRIPTION,
            features=Features(
                {
                    "image_path": Value("string"),
                    "mask_path": Value("string"),
                }
            ),
            citation=_CITATION,
            homepage=_HOME_PAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download split CSVs and the data archive, then define splits.

        Args:
            dl_manager: datasets.DownloadManager used to fetch repo files.

        Returns:
            List of SplitGenerator objects for the train and test splits.
        """
        # Download per-split metadata CSVs from the dataset repo.
        csv_paths = dl_manager.download({"train": "train.csv", "test": "test.csv"})
        logger.info(f"Downloaded CSV paths: {csv_paths}")
        # Extract the main dataset archive; images/masks live under OAIZIB-CM/.
        data_root_dir = dl_manager.download_and_extract("data/OAIZIB-CM.zip")
        data_dir = os.path.join(data_root_dir, "OAIZIB-CM")
        logger.info(f"Data directory set to {data_dir}")
        # Load split metadata (one row per image/mask pair).
        train_df = pd.read_csv(csv_paths["train"])
        test_df = pd.read_csv(csv_paths["test"])
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={"df": train_df, "split": "train", "data_dir": data_dir},
            ),
            SplitGenerator(
                name=Split.TEST,
                gen_kwargs={"df": test_df, "split": "test", "data_dir": data_dir},
            ),
        ]

    def _generate_examples(self, df, split, data_dir):
        """Yield (key, example) pairs of image/mask file paths for a split.

        Args:
            df: DataFrame with "image" and "mask" filename columns.
            split: either "train" or "test"; selects the subdirectories.
            data_dir: extracted dataset root containing imagesTr/labelsTr etc.

        Raises:
            ValueError: if `split` is neither "train" nor "test".
        """
        # Map split name to the nnU-Net-style directory layout.
        if split == "train":
            img_dir = os.path.join(data_dir, "imagesTr")
            mask_dir = os.path.join(data_dir, "labelsTr")
        elif split == "test":
            img_dir = os.path.join(data_dir, "imagesTs")
            mask_dir = os.path.join(data_dir, "labelsTs")
        else:
            raise ValueError(f"Unknown split: {split}")
        logger.info(f"Looking for {split} images in: {img_dir}")
        logger.info(f"Looking for {split} masks in: {mask_dir}")
        os.makedirs(img_dir, exist_ok=True)
        os.makedirs(mask_dir, exist_ok=True)
        # Yield only paths; voxel data is loaded on demand via load_nifti.
        count = 0
        for idx, row in df.iterrows():
            img_path = os.path.join(img_dir, row["image"])
            mask_path = os.path.join(mask_dir, row["mask"])
            # BUG FIX: count was never incremented, so the summary log
            # always reported 0 yielded examples.
            count += 1
            yield idx, {
                "image_path": img_path,
                "mask_path": mask_path,
            }
        logger.info(f"Successfully yielded {count} examples for {split} split")