diff --git a/ci/unit_tests/runner.py b/ci/unit_tests/runner.py
index 145df92b..e470c981 100644
--- a/ci/unit_tests/runner.py
+++ b/ci/unit_tests/runner.py
@@ -12,6 +12,7 @@ from __future__ import annotations
 
 import argparse
+import importlib
 import inspect
 import os
 import sys
 
@@ -107,7 +108,13 @@ def get_default_pattern(loader):
     test_file_name = f"test_{args.bundle}_dist" if args.dist is True else f"test_{args.bundle}"
     test_file = os.path.join(os.path.dirname(__file__), f"{test_file_name}.py")
     if os.path.exists(test_file):
-        tests = unittest.TestLoader().loadTestsFromNames([test_file_name])
+        loader = unittest.TestLoader()
+        # if the test module defines a "test_order" function, use it as the test load order
+        sys.path.append(os.path.dirname(__file__))
+        module = importlib.import_module(test_file_name)
+        if hasattr(module, "test_order"):
+            loader.sortTestMethodsUsing = module.test_order
+        tests = loader.loadTestsFromNames([test_file_name])
     test_runner = unittest.runner.TextTestRunner(
         resultclass=TimeLoggingTestResult, verbosity=args.verbosity, failfast=args.failfast
     )
diff --git a/ci/unit_tests/test_brats_mri_axial_slices_generative_diffusion.py b/ci/unit_tests/test_brats_mri_axial_slices_generative_diffusion.py
index 68a09a55..b5ac192d 100644
--- a/ci/unit_tests/test_brats_mri_axial_slices_generative_diffusion.py
+++ b/ci/unit_tests/test_brats_mri_axial_slices_generative_diffusion.py
@@ -20,6 +20,7 @@
 import numpy as np
 from monai.bundle import ConfigWorkflow
 from parameterized import parameterized
+from utils import check_workflow
 
 TEST_CASE_1 = [
     {
@@ -98,9 +99,7 @@ def test_autoencoder_train(self, override):
             meta_file=os.path.join(bundle_root, "configs/metadata.json"),
             **override,
         )
-        trainer.initialize()
-        trainer.run()
-        trainer.finalize()
+        check_workflow(trainer, check_properties=False)
 
     @parameterized.expand([TEST_CASE_2])
     def test_autoencoder_infer(self, override):
@@ -115,9 +114,7 @@
             meta_file=os.path.join(bundle_root, "configs/metadata.json"),
             **override,
         )
-        inferrer.initialize()
-        inferrer.run()
-        inferrer.finalize()
+        check_workflow(inferrer, check_properties=False)
 
     @parameterized.expand([TEST_CASE_1])
     def test_diffusion_train(self, override):
@@ -134,14 +131,7 @@
             meta_file=os.path.join(bundle_root, "configs/metadata.json"),
             **override,
         )
-        trainer.initialize()
-        # TODO: uncomment the following check after we have monai > 1.2.0
-        # https://github.com/Project-MONAI/MONAI/issues/6602
-        # check_result = trainer.check_properties()
-        # if check_result is not None and len(check_result) > 0:
-        #     raise ValueError(f"check properties for overrided train config failed: {check_result}")
-        trainer.run()
-        trainer.finalize()
+        check_workflow(trainer, check_properties=False)
 
     @parameterized.expand([TEST_CASE_2])
     def test_diffusion_infer(self, override):
@@ -156,9 +146,7 @@
             meta_file=os.path.join(bundle_root, "configs/metadata.json"),
             **override,
         )
-        inferrer.initialize()
-        inferrer.run()
-        inferrer.finalize()
+        check_workflow(inferrer, check_properties=False)
 
 
 if __name__ == "__main__":
diff --git a/ci/unit_tests/test_brats_mri_generative_diffusion.py b/ci/unit_tests/test_brats_mri_generative_diffusion.py
index 0fb1b9f6..3a3c460c 100644
--- a/ci/unit_tests/test_brats_mri_generative_diffusion.py
+++ b/ci/unit_tests/test_brats_mri_generative_diffusion.py
@@ -20,6 +20,7 @@
 import numpy as np
 from monai.bundle import ConfigWorkflow
 from parameterized import parameterized
+from utils import check_workflow
 
 TEST_CASE_1 = [
     {
@@ -112,9 +113,7 @@ def test_autoencoder_train(self, override):
             meta_file=os.path.join(bundle_root, "configs/metadata.json"),
             **override,
         )
-        trainer.initialize()
-        trainer.run()
-        trainer.finalize()
+        check_workflow(trainer, check_properties=False)
 
     @parameterized.expand([TEST_CASE_3])
     def test_autoencoder_infer(self, override):
@@ -129,9 +128,7 @@
             meta_file=os.path.join(bundle_root, "configs/metadata.json"),
             **override,
         )
-        inferrer.initialize()
-        inferrer.run()
-        inferrer.finalize()
+        check_workflow(inferrer, check_properties=False)
 
     @parameterized.expand([TEST_CASE_2])
     def test_diffusion_train(self, override):
@@ -148,14 +145,7 @@
             meta_file=os.path.join(bundle_root, "configs/metadata.json"),
             **override,
         )
-        trainer.initialize()
-        # TODO: uncomment the following check after we have monai > 1.2.0
-        # https://github.com/Project-MONAI/MONAI/issues/6602
-        # check_result = trainer.check_properties()
-        # if check_result is not None and len(check_result) > 0:
-        #     raise ValueError(f"check properties for overrided train config failed: {check_result}")
-        trainer.run()
-        trainer.finalize()
+        check_workflow(trainer, check_properties=False)
 
     @parameterized.expand([TEST_CASE_3])
     def test_diffusion_infer(self, override):
@@ -170,9 +160,7 @@
             meta_file=os.path.join(bundle_root, "configs/metadata.json"),
             **override,
         )
-        inferrer.initialize()
-        inferrer.run()
-        inferrer.finalize()
+        check_workflow(inferrer, check_properties=False)
 
 
 if __name__ == "__main__":
diff --git a/ci/unit_tests/test_brats_mri_segmentation.py b/ci/unit_tests/test_brats_mri_segmentation.py
index 61287df1..33b9a84e 100644
--- a/ci/unit_tests/test_brats_mri_segmentation.py
+++ b/ci/unit_tests/test_brats_mri_segmentation.py
@@ -19,6 +19,7 @@
 import numpy as np
 from monai.bundle import ConfigWorkflow
 from parameterized import parameterized
+from utils import check_workflow
 
 TEST_CASE_1 = [  # train, evaluate
     {
@@ -75,13 +76,7 @@ def test_train_eval_config(self, override):
             meta_file=os.path.join(bundle_root, "configs/metadata.json"),
             **override,
         )
-        trainer.initialize()
-        # check required and optional properties
-        check_result = trainer.check_properties()
-        if check_result is not None and len(check_result) > 0:
-            raise ValueError(f"check properties for train config failed: {check_result}")
-        trainer.run()
-        trainer.finalize()
+        check_workflow(trainer, check_properties=True)
 
         validator = ConfigWorkflow(
             # override train.json, thus set the workflow to "train" rather than "eval"
@@ -91,12 +86,7 @@
             meta_file=os.path.join(bundle_root, "configs/metadata.json"),
             **override,
         )
-        validator.initialize()
-        check_result = validator.check_properties()
-        if check_result is not None and len(check_result) > 0:
-            raise ValueError(f"check properties for overrided train config failed: {check_result}")
-        validator.run()
-        validator.finalize()
+        check_workflow(validator, check_properties=True)
 
     @parameterized.expand([TEST_CASE_2])
     def test_infer_config(self, override):
@@ -110,13 +100,7 @@
             meta_file=os.path.join(bundle_root, "configs/metadata.json"),
             **override,
         )
-        inferrer.initialize()
-        # check required and optional properties
-        check_result = inferrer.check_properties()
-        if check_result is not None and len(check_result) > 0:
-            raise ValueError(f"check properties for inference config failed: {check_result}")
-        inferrer.run()
-        inferrer.finalize()
+        check_workflow(inferrer, check_properties=True)
 
 
 if __name__ == "__main__":
diff --git a/ci/unit_tests/test_endoscopic_inbody_classification.py b/ci/unit_tests/test_endoscopic_inbody_classification.py
index f65f72cc..7313c641 100644
--- a/ci/unit_tests/test_endoscopic_inbody_classification.py
+++ b/ci/unit_tests/test_endoscopic_inbody_classification.py
@@ -19,6 +19,7 @@
 from monai.bundle import ConfigWorkflow
 from monai.data import PILWriter
 from parameterized import parameterized
+from utils import check_workflow
 
 TEST_CASE_1 = [  # train, evaluate
     {
@@ -77,13 +78,7 @@ def test_train_eval_config(self, override):
             meta_file=os.path.join(bundle_root, "configs/metadata.json"),
             **override,
         )
-        trainer.initialize()
-        # check required and optional properties
-        check_result = trainer.check_properties()
-        if check_result is not None and len(check_result) > 0:
-            raise ValueError(f"check properties for train config failed: {check_result}")
-        trainer.run()
-        trainer.finalize()
+        check_workflow(trainer, check_properties=True)
 
         validator = ConfigWorkflow(
             # override train.json, thus set the workflow to "train" rather than "eval"
@@ -93,12 +88,7 @@
             meta_file=os.path.join(bundle_root, "configs/metadata.json"),
             **override,
         )
-        validator.initialize()
-        check_result = validator.check_properties()
-        if check_result is not None and len(check_result) > 0:
-            raise ValueError(f"check properties for overrided train config failed: {check_result}")
-        validator.run()
-        validator.finalize()
+        check_workflow(validator, check_properties=True)
 
     @parameterized.expand([TEST_CASE_2])
     def test_infer_config(self, override):
@@ -112,13 +102,7 @@
             meta_file=os.path.join(bundle_root, "configs/metadata.json"),
             **override,
         )
-        inferrer.initialize()
-        # check required and optional properties
-        check_result = inferrer.check_properties()
-        if check_result is not None and len(check_result) > 0:
-            raise ValueError(f"check properties for inference config failed: {check_result}")
-        inferrer.run()
-        inferrer.finalize()
+        check_workflow(inferrer, check_properties=True)
 
 
 if __name__ == "__main__":
diff --git a/ci/unit_tests/test_pancreas_ct_dints_segmentation.py b/ci/unit_tests/test_pancreas_ct_dints_segmentation.py
new file mode 100644
index 00000000..877eacc1
--- /dev/null
+++ b/ci/unit_tests/test_pancreas_ct_dints_segmentation.py
@@ -0,0 +1,176 @@
+# Copyright (c) MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import shutil
+import subprocess
+import tempfile
+import unittest
+
+import nibabel as nib
+import numpy as np
+from monai.bundle import ConfigWorkflow
+from parameterized import parameterized
+from utils import check_workflow, export_overrided_config
+
+TEST_CASE_1 = [
+    {
+        "bundle_root": "models/pancreas_ct_dints_segmentation",
+        "data_list_file_path": "models/pancreas_ct_dints_segmentation/configs/dataset_0.json",
+        "num_epochs": 1,
+        "num_epochs_per_validation": 1,
+        "num_epochs_warmup": 0,
+        "num_sw_batch_size": 2,
+        "patch_size": [32, 32, 32],
+        "patch_size_valid": [32, 32, 32],
+    }
+]
+
+TEST_CASE_2 = [
+    {
+        "bundle_root": "models/pancreas_ct_dints_segmentation",
+        "train#trainer#max_epochs": 1,
+        "train#dataset#cache_rate": 0,
+        "train#dataloader#num_workers": 1,
+        "validate#dataset#cache_rate": 0,
+        "validate#inferer#roi_size": [32, 32, 32],
+        "validate#inferer#sw_batch_size": 1,
+        "validate#inferer#overlap": 0.1,
+        "validate#dataloader#num_workers": 1,
+        "train#random_transforms#0#spatial_size": [32, 32, 32],
+        "val_interval": 1,
+    }
+]
+
+TEST_CASE_3 = [{"bundle_root": "models/pancreas_ct_dints_segmentation", "validate#inferer#roi_size": [32, 32, 32]}]
+
+TEST_CASE_4 = [{"bundle_root": "models/pancreas_ct_dints_segmentation", "inferer#roi_size": [32, 32, 32]}]
+
+
+def test_order(test_name1, test_name2):
+    def get_order(name):
+        if "search" in name:
+            return 1
+        if "train" in name:
+            return 2
+        if "eval" in name:
+            return 3
+        return 4
+
+    return get_order(test_name1) - get_order(test_name2)
+
+
+def get_searched_arch(path):
+    file_list = os.listdir(path)
+    arch_name = None
+    for f in file_list:
+        if "search_code" in f:
+            arch_name = f
+    if arch_name is None:
+        raise ValueError("Cannot find searched architectures file.")
+
+    return arch_name
+
+
+class TestDints(unittest.TestCase):
+    def setUp(self):
+        self.dataset_dir = tempfile.mkdtemp()
+        dataset_size = 20
+        input_shape = (64, 64, 64)
+        images_folder = os.path.join(self.dataset_dir, "imagesTr")
+        labels_folder = os.path.join(self.dataset_dir, "labelsTr")
+        os.makedirs(images_folder)
+        os.makedirs(labels_folder)
+        for s in range(dataset_size):
+            test_image = np.random.randint(low=0, high=2, size=input_shape).astype(np.int8)
+            test_label = np.random.randint(low=0, high=3, size=input_shape).astype(np.int8)
+            image_filename = os.path.join(images_folder, f"pancreas_{s}.nii.gz")
+            label_filename = os.path.join(labels_folder, f"pancreas_{s}.nii.gz")
+            nib.save(nib.Nifti1Image(test_image, np.eye(4)), image_filename)
+            nib.save(nib.Nifti1Image(test_label, np.eye(4)), label_filename)
+
+        prepare_datalist_file = "models/pancreas_ct_dints_segmentation/scripts/prepare_datalist.py"
+        datalist_file = "models/pancreas_ct_dints_segmentation/configs/dataset_0.json"
+        cmd = f"python {prepare_datalist_file} --path {self.dataset_dir} --output {datalist_file} --train_size 12"
+        call_status = subprocess.run(cmd, shell=True)
+        call_status.check_returncode()
+
+    def tearDown(self):
+        shutil.rmtree(self.dataset_dir)
+
+    @parameterized.expand([TEST_CASE_1])
+    def test_search(self, override):
+        override["data_file_base_dir"] = self.dataset_dir
+        override["arch_ckpt_path"] = os.path.join(override["bundle_root"], "models")
+        output_path = "models/pancreas_ct_dints_segmentation/configs/search_override.json"
+        export_overrided_config("models/pancreas_ct_dints_segmentation/configs/search.yaml", override, output_path)
+        cmd = f"python -m scripts.search run --config_file {output_path}"
+        env = os.environ.copy()
+        # ensure customized library can be loaded in subprocess
+        env["PYTHONPATH"] = override.get("bundle_root", ".")
+        subprocess.check_call(cmd, shell=True, env=env)
+
+    @parameterized.expand([TEST_CASE_2])
+    def test_train(self, override):
+        override["dataset_dir"] = self.dataset_dir
+        bundle_root = override["bundle_root"]
+        arch_name = get_searched_arch(os.path.join(bundle_root, "models"))
+        override["arch_ckpt_path"] = os.path.join(bundle_root, "models", arch_name)
+        train_file = os.path.join(bundle_root, "configs/train.yaml")
+
+        trainer = ConfigWorkflow(
+            workflow="train",
+            config_file=train_file,
+            logging_file=os.path.join(bundle_root, "configs/logging.conf"),
+            meta_file=os.path.join(bundle_root, "configs/metadata.json"),
+            **override,
+        )
+        check_workflow(trainer, check_properties=True)
+
+    @parameterized.expand([TEST_CASE_3])
+    def test_eval(self, override):
+        override["dataset_dir"] = self.dataset_dir
+        bundle_root = override["bundle_root"]
+        arch_name = get_searched_arch(os.path.join(bundle_root, "models"))
+        override["arch_ckpt_path"] = os.path.join(bundle_root, "models", arch_name)
+        train_file = os.path.join(bundle_root, "configs/train.yaml")
+        eval_file = os.path.join(bundle_root, "configs/evaluate.yaml")
+
+        validator = ConfigWorkflow(
+            # override train.yaml, thus set the workflow to "train" rather than "eval"
+            workflow="train",
+            config_file=[train_file, eval_file],
+            logging_file=os.path.join(bundle_root, "configs/logging.conf"),
+            meta_file=os.path.join(bundle_root, "configs/metadata.json"),
+            **override,
+        )
+        check_workflow(validator, check_properties=True)
+
+    @parameterized.expand([TEST_CASE_4])
+    def test_infer_config(self, override):
+        override["dataset_dir"] = self.dataset_dir
+        bundle_root = override["bundle_root"]
+        arch_name = get_searched_arch(os.path.join(bundle_root, "models"))
+        override["arch_ckpt_path"] = os.path.join(bundle_root, "models", arch_name)
+        inferrer = ConfigWorkflow(
+            workflow="infer",
+            config_file=os.path.join(bundle_root, "configs/inference.yaml"),
+            logging_file=os.path.join(bundle_root, "configs/logging.conf"),
+            meta_file=os.path.join(bundle_root, "configs/metadata.json"),
+            **override,
+        )
+        check_workflow(inferrer, check_properties=True)
+
+
+if __name__ == "__main__":
+    loader = unittest.TestLoader()
+    loader.sortTestMethodsUsing = test_order
+    unittest.main(testLoader=loader)
diff --git a/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py b/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py
new file mode 100644
index 00000000..315e3da3
--- /dev/null
+++ b/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py
@@ -0,0 +1,133 @@
+# Copyright (c) MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import shutil
+import subprocess
+import tempfile
+import unittest
+
+import nibabel as nib
+import numpy as np
+import torch
+from parameterized import parameterized
+from utils import export_config_and_run_mgpu_cmd, export_overrided_config
+
+TEST_CASE_1 = [
+    {
+        "bundle_root": "models/pancreas_ct_dints_segmentation",
+        "data_list_file_path": "models/pancreas_ct_dints_segmentation/configs/dataset_0.json",
+        "num_epochs": 1,
+        "num_epochs_per_validation": 1,
+        "num_epochs_warmup": 0,
+        "num_sw_batch_size": 2,
+        "patch_size": [32, 32, 32],
+        "patch_size_valid": [32, 32, 32],
+    }
+]
+
+TEST_CASE_2 = [
+    {
+        "bundle_root": "models/pancreas_ct_dints_segmentation",
+        "train#trainer#max_epochs": 1,
+        "train#dataset#cache_rate": 0,
+        "validate#dataset#cache_rate": 0,
+        "validate#inferer#roi_size": [32, 32, 32],
+        "train#random_transforms#0#spatial_size": [32, 32, 32],
+        "val_interval": 1,
+    }
+]
+
+
+def test_order(test_name1, test_name2):
+    def get_order(name):
+        if "search" in name:
+            return 1
+        return 2
+
+    return get_order(test_name1) - get_order(test_name2)
+
+
+def get_searched_arch(path):
+    file_list = os.listdir(path)
+    arch_name = None
+    for f in file_list:
+        if "search_code" in f:
+            arch_name = f
+    if arch_name is None:
+        raise ValueError("Cannot find searched architectures file.")
+
+    return arch_name
+
+
+class TestDintsMGPU(unittest.TestCase):
+    def setUp(self):
+        self.dataset_dir = tempfile.mkdtemp()
+        dataset_size = 20
+        input_shape = (64, 64, 64)
+        images_folder = os.path.join(self.dataset_dir, "imagesTr")
+        labels_folder = os.path.join(self.dataset_dir, "labelsTr")
+        os.makedirs(images_folder)
+        os.makedirs(labels_folder)
+        for s in range(dataset_size):
+            test_image = np.random.randint(low=0, high=2, size=input_shape).astype(np.int8)
+            test_label = np.random.randint(low=0, high=3, size=input_shape).astype(np.int8)
+            image_filename = os.path.join(images_folder, f"pancreas_{s}.nii.gz")
+            label_filename = os.path.join(labels_folder, f"pancreas_{s}.nii.gz")
+            nib.save(nib.Nifti1Image(test_image, np.eye(4)), image_filename)
+            nib.save(nib.Nifti1Image(test_label, np.eye(4)), label_filename)
+
+        prepare_datalist_file = "models/pancreas_ct_dints_segmentation/scripts/prepare_datalist.py"
+        datalist_file = "models/pancreas_ct_dints_segmentation/configs/dataset_0.json"
+        cmd = f"python {prepare_datalist_file} --path {self.dataset_dir} --output {datalist_file} --train_size 12"
+        call_status = subprocess.run(cmd, shell=True)
+        call_status.check_returncode()
+
+    def tearDown(self):
+        shutil.rmtree(self.dataset_dir)
+
+    @parameterized.expand([TEST_CASE_1])
+    def test_search(self, override):
+        override["data_file_base_dir"] = self.dataset_dir
+        override["arch_ckpt_path"] = os.path.join(override["bundle_root"], "models")
+        output_path = "models/pancreas_ct_dints_segmentation/configs/search_override.json"
+        export_overrided_config("models/pancreas_ct_dints_segmentation/configs/search.yaml", override, output_path)
+        cmd = f"torchrun --standalone --nnodes=1 --nproc_per_node=2 -m scripts.search run {output_path}"
+        env = os.environ.copy()
+        # ensure customized library can be loaded in subprocess
+        env["PYTHONPATH"] = override.get("bundle_root", ".")
+        subprocess.check_call(cmd, shell=True, env=env)
+
+    @parameterized.expand([TEST_CASE_2])
+    def test_train_mgpu_config(self, override):
+        override["dataset_dir"] = self.dataset_dir
+        bundle_root = override["bundle_root"]
+        arch_name = get_searched_arch(os.path.join(bundle_root, "models"))
+        override["arch_ckpt_path"] = os.path.join(bundle_root, "models", arch_name)
+        train_file = os.path.join(bundle_root, "configs/train.yaml")
+        mgpu_train_file = os.path.join(bundle_root, "configs/multi_gpu_train.yaml")
+        output_path = os.path.join(bundle_root, "configs/train_override.json")
+        n_gpu = torch.cuda.device_count()
+        export_config_and_run_mgpu_cmd(
+            config_file=[train_file, mgpu_train_file],
+            logging_file=os.path.join(bundle_root, "configs/logging.conf"),
+            meta_file=os.path.join(bundle_root, "configs/metadata.json"),
+            override_dict=override,
+            output_path=output_path,
+            ngpu=n_gpu,
+            check_config=True,
+        )
+
+
+if __name__ == "__main__":
+    loader = unittest.TestLoader()
+    loader.sortTestMethodsUsing = test_order
+    unittest.main(testLoader=loader)
diff --git a/ci/unit_tests/test_pathology_tumor_detection.py b/ci/unit_tests/test_pathology_tumor_detection.py
new file mode 100644
index 00000000..86f11e6c
--- /dev/null
+++ b/ci/unit_tests/test_pathology_tumor_detection.py
@@ -0,0 +1,102 @@
+# Copyright (c) MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import csv
+import os
+import shutil
+import tempfile
+import unittest
+
+from monai.apps.utils import download_url
+from monai.bundle import ConfigWorkflow
+from parameterized import parameterized
+from utils import check_workflow
+
+TEST_CASE_1 = [  # train, infer
+    {
+        "bundle_root": "models/pathology_tumor_detection",
+        "region_size": [256, 256],
+        "num_epochs": 1,
+        "epochs": 1,
+        "train#dataloader#batch_size": 32,
+        "validate#dataloader#batch_size": 32,
+        "train#datalist#transform#func": "$lambda x: os.path.join(@dataset_dir, x + '.tiff')",
+        "validate#datalist#transform#func": "$lambda x: os.path.join(@dataset_dir, x + '.tiff')",
+    },
+    {
+        "bundle_root": "models/pathology_tumor_detection",
+        "dataloader#batch_size": 32,
+        "datalist#transform#func": "$lambda x: os.path.join(@dataset_dir, x + '.tiff')",
+    },
+]
+
+TIFF_INFO = {  # data to be downloaded for the test
+    "url": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/CMU-1.tiff",
+    "filename": "CMU-1.tiff",
+    "hash_type": "sha256",
+    "hash_val": "73a7e89bc15576587c3d68e55d9bf92f09690280166240b48ff4b48230b13bcd",
+    "csv_info": ["CMU-1", 46000, 32914, 0, 0, 0, 0, 0, 0, 1, 1, 1],
+}
+
+
+class TestTumorDetection(unittest.TestCase):
+    def setUp(self):
+        self.dataset_dir = tempfile.mkdtemp()
+        # download sample tiff file
+        download_url(
+            url=TIFF_INFO["url"],
+            filepath=os.path.join(self.dataset_dir, TIFF_INFO["filename"]),
+            hash_type=TIFF_INFO["hash_type"],
+            hash_val=TIFF_INFO["hash_val"],
+        )
+        # prepare csv files
+        for csv_file in ["training.csv", "validation.csv"]:
+            with open(os.path.join("models/pathology_tumor_detection", csv_file), "w", newline="") as save_file:
+                wr = csv.writer(save_file)
+                wr.writerow(TIFF_INFO["csv_info"])
+
+        with open(os.path.join("models/pathology_tumor_detection", "testing.csv"), "w", newline="") as save_file:
+            wr = csv.writer(save_file)
+            wr.writerow(TIFF_INFO["csv_info"][:1])
+
+    def tearDown(self):
+        shutil.rmtree(self.dataset_dir)
+
+    @parameterized.expand([TEST_CASE_1])
+    def test_train_infer_config(self, override, override_infer):
+        override["dataset_dir"] = self.dataset_dir
+        bundle_root = override["bundle_root"]
+        train_file = os.path.join(bundle_root, "configs/train.json")
+
+        trainer = ConfigWorkflow(
+            workflow="train",
+            config_file=train_file,
+            logging_file=os.path.join(bundle_root, "configs/logging.conf"),
+            meta_file=os.path.join(bundle_root, "configs/metadata.json"),
+            **override,
+        )
+        check_workflow(trainer, check_properties=True)
+
+        # run the train and infer tests within a single function to avoid downloading the data twice
+        override_infer["dataset_dir"] = self.dataset_dir
+
+        inferrer = ConfigWorkflow(
+            workflow="infer",
+            config_file=os.path.join(bundle_root, "configs/inference.json"),
+            logging_file=os.path.join(bundle_root, "configs/logging.conf"),
+            meta_file=os.path.join(bundle_root, "configs/metadata.json"),
+            **override_infer,
+        )
+        check_workflow(inferrer, check_properties=True)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/ci/unit_tests/test_pathology_tumor_detection_dist.py b/ci/unit_tests/test_pathology_tumor_detection_dist.py
new file mode 100644
index 00000000..b77bc7e3
--- /dev/null
+++ b/ci/unit_tests/test_pathology_tumor_detection_dist.py
@@ -0,0 +1,86 @@
+# Copyright (c) MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import csv
+import os
+import shutil
+import tempfile
+import unittest
+
+import torch
+from monai.apps.utils import download_url
+from parameterized import parameterized
+from utils import export_config_and_run_mgpu_cmd
+
+TEST_CASE_1 = [
+    {
+        "bundle_root": "models/pathology_tumor_detection",
+        "region_size": [256, 256],
+        "num_epochs": 1,
+        "epochs": 1,
+        "train#dataloader#batch_size": 32,
+        "validate#dataloader#batch_size": 32,
+        "train#datalist#transform#func": "$lambda x: os.path.join(@dataset_dir, x + '.tiff')",
+        "validate#datalist#transform#func": "$lambda x: os.path.join(@dataset_dir, x + '.tiff')",
+    }
+]
+
+TIFF_INFO = {  # data to be downloaded for the test
+    "url": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/CMU-1.tiff",
+    "filename": "CMU-1.tiff",
+    "hash_type": "sha256",
+    "hash_val": "73a7e89bc15576587c3d68e55d9bf92f09690280166240b48ff4b48230b13bcd",
+    "csv_info": ["CMU-1", 46000, 32914, 0, 0, 0, 0, 0, 0, 1, 1, 1],
+}
+
+
+class TestTumorDetectionMGPU(unittest.TestCase):
+    def setUp(self):
+        self.dataset_dir = tempfile.mkdtemp()
+        # download sample tiff file
+        download_url(
+            url=TIFF_INFO["url"],
+            filepath=os.path.join(self.dataset_dir, TIFF_INFO["filename"]),
+            hash_type=TIFF_INFO["hash_type"],
+            hash_val=TIFF_INFO["hash_val"],
+        )
+        # prepare csv files
+        for csv_file in ["training.csv", "validation.csv"]:
+            with open(os.path.join("models/pathology_tumor_detection", csv_file), "w", newline="") as save_file:
+                wr = csv.writer(save_file)
+                wr.writerow(TIFF_INFO["csv_info"])
+                # 2-GPU training needs 2 samples
+                wr.writerow(TIFF_INFO["csv_info"])
+
+    def tearDown(self):
+        shutil.rmtree(self.dataset_dir)
+
+    @parameterized.expand([TEST_CASE_1])
+    def test_train_mgpu_config(self, override):
+        override["dataset_dir"] = self.dataset_dir
+        bundle_root = override["bundle_root"]
+        train_file = os.path.join(bundle_root, "configs/train.json")
+        mgpu_train_file = os.path.join(bundle_root, "configs/multi_gpu_train.json")
+        output_path = os.path.join(bundle_root, "configs/train_override.json")
+        n_gpu = torch.cuda.device_count()
+        export_config_and_run_mgpu_cmd(
+            config_file=[train_file, mgpu_train_file],
+            logging_file=os.path.join(bundle_root, "configs/logging.conf"),
+            meta_file=os.path.join(bundle_root, "configs/metadata.json"),
+            override_dict=override,
+            output_path=output_path,
+            ngpu=n_gpu,
+            check_config=True,
+        )
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/ci/unit_tests/test_spleen_ct_segmentation.py b/ci/unit_tests/test_spleen_ct_segmentation.py
index da39256a..a9bb2c9e 100644
--- a/ci/unit_tests/test_spleen_ct_segmentation.py
+++ b/ci/unit_tests/test_spleen_ct_segmentation.py
@@ -18,6 +18,7 @@
 import numpy as np
 from monai.bundle import ConfigWorkflow
 from parameterized import parameterized
+from utils import check_workflow
 
 TEST_CASE_1 = [  # train, evaluate
     {
@@ -73,13 +74,7 @@ def test_train_eval_config(self, override):
             meta_file=os.path.join(bundle_root, "configs/metadata.json"),
             **override,
         )
-        trainer.initialize()
-        # check required and optional properties
-        check_result = trainer.check_properties()
-        if check_result is not None and len(check_result) > 0:
-            raise ValueError(f"check properties for train config failed: {check_result}")
-        trainer.run()
-        trainer.finalize()
+        check_workflow(trainer, check_properties=True)
 
         validator = ConfigWorkflow(
             # override train.json, thus set the workflow to "train" rather than "eval"
@@ -89,12 +84,7 @@
"configs/metadata.json"), **override, ) - validator.initialize() - check_result = validator.check_properties() - if check_result is not None and len(check_result) > 0: - raise ValueError(f"check properties for overrided train config failed: {check_result}") - validator.run() - validator.finalize() + check_workflow(validator, check_properties=True) @parameterized.expand([TEST_CASE_2]) def test_infer_config(self, override): @@ -108,13 +98,7 @@ def test_infer_config(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - inferrer.initialize() - # check required and optional properties - check_result = inferrer.check_properties() - if check_result is not None and len(check_result) > 0: - raise ValueError(f"check properties for inference config failed: {check_result}") - inferrer.run() - inferrer.finalize() + check_workflow(inferrer, check_properties=True) if __name__ == "__main__": diff --git a/ci/unit_tests/test_spleen_deepedit_annotation.py b/ci/unit_tests/test_spleen_deepedit_annotation.py new file mode 100644 index 00000000..7a0aba67 --- /dev/null +++ b/ci/unit_tests/test_spleen_deepedit_annotation.py @@ -0,0 +1,127 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import shutil +import tempfile +import unittest + +import numpy as np +from monai.bundle import ConfigWorkflow +from monai.data import ITKWriter +from parameterized import parameterized +from utils import check_workflow + +TEST_CASE_1 = [ # train, evaluate + { + "bundle_root": "models/spleen_deepedit_annotation", + "images": "$list(sorted(glob.glob(@dataset_dir + '/image_*.nii.gz')))", + "labels": "$list(sorted(glob.glob(@dataset_dir + '/label_*.nii.gz')))", + "train#trainer#max_epochs": 1, + "train#dataset#cache_rate": 0.0, + "validate#dataset#cache_rate": 0.0, + "spatial_size": [32, 32, 32], + } +] + +TEST_CASE_2 = [ # inference + { + "bundle_root": "models/spleen_deepedit_annotation", + "datalist": "$list(sorted(glob.glob(@dataset_dir + '/image_*.nii.gz')))", + "spatial_size": [32, 32, 32], + } +] + + +def test_order(test_name1, test_name2): + def get_order(name): + if "train" in name: + return 1 + if "eval" in name: + return 2 + if "infer" in name: + return 3 + return 4 + + return get_order(test_name1) - get_order(test_name2) + + +class TestDeepeditAnno(unittest.TestCase): + def setUp(self): + self.dataset_dir = tempfile.mkdtemp() + dataset_size = 10 + input_shape = (64, 64, 64) + writer = ITKWriter(output_dtype=np.uint8) + for s in range(dataset_size): + test_image = np.random.randint(low=0, high=2, size=input_shape).astype(np.int8) + test_label = np.random.randint(low=0, high=2, size=input_shape).astype(np.int8) + image_filename = os.path.join(self.dataset_dir, f"image_{s}.nii.gz") + label_filename = os.path.join(self.dataset_dir, f"label_{s}.nii.gz") + writer.set_data_array(test_image, channel_dim=None) + writer.set_metadata({"affine": np.eye(4), "original_affine": -1 * np.eye(4)}) + writer.write(image_filename) + writer.set_data_array(test_label, 
channel_dim=None) + writer.set_metadata({"affine": np.eye(4), "original_affine": -1 * np.eye(4)}) + writer.write(label_filename) + + def tearDown(self): + shutil.rmtree(self.dataset_dir) + + @parameterized.expand([TEST_CASE_1]) + def test_train_config(self, override): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + + trainer = ConfigWorkflow( + workflow="train", + config_file=os.path.join(bundle_root, "configs/train.json"), + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + check_workflow(trainer, check_properties=True) + + @parameterized.expand([TEST_CASE_1]) + def test_eval_config(self, override): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + train_file = os.path.join(bundle_root, "configs/train.json") + eval_file = os.path.join(bundle_root, "configs/evaluate.json") + + validator = ConfigWorkflow( + # override train.json, thus set the workflow to "train" rather than "eval" + workflow="train", + config_file=[train_file, eval_file], + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + check_workflow(validator, check_properties=True) + + @parameterized.expand([TEST_CASE_2]) + def test_infer_config(self, override): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + + inferrer = ConfigWorkflow( + workflow="infer", + config_file=os.path.join(bundle_root, "configs/inference.json"), + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + check_workflow(inferrer, check_properties=True) + + +if __name__ == "__main__": + loader = unittest.TestLoader() + loader.sortTestMethodsUsing = test_order + unittest.main(testLoader=loader) diff --git a/ci/unit_tests/test_spleen_deepedit_annotation_dist.py b/ci/unit_tests/test_spleen_deepedit_annotation_dist.py new file mode 100644 index 00000000..7f11e586 --- /dev/null +++ b/ci/unit_tests/test_spleen_deepedit_annotation_dist.py @@ -0,0 +1,77 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import os
+import shutil
+import tempfile
+import unittest
+
+import numpy as np
+import torch
+from monai.data import ITKWriter
+from parameterized import parameterized
+from utils import export_config_and_run_mgpu_cmd
+
+TEST_CASE_1 = [  # mgpu train
+    {
+        "bundle_root": "models/spleen_deepedit_annotation",
+        "images": "$list(sorted(glob.glob(@dataset_dir + '/image_*.nii.gz')))",
+        "labels": "$list(sorted(glob.glob(@dataset_dir + '/label_*.nii.gz')))",
+        "train#trainer#max_epochs": 1,
+        "train#dataset#cache_rate": 0.0,
+        "validate#dataset#cache_rate": 0.0,
+        "spatial_size": [32, 32, 32],
+    }
+]
+
+
+class TestDeepeditAnnoMGPU(unittest.TestCase):
+    def setUp(self):
+        self.dataset_dir = tempfile.mkdtemp()
+        dataset_size = 10
+        input_shape = (64, 64, 64)
+        writer = ITKWriter(output_dtype=np.uint8)
+        for s in range(dataset_size):
+            test_image = np.random.randint(low=0, high=2, size=input_shape).astype(np.int8)
+            test_label = np.random.randint(low=0, high=2, size=input_shape).astype(np.int8)
+            image_filename = os.path.join(self.dataset_dir, f"image_{s}.nii.gz")
+            label_filename = os.path.join(self.dataset_dir, f"label_{s}.nii.gz")
+            writer.set_data_array(test_image, channel_dim=None)
+            writer.set_metadata({"affine": np.eye(4), "original_affine": -1 * np.eye(4)})
+            writer.write(image_filename)
+            writer.set_data_array(test_label, channel_dim=None)
+            writer.set_metadata({"affine": np.eye(4), "original_affine": -1 * np.eye(4)})
+            writer.write(label_filename)
+
+    def tearDown(self):
+        shutil.rmtree(self.dataset_dir)
+
+    @parameterized.expand([TEST_CASE_1])
+    def test_train_mgpu_config(self, override):
+        override["dataset_dir"] = self.dataset_dir
+        bundle_root = override["bundle_root"]
+        train_file = os.path.join(bundle_root, "configs/train.json")
+        mgpu_train_file = os.path.join(bundle_root, "configs/multi_gpu_train.json")
+        output_path = os.path.join(bundle_root, "configs/train_override.json")
+        n_gpu = torch.cuda.device_count()
+        export_config_and_run_mgpu_cmd(
+            config_file=[train_file, mgpu_train_file],
+            logging_file=os.path.join(bundle_root, "configs/logging.conf"),
+            meta_file=os.path.join(bundle_root, "configs/metadata.json"),
+            override_dict=override,
+            output_path=output_path,
+            ngpu=n_gpu,
+            check_config=True,
+        )
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/ci/unit_tests/test_swin_unetr_btcv_segmentation.py b/ci/unit_tests/test_swin_unetr_btcv_segmentation.py
new file mode 100644
index 00000000..193580cb
--- /dev/null
+++ b/ci/unit_tests/test_swin_unetr_btcv_segmentation.py
@@ -0,0 +1,130 @@
+# Copyright (c) MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#     http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import shutil
+import tempfile
+import unittest
+
+import numpy as np
+from monai.bundle import ConfigWorkflow
+from monai.data import ITKWriter
+from parameterized import parameterized
+from utils import check_workflow
+
+TEST_CASE_1 = [  # train, evaluate
+    {
+        "bundle_root": "models/swin_unetr_btcv_segmentation",
+        "images": "$list(sorted(glob.glob(@dataset_dir + '/image_*.nii.gz')))",
+        "labels": "$list(sorted(glob.glob(@dataset_dir + '/label_*.nii.gz')))",
+        "val_interval": 1,
+        "train#random_transforms#0#num_samples": 1,
+        "train#deterministic_transforms#3#pixdim": [1.0, 1.0, 1.0],
+        "train#trainer#max_epochs": 1,
+        "train#dataset#cache_rate": 0.0,
+        "train#dataloader#batch_size": 1,
+        "validate#dataset#cache_rate": 0.0,
+    }
+]
+
+TEST_CASE_2 = [  # inference
+    {
+        "bundle_root": "models/swin_unetr_btcv_segmentation",
+        "datalist": "$list(sorted(glob.glob(@dataset_dir + '/image_*.nii.gz')))",
+        "preprocessing#transforms#3#pixdim": [1.0, 1.0, 1.0],
+    }
+]
+
+
+def test_order(test_name1, test_name2):
+    def get_order(name):
+        if "train" in name:
+            return 1
+        if "eval" in name:
+            return 2
+        if "infer" in name:
+            return 3
+        return 4
+
+    return get_order(test_name1) - get_order(test_name2)
+
+
+class TestSwinUnetr(unittest.TestCase):
+    def setUp(self):
+        self.dataset_dir = tempfile.mkdtemp()
+        dataset_size = 12
+        input_shape = (96, 96, 96)
+        writer = ITKWriter(output_dtype=np.uint8)
+        for s in range(dataset_size):
+            test_image = np.random.randint(low=0, high=2, size=input_shape).astype(np.int8)
+            test_label = np.random.randint(low=0, high=14, size=input_shape).astype(np.int8)
+            image_filename = os.path.join(self.dataset_dir, f"image_{s}.nii.gz")
+            label_filename = os.path.join(self.dataset_dir, f"label_{s}.nii.gz")
+            writer.set_data_array(test_image, channel_dim=None)
+            writer.set_metadata({"affine": np.eye(4), "original_affine": -1 * np.eye(4)})
+            writer.write(image_filename)
+            writer.set_data_array(test_label, channel_dim=None)
+            writer.set_metadata({"affine": np.eye(4), "original_affine": -1 * np.eye(4)})
+            writer.write(label_filename)
+
+    def tearDown(self):
+        shutil.rmtree(self.dataset_dir)
+
+    @parameterized.expand([TEST_CASE_1])
+    def test_train_config(self, override):
+        override["dataset_dir"] = self.dataset_dir
+        bundle_root = override["bundle_root"]
+
+        trainer = ConfigWorkflow(
+            workflow="train",
+            config_file=os.path.join(bundle_root, "configs/train.json"),
+            logging_file=os.path.join(bundle_root, "configs/logging.conf"),
+            meta_file=os.path.join(bundle_root, "configs/metadata.json"),
+            **override,
+        )
+        check_workflow(trainer, check_properties=True)
+
+    @parameterized.expand([TEST_CASE_1])
+    def test_eval_config(self, override):
+        override["dataset_dir"] = self.dataset_dir
+        bundle_root = override["bundle_root"]
+        train_file = os.path.join(bundle_root, "configs/train.json")
+        eval_file = os.path.join(bundle_root, "configs/evaluate.json")
+
+        validator = ConfigWorkflow(
+            # override train.json, thus set the workflow to "train" rather than "eval"
+            workflow="train",
+            config_file=[train_file, eval_file],
+            logging_file=os.path.join(bundle_root, "configs/logging.conf"),
+            meta_file=os.path.join(bundle_root, "configs/metadata.json"),
+            **override,
+        )
+        check_workflow(validator, check_properties=True)
+
+    @parameterized.expand([TEST_CASE_2])
+    def test_infer_config(self, override):
+        override["dataset_dir"] = self.dataset_dir
+        bundle_root = override["bundle_root"]
+
+        inferrer = ConfigWorkflow(
+            workflow="infer",
"configs/inference.json"), + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + check_workflow(inferrer, check_properties=True) + + +if __name__ == "__main__": + loader = unittest.TestLoader() + loader.sortTestMethodsUsing = test_order + unittest.main(testLoader=loader) diff --git a/ci/unit_tests/test_swin_unetr_btcv_segmentation_dist.py b/ci/unit_tests/test_swin_unetr_btcv_segmentation_dist.py new file mode 100644 index 00000000..07eca394 --- /dev/null +++ b/ci/unit_tests/test_swin_unetr_btcv_segmentation_dist.py @@ -0,0 +1,80 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import shutil +import tempfile +import unittest + +import numpy as np +import torch +from monai.data import ITKWriter +from parameterized import parameterized +from utils import export_config_and_run_mgpu_cmd + +TEST_CASE_1 = [ + { + "bundle_root": "models/swin_unetr_btcv_segmentation", + "images": "$list(sorted(glob.glob(@dataset_dir + '/image_*.nii.gz')))", + "labels": "$list(sorted(glob.glob(@dataset_dir + '/label_*.nii.gz')))", + "val_interval": 1, + "train#random_transforms#0#num_samples": 1, + "train#deterministic_transforms#3#pixdim": [1.0, 1.0, 1.0], + "train#trainer#max_epochs": 1, + "train#dataset#cache_rate": 0.0, + "train#dataloader#batch_size": 1, + "validate#dataset#cache_rate": 0.0, + } +] + + +class TestSwinUnetrMGPU(unittest.TestCase): + def setUp(self): + self.dataset_dir = tempfile.mkdtemp() + dataset_size = 12 + input_shape = (96, 96, 96) + writer = ITKWriter(output_dtype=np.uint8) + for s in range(dataset_size): + test_image = np.random.randint(low=0, high=2, size=input_shape).astype(np.int8) + test_label = np.random.randint(low=0, high=14, size=input_shape).astype(np.int8) + image_filename = os.path.join(self.dataset_dir, f"image_{s}.nii.gz") + label_filename = os.path.join(self.dataset_dir, f"label_{s}.nii.gz") + writer.set_data_array(test_image, channel_dim=None) + writer.set_metadata({"affine": np.eye(4), "original_affine": -1 * np.eye(4)}) + writer.write(image_filename) + writer.set_data_array(test_label, channel_dim=None) + writer.set_metadata({"affine": np.eye(4), "original_affine": -1 * np.eye(4)}) + writer.write(label_filename) + + def tearDown(self): + shutil.rmtree(self.dataset_dir) + + @parameterized.expand([TEST_CASE_1]) + def test_train_mgpu_config(self, override): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + train_file = os.path.join(bundle_root, "configs/train.json") + mgpu_train_file = os.path.join(bundle_root, "configs/multi_gpu_train.json") + output_path = os.path.join(bundle_root, "configs/train_override.json") + n_gpu = torch.cuda.device_count() + export_config_and_run_mgpu_cmd( + config_file=[train_file, mgpu_train_file], + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + override_dict=override, + output_path=output_path, + 
ngpu=n_gpu, + check_config=True, + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/ci/unit_tests/utils.py b/ci/unit_tests/utils.py index e6bedf47..24752495 100644 --- a/ci/unit_tests/utils.py +++ b/ci/unit_tests/utils.py @@ -76,3 +76,13 @@ def export_config_and_run_mgpu_cmd( # ensure customized library can be loaded in subprocess env["PYTHONPATH"] = override_dict.get("bundle_root", ".") subprocess.check_call(cmd, env=env) + + +def check_workflow(workflow: ConfigWorkflow, check_properties: bool = False): + workflow.initialize() + if check_properties is True: + check_result = workflow.check_properties() + if check_result is not None and len(check_result) > 0: + raise ValueError(f"check properties for workflow failed: {check_result}") + workflow.run() + workflow.finalize() diff --git a/models/pancreas_ct_dints_segmentation/configs/metadata.json b/models/pancreas_ct_dints_segmentation/configs/metadata.json index ecdcbad5..282fd360 100644 --- a/models/pancreas_ct_dints_segmentation/configs/metadata.json +++ b/models/pancreas_ct_dints_segmentation/configs/metadata.json @@ -1,7 +1,8 @@ { "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_20220324.json", - "version": "0.4.1", + "version": "0.4.2", "changelog": { + "0.4.2": "update search function to match monai 1.2", "0.4.1": "fix the wrong GPU index issue of multi-node", "0.4.0": "remove error dollar symbol in readme", "0.3.9": "add cpu ram requirement in readme", diff --git a/models/pancreas_ct_dints_segmentation/scripts/prepare_datalist.py b/models/pancreas_ct_dints_segmentation/scripts/prepare_datalist.py index a7fac23c..0e9ad985 100644 --- a/models/pancreas_ct_dints_segmentation/scripts/prepare_datalist.py +++ b/models/pancreas_ct_dints_segmentation/scripts/prepare_datalist.py @@ -11,11 +11,10 @@ def produce_sample_dict(line: str): return {"label": line, "image": line.replace("labelsTr", "imagesTr")} -def produce_datalist(dataset_dir: str): +def produce_datalist(dataset_dir: str, train_size: int = 196): """ This function is used to split the dataset. - It will produce 200 samples for training, and the other samples are divided equally - into val and test sets. + It will produce "train_size" number of samples for training. """ samples = sorted(glob.glob(os.path.join(dataset_dir, "labelsTr", "*"), recursive=True)) @@ -23,7 +22,7 @@ def produce_datalist(dataset_dir: str): datalist = [] for line in samples: datalist.append(produce_sample_dict(line)) - train_list, other_list = train_test_split(datalist, train_size=196) + train_list, other_list = train_test_split(datalist, train_size=train_size) val_list, test_list = train_test_split(other_list, train_size=0.66) return {"training": train_list, "validation": val_list, "testing": test_list} @@ -37,7 +36,7 @@ def main(args): output_json = args.output # produce deterministic data splits monai.utils.set_determinism(seed=123) - datalist = produce_datalist(dataset_dir=data_file_base_dir) + datalist = produce_datalist(dataset_dir=data_file_base_dir, train_size=args.train_size) with open(output_json, "w") as f: json.dump(datalist, f, ensure_ascii=True, indent=4) @@ -53,6 +52,7 @@ def main(args): parser.add_argument( "--output", type=str, default="dataset_0.json", help="relative path of output datalist json file." 
     parser.add_argument(
         "--output", type=str, default="dataset_0.json", help="relative path of output datalist json file."
     )
+    parser.add_argument("--train_size", type=int, default=196, help="number of training samples.")
 
     args = parser.parse_args()
     main(args)
diff --git a/models/pancreas_ct_dints_segmentation/scripts/search.py b/models/pancreas_ct_dints_segmentation/scripts/search.py
index 8ccb4e26..b7720b58 100644
--- a/models/pancreas_ct_dints_segmentation/scripts/search.py
+++ b/models/pancreas_ct_dints_segmentation/scripts/search.py
@@ -28,7 +28,7 @@ from monai.bundle import ConfigParser
 from monai.data import ThreadDataLoader, partition_dataset
 from monai.inferers import sliding_window_inference
-from monai.metrics import compute_meandice
+from monai.metrics import compute_dice
 from monai.utils import set_determinism
 from torch.nn.parallel import DistributedDataParallel
 from torch.utils.tensorboard import SummaryWriter
@@ -100,14 +100,12 @@ def run(config_file: Union[str, Sequence[str]]):
         train_files_w = partition_dataset(
             data=train_files_w, shuffle=True, num_partitions=world_size, even_divisible=True
         )[dist.get_rank()]
-        print("train_files_w:", len(train_files_w))
 
     train_files_a = train_files[len(train_files) // 2 :]
     if torch.cuda.device_count() > 1:
         train_files_a = partition_dataset(
             data=train_files_a, shuffle=True, num_partitions=world_size, even_divisible=True
         )[dist.get_rank()]
-        print("train_files_a:", len(train_files_a))
 
     # validation data
     files = []
@@ -125,7 +123,6 @@
     val_files = partition_dataset(data=val_files, shuffle=False, num_partitions=world_size, even_divisible=False)[
         dist.get_rank()
     ]
-    print("val_files:", len(val_files))
 
     # network architecture
     if torch.cuda.device_count() > 1:
@@ -421,7 +418,7 @@
                 val_labels = post_label(val_labels[0, ...])
                 val_labels = val_labels[None, ...]
 
-                value = compute_meandice(y_pred=val_outputs, y=val_labels, include_background=False)
+                value = compute_dice(y_pred=val_outputs, y=val_labels, include_background=False)
 
                 print(_index + 1, "/", len(val_loader), value)
 
diff --git a/models/pathology_tumor_detection/configs/metadata.json b/models/pathology_tumor_detection/configs/metadata.json
index 320021d9..306c8b65 100644
--- a/models/pathology_tumor_detection/configs/metadata.json
+++ b/models/pathology_tumor_detection/configs/metadata.json
@@ -1,7 +1,8 @@
 {
     "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_20220324.json",
-    "version": "0.5.6",
+    "version": "0.5.7",
    "changelog": {
+        "0.5.7": "update channel_def in metadata",
         "0.5.6": "fix the wrong GPU index issue of multi-node",
         "0.5.5": "modify mgpu logging level",
         "0.5.4": "retrain using an internal pretrained ResNet18",
@@ -70,7 +71,9 @@
             ],
             "is_patch_data": true,
             "channel_def": {
-                "0": "image"
+                "0": "R",
+                "1": "G",
+                "2": "B"
             }
         }
     },
diff --git a/requirements-dev.txt b/requirements-dev.txt
index cdb45e7a..98d35c70 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -25,3 +25,8 @@ tensorboard
 parameterized
 monai>=1.2.0rc7
 pillow!=8.3.0  # https://github.com/python-pillow/Pillow/issues/5571
+itk>=5.2
+scikit-learn
+pandas
+cucim==22.8.1; platform_system == "Linux"
+scikit-image>=0.19.0
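
Note on the runner.py change: unittest.TestLoader.sortTestMethodsUsing expects an old-style comparison function over test *method names* (negative, zero, or positive, as with cmp()), which is exactly the shape of the "test_order" functions the new test modules export. A minimal, self-contained sketch of the mechanism — the Demo class and method names here are illustrative, not part of this patch:

import unittest


def test_order(name1, name2):
    # comparator over test method names: a negative result puts name1 first
    def rank(name):
        if "search" in name:
            return 1
        if "train" in name:
            return 2
        return 3

    return rank(name1) - rank(name2)


class Demo(unittest.TestCase):
    def test_eval_model(self):
        pass

    def test_search_arch(self):
        pass


loader = unittest.TestLoader()
loader.sortTestMethodsUsing = test_order
# default alphabetical order would be ['test_eval_model', 'test_search_arch'];
# the comparator moves the search test to the front:
print(loader.getTestCaseNames(Demo))  # ['test_search_arch', 'test_eval_model']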
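
The pancreas DiNTS tests also call export_overrided_config from ci/unit_tests/utils.py, whose body is not shown in this patch. Assuming it merges the override dict into a bundle config and writes the result to the given path (the tests pass a .json output for a .yaml input), a hedged sketch of such a helper — the actual implementation in utils.py may differ:

from monai.bundle import ConfigParser


def export_overrided_config(config_file, override_dict, output_path):
    # assumed behavior: load the bundle config, apply the test-time overrides,
    # and export the merged result to output_path
    parser = ConfigParser()
    parser.read_config(config_file)
    parser.update(pairs=override_dict)
    ConfigParser.export_config_file(parser.get(), output_path, indent=4)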