From 00a4ac16bb74b74a4a41a2b4d0988484e27c673d Mon Sep 17 00:00:00 2001 From: Yiheng Wang Date: Tue, 13 Jun 2023 20:47:35 +0800 Subject: [PATCH 01/19] Add more unit tests Signed-off-by: Yiheng Wang --- .../test_spleen_deepedit_annotation.py | 143 ++++++++++++++++++ .../test_spleen_deepedit_annotation_dist.py | 76 ++++++++++ requirements-dev.txt | 1 + 3 files changed, 220 insertions(+) create mode 100644 ci/unit_tests/test_spleen_deepedit_annotation.py create mode 100644 ci/unit_tests/test_spleen_deepedit_annotation_dist.py diff --git a/ci/unit_tests/test_spleen_deepedit_annotation.py b/ci/unit_tests/test_spleen_deepedit_annotation.py new file mode 100644 index 00000000..e3852f50 --- /dev/null +++ b/ci/unit_tests/test_spleen_deepedit_annotation.py @@ -0,0 +1,143 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import shutil +import tempfile +import unittest + +import numpy as np +from monai.bundle import ConfigWorkflow +from monai.data import ITKWriter +from parameterized import parameterized + +TEST_CASE_1 = [ # train, evaluate + { + "bundle_root": "models/spleen_deepedit_annotation", + "images": "$list(sorted(glob.glob(@dataset_dir + '/image_*.nii.gz')))", + "labels": "$list(sorted(glob.glob(@dataset_dir + '/label_*.nii.gz')))", + "train#trainer#max_epochs": 1, + "train#dataset#cache_rate": 0.0, + "validate#dataset#cache_rate": 0.0, + "spatial_size": [32, 32, 32], + } +] + +TEST_CASE_2 = [ # inference + { + "bundle_root": "models/spleen_deepedit_annotation", + "datalist": "$list(sorted(glob.glob(@dataset_dir + '/image_*.nii.gz')))", + "spatial_size": [32, 32, 32], + } +] + + +def test_order(test_name1, test_name2): + def get_order(name): + if "train" in name: + return 1 + if "eval" in name: + return 2 + if "infer" in name: + return 3 + return 4 + + return get_order(test_name1) - get_order(test_name2) + + +class TestDeepeditAnno(unittest.TestCase): + def setUp(self): + self.dataset_dir = tempfile.mkdtemp() + dataset_size = 10 + input_shape = (64, 64, 64) + writer = ITKWriter(output_dtype=np.uint8) + for s in range(dataset_size): + test_image = np.random.randint(low=0, high=2, size=input_shape).astype(np.int8) + test_label = np.random.randint(low=0, high=2, size=input_shape).astype(np.int8) + image_filename = os.path.join(self.dataset_dir, f"image_{s}.nii.gz") + label_filename = os.path.join(self.dataset_dir, f"label_{s}.nii.gz") + writer.set_data_array(test_image, channel_dim=None) + writer.set_metadata({"affine": np.eye(4), "original_affine": -1 * np.eye(4)}) + writer.write(image_filename) + writer.set_data_array(test_label, channel_dim=None) + writer.set_metadata({"affine": np.eye(4), "original_affine": -1 * np.eye(4)}) + writer.write(label_filename) + + def tearDown(self): + shutil.rmtree(self.dataset_dir) + + @parameterized.expand([TEST_CASE_1]) + def test_train_config(self, override): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + + trainer = ConfigWorkflow( + workflow="train", + 
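+            # extra keyword arguments are treated by ConfigWorkflow as config overrides and
+            # merged by id into train.json; that is how this test shrinks epochs, cache rate
+            # and spatial size without editing the bundle configs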
config_file=os.path.join(bundle_root, "configs/train.json"),
+            logging_file=os.path.join(bundle_root, "configs/logging.conf"),
+            meta_file=os.path.join(bundle_root, "configs/metadata.json"),
+            **override,
+        )
+        trainer.initialize()
+        # check required and optional properties
+        check_result = trainer.check_properties()
+        if check_result is not None and len(check_result) > 0:
+            raise ValueError(f"check properties for train config failed: {check_result}")
+        trainer.run()
+        trainer.finalize()
+
+    @parameterized.expand([TEST_CASE_1])
+    def test_eval_config(self, override):
+        override["dataset_dir"] = self.dataset_dir
+        bundle_root = override["bundle_root"]
+        train_file = os.path.join(bundle_root, "configs/train.json")
+        eval_file = os.path.join(bundle_root, "configs/evaluate.json")
+
+        validator = ConfigWorkflow(
+            # evaluate.json overrides train.json, so the workflow stays "train" rather than "eval"
+            workflow="train",
+            config_file=[train_file, eval_file],
+            logging_file=os.path.join(bundle_root, "configs/logging.conf"),
+            meta_file=os.path.join(bundle_root, "configs/metadata.json"),
+            **override,
+        )
+        validator.initialize()
+        check_result = validator.check_properties()
+        if check_result is not None and len(check_result) > 0:
+            raise ValueError(f"check properties for overridden train config failed: {check_result}")
+        validator.run()
+        validator.finalize()
+
+    @parameterized.expand([TEST_CASE_2])
+    def test_infer_config(self, override):
+        override["dataset_dir"] = self.dataset_dir
+        bundle_root = override["bundle_root"]
+
+        inferrer = ConfigWorkflow(
+            workflow="infer",
+            config_file=os.path.join(bundle_root, "configs/inference.json"),
+            logging_file=os.path.join(bundle_root, "configs/logging.conf"),
+            meta_file=os.path.join(bundle_root, "configs/metadata.json"),
+            **override,
+        )
+        inferrer.initialize()
+        # check required and optional properties
+        check_result = inferrer.check_properties()
+        if check_result is not None and len(check_result) > 0:
+            raise ValueError(f"check properties for inference config failed: {check_result}")
+        inferrer.run()
+        inferrer.finalize()
+
+
+if __name__ == "__main__":
+    loader = unittest.TestLoader()
+    loader.sortTestMethodsUsing = test_order
+    unittest.main(testLoader=loader)
diff --git a/ci/unit_tests/test_spleen_deepedit_annotation_dist.py b/ci/unit_tests/test_spleen_deepedit_annotation_dist.py
new file mode 100644
index 00000000..e5269d2c
--- /dev/null
+++ b/ci/unit_tests/test_spleen_deepedit_annotation_dist.py
@@ -0,0 +1,76 @@
+# Copyright (c) MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
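+# This distributed variant merges train.json with multi_gpu_train.json and hands both to
+# export_config_and_run_mgpu_cmd, a ci/unit_tests utils helper that is assumed to export the
+# merged override config and launch one training worker per visible GPU (e.g. via torchrun,
+# as the DiNTS search test later in this series does explicitly).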
+ +import os +import shutil +import tempfile +import unittest + +import numpy as np +import torch +from monai.data import ITKWriter +from parameterized import parameterized +from utils import export_config_and_run_mgpu_cmd + +TEST_CASE_1 = [ # mgpu train + { + "bundle_root": "models/spleen_deepedit_annotation", + "images": "$list(sorted(glob.glob(@dataset_dir + '/image_*.nii.gz')))", + "labels": "$list(sorted(glob.glob(@dataset_dir + '/label_*.nii.gz')))", + "train#trainer#max_epochs": 1, + "train#dataset#cache_rate": 0.0, + "validate#dataset#cache_rate": 0.0, + "spatial_size": [32, 32, 32], + } +] + + +class TestDeepeditAnnoMGPU(unittest.TestCase): + def setUp(self): + self.dataset_dir = tempfile.mkdtemp() + dataset_size = 10 + input_shape = (64, 64, 64) + writer = ITKWriter(output_dtype=np.uint8) + for s in range(dataset_size): + test_image = np.random.randint(low=0, high=2, size=input_shape).astype(np.int8) + test_label = np.random.randint(low=0, high=2, size=input_shape).astype(np.int8) + image_filename = os.path.join(self.dataset_dir, f"image_{s}.nii.gz") + label_filename = os.path.join(self.dataset_dir, f"label_{s}.nii.gz") + writer.set_data_array(test_image, channel_dim=None) + writer.set_metadata({"affine": np.eye(4), "original_affine": -1 * np.eye(4)}) + writer.write(image_filename) + writer.set_data_array(test_label, channel_dim=None) + writer.set_metadata({"affine": np.eye(4), "original_affine": -1 * np.eye(4)}) + writer.write(label_filename) + + def tearDown(self): + shutil.rmtree(self.dataset_dir) + + @parameterized.expand([TEST_CASE_1]) + def test_train_mgpu_config(self, override): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + train_file = os.path.join(bundle_root, "configs/train.json") + mgpu_train_file = os.path.join(bundle_root, "configs/multi_gpu_train.json") + output_path = os.path.join(bundle_root, "configs/train_override.json") + n_gpu = torch.cuda.device_count() + export_config_and_run_mgpu_cmd( + config_file=[train_file, mgpu_train_file], + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + override_dict=override, + output_path=output_path, + ngpu=n_gpu, + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/requirements-dev.txt b/requirements-dev.txt index cdb45e7a..f24f10c0 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -25,3 +25,4 @@ tensorboard parameterized monai>=1.2.0rc7 pillow!=8.3.0 # https://github.com/python-pillow/Pillow/issues/5571 +itk>=5.2 From dbe7330195afe4c5a9bce1aa4ea7c8a377d503e2 Mon Sep 17 00:00:00 2001 From: Yiheng Wang Date: Wed, 14 Jun 2023 18:55:19 +0800 Subject: [PATCH 02/19] add dints unit tests Signed-off-by: Yiheng Wang --- .../test_pancreas_ct_dints_segmentation.py | 185 ++++++++++++++++++ ...est_pancreas_ct_dints_segmentation_dist.py | 119 +++++++++++ .../scripts/prepare_datalist.py | 10 +- .../scripts/search.py | 7 +- 4 files changed, 311 insertions(+), 10 deletions(-) create mode 100644 ci/unit_tests/test_pancreas_ct_dints_segmentation.py create mode 100644 ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py diff --git a/ci/unit_tests/test_pancreas_ct_dints_segmentation.py b/ci/unit_tests/test_pancreas_ct_dints_segmentation.py new file mode 100644 index 00000000..ad65f125 --- /dev/null +++ b/ci/unit_tests/test_pancreas_ct_dints_segmentation.py @@ -0,0 +1,185 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use 
this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import shutil +import subprocess +import tempfile +import unittest + +import nibabel as nib +import numpy as np +from monai.bundle import ConfigWorkflow +from parameterized import parameterized +from utils import export_overrided_config + +TEST_CASE_1 = [ + { + "bundle_root": "models/pancreas_ct_dints_segmentation", + "arch_ckpt_path": "models/pancreas_ct_dints_segmentation/models/", + "data_list_file_path": "models/pancreas_ct_dints_segmentation/configs/dataset_0.json", + "num_epochs": 1, + "num_epochs_per_validation": 1, + "num_epochs_warmup": 0, + "num_sw_batch_size": 2, + "patch_size": [32, 32, 32], + "patch_size_valid": [32, 32, 32], + } +] + +TEST_CASE_2 = [ + { + "bundle_root": "models/pancreas_ct_dints_segmentation", + "arch_ckpt_path": "models/pancreas_ct_dints_segmentation/models/search_code_6.pt", + "train#trainer#max_epochs": 1, + "train#dataset#cache_rate": 0, + "validate#dataset#cache_rate": 0, + "validate#inferer#roi_size": [32, 32, 32], + "train#random_transforms#0#spatial_size": [32, 32, 32], + "val_interval": 1, + } +] + +TEST_CASE_3 = [ + { + "bundle_root": "models/pancreas_ct_dints_segmentation", + "arch_ckpt_path": "models/pancreas_ct_dints_segmentation/models/search_code_6.pt", + "validate#inferer#roi_size": [32, 32, 32], + } +] + +TEST_CASE_4 = [ + { + "bundle_root": "models/pancreas_ct_dints_segmentation", + "arch_ckpt_path": "models/pancreas_ct_dints_segmentation/models/search_code_6.pt", + "inferer#roi_size": [32, 32, 32], + } +] + + +def test_order(test_name1, test_name2): + def get_order(name): + if "search" in name: + return 1 + if "train" in name: + return 2 + if "eval" in name: + return 3 + return 4 + + return get_order(test_name1) - get_order(test_name2) + + +class TestDints(unittest.TestCase): + def setUp(self): + self.dataset_dir = tempfile.mkdtemp() + dataset_size = 20 + input_shape = (64, 64, 64) + images_folder = os.path.join(self.dataset_dir, "imagesTr") + labels_folder = os.path.join(self.dataset_dir, "labelsTr") + os.makedirs(images_folder) + os.makedirs(labels_folder) + for s in range(dataset_size): + test_image = np.random.randint(low=0, high=2, size=input_shape).astype(np.int8) + test_label = np.random.randint(low=0, high=3, size=input_shape).astype(np.int8) + image_filename = os.path.join(images_folder, f"pancreas_{s}.nii.gz") + label_filename = os.path.join(labels_folder, f"pancreas_{s}.nii.gz") + nib.save(nib.Nifti1Image(test_image, np.eye(4)), image_filename) + nib.save(nib.Nifti1Image(test_label, np.eye(4)), label_filename) + + prepare_datalist_file = "models/pancreas_ct_dints_segmentation/scripts/prepare_datalist.py" + datalist_file = "models/pancreas_ct_dints_segmentation/configs/dataset_0.json" + cmd = f"python {prepare_datalist_file} --path {self.dataset_dir} --output {datalist_file} --train_size 12" + call_status = subprocess.run(cmd, shell=True) + call_status.check_returncode() + + def tearDown(self): + shutil.rmtree(self.dataset_dir) + + @parameterized.expand([TEST_CASE_1]) + def test_search(self, override): + override["data_file_base_dir"] = self.dataset_dir + 
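+        # the NAS search stage runs in a subprocess: the merged override config is written
+        # to disk and scripts.search is invoked as a module, with the bundle root placed on
+        # PYTHONPATH so the bundle's own scripts package can be imported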
output_path = "models/pancreas_ct_dints_segmentation/configs/search_override.json" + export_overrided_config("models/pancreas_ct_dints_segmentation/configs/search.yaml", override, output_path) + cmd = f"python -m scripts.search run --config_file {output_path}" + env = os.environ.copy() + # ensure customized library can be loaded in subprocess + env["PYTHONPATH"] = override.get("bundle_root", ".") + subprocess.check_call(cmd, shell=True, env=env) + + @parameterized.expand([TEST_CASE_2]) + def test_train(self, override): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + train_file = os.path.join(bundle_root, "configs/train.yaml") + + trainer = ConfigWorkflow( + workflow="train", + config_file=train_file, + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + trainer.initialize() + # check required and optional properties + check_result = trainer.check_properties() + if check_result is not None and len(check_result) > 0: + raise ValueError(f"check properties for train config failed: {check_result}") + trainer.run() + trainer.finalize() + + @parameterized.expand([TEST_CASE_3]) + def test_eval(self, override): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + train_file = os.path.join(bundle_root, "configs/train.yaml") + eval_file = os.path.join(bundle_root, "configs/evaluate.yaml") + + validator = ConfigWorkflow( + # override train.json, thus set the workflow to "train" rather than "eval" + workflow="train", + config_file=[train_file, eval_file], + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + validator.initialize() + check_result = validator.check_properties() + if check_result is not None and len(check_result) > 0: + raise ValueError(f"check properties for overrided train config failed: {check_result}") + validator.run() + validator.finalize() + + @parameterized.expand([TEST_CASE_4]) + def test_infer_config(self, override): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + + inferrer = ConfigWorkflow( + workflow="infer", + config_file=os.path.join(bundle_root, "configs/inference.yaml"), + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + inferrer.initialize() + # check required and optional properties + check_result = inferrer.check_properties() + if check_result is not None and len(check_result) > 0: + raise ValueError(f"check properties for inference config failed: {check_result}") + inferrer.run() + inferrer.finalize() + + +if __name__ == "__main__": + loader = unittest.TestLoader() + loader.sortTestMethodsUsing = test_order + unittest.main(testLoader=loader) diff --git a/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py b/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py new file mode 100644 index 00000000..75b98384 --- /dev/null +++ b/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py @@ -0,0 +1,119 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import shutil +import subprocess +import tempfile +import unittest + +import nibabel as nib +import numpy as np +import torch +from parameterized import parameterized +from utils import export_config_and_run_mgpu_cmd, export_overrided_config + +TEST_CASE_1 = [ + { + "bundle_root": "models/pancreas_ct_dints_segmentation", + "arch_ckpt_path": "models/pancreas_ct_dints_segmentation/models/", + "data_list_file_path": "models/pancreas_ct_dints_segmentation/configs/dataset_0.json", + "num_epochs": 1, + "num_epochs_per_validation": 1, + "num_epochs_warmup": 0, + "num_sw_batch_size": 2, + "patch_size": [32, 32, 32], + "patch_size_valid": [32, 32, 32], + } +] + +TEST_CASE_2 = [ + { + "bundle_root": "models/pancreas_ct_dints_segmentation", + "arch_ckpt_path": "models/pancreas_ct_dints_segmentation/models/search_code_3.pt", + "train#trainer#max_epochs": 1, + "train#dataset#cache_rate": 0, + "validate#dataset#cache_rate": 0, + "validate#inferer#roi_size": [32, 32, 32], + "train#random_transforms#0#spatial_size": [32, 32, 32], + "val_interval": 1, + } +] + + +def test_order(test_name1, test_name2): + def get_order(name): + if "search" in name: + return 1 + return 2 + + return get_order(test_name1) - get_order(test_name2) + + +class TestDintsMGPU(unittest.TestCase): + def setUp(self): + self.dataset_dir = tempfile.mkdtemp() + dataset_size = 20 + input_shape = (64, 64, 64) + images_folder = os.path.join(self.dataset_dir, "imagesTr") + labels_folder = os.path.join(self.dataset_dir, "labelsTr") + os.makedirs(images_folder) + os.makedirs(labels_folder) + for s in range(dataset_size): + test_image = np.random.randint(low=0, high=2, size=input_shape).astype(np.int8) + test_label = np.random.randint(low=0, high=3, size=input_shape).astype(np.int8) + image_filename = os.path.join(images_folder, f"pancreas_{s}.nii.gz") + label_filename = os.path.join(labels_folder, f"pancreas_{s}.nii.gz") + nib.save(nib.Nifti1Image(test_image, np.eye(4)), image_filename) + nib.save(nib.Nifti1Image(test_label, np.eye(4)), label_filename) + + prepare_datalist_file = "models/pancreas_ct_dints_segmentation/scripts/prepare_datalist.py" + datalist_file = "models/pancreas_ct_dints_segmentation/configs/dataset_0.json" + cmd = f"python {prepare_datalist_file} --path {self.dataset_dir} --output {datalist_file} --train_size 12" + call_status = subprocess.run(cmd, shell=True) + call_status.check_returncode() + + def tearDown(self): + shutil.rmtree(self.dataset_dir) + + @parameterized.expand([TEST_CASE_1]) + def test_search(self, override): + override["data_file_base_dir"] = self.dataset_dir + output_path = "models/pancreas_ct_dints_segmentation/configs/search_override.json" + export_overrided_config("models/pancreas_ct_dints_segmentation/configs/search.yaml", override, output_path) + cmd = f"torchrun --standalone --nnodes=1 --nproc_per_node=2 -m scripts.search run {output_path}" + env = os.environ.copy() + # ensure customized library can be loaded in subprocess + env["PYTHONPATH"] = override.get("bundle_root", ".") + subprocess.check_call(cmd, shell=True, env=env) + + @parameterized.expand([TEST_CASE_2]) + def 
test_train_mgpu_config(self, override):
+        override["dataset_dir"] = self.dataset_dir
+        bundle_root = override["bundle_root"]
+        train_file = os.path.join(bundle_root, "configs/train.yaml")
+        mgpu_train_file = os.path.join(bundle_root, "configs/multi_gpu_train.yaml")
+        output_path = os.path.join(bundle_root, "configs/train_override.json")
+        n_gpu = torch.cuda.device_count()
+        export_config_and_run_mgpu_cmd(
+            config_file=[train_file, mgpu_train_file],
+            logging_file=os.path.join(bundle_root, "configs/logging.conf"),
+            meta_file=os.path.join(bundle_root, "configs/metadata.json"),
+            override_dict=override,
+            output_path=output_path,
+            ngpu=n_gpu,
+        )
+
+
+if __name__ == "__main__":
+    loader = unittest.TestLoader()
+    loader.sortTestMethodsUsing = test_order
+    unittest.main(testLoader=loader)
diff --git a/models/pancreas_ct_dints_segmentation/scripts/prepare_datalist.py b/models/pancreas_ct_dints_segmentation/scripts/prepare_datalist.py
index a7fac23c..0e9ad985 100644
--- a/models/pancreas_ct_dints_segmentation/scripts/prepare_datalist.py
+++ b/models/pancreas_ct_dints_segmentation/scripts/prepare_datalist.py
@@ -11,11 +11,10 @@ def produce_sample_dict(line: str):
     return {"label": line, "image": line.replace("labelsTr", "imagesTr")}
 
 
-def produce_datalist(dataset_dir: str):
+def produce_datalist(dataset_dir: str, train_size: int = 196):
     """
     This function is used to split the dataset.
-    It will produce 200 samples for training, and the other samples are divided equally
-    into val and test sets.
+    It will produce "train_size" samples for training; the remaining samples are split into val and test sets.
     """
 
     samples = sorted(glob.glob(os.path.join(dataset_dir, "labelsTr", "*"), recursive=True))
@@ -23,7 +22,7 @@ def produce_datalist(dataset_dir: str):
     datalist = []
     for line in samples:
         datalist.append(produce_sample_dict(line))
-    train_list, other_list = train_test_split(datalist, train_size=196)
+    train_list, other_list = train_test_split(datalist, train_size=train_size)
     val_list, test_list = train_test_split(other_list, train_size=0.66)
 
     return {"training": train_list, "validation": val_list, "testing": test_list}
@@ -37,7 +36,7 @@ def main(args):
     output_json = args.output
     # produce deterministic data splits
     monai.utils.set_determinism(seed=123)
-    datalist = produce_datalist(dataset_dir=data_file_base_dir)
+    datalist = produce_datalist(dataset_dir=data_file_base_dir, train_size=args.train_size)
     with open(output_json, "w") as f:
         json.dump(datalist, f, ensure_ascii=True, indent=4)
 
@@ -53,6 +52,7 @@ def main(args):
     parser.add_argument(
         "--output", type=str, default="dataset_0.json", help="relative path of output datalist json file."
) + parser.add_argument("--train_size", type=int, default=196, help="number of training samples.") args = parser.parse_args() main(args) diff --git a/models/pancreas_ct_dints_segmentation/scripts/search.py b/models/pancreas_ct_dints_segmentation/scripts/search.py index 8ccb4e26..b7720b58 100644 --- a/models/pancreas_ct_dints_segmentation/scripts/search.py +++ b/models/pancreas_ct_dints_segmentation/scripts/search.py @@ -28,7 +28,7 @@ from monai.bundle import ConfigParser from monai.data import ThreadDataLoader, partition_dataset from monai.inferers import sliding_window_inference -from monai.metrics import compute_meandice +from monai.metrics import compute_dice from monai.utils import set_determinism from torch.nn.parallel import DistributedDataParallel from torch.utils.tensorboard import SummaryWriter @@ -100,14 +100,12 @@ def run(config_file: Union[str, Sequence[str]]): train_files_w = partition_dataset( data=train_files_w, shuffle=True, num_partitions=world_size, even_divisible=True )[dist.get_rank()] - print("train_files_w:", len(train_files_w)) train_files_a = train_files[len(train_files) // 2 :] if torch.cuda.device_count() > 1: train_files_a = partition_dataset( data=train_files_a, shuffle=True, num_partitions=world_size, even_divisible=True )[dist.get_rank()] - print("train_files_a:", len(train_files_a)) # validation data files = [] @@ -125,7 +123,6 @@ def run(config_file: Union[str, Sequence[str]]): val_files = partition_dataset(data=val_files, shuffle=False, num_partitions=world_size, even_divisible=False)[ dist.get_rank() ] - print("val_files:", len(val_files)) # network architecture if torch.cuda.device_count() > 1: @@ -421,7 +418,7 @@ def run(config_file: Union[str, Sequence[str]]): val_labels = post_label(val_labels[0, ...]) val_labels = val_labels[None, ...] 
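 # NOTE: compute_meandice has been replaced by compute_dice in recent MONAI releases,
 # hence the rename below (see the "match monai 1.2" metadata changelog update in the next patch)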
- value = compute_meandice(y_pred=val_outputs, y=val_labels, include_background=False) + value = compute_dice(y_pred=val_outputs, y=val_labels, include_background=False) print(_index + 1, "/", len(val_loader), value) From 887cd15a527b21ad0bff6602408873fbb8e9e39b Mon Sep 17 00:00:00 2001 From: Yiheng Wang Date: Wed, 14 Jun 2023 19:03:02 +0800 Subject: [PATCH 03/19] update changelog Signed-off-by: Yiheng Wang --- models/pancreas_ct_dints_segmentation/configs/metadata.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/models/pancreas_ct_dints_segmentation/configs/metadata.json b/models/pancreas_ct_dints_segmentation/configs/metadata.json index ecdcbad5..282fd360 100644 --- a/models/pancreas_ct_dints_segmentation/configs/metadata.json +++ b/models/pancreas_ct_dints_segmentation/configs/metadata.json @@ -1,7 +1,8 @@ { "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_20220324.json", - "version": "0.4.1", + "version": "0.4.2", "changelog": { + "0.4.2": "update search function to match monai 1.2", "0.4.1": "fix the wrong GPU index issue of multi-node", "0.4.0": "remove error dollar symbol in readme", "0.3.9": "add cpu ram requirement in readme", From 4c801f736c46cb6499b9cc7853c29b9affe7ca45 Mon Sep 17 00:00:00 2001 From: Yiheng Wang Date: Wed, 14 Jun 2023 20:35:08 +0800 Subject: [PATCH 04/19] add requirements dev Signed-off-by: Yiheng Wang --- requirements-dev.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements-dev.txt b/requirements-dev.txt index f24f10c0..ad4c057e 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -26,3 +26,4 @@ parameterized monai>=1.2.0rc7 pillow!=8.3.0 # https://github.com/python-pillow/Pillow/issues/5571 itk>=5.2 +scikit-learn From 50d1a433259220960178067cf5a897e4be4201d8 Mon Sep 17 00:00:00 2001 From: Yiheng Wang Date: Mon, 19 Jun 2023 12:00:34 +0800 Subject: [PATCH 05/19] reduce resources Signed-off-by: Yiheng Wang --- ci/unit_tests/test_pancreas_ct_dints_segmentation.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ci/unit_tests/test_pancreas_ct_dints_segmentation.py b/ci/unit_tests/test_pancreas_ct_dints_segmentation.py index ad65f125..03fa37ce 100644 --- a/ci/unit_tests/test_pancreas_ct_dints_segmentation.py +++ b/ci/unit_tests/test_pancreas_ct_dints_segmentation.py @@ -43,6 +43,8 @@ "train#dataset#cache_rate": 0, "validate#dataset#cache_rate": 0, "validate#inferer#roi_size": [32, 32, 32], + "validate#inferer#sw_batch_size": 1, + "validate#inferer#overlap": 0.1, "train#random_transforms#0#spatial_size": [32, 32, 32], "val_interval": 1, } From 7517225261014b4b4a4761cb23350238cbf81a82 Mon Sep 17 00:00:00 2001 From: Yiheng Wang Date: Mon, 19 Jun 2023 14:36:07 +0800 Subject: [PATCH 06/19] check shm size Signed-off-by: Yiheng Wang --- .../test_pancreas_ct_dints_segmentation.py | 38 ++++++++++++++++--- ...est_pancreas_ct_dints_segmentation_dist.py | 18 ++++++++- 2 files changed, 49 insertions(+), 7 deletions(-) diff --git a/ci/unit_tests/test_pancreas_ct_dints_segmentation.py b/ci/unit_tests/test_pancreas_ct_dints_segmentation.py index 03fa37ce..f40a0189 100644 --- a/ci/unit_tests/test_pancreas_ct_dints_segmentation.py +++ b/ci/unit_tests/test_pancreas_ct_dints_segmentation.py @@ -24,7 +24,6 @@ TEST_CASE_1 = [ { "bundle_root": "models/pancreas_ct_dints_segmentation", - "arch_ckpt_path": "models/pancreas_ct_dints_segmentation/models/", "data_list_file_path": "models/pancreas_ct_dints_segmentation/configs/dataset_0.json", "num_epochs": 1, "num_epochs_per_validation": 1, 
@@ -38,13 +37,14 @@ TEST_CASE_2 = [ { "bundle_root": "models/pancreas_ct_dints_segmentation", - "arch_ckpt_path": "models/pancreas_ct_dints_segmentation/models/search_code_6.pt", "train#trainer#max_epochs": 1, "train#dataset#cache_rate": 0, + "train#dataloader#num_workers": 1, "validate#dataset#cache_rate": 0, "validate#inferer#roi_size": [32, 32, 32], "validate#inferer#sw_batch_size": 1, "validate#inferer#overlap": 0.1, + "validate#dataloader#num_workers": 1, "train#random_transforms#0#spatial_size": [32, 32, 32], "val_interval": 1, } @@ -53,7 +53,6 @@ TEST_CASE_3 = [ { "bundle_root": "models/pancreas_ct_dints_segmentation", - "arch_ckpt_path": "models/pancreas_ct_dints_segmentation/models/search_code_6.pt", "validate#inferer#roi_size": [32, 32, 32], } ] @@ -61,7 +60,6 @@ TEST_CASE_4 = [ { "bundle_root": "models/pancreas_ct_dints_segmentation", - "arch_ckpt_path": "models/pancreas_ct_dints_segmentation/models/search_code_6.pt", "inferer#roi_size": [32, 32, 32], } ] @@ -80,8 +78,32 @@ def get_order(name): return get_order(test_name1) - get_order(test_name2) +def get_searched_arch(path): + file_list = os.listdir(path) + arch_name = None + for f in file_list: + if "search_code" in f: + arch_name = f + if arch_name is None: + raise ValueError("Cannot find searched architectures file.") + print("arch_name: ", arch_name) + return arch_name + +def get_size(start_path = '/dev/shm/'): + total_size = 0 + for dirpath, dirnames, filenames in os.walk(start_path): + for f in filenames: + fp = os.path.join(dirpath, f) + # skip if it is symbolic link + if not os.path.islink(fp): + total_size += os.path.getsize(fp) + + print("shm size is: ", total_size) + + class TestDints(unittest.TestCase): def setUp(self): + get_size() self.dataset_dir = tempfile.mkdtemp() dataset_size = 20 input_shape = (64, 64, 64) @@ -109,6 +131,7 @@ def tearDown(self): @parameterized.expand([TEST_CASE_1]) def test_search(self, override): override["data_file_base_dir"] = self.dataset_dir + override["arch_ckpt_path"] = os.path.join(override["bundle_root"], "models") output_path = "models/pancreas_ct_dints_segmentation/configs/search_override.json" export_overrided_config("models/pancreas_ct_dints_segmentation/configs/search.yaml", override, output_path) cmd = f"python -m scripts.search run --config_file {output_path}" @@ -121,6 +144,8 @@ def test_search(self, override): def test_train(self, override): override["dataset_dir"] = self.dataset_dir bundle_root = override["bundle_root"] + arch_name = get_searched_arch(os.path.join(bundle_root, "models")) + override["arch_ckpt_path"] = os.path.join(bundle_root, "models", arch_name) train_file = os.path.join(bundle_root, "configs/train.yaml") trainer = ConfigWorkflow( @@ -142,6 +167,8 @@ def test_train(self, override): def test_eval(self, override): override["dataset_dir"] = self.dataset_dir bundle_root = override["bundle_root"] + arch_name = get_searched_arch(os.path.join(bundle_root, "models")) + override["arch_ckpt_path"] = os.path.join(bundle_root, "models", arch_name) train_file = os.path.join(bundle_root, "configs/train.yaml") eval_file = os.path.join(bundle_root, "configs/evaluate.yaml") @@ -164,7 +191,8 @@ def test_eval(self, override): def test_infer_config(self, override): override["dataset_dir"] = self.dataset_dir bundle_root = override["bundle_root"] - + arch_name = get_searched_arch(os.path.join(bundle_root, "models")) + override["arch_ckpt_path"] = os.path.join(bundle_root, "models", arch_name) inferrer = ConfigWorkflow( workflow="infer", config_file=os.path.join(bundle_root, 
"configs/inference.yaml"), diff --git a/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py b/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py index 75b98384..3febcf45 100644 --- a/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py +++ b/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py @@ -24,7 +24,6 @@ TEST_CASE_1 = [ { "bundle_root": "models/pancreas_ct_dints_segmentation", - "arch_ckpt_path": "models/pancreas_ct_dints_segmentation/models/", "data_list_file_path": "models/pancreas_ct_dints_segmentation/configs/dataset_0.json", "num_epochs": 1, "num_epochs_per_validation": 1, @@ -38,7 +37,6 @@ TEST_CASE_2 = [ { "bundle_root": "models/pancreas_ct_dints_segmentation", - "arch_ckpt_path": "models/pancreas_ct_dints_segmentation/models/search_code_3.pt", "train#trainer#max_epochs": 1, "train#dataset#cache_rate": 0, "validate#dataset#cache_rate": 0, @@ -58,8 +56,21 @@ def get_order(name): return get_order(test_name1) - get_order(test_name2) +def get_searched_arch(path): + file_list = os.listdir(path) + arch_name = None + for f in file_list: + if "search_code" in f: + arch_name = f + if arch_name is None: + raise ValueError("Cannot find searched architectures file.") + print("arch_name: ", arch_name) + return arch_name + + class TestDintsMGPU(unittest.TestCase): def setUp(self): + get_size() self.dataset_dir = tempfile.mkdtemp() dataset_size = 20 input_shape = (64, 64, 64) @@ -87,6 +98,7 @@ def tearDown(self): @parameterized.expand([TEST_CASE_1]) def test_search(self, override): override["data_file_base_dir"] = self.dataset_dir + override["arch_ckpt_path"] = os.path.join(override["bundle_root"], "models") output_path = "models/pancreas_ct_dints_segmentation/configs/search_override.json" export_overrided_config("models/pancreas_ct_dints_segmentation/configs/search.yaml", override, output_path) cmd = f"torchrun --standalone --nnodes=1 --nproc_per_node=2 -m scripts.search run {output_path}" @@ -99,6 +111,8 @@ def test_search(self, override): def test_train_mgpu_config(self, override): override["dataset_dir"] = self.dataset_dir bundle_root = override["bundle_root"] + arch_name = get_searched_arch(os.path.join(bundle_root, "models")) + override["arch_ckpt_path"] = os.path.join(bundle_root, "models", arch_name) train_file = os.path.join(bundle_root, "configs/train.yaml") mgpu_train_file = os.path.join(bundle_root, "configs/multi_gpu_train.yaml") output_path = os.path.join(bundle_root, "configs/train_override.json") From 2cf7483cd1427b1017fc33422a53691d7acfe021 Mon Sep 17 00:00:00 2001 From: Yiheng Wang Date: Mon, 19 Jun 2023 14:49:04 +0800 Subject: [PATCH 07/19] add missing func Signed-off-by: Yiheng Wang --- .../test_pancreas_ct_dints_segmentation_dist.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py b/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py index 3febcf45..2c2ba80e 100644 --- a/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py +++ b/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py @@ -47,6 +47,18 @@ ] +def get_size(start_path = '/dev/shm/'): + total_size = 0 + for dirpath, dirnames, filenames in os.walk(start_path): + for f in filenames: + fp = os.path.join(dirpath, f) + # skip if it is symbolic link + if not os.path.islink(fp): + total_size += os.path.getsize(fp) + + print("shm size is: ", total_size) + + def test_order(test_name1, test_name2): def get_order(name): if "search" in name: From 5bfabf64c1c0771b45f392632aea2ad3b97f152a Mon Sep 17 00:00:00 2001 
From: Yiheng Wang Date: Mon, 19 Jun 2023 15:08:56 +0800 Subject: [PATCH 08/19] check dev/shm Signed-off-by: Yiheng Wang --- .../test_pancreas_ct_dints_segmentation.py | 13 +------------ .../test_pancreas_ct_dints_segmentation_dist.py | 14 +------------- 2 files changed, 2 insertions(+), 25 deletions(-) diff --git a/ci/unit_tests/test_pancreas_ct_dints_segmentation.py b/ci/unit_tests/test_pancreas_ct_dints_segmentation.py index f40a0189..ccd1f7e5 100644 --- a/ci/unit_tests/test_pancreas_ct_dints_segmentation.py +++ b/ci/unit_tests/test_pancreas_ct_dints_segmentation.py @@ -89,21 +89,9 @@ def get_searched_arch(path): print("arch_name: ", arch_name) return arch_name -def get_size(start_path = '/dev/shm/'): - total_size = 0 - for dirpath, dirnames, filenames in os.walk(start_path): - for f in filenames: - fp = os.path.join(dirpath, f) - # skip if it is symbolic link - if not os.path.islink(fp): - total_size += os.path.getsize(fp) - - print("shm size is: ", total_size) - class TestDints(unittest.TestCase): def setUp(self): - get_size() self.dataset_dir = tempfile.mkdtemp() dataset_size = 20 input_shape = (64, 64, 64) @@ -124,6 +112,7 @@ def setUp(self): cmd = f"python {prepare_datalist_file} --path {self.dataset_dir} --output {datalist_file} --train_size 12" call_status = subprocess.run(cmd, shell=True) call_status.check_returncode() + subprocess.check_call("df -h /dev/shm/", shell=True) def tearDown(self): shutil.rmtree(self.dataset_dir) diff --git a/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py b/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py index 2c2ba80e..02578d09 100644 --- a/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py +++ b/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py @@ -47,18 +47,6 @@ ] -def get_size(start_path = '/dev/shm/'): - total_size = 0 - for dirpath, dirnames, filenames in os.walk(start_path): - for f in filenames: - fp = os.path.join(dirpath, f) - # skip if it is symbolic link - if not os.path.islink(fp): - total_size += os.path.getsize(fp) - - print("shm size is: ", total_size) - - def test_order(test_name1, test_name2): def get_order(name): if "search" in name: @@ -82,7 +70,6 @@ def get_searched_arch(path): class TestDintsMGPU(unittest.TestCase): def setUp(self): - get_size() self.dataset_dir = tempfile.mkdtemp() dataset_size = 20 input_shape = (64, 64, 64) @@ -103,6 +90,7 @@ def setUp(self): cmd = f"python {prepare_datalist_file} --path {self.dataset_dir} --output {datalist_file} --train_size 12" call_status = subprocess.run(cmd, shell=True) call_status.check_returncode() + subprocess.check_call("df -h /dev/shm/", shell=True) def tearDown(self): shutil.rmtree(self.dataset_dir) From 42fb866f9434e212394105c35e4bd6d7ab38c52c Mon Sep 17 00:00:00 2001 From: Yiheng Wang Date: Mon, 19 Jun 2023 15:23:10 +0800 Subject: [PATCH 09/19] show searched list Signed-off-by: Yiheng Wang --- ci/unit_tests/test_pancreas_ct_dints_segmentation.py | 2 +- ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ci/unit_tests/test_pancreas_ct_dints_segmentation.py b/ci/unit_tests/test_pancreas_ct_dints_segmentation.py index ccd1f7e5..d87a4c25 100644 --- a/ci/unit_tests/test_pancreas_ct_dints_segmentation.py +++ b/ci/unit_tests/test_pancreas_ct_dints_segmentation.py @@ -86,7 +86,7 @@ def get_searched_arch(path): arch_name = f if arch_name is None: raise ValueError("Cannot find searched architectures file.") - print("arch_name: ", arch_name) + print(file_list) return arch_name diff 
--git a/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py b/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py index 02578d09..32a1a1e3 100644 --- a/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py +++ b/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py @@ -64,7 +64,7 @@ def get_searched_arch(path): arch_name = f if arch_name is None: raise ValueError("Cannot find searched architectures file.") - print("arch_name: ", arch_name) + print(file_list) return arch_name From a10434f64fc0102112bf84bee67efbc59b9f1384 Mon Sep 17 00:00:00 2001 From: Yiheng Wang Date: Mon, 19 Jun 2023 15:49:36 +0800 Subject: [PATCH 10/19] remove shm checks Signed-off-by: Yiheng Wang --- .../test_pancreas_ct_dints_segmentation.py | 17 +++-------------- .../test_pancreas_ct_dints_segmentation_dist.py | 3 +-- 2 files changed, 4 insertions(+), 16 deletions(-) diff --git a/ci/unit_tests/test_pancreas_ct_dints_segmentation.py b/ci/unit_tests/test_pancreas_ct_dints_segmentation.py index d87a4c25..26b856f8 100644 --- a/ci/unit_tests/test_pancreas_ct_dints_segmentation.py +++ b/ci/unit_tests/test_pancreas_ct_dints_segmentation.py @@ -50,19 +50,9 @@ } ] -TEST_CASE_3 = [ - { - "bundle_root": "models/pancreas_ct_dints_segmentation", - "validate#inferer#roi_size": [32, 32, 32], - } -] +TEST_CASE_3 = [{"bundle_root": "models/pancreas_ct_dints_segmentation", "validate#inferer#roi_size": [32, 32, 32]}] -TEST_CASE_4 = [ - { - "bundle_root": "models/pancreas_ct_dints_segmentation", - "inferer#roi_size": [32, 32, 32], - } -] +TEST_CASE_4 = [{"bundle_root": "models/pancreas_ct_dints_segmentation", "inferer#roi_size": [32, 32, 32]}] def test_order(test_name1, test_name2): @@ -86,7 +76,7 @@ def get_searched_arch(path): arch_name = f if arch_name is None: raise ValueError("Cannot find searched architectures file.") - print(file_list) + return arch_name @@ -112,7 +102,6 @@ def setUp(self): cmd = f"python {prepare_datalist_file} --path {self.dataset_dir} --output {datalist_file} --train_size 12" call_status = subprocess.run(cmd, shell=True) call_status.check_returncode() - subprocess.check_call("df -h /dev/shm/", shell=True) def tearDown(self): shutil.rmtree(self.dataset_dir) diff --git a/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py b/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py index 32a1a1e3..ae50f676 100644 --- a/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py +++ b/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py @@ -64,7 +64,7 @@ def get_searched_arch(path): arch_name = f if arch_name is None: raise ValueError("Cannot find searched architectures file.") - print(file_list) + return arch_name @@ -90,7 +90,6 @@ def setUp(self): cmd = f"python {prepare_datalist_file} --path {self.dataset_dir} --output {datalist_file} --train_size 12" call_status = subprocess.run(cmd, shell=True) call_status.check_returncode() - subprocess.check_call("df -h /dev/shm/", shell=True) def tearDown(self): shutil.rmtree(self.dataset_dir) From 427d4c777bdd5f5c1105682bb763c70e98ba86f4 Mon Sep 17 00:00:00 2001 From: Yiheng Wang Date: Tue, 20 Jun 2023 16:58:40 +0800 Subject: [PATCH 11/19] add swinunetr test cases Signed-off-by: Yiheng Wang --- ...est_pancreas_ct_dints_segmentation_dist.py | 1 + .../test_spleen_deepedit_annotation_dist.py | 1 + .../test_swin_unetr_btcv_segmentation.py | 149 ++++++++++++++++++ .../test_swin_unetr_btcv_segmentation_dist.py | 82 ++++++++++ 4 files changed, 233 insertions(+) create mode 100644 ci/unit_tests/test_swin_unetr_btcv_segmentation.py create mode 100644 
ci/unit_tests/test_swin_unetr_btcv_segmentation_dist.py diff --git a/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py b/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py index ae50f676..315e3da3 100644 --- a/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py +++ b/ci/unit_tests/test_pancreas_ct_dints_segmentation_dist.py @@ -123,6 +123,7 @@ def test_train_mgpu_config(self, override): override_dict=override, output_path=output_path, ngpu=n_gpu, + check_config=True, ) diff --git a/ci/unit_tests/test_spleen_deepedit_annotation_dist.py b/ci/unit_tests/test_spleen_deepedit_annotation_dist.py index e5269d2c..7f11e586 100644 --- a/ci/unit_tests/test_spleen_deepedit_annotation_dist.py +++ b/ci/unit_tests/test_spleen_deepedit_annotation_dist.py @@ -69,6 +69,7 @@ def test_train_mgpu_config(self, override): override_dict=override, output_path=output_path, ngpu=n_gpu, + check_config=True, ) diff --git a/ci/unit_tests/test_swin_unetr_btcv_segmentation.py b/ci/unit_tests/test_swin_unetr_btcv_segmentation.py new file mode 100644 index 00000000..a744c856 --- /dev/null +++ b/ci/unit_tests/test_swin_unetr_btcv_segmentation.py @@ -0,0 +1,149 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import shutil +import tempfile +import unittest + +import numpy as np +from monai.bundle import ConfigWorkflow +from monai.data import ITKWriter +from parameterized import parameterized + +TEST_CASE_1 = [ # train, evaluate + { + "bundle_root": "models/swin_unetr_btcv_segmentation", + "images": "$list(sorted(glob.glob(@dataset_dir + '/image_*.nii.gz')))", + "labels": "$list(sorted(glob.glob(@dataset_dir + '/label_*.nii.gz')))", + "val_interval": 1, + "network_def#img_size": 96, + "network_def#feature_size": 24, + "train#random_transforms#0#num_samples": 1, + "train#deterministic_transforms#3#pixdim": [1.0, 1.0, 1.0], + "train#trainer#max_epochs": 1, + "train#dataset#cache_rate": 0.0, + "train#dataloader#batch_size": 1, + "validate#dataset#cache_rate": 0.0, + } +] + +TEST_CASE_2 = [ # inference + { + "bundle_root": "models/swin_unetr_btcv_segmentation", + "datalist": "$list(sorted(glob.glob(@dataset_dir + '/image_*.nii.gz')))", + "network_def#feature_size": 24, + "preprocessing#transforms#3#pixdim": [1.0, 1.0, 1.0], + } +] + + +def test_order(test_name1, test_name2): + def get_order(name): + if "train" in name: + return 1 + if "eval" in name: + return 2 + if "infer" in name: + return 3 + return 4 + + return get_order(test_name1) - get_order(test_name2) + + +class TestSwinUnetr(unittest.TestCase): + def setUp(self): + self.dataset_dir = tempfile.mkdtemp() + dataset_size = 12 + input_shape = (96, 96, 96) + writer = ITKWriter(output_dtype=np.uint8) + for s in range(dataset_size): + test_image = np.random.randint(low=0, high=2, size=input_shape).astype(np.int8) + test_label = np.random.randint(low=0, high=14, size=input_shape).astype(np.int8) + image_filename = os.path.join(self.dataset_dir, f"image_{s}.nii.gz") + label_filename = os.path.join(self.dataset_dir, 
f"label_{s}.nii.gz") + writer.set_data_array(test_image, channel_dim=None) + writer.set_metadata({"affine": np.eye(4), "original_affine": -1 * np.eye(4)}) + writer.write(image_filename) + writer.set_data_array(test_label, channel_dim=None) + writer.set_metadata({"affine": np.eye(4), "original_affine": -1 * np.eye(4)}) + writer.write(label_filename) + + def tearDown(self): + shutil.rmtree(self.dataset_dir) + + @parameterized.expand([TEST_CASE_1]) + def test_train_config(self, override): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + + trainer = ConfigWorkflow( + workflow="train", + config_file=os.path.join(bundle_root, "configs/train.json"), + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + trainer.initialize() + # check required and optional properties + check_result = trainer.check_properties() + if check_result is not None and len(check_result) > 0: + raise ValueError(f"check properties for train config failed: {check_result}") + trainer.run() + trainer.finalize() + + @parameterized.expand([TEST_CASE_1]) + def test_eval_config(self, override): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + train_file = os.path.join(bundle_root, "configs/train.json") + eval_file = os.path.join(bundle_root, "configs/evaluate.json") + + validator = ConfigWorkflow( + # override train.json, thus set the workflow to "train" rather than "eval" + workflow="train", + config_file=[train_file, eval_file], + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + validator.initialize() + check_result = validator.check_properties() + if check_result is not None and len(check_result) > 0: + raise ValueError(f"check properties for overrided train config failed: {check_result}") + validator.run() + validator.finalize() + + @parameterized.expand([TEST_CASE_2]) + def test_infer_config(self, override): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + + inferrer = ConfigWorkflow( + workflow="infer", + config_file=os.path.join(bundle_root, "configs/inference.json"), + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + inferrer.initialize() + # check required and optional properties + check_result = inferrer.check_properties() + if check_result is not None and len(check_result) > 0: + raise ValueError(f"check properties for inference config failed: {check_result}") + inferrer.run() + inferrer.finalize() + + +if __name__ == "__main__": + loader = unittest.TestLoader() + loader.sortTestMethodsUsing = test_order + unittest.main(testLoader=loader) diff --git a/ci/unit_tests/test_swin_unetr_btcv_segmentation_dist.py b/ci/unit_tests/test_swin_unetr_btcv_segmentation_dist.py new file mode 100644 index 00000000..ca86072e --- /dev/null +++ b/ci/unit_tests/test_swin_unetr_btcv_segmentation_dist.py @@ -0,0 +1,82 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import shutil +import tempfile +import unittest + +import numpy as np +import torch +from monai.data import ITKWriter +from parameterized import parameterized +from utils import export_config_and_run_mgpu_cmd + +TEST_CASE_1 = [ + { + "bundle_root": "models/swin_unetr_btcv_segmentation", + "images": "$list(sorted(glob.glob(@dataset_dir + '/image_*.nii.gz')))", + "labels": "$list(sorted(glob.glob(@dataset_dir + '/label_*.nii.gz')))", + "val_interval": 1, + "network_def#img_size": 96, + "network_def#feature_size": 24, + "train#random_transforms#0#num_samples": 1, + "train#deterministic_transforms#3#pixdim": [1.0, 1.0, 1.0], + "train#trainer#max_epochs": 1, + "train#dataset#cache_rate": 0.0, + "train#dataloader#batch_size": 1, + "validate#dataset#cache_rate": 0.0, + } +] + + +class TestSwinUnetrMGPU(unittest.TestCase): + def setUp(self): + self.dataset_dir = tempfile.mkdtemp() + dataset_size = 12 + input_shape = (96, 96, 96) + writer = ITKWriter(output_dtype=np.uint8) + for s in range(dataset_size): + test_image = np.random.randint(low=0, high=2, size=input_shape).astype(np.int8) + test_label = np.random.randint(low=0, high=14, size=input_shape).astype(np.int8) + image_filename = os.path.join(self.dataset_dir, f"image_{s}.nii.gz") + label_filename = os.path.join(self.dataset_dir, f"label_{s}.nii.gz") + writer.set_data_array(test_image, channel_dim=None) + writer.set_metadata({"affine": np.eye(4), "original_affine": -1 * np.eye(4)}) + writer.write(image_filename) + writer.set_data_array(test_label, channel_dim=None) + writer.set_metadata({"affine": np.eye(4), "original_affine": -1 * np.eye(4)}) + writer.write(label_filename) + + def tearDown(self): + shutil.rmtree(self.dataset_dir) + + @parameterized.expand([TEST_CASE_1]) + def test_train_mgpu_config(self, override): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + train_file = os.path.join(bundle_root, "configs/train.json") + mgpu_train_file = os.path.join(bundle_root, "configs/multi_gpu_train.json") + output_path = os.path.join(bundle_root, "configs/train_override.json") + n_gpu = torch.cuda.device_count() + export_config_and_run_mgpu_cmd( + config_file=[train_file, mgpu_train_file], + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + override_dict=override, + output_path=output_path, + ngpu=n_gpu, + check_config=True, + ) + + +if __name__ == "__main__": + unittest.main() From 7b11bd7394c0daf1c6b59b7c3d471cb0194368d4 Mon Sep 17 00:00:00 2001 From: Yiheng Wang Date: Wed, 21 Jun 2023 14:33:05 +0800 Subject: [PATCH 12/19] add tumor detect tests Signed-off-by: Yiheng Wang --- .../test_pathology_tumor_detection.py | 113 ++++++++++++++++++ .../test_pathology_tumor_detection_dist.py | 86 +++++++++++++ requirements-dev.txt | 2 + 3 files changed, 201 insertions(+) create mode 100644 ci/unit_tests/test_pathology_tumor_detection.py create mode 100644 ci/unit_tests/test_pathology_tumor_detection_dist.py diff --git a/ci/unit_tests/test_pathology_tumor_detection.py b/ci/unit_tests/test_pathology_tumor_detection.py new file 
mode 100644 index 00000000..f3ceb739 --- /dev/null +++ b/ci/unit_tests/test_pathology_tumor_detection.py @@ -0,0 +1,113 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import csv +import os +import shutil +import tempfile +import unittest + +from monai.apps.utils import download_url +from monai.bundle import ConfigWorkflow +from parameterized import parameterized + +TEST_CASE_1 = [ # train + { + "bundle_root": "models/pathology_tumor_detection", + "region_size": [256, 256], + "num_epochs": 1, + "epochs": 1, + "train#dataloader#batch_size": 32, + "validate#dataloader#batch_size": 32, + "train#datalist#transform#func": "$lambda x: os.path.join(@dataset_dir, x + '.tiff')", + "validate#datalist#transform#func": "$lambda x: os.path.join(@dataset_dir, x + '.tiff')", + }, + { + "bundle_root": "models/pathology_tumor_detection", + "dataloader#batch_size": 32, + "datalist#transform#func": "$lambda x: os.path.join(@dataset_dir, x + '.tiff')", + }, +] + +TIFF_INFO = { # data to be downloaded for test + "url": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/CMU-1.tiff", + "filename": "CMU-1.tiff", + "hash_type": "sha256", + "hash_val": "73a7e89bc15576587c3d68e55d9bf92f09690280166240b48ff4b48230b13bcd", + "csv_info": ["CMU-1", 46000, 32914, 0, 0, 0, 0, 0, 0, 1, 1, 1], +} + + +class TestTumorDetection(unittest.TestCase): + def setUp(self): + self.dataset_dir = tempfile.mkdtemp() + # download sample tiff file + download_url( + url=TIFF_INFO["url"], + filepath=os.path.join(self.dataset_dir, TIFF_INFO["filename"]), + hash_type=TIFF_INFO["hash_type"], + hash_val=TIFF_INFO["hash_val"], + ) + # prepare csv files + for csv_file in ["training.csv", "validation.csv"]: + with open(os.path.join("models/pathology_tumor_detection", csv_file), "w", newline="") as save_file: + wr = csv.writer(save_file) + wr.writerow(TIFF_INFO["csv_info"]) + + with open(os.path.join("models/pathology_tumor_detection", "testing.csv"), "w", newline="") as save_file: + wr = csv.writer(save_file) + wr.writerow(TIFF_INFO["csv_info"][:1]) + + def tearDown(self): + shutil.rmtree(self.dataset_dir) + + @parameterized.expand([TEST_CASE_1]) + def test_train_infer_config(self, override, override_infer): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + train_file = os.path.join(bundle_root, "configs/train.json") + + trainer = ConfigWorkflow( + workflow="train", + config_file=train_file, + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override, + ) + trainer.initialize() + # check required and optional properties + check_result = trainer.check_properties() + if check_result is not None and len(check_result) > 0: + raise ValueError(f"check properties for train config failed: {check_result}") + trainer.run() + trainer.finalize() + + # run train and infer tests within a single function to avoid duplicating download + override_infer["dataset_dir"] = self.dataset_dir + + 
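+        # the inference workflow reuses the slide downloaded in setUp; the datalist "func"
+        # override expands each CSV image id into an absolute .tiff path under dataset_dir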
inferrer = ConfigWorkflow( + workflow="infer", + config_file=os.path.join(bundle_root, "configs/inference.json"), + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + **override_infer, + ) + inferrer.initialize() + # check required and optional properties + check_result = inferrer.check_properties() + if check_result is not None and len(check_result) > 0: + raise ValueError(f"check properties for inference config failed: {check_result}") + inferrer.run() + inferrer.finalize() + + +if __name__ == "__main__": + unittest.main() diff --git a/ci/unit_tests/test_pathology_tumor_detection_dist.py b/ci/unit_tests/test_pathology_tumor_detection_dist.py new file mode 100644 index 00000000..b77bc7e3 --- /dev/null +++ b/ci/unit_tests/test_pathology_tumor_detection_dist.py @@ -0,0 +1,86 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import csv +import os +import shutil +import tempfile +import unittest + +import torch +from monai.apps.utils import download_url +from parameterized import parameterized +from utils import export_config_and_run_mgpu_cmd + +TEST_CASE_1 = [ + { + "bundle_root": "models/pathology_tumor_detection", + "region_size": [256, 256], + "num_epochs": 1, + "epochs": 1, + "train#dataloader#batch_size": 32, + "validate#dataloader#batch_size": 32, + "train#datalist#transform#func": "$lambda x: os.path.join(@dataset_dir, x + '.tiff')", + "validate#datalist#transform#func": "$lambda x: os.path.join(@dataset_dir, x + '.tiff')", + } +] + +TIFF_INFO = { # data to be downloaded for test + "url": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/CMU-1.tiff", + "filename": "CMU-1.tiff", + "hash_type": "sha256", + "hash_val": "73a7e89bc15576587c3d68e55d9bf92f09690280166240b48ff4b48230b13bcd", + "csv_info": ["CMU-1", 46000, 32914, 0, 0, 0, 0, 0, 0, 1, 1, 1], +} + + +class TestTumorDetectionMGPU(unittest.TestCase): + def setUp(self): + self.dataset_dir = tempfile.mkdtemp() + # download sample tiff file + download_url( + url=TIFF_INFO["url"], + filepath=os.path.join(self.dataset_dir, TIFF_INFO["filename"]), + hash_type=TIFF_INFO["hash_type"], + hash_val=TIFF_INFO["hash_val"], + ) + # prepare csv files + for csv_file in ["training.csv", "validation.csv"]: + with open(os.path.join("models/pathology_tumor_detection", csv_file), "w", newline="") as save_file: + wr = csv.writer(save_file) + wr.writerow(TIFF_INFO["csv_info"]) + # 2 gpu training need 2 samples + wr.writerow(TIFF_INFO["csv_info"]) + + def tearDown(self): + shutil.rmtree(self.dataset_dir) + + @parameterized.expand([TEST_CASE_1]) + def test_train_mgpu_config(self, override): + override["dataset_dir"] = self.dataset_dir + bundle_root = override["bundle_root"] + train_file = os.path.join(bundle_root, "configs/train.json") + mgpu_train_file = os.path.join(bundle_root, "configs/multi_gpu_train.json") + output_path = os.path.join(bundle_root, "configs/train_override.json") + n_gpu = torch.cuda.device_count() + 
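+        # size the run to however many GPUs are visible; the helper below merges the single-
+        # and multi-GPU train configs, and check_config=True is assumed to also run the
+        # bundle's property checks before launching training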
export_config_and_run_mgpu_cmd( + config_file=[train_file, mgpu_train_file], + logging_file=os.path.join(bundle_root, "configs/logging.conf"), + meta_file=os.path.join(bundle_root, "configs/metadata.json"), + override_dict=override, + output_path=output_path, + ngpu=n_gpu, + check_config=True, + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/requirements-dev.txt b/requirements-dev.txt index ad4c057e..ab7497b5 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -27,3 +27,5 @@ monai>=1.2.0rc7 pillow!=8.3.0 # https://github.com/python-pillow/Pillow/issues/5571 itk>=5.2 scikit-learn +pandas +cucim==22.8.1; platform_system == "Linux" From 97a382ea98fba35806a99ebbd1064b0d85172128 Mon Sep 17 00:00:00 2001 From: Yiheng Wang Date: Wed, 21 Jun 2023 14:36:07 +0800 Subject: [PATCH 13/19] update metadata Signed-off-by: Yiheng Wang --- models/pathology_tumor_detection/configs/metadata.json | 7 +++++-- models/spleen_deepedit_annotation/configs/metadata.json | 3 ++- models/swin_unetr_btcv_segmentation/configs/metadata.json | 3 ++- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/models/pathology_tumor_detection/configs/metadata.json b/models/pathology_tumor_detection/configs/metadata.json index 320021d9..306c8b65 100644 --- a/models/pathology_tumor_detection/configs/metadata.json +++ b/models/pathology_tumor_detection/configs/metadata.json @@ -1,7 +1,8 @@ { "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_20220324.json", - "version": "0.5.6", + "version": "0.5.7", "changelog": { + "0.5.7": "update channel_def in metadata", "0.5.6": "fix the wrong GPU index issue of multi-node", "0.5.5": "modify mgpu logging level", "0.5.4": "retrain using an internal pretrained ResNet18", @@ -70,7 +71,9 @@ ], "is_patch_data": true, "channel_def": { - "0": "image" + "0": "R", + "1": "G", + "2": "B" } } }, diff --git a/models/spleen_deepedit_annotation/configs/metadata.json b/models/spleen_deepedit_annotation/configs/metadata.json index 2264b4c8..5485f5f5 100644 --- a/models/spleen_deepedit_annotation/configs/metadata.json +++ b/models/spleen_deepedit_annotation/configs/metadata.json @@ -1,7 +1,8 @@ { "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_20220324.json", - "version": "0.4.7", + "version": "0.4.8", "changelog": { + "0.4.8": "check unit test", "0.4.7": "fix the wrong GPU index issue of multi-node", "0.4.6": "update to use rc7 which solves dynunet issue", "0.4.5": "remove error dollar symbol in readme", diff --git a/models/swin_unetr_btcv_segmentation/configs/metadata.json b/models/swin_unetr_btcv_segmentation/configs/metadata.json index 5c47077d..4799d0a8 100644 --- a/models/swin_unetr_btcv_segmentation/configs/metadata.json +++ b/models/swin_unetr_btcv_segmentation/configs/metadata.json @@ -1,7 +1,8 @@ { "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_20220324.json", - "version": "0.5.0", + "version": "0.5.1", "changelog": { + "0.5.1": "check unit test", "0.5.0": "fix the wrong GPU index issue of multi-node", "0.4.9": "remove error dollar symbol in readme", "0.4.8": "add RAM usage with CacheDataset", From 25195abd358dd22f7e65fafe5c3acc3cf7f2c9d3 Mon Sep 17 00:00:00 2001 From: Yiheng Wang Date: Wed, 21 Jun 2023 14:55:40 +0800 Subject: [PATCH 14/19] add skimage Signed-off-by: Yiheng Wang --- requirements-dev.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements-dev.txt b/requirements-dev.txt index 
ab7497b5..98d35c70 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -29,3 +29,4 @@ itk>=5.2 scikit-learn pandas cucim==22.8.1; platform_system == "Linux" +scikit-image>=0.19.0 From 5a0aa2a98d258d8b0bb9607bb29759e9b873d2b7 Mon Sep 17 00:00:00 2001 From: Yiheng Wang Date: Wed, 21 Jun 2023 17:25:49 +0800 Subject: [PATCH 15/19] add runner order with more tests Signed-off-by: Yiheng Wang --- ci/unit_tests/runner.py | 12 +++++++++++- ci/unit_tests/test_swin_unetr_btcv_segmentation.py | 3 --- .../test_swin_unetr_btcv_segmentation_dist.py | 2 -- .../configs/metadata.json | 3 ++- .../configs/metadata.json | 3 ++- models/brats_mri_segmentation/configs/metadata.json | 3 ++- .../configs/metadata.json | 3 ++- 7 files changed, 19 insertions(+), 10 deletions(-) diff --git a/ci/unit_tests/runner.py b/ci/unit_tests/runner.py index 145df92b..628516a7 100644 --- a/ci/unit_tests/runner.py +++ b/ci/unit_tests/runner.py @@ -12,6 +12,7 @@ from __future__ import annotations import argparse +import importlib import inspect import os import sys @@ -107,7 +108,16 @@ def get_default_pattern(loader): test_file_name = f"test_{args.bundle}_dist" if args.dist is True else f"test_{args.bundle}" test_file = os.path.join(os.path.dirname(__file__), f"{test_file_name}.py") if os.path.exists(test_file): - tests = unittest.TestLoader().loadTestsFromNames([test_file_name]) + loader = unittest.TestLoader() + try: + # if the test module defines a "test_order" function, use it to sort the tests + sys.path.append(os.path.dirname(__file__)) + module = importlib.import_module(test_file_name) + test_order = getattr(module, "test_order") + loader.sortTestMethodsUsing = test_order + except: + pass + tests = loader.loadTestsFromNames([test_file_name]) test_runner = unittest.runner.TextTestRunner( resultclass=TimeLoggingTestResult, verbosity=args.verbosity, failfast=args.failfast ) diff --git a/ci/unit_tests/test_swin_unetr_btcv_segmentation.py b/ci/unit_tests/test_swin_unetr_btcv_segmentation.py index a744c856..cc87cdd9 100644 --- a/ci/unit_tests/test_swin_unetr_btcv_segmentation.py +++ b/ci/unit_tests/test_swin_unetr_btcv_segmentation.py @@ -25,8 +25,6 @@ "images": "$list(sorted(glob.glob(@dataset_dir + '/image_*.nii.gz')))", "labels": "$list(sorted(glob.glob(@dataset_dir + '/label_*.nii.gz')))", "val_interval": 1, - "network_def#img_size": 96, - "network_def#feature_size": 24, "train#random_transforms#0#num_samples": 1, "train#deterministic_transforms#3#pixdim": [1.0, 1.0, 1.0], "train#trainer#max_epochs": 1, @@ -40,7 +38,6 @@ { "bundle_root": "models/swin_unetr_btcv_segmentation", "datalist": "$list(sorted(glob.glob(@dataset_dir + '/image_*.nii.gz')))", - "network_def#feature_size": 24, "preprocessing#transforms#3#pixdim": [1.0, 1.0, 1.0], } ] diff --git a/ci/unit_tests/test_swin_unetr_btcv_segmentation_dist.py b/ci/unit_tests/test_swin_unetr_btcv_segmentation_dist.py index ca86072e..07eca394 100644 --- a/ci/unit_tests/test_swin_unetr_btcv_segmentation_dist.py +++ b/ci/unit_tests/test_swin_unetr_btcv_segmentation_dist.py @@ -26,8 +26,6 @@ "images": "$list(sorted(glob.glob(@dataset_dir + '/image_*.nii.gz')))", "labels": "$list(sorted(glob.glob(@dataset_dir + '/label_*.nii.gz')))", "val_interval": 1, - "network_def#img_size": 96, - "network_def#feature_size": 24, "train#random_transforms#0#num_samples": 1, "train#deterministic_transforms#3#pixdim": [1.0, 1.0, 1.0], "train#trainer#max_epochs": 1, diff --git a/models/brats_mri_axial_slices_generative_diffusion/configs/metadata.json
b/models/brats_mri_axial_slices_generative_diffusion/configs/metadata.json index 19be69af..b9f416bb 100644 --- a/models/brats_mri_axial_slices_generative_diffusion/configs/metadata.json +++ b/models/brats_mri_axial_slices_generative_diffusion/configs/metadata.json @@ -1,7 +1,8 @@ { "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_generator_ldm_20230507.json", - "version": "1.0.6", + "version": "1.0.7", "changelog": { + "1.0.7": "check unit test", "1.0.6": "update with new lr scheduler api in inference", "1.0.5": "fix the wrong GPU index issue of multi-node", "1.0.4": "update with new lr scheduler api", diff --git a/models/brats_mri_generative_diffusion/configs/metadata.json b/models/brats_mri_generative_diffusion/configs/metadata.json index c5a22d79..2dd6d119 100644 --- a/models/brats_mri_generative_diffusion/configs/metadata.json +++ b/models/brats_mri_generative_diffusion/configs/metadata.json @@ -1,7 +1,8 @@ { "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_generator_ldm_20230507.json", - "version": "1.0.6", + "version": "1.0.7", "changelog": { + "1.0.7": "check unit test", "1.0.6": "update with new lr scheduler api in inference", "1.0.5": "fix the wrong GPU index issue of multi-node", "1.0.4": "update with new lr scheduler api", diff --git a/models/brats_mri_segmentation/configs/metadata.json b/models/brats_mri_segmentation/configs/metadata.json index 2ca65cea..1242dd8a 100644 --- a/models/brats_mri_segmentation/configs/metadata.json +++ b/models/brats_mri_segmentation/configs/metadata.json @@ -1,7 +1,8 @@ { "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_20220324.json", - "version": "0.4.8", + "version": "0.4.9", "changelog": { + "0.4.9": "check unit test", "0.4.8": "fix the wrong GPU index issue of multi-node", "0.4.7": "enhance prepare datalist file", "0.4.6": "add dataset dir example", diff --git a/models/endoscopic_inbody_classification/configs/metadata.json b/models/endoscopic_inbody_classification/configs/metadata.json index 6332e6b8..ff8561fc 100644 --- a/models/endoscopic_inbody_classification/configs/metadata.json +++ b/models/endoscopic_inbody_classification/configs/metadata.json @@ -1,7 +1,8 @@ { "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_20220324.json", - "version": "0.4.4", + "version": "0.4.5", "changelog": { + "0.4.5": "check unit test", "0.4.4": "fix the wrong GPU index issue of multi-node", "0.4.3": "add dataset dir example", "0.4.2": "update ONNX-TensorRT descriptions", From 437993933dad2d8a65f1e485463ac410c5093d8a Mon Sep 17 00:00:00 2001 From: Yiheng Wang Date: Wed, 21 Jun 2023 22:24:23 +0800 Subject: [PATCH 16/19] use hasattr Signed-off-by: Yiheng Wang --- ci/unit_tests/runner.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/ci/unit_tests/runner.py b/ci/unit_tests/runner.py index 628516a7..294523ca 100644 --- a/ci/unit_tests/runner.py +++ b/ci/unit_tests/runner.py @@ -109,14 +109,12 @@ def get_default_pattern(loader): test_file = os.path.join(os.path.dirname(__file__), f"{test_file_name}.py") if os.path.exists(test_file): loader = unittest.TestLoader() - try: - # if the test module defines a "test_order" function, use it to sort the tests +
sys.path.append(os.path.dirname(__file__)) + module = importlib.import_module(test_file_name) + if hasattr(module, "test_order"): test_order = getattr(module, "test_order") loader.sortTestMethodsUsing = test_order - except: - pass tests = loader.loadTestsFromNames([test_file_name]) test_runner = unittest.runner.TextTestRunner( resultclass=TimeLoggingTestResult, verbosity=args.verbosity, failfast=args.failfast From 727da655e1780759fb457cfb8d88dd1c083305ff Mon Sep 17 00:00:00 2001 From: Yiheng Wang Date: Wed, 21 Jun 2023 22:38:54 +0800 Subject: [PATCH 17/19] remove getattr to fix flake8 Signed-off-by: Yiheng Wang --- ci/unit_tests/runner.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ci/unit_tests/runner.py b/ci/unit_tests/runner.py index 294523ca..e470c981 100644 --- a/ci/unit_tests/runner.py +++ b/ci/unit_tests/runner.py @@ -113,8 +113,7 @@ def get_default_pattern(loader): sys.path.append(os.path.dirname(__file__)) module = importlib.import_module(test_file_name) if hasattr(module, "test_order"): - test_order = getattr(module, "test_order") - loader.sortTestMethodsUsing = test_order + loader.sortTestMethodsUsing = module.test_order tests = loader.loadTestsFromNames([test_file_name]) test_runner = unittest.runner.TextTestRunner( resultclass=TimeLoggingTestResult, verbosity=args.verbosity, failfast=args.failfast From fcd5a7bde686e4e4137b6ec383aeb1eb46f7d940 Mon Sep 17 00:00:00 2001 From: Yiheng Wang Date: Sun, 25 Jun 2023 14:20:51 +0800 Subject: [PATCH 18/19] revert test changes Signed-off-by: Yiheng Wang --- .../configs/metadata.json | 3 +-- models/brats_mri_generative_diffusion/configs/metadata.json | 3 +-- models/brats_mri_segmentation/configs/metadata.json | 3 +-- models/endoscopic_inbody_classification/configs/metadata.json | 3 +-- models/spleen_deepedit_annotation/configs/metadata.json | 3 +-- models/swin_unetr_btcv_segmentation/configs/metadata.json | 3 +-- 6 files changed, 6 insertions(+), 12 deletions(-) diff --git a/models/brats_mri_axial_slices_generative_diffusion/configs/metadata.json b/models/brats_mri_axial_slices_generative_diffusion/configs/metadata.json index b9f416bb..19be69af 100644 --- a/models/brats_mri_axial_slices_generative_diffusion/configs/metadata.json +++ b/models/brats_mri_axial_slices_generative_diffusion/configs/metadata.json @@ -1,8 +1,7 @@ { "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_generator_ldm_20230507.json", - "version": "1.0.7", + "version": "1.0.6", "changelog": { - "1.0.7": "check unit test", "1.0.6": "update with new lr scheduler api in inference", "1.0.5": "fix the wrong GPU index issue of multi-node", "1.0.4": "update with new lr scheduler api", diff --git a/models/brats_mri_generative_diffusion/configs/metadata.json b/models/brats_mri_generative_diffusion/configs/metadata.json index 2dd6d119..c5a22d79 100644 --- a/models/brats_mri_generative_diffusion/configs/metadata.json +++ b/models/brats_mri_generative_diffusion/configs/metadata.json @@ -1,8 +1,7 @@ { "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_generator_ldm_20230507.json", - "version": "1.0.7", + "version": "1.0.6", "changelog": { - "1.0.7": "check unit test", "1.0.6": "update with new lr scheduler api in inference", "1.0.5": "fix the wrong GPU index issue of multi-node", "1.0.4": "update with new lr scheduler api", diff --git a/models/brats_mri_segmentation/configs/metadata.json b/models/brats_mri_segmentation/configs/metadata.json index 
1242dd8a..2ca65cea 100644 --- a/models/brats_mri_segmentation/configs/metadata.json +++ b/models/brats_mri_segmentation/configs/metadata.json @@ -1,8 +1,7 @@ { "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_20220324.json", - "version": "0.4.9", + "version": "0.4.8", "changelog": { - "0.4.9": "check unit test", "0.4.8": "fix the wrong GPU index issue of multi-node", "0.4.7": "enhance prepare datalist file", "0.4.6": "add dataset dir example", diff --git a/models/endoscopic_inbody_classification/configs/metadata.json b/models/endoscopic_inbody_classification/configs/metadata.json index ff8561fc..6332e6b8 100644 --- a/models/endoscopic_inbody_classification/configs/metadata.json +++ b/models/endoscopic_inbody_classification/configs/metadata.json @@ -1,8 +1,7 @@ { "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_20220324.json", - "version": "0.4.5", + "version": "0.4.4", "changelog": { - "0.4.5": "check unit test", "0.4.4": "fix the wrong GPU index issue of multi-node", "0.4.3": "add dataset dir example", "0.4.2": "update ONNX-TensorRT descriptions", diff --git a/models/spleen_deepedit_annotation/configs/metadata.json b/models/spleen_deepedit_annotation/configs/metadata.json index 5485f5f5..2264b4c8 100644 --- a/models/spleen_deepedit_annotation/configs/metadata.json +++ b/models/spleen_deepedit_annotation/configs/metadata.json @@ -1,8 +1,7 @@ { "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_20220324.json", - "version": "0.4.8", + "version": "0.4.7", "changelog": { - "0.4.8": "check unit test", "0.4.7": "fix the wrong GPU index issue of multi-node", "0.4.6": "update to use rc7 which solves dynunet issue", "0.4.5": "remove error dollar symbol in readme", diff --git a/models/swin_unetr_btcv_segmentation/configs/metadata.json b/models/swin_unetr_btcv_segmentation/configs/metadata.json index 4799d0a8..5c47077d 100644 --- a/models/swin_unetr_btcv_segmentation/configs/metadata.json +++ b/models/swin_unetr_btcv_segmentation/configs/metadata.json @@ -1,8 +1,7 @@ { "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_20220324.json", - "version": "0.5.1", + "version": "0.5.0", "changelog": { - "0.5.1": "check unit test", "0.5.0": "fix the wrong GPU index issue of multi-node", "0.4.9": "remove error dollar symbol in readme", "0.4.8": "add RAM usage with CacheDataset", From 5e34812c36b4ba8fa18cb207fb3a841d7bf93019 Mon Sep 17 00:00:00 2001 From: Yiheng Wang Date: Mon, 26 Jun 2023 15:02:15 +0800 Subject: [PATCH 19/19] enhance tests according to comments Signed-off-by: Yiheng Wang --- ...s_mri_axial_slices_generative_diffusion.py | 22 ++++------------ .../test_brats_mri_generative_diffusion.py | 22 ++++------------ ci/unit_tests/test_brats_mri_segmentation.py | 24 +++--------------- .../test_endoscopic_inbody_classification.py | 24 +++--------------- .../test_pancreas_ct_dints_segmentation.py | 25 +++---------------- .../test_pathology_tumor_detection.py | 17 +++---------- ci/unit_tests/test_spleen_ct_segmentation.py | 24 +++--------------- .../test_spleen_deepedit_annotation.py | 24 +++--------------- .../test_swin_unetr_btcv_segmentation.py | 24 +++--------------- ci/unit_tests/utils.py | 10 ++++++++ 10 files changed, 47 insertions(+), 169 deletions(-) diff --git a/ci/unit_tests/test_brats_mri_axial_slices_generative_diffusion.py b/ci/unit_tests/test_brats_mri_axial_slices_generative_diffusion.py 
index 68a09a55..b5ac192d 100644 --- a/ci/unit_tests/test_brats_mri_axial_slices_generative_diffusion.py +++ b/ci/unit_tests/test_brats_mri_axial_slices_generative_diffusion.py @@ -20,6 +20,7 @@ import numpy as np from monai.bundle import ConfigWorkflow from parameterized import parameterized +from utils import check_workflow TEST_CASE_1 = [ { @@ -98,9 +99,7 @@ def test_autoencoder_train(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - trainer.initialize() - trainer.run() - trainer.finalize() + check_workflow(trainer, check_properties=False) @parameterized.expand([TEST_CASE_2]) def test_autoencoder_infer(self, override): @@ -115,9 +114,7 @@ def test_autoencoder_infer(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - inferrer.initialize() - inferrer.run() - inferrer.finalize() + check_workflow(inferrer, check_properties=False) @parameterized.expand([TEST_CASE_1]) def test_diffusion_train(self, override): @@ -134,14 +131,7 @@ def test_diffusion_train(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - trainer.initialize() - # TODO: uncomment the following check after we have monai > 1.2.0 - # https://github.com/Project-MONAI/MONAI/issues/6602 - # check_result = trainer.check_properties() - # if check_result is not None and len(check_result) > 0: - # raise ValueError(f"check properties for overrided train config failed: {check_result}") - trainer.run() - trainer.finalize() + check_workflow(trainer, check_properties=False) @parameterized.expand([TEST_CASE_2]) def test_diffusion_infer(self, override): @@ -156,9 +146,7 @@ def test_diffusion_infer(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - inferrer.initialize() - inferrer.run() - inferrer.finalize() + check_workflow(inferrer, check_properties=False) if __name__ == "__main__": diff --git a/ci/unit_tests/test_brats_mri_generative_diffusion.py b/ci/unit_tests/test_brats_mri_generative_diffusion.py index 0fb1b9f6..3a3c460c 100644 --- a/ci/unit_tests/test_brats_mri_generative_diffusion.py +++ b/ci/unit_tests/test_brats_mri_generative_diffusion.py @@ -20,6 +20,7 @@ import numpy as np from monai.bundle import ConfigWorkflow from parameterized import parameterized +from utils import check_workflow TEST_CASE_1 = [ { @@ -112,9 +113,7 @@ def test_autoencoder_train(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - trainer.initialize() - trainer.run() - trainer.finalize() + check_workflow(trainer, check_properties=False) @parameterized.expand([TEST_CASE_3]) def test_autoencoder_infer(self, override): @@ -129,9 +128,7 @@ def test_autoencoder_infer(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - inferrer.initialize() - inferrer.run() - inferrer.finalize() + check_workflow(inferrer, check_properties=False) @parameterized.expand([TEST_CASE_2]) def test_diffusion_train(self, override): @@ -148,14 +145,7 @@ def test_diffusion_train(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - trainer.initialize() - # TODO: uncomment the following check after we have monai > 1.2.0 - # https://github.com/Project-MONAI/MONAI/issues/6602 - # check_result = trainer.check_properties() - # if check_result is not None and len(check_result) > 0: - # raise ValueError(f"check properties for overrided train config failed: {check_result}") - trainer.run() - 
trainer.finalize() + check_workflow(trainer, check_properties=False) @parameterized.expand([TEST_CASE_3]) def test_diffusion_infer(self, override): @@ -170,9 +160,7 @@ def test_diffusion_infer(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - inferrer.initialize() - inferrer.run() - inferrer.finalize() + check_workflow(inferrer, check_properties=False) if __name__ == "__main__": diff --git a/ci/unit_tests/test_brats_mri_segmentation.py b/ci/unit_tests/test_brats_mri_segmentation.py index 61287df1..33b9a84e 100644 --- a/ci/unit_tests/test_brats_mri_segmentation.py +++ b/ci/unit_tests/test_brats_mri_segmentation.py @@ -19,6 +19,7 @@ import numpy as np from monai.bundle import ConfigWorkflow from parameterized import parameterized +from utils import check_workflow TEST_CASE_1 = [ # train, evaluate { @@ -75,13 +76,7 @@ def test_train_eval_config(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - trainer.initialize() - # check required and optional properties - check_result = trainer.check_properties() - if check_result is not None and len(check_result) > 0: - raise ValueError(f"check properties for train config failed: {check_result}") - trainer.run() - trainer.finalize() + check_workflow(trainer, check_properties=True) validator = ConfigWorkflow( # override train.json, thus set the workflow to "train" rather than "eval" @@ -91,12 +86,7 @@ def test_train_eval_config(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - validator.initialize() - check_result = validator.check_properties() - if check_result is not None and len(check_result) > 0: - raise ValueError(f"check properties for overrided train config failed: {check_result}") - validator.run() - validator.finalize() + check_workflow(validator, check_properties=True) @parameterized.expand([TEST_CASE_2]) def test_infer_config(self, override): @@ -110,13 +100,7 @@ def test_infer_config(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - inferrer.initialize() - # check required and optional properties - check_result = inferrer.check_properties() - if check_result is not None and len(check_result) > 0: - raise ValueError(f"check properties for inference config failed: {check_result}") - inferrer.run() - inferrer.finalize() + check_workflow(inferrer, check_properties=True) if __name__ == "__main__": diff --git a/ci/unit_tests/test_endoscopic_inbody_classification.py b/ci/unit_tests/test_endoscopic_inbody_classification.py index f65f72cc..7313c641 100644 --- a/ci/unit_tests/test_endoscopic_inbody_classification.py +++ b/ci/unit_tests/test_endoscopic_inbody_classification.py @@ -19,6 +19,7 @@ from monai.bundle import ConfigWorkflow from monai.data import PILWriter from parameterized import parameterized +from utils import check_workflow TEST_CASE_1 = [ # train, evaluate { @@ -77,13 +78,7 @@ def test_train_eval_config(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - trainer.initialize() - # check required and optional properties - check_result = trainer.check_properties() - if check_result is not None and len(check_result) > 0: - raise ValueError(f"check properties for train config failed: {check_result}") - trainer.run() - trainer.finalize() + check_workflow(trainer, check_properties=True) validator = ConfigWorkflow( # override train.json, thus set the workflow to "train" rather than "eval" @@ -93,12 +88,7 @@ def 
test_train_eval_config(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - validator.initialize() - check_result = validator.check_properties() - if check_result is not None and len(check_result) > 0: - raise ValueError(f"check properties for overrided train config failed: {check_result}") - validator.run() - validator.finalize() + check_workflow(validator, check_properties=True) @parameterized.expand([TEST_CASE_2]) def test_infer_config(self, override): @@ -112,13 +102,7 @@ def test_infer_config(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - inferrer.initialize() - # check required and optional properties - check_result = inferrer.check_properties() - if check_result is not None and len(check_result) > 0: - raise ValueError(f"check properties for inference config failed: {check_result}") - inferrer.run() - inferrer.finalize() + check_workflow(inferrer, check_properties=True) if __name__ == "__main__": diff --git a/ci/unit_tests/test_pancreas_ct_dints_segmentation.py b/ci/unit_tests/test_pancreas_ct_dints_segmentation.py index 26b856f8..877eacc1 100644 --- a/ci/unit_tests/test_pancreas_ct_dints_segmentation.py +++ b/ci/unit_tests/test_pancreas_ct_dints_segmentation.py @@ -19,7 +19,7 @@ import numpy as np from monai.bundle import ConfigWorkflow from parameterized import parameterized -from utils import export_overrided_config +from utils import check_workflow, export_overrided_config TEST_CASE_1 = [ { @@ -133,13 +133,7 @@ def test_train(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - trainer.initialize() - # check required and optional properties - check_result = trainer.check_properties() - if check_result is not None and len(check_result) > 0: - raise ValueError(f"check properties for train config failed: {check_result}") - trainer.run() - trainer.finalize() + check_workflow(trainer, check_properties=True) @parameterized.expand([TEST_CASE_3]) def test_eval(self, override): @@ -158,12 +152,7 @@ def test_eval(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - validator.initialize() - check_result = validator.check_properties() - if check_result is not None and len(check_result) > 0: - raise ValueError(f"check properties for overrided train config failed: {check_result}") - validator.run() - validator.finalize() + check_workflow(validator, check_properties=True) @parameterized.expand([TEST_CASE_4]) def test_infer_config(self, override): @@ -178,13 +167,7 @@ def test_infer_config(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - inferrer.initialize() - # check required and optional properties - check_result = inferrer.check_properties() - if check_result is not None and len(check_result) > 0: - raise ValueError(f"check properties for inference config failed: {check_result}") - inferrer.run() - inferrer.finalize() + check_workflow(inferrer, check_properties=True) if __name__ == "__main__": diff --git a/ci/unit_tests/test_pathology_tumor_detection.py b/ci/unit_tests/test_pathology_tumor_detection.py index f3ceb739..86f11e6c 100644 --- a/ci/unit_tests/test_pathology_tumor_detection.py +++ b/ci/unit_tests/test_pathology_tumor_detection.py @@ -18,6 +18,7 @@ from monai.apps.utils import download_url from monai.bundle import ConfigWorkflow from parameterized import parameterized +from utils import check_workflow TEST_CASE_1 = [ # train { @@ -82,13 +83,7 @@ def 
test_train_infer_config(self, override, override_infer): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - trainer.initialize() - # check required and optional properties - check_result = trainer.check_properties() - if check_result is not None and len(check_result) > 0: - raise ValueError(f"check properties for train config failed: {check_result}") - trainer.run() - trainer.finalize() + check_workflow(trainer, check_properties=True) # run train and infer tests within a single function to avoid duplicating download override_infer["dataset_dir"] = self.dataset_dir @@ -100,13 +95,7 @@ def test_train_infer_config(self, override, override_infer): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override_infer, ) - inferrer.initialize() - # check required and optional properties - check_result = inferrer.check_properties() - if check_result is not None and len(check_result) > 0: - raise ValueError(f"check properties for inference config failed: {check_result}") - inferrer.run() - inferrer.finalize() + check_workflow(inferrer, check_properties=True) if __name__ == "__main__": diff --git a/ci/unit_tests/test_spleen_ct_segmentation.py b/ci/unit_tests/test_spleen_ct_segmentation.py index da39256a..a9bb2c9e 100644 --- a/ci/unit_tests/test_spleen_ct_segmentation.py +++ b/ci/unit_tests/test_spleen_ct_segmentation.py @@ -18,6 +18,7 @@ import numpy as np from monai.bundle import ConfigWorkflow from parameterized import parameterized +from utils import check_workflow TEST_CASE_1 = [ # train, evaluate { @@ -73,13 +74,7 @@ def test_train_eval_config(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - trainer.initialize() - # check required and optional properties - check_result = trainer.check_properties() - if check_result is not None and len(check_result) > 0: - raise ValueError(f"check properties for train config failed: {check_result}") - trainer.run() - trainer.finalize() + check_workflow(trainer, check_properties=True) validator = ConfigWorkflow( # override train.json, thus set the workflow to "train" rather than "eval" @@ -89,12 +84,7 @@ def test_train_eval_config(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - validator.initialize() - check_result = validator.check_properties() - if check_result is not None and len(check_result) > 0: - raise ValueError(f"check properties for overrided train config failed: {check_result}") - validator.run() - validator.finalize() + check_workflow(validator, check_properties=True) @parameterized.expand([TEST_CASE_2]) def test_infer_config(self, override): @@ -108,13 +98,7 @@ def test_infer_config(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - inferrer.initialize() - # check required and optional properties - check_result = inferrer.check_properties() - if check_result is not None and len(check_result) > 0: - raise ValueError(f"check properties for inference config failed: {check_result}") - inferrer.run() - inferrer.finalize() + check_workflow(inferrer, check_properties=True) if __name__ == "__main__": diff --git a/ci/unit_tests/test_spleen_deepedit_annotation.py b/ci/unit_tests/test_spleen_deepedit_annotation.py index e3852f50..7a0aba67 100644 --- a/ci/unit_tests/test_spleen_deepedit_annotation.py +++ b/ci/unit_tests/test_spleen_deepedit_annotation.py @@ -18,6 +18,7 @@ from monai.bundle import ConfigWorkflow from monai.data import ITKWriter from parameterized import parameterized +from 
utils import check_workflow TEST_CASE_1 = [ # train, evaluate { @@ -86,13 +87,7 @@ def test_train_config(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - trainer.initialize() - # check required and optional properties - check_result = trainer.check_properties() - if check_result is not None and len(check_result) > 0: - raise ValueError(f"check properties for train config failed: {check_result}") - trainer.run() - trainer.finalize() + check_workflow(trainer, check_properties=True) @parameterized.expand([TEST_CASE_1]) def test_eval_config(self, override): @@ -109,12 +104,7 @@ def test_eval_config(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - validator.initialize() - check_result = validator.check_properties() - if check_result is not None and len(check_result) > 0: - raise ValueError(f"check properties for overrided train config failed: {check_result}") - validator.run() - validator.finalize() + check_workflow(validator, check_properties=True) @parameterized.expand([TEST_CASE_2]) def test_infer_config(self, override): @@ -128,13 +118,7 @@ def test_infer_config(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - inferrer.initialize() - # check required and optional properties - check_result = inferrer.check_properties() - if check_result is not None and len(check_result) > 0: - raise ValueError(f"check properties for inference config failed: {check_result}") - inferrer.run() - inferrer.finalize() + check_workflow(inferrer, check_properties=True) if __name__ == "__main__": diff --git a/ci/unit_tests/test_swin_unetr_btcv_segmentation.py b/ci/unit_tests/test_swin_unetr_btcv_segmentation.py index cc87cdd9..193580cb 100644 --- a/ci/unit_tests/test_swin_unetr_btcv_segmentation.py +++ b/ci/unit_tests/test_swin_unetr_btcv_segmentation.py @@ -18,6 +18,7 @@ from monai.bundle import ConfigWorkflow from monai.data import ITKWriter from parameterized import parameterized +from utils import check_workflow TEST_CASE_1 = [ # train, evaluate { @@ -89,13 +90,7 @@ def test_train_config(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - trainer.initialize() - # check required and optional properties - check_result = trainer.check_properties() - if check_result is not None and len(check_result) > 0: - raise ValueError(f"check properties for train config failed: {check_result}") - trainer.run() - trainer.finalize() + check_workflow(trainer, check_properties=True) @parameterized.expand([TEST_CASE_1]) def test_eval_config(self, override): @@ -112,12 +107,7 @@ def test_eval_config(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - validator.initialize() - check_result = validator.check_properties() - if check_result is not None and len(check_result) > 0: - raise ValueError(f"check properties for overrided train config failed: {check_result}") - validator.run() - validator.finalize() + check_workflow(validator, check_properties=True) @parameterized.expand([TEST_CASE_2]) def test_infer_config(self, override): @@ -131,13 +121,7 @@ def test_infer_config(self, override): meta_file=os.path.join(bundle_root, "configs/metadata.json"), **override, ) - inferrer.initialize() - # check required and optional properties - check_result = inferrer.check_properties() - if check_result is not None and len(check_result) > 0: - raise ValueError(f"check properties for inference config failed: {check_result}") - 
inferrer.run() - inferrer.finalize() + check_workflow(inferrer, check_properties=True) if __name__ == "__main__": diff --git a/ci/unit_tests/utils.py b/ci/unit_tests/utils.py index e6bedf47..24752495 100644 --- a/ci/unit_tests/utils.py +++ b/ci/unit_tests/utils.py @@ -76,3 +76,13 @@ def export_config_and_run_mgpu_cmd( # ensure customized library can be loaded in subprocess env["PYTHONPATH"] = override_dict.get("bundle_root", ".") subprocess.check_call(cmd, env=env) + + +def check_workflow(workflow: ConfigWorkflow, check_properties: bool = False): + workflow.initialize() + if check_properties is True: + check_result = workflow.check_properties() + if check_result is not None and len(check_result) > 0: + raise ValueError(f"check properties for workflow failed: {check_result}") + workflow.run() + workflow.finalize()
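
Taken together, the series wires these pieces up as follows: runner.py imports the bundle's test module and, if that module defines a "test_order" function, installs it as the loader's sort key, while the new utils.check_workflow helper centralizes the initialize / check_properties / run / finalize sequence that each test previously spelled out by hand. Below is a minimal sketch of a new bundle test written against these hooks; the "models/example_bundle" paths are placeholders for illustration and are not part of this patch set.

import os
import unittest

from monai.bundle import ConfigWorkflow
from utils import check_workflow


def test_order(test_name1, test_name2):
    # picked up by runner.py via hasattr(module, "test_order"):
    # train-related tests run before inference-related ones
    def get_order(name):
        if "train" in name:
            return 1
        if "infer" in name:
            return 2
        return 3

    return get_order(test_name1) - get_order(test_name2)


class TestExampleBundle(unittest.TestCase):
    def test_train_config(self):
        bundle_root = "models/example_bundle"  # placeholder bundle path
        trainer = ConfigWorkflow(
            workflow="train",
            config_file=os.path.join(bundle_root, "configs/train.json"),
            logging_file=os.path.join(bundle_root, "configs/logging.conf"),
            meta_file=os.path.join(bundle_root, "configs/metadata.json"),
        )
        # initialize, verify required/optional properties, run, finalize
        check_workflow(trainer, check_properties=True)


if __name__ == "__main__":
    # mirror the runner's behavior when the module is executed directly
    loader = unittest.TestLoader()
    loader.sortTestMethodsUsing = test_order
    unittest.main(testLoader=loader)

Passing check_properties=False reproduces the behavior needed by the generative bundles above, where the property check is skipped until the upstream MONAI issue is resolved.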