diff --git a/CHANGELOG.md b/CHANGELOG.md
index 61fb2e54cf..6528f6e33e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,7 @@ CHANGELOG
 **ENHANCEMENTS**
 - Add new configuration section `Scheduling/SlurmSettings/ExternalSlurmdbd` to connect the cluster to an external Slurmdbd.
+- Add support for FSx Lustre as a shared storage type in us-iso-east-1.
 
 **BUG FIXES**
 - Fix DRA configuration to make `AutoExportPolicy` and `AutoImportPolicy` optional.
diff --git a/cli/src/pcluster/constants.py b/cli/src/pcluster/constants.py
index e3160a455b..f774d7a766 100644
--- a/cli/src/pcluster/constants.py
+++ b/cli/src/pcluster/constants.py
@@ -271,7 +271,7 @@ class Feature(Enum):
 UNSUPPORTED_FEATURES_MAP = {
     Feature.BATCH: ["ap-northeast-3", "us-iso"],
     Feature.DCV: ["us-iso"],
-    Feature.FSX_LUSTRE: ["us-iso"],
+    Feature.FSX_LUSTRE: ["us-isob"],
     Feature.FILE_CACHE: ["us-iso"],
     Feature.FSX_ONTAP: ["us-iso"],
     Feature.FSX_OPENZFS: ["us-iso"],
diff --git a/cli/tests/pcluster/test_utils.py b/cli/tests/pcluster/test_utils.py
index ad92f28559..165e9fc356 100644
--- a/cli/tests/pcluster/test_utils.py
+++ b/cli/tests/pcluster/test_utils.py
@@ -574,10 +574,8 @@ async def async_method(self, param):
         (Feature.DCV, "us-iso-west-1", False),
         (Feature.DCV, "us-isob-east-1", False),
         (Feature.DCV, "us-isoWHATEVER", False),
-        (Feature.FSX_LUSTRE, "us-iso-east-1", False),
-        (Feature.FSX_LUSTRE, "us-iso-west-1", False),
         (Feature.FSX_LUSTRE, "us-isob-east-1", False),
-        (Feature.FSX_LUSTRE, "us-isoWHATEVER", False),
+        (Feature.FSX_LUSTRE, "us-isobWHATEVER", False),
         (Feature.FSX_ONTAP, "us-iso-east-1", False),
         (Feature.FSX_ONTAP, "us-iso-west-1", False),
         (Feature.FSX_ONTAP, "us-isob-east-1", False),
diff --git a/tests/integration-tests/tests/ad_integration/test_ad_integration.py b/tests/integration-tests/tests/ad_integration/test_ad_integration.py
index e7a0ac2281..41c6f2e01b 100644
--- a/tests/integration-tests/tests/ad_integration/test_ad_integration.py
+++ b/tests/integration-tests/tests/ad_integration/test_ad_integration.py
@@ -27,7 +27,15 @@
 from remote_command_executor import RemoteCommandExecutor
 from retrying import retry
 from time_utils import seconds
-from utils import find_stack_by_tag, generate_stack_name, is_directory_supported, is_fsx_supported, random_alphanumeric
+from utils import (
+    find_stack_by_tag,
+    generate_stack_name,
+    is_directory_supported,
+    is_fsx_lustre_supported,
+    is_fsx_ontap_supported,
+    is_fsx_openzfs_supported,
+    random_alphanumeric,
+)
 
 from tests.ad_integration.cluster_user import ClusterUser
 from tests.common.utils import run_system_analyzer
@@ -513,7 +521,7 @@ def _check_ssh_key(user, ssh_generation_enabled, remote_command_executor, schedu
     ],
 )
 @pytest.mark.usefixtures("os", "instance")
-def test_ad_integration(
+def test_ad_integration(  # noqa: C901
     region,
     scheduler,
     scheduler_commands_factory,
@@ -541,9 +549,14 @@ def test_ad_integration(
     if not is_directory_supported(region, directory_type):
         pytest.skip(f"Skipping the test because directory type {directory_type} is not supported in region {region}")
 
-    fsx_supported = is_fsx_supported(region)
+    fsx_lustre_supported = is_fsx_lustre_supported(region)
+    fsx_ontap_supported = is_fsx_ontap_supported(region)
+    fsx_openzfs_supported = is_fsx_openzfs_supported(region)
+
     config_params = {
-        "fsx_supported": fsx_supported,
+        "fsx_lustre_supported": fsx_lustre_supported,
+        "fsx_ontap_supported": fsx_ontap_supported,
+        "fsx_openzfs_supported": fsx_openzfs_supported,
     }
     directory_stack_name = directory_factory(
         request.config.getoption("directory_stack_name"),
@@ -616,8 +629,12 @@ def test_ad_integration(
         )
     )
     shared_storage_mount_dirs = ["/shared", "/efs"]
-    if fsx_supported:
-        shared_storage_mount_dirs.extend(["/fsxlustre", "/fsxontap", "/fsxopenzfs"])
+    if fsx_lustre_supported:
+        shared_storage_mount_dirs.extend(["/fsxlustre"])
+    if fsx_ontap_supported:
+        shared_storage_mount_dirs.extend(["/fsxontap"])
+    if fsx_openzfs_supported:
+        shared_storage_mount_dirs.extend(["/fsxopenzfs"])
     _run_user_workloads(users, test_datadir, shared_storage_mount_dirs)
     logging.info("Testing pcluster update and generate ssh keys for user")
     _check_ssh_key_generation(users[0], remote_command_executor, scheduler_commands, False)
diff --git a/tests/integration-tests/tests/ad_integration/test_ad_integration/test_ad_integration/pcluster.config.update.yaml b/tests/integration-tests/tests/ad_integration/test_ad_integration/test_ad_integration/pcluster.config.update.yaml
index 6528a846d7..92732cdc25 100644
--- a/tests/integration-tests/tests/ad_integration/test_ad_integration/test_ad_integration/pcluster.config.update.yaml
+++ b/tests/integration-tests/tests/ad_integration/test_ad_integration/test_ad_integration/pcluster.config.update.yaml
@@ -33,17 +33,21 @@ SharedStorage:
   - MountDir: /efs
     Name: efs
     StorageType: Efs
-  {% if fsx_supported %}
+  {% if fsx_lustre_supported %}
   - MountDir: /fsxlustre
     Name: fsx
     StorageType: FsxLustre
     FsxLustreSettings:
       StorageCapacity: 2400
+  {% endif %}
+  {% if fsx_openzfs_supported %}
   - MountDir: /fsxopenzfs
     Name: existingopenzfs
     StorageType: FsxOpenZfs
     FsxOpenZfsSettings:
       VolumeId: {{ fsx_open_zfs_volume_id }}
+  {% endif %}
+  {% if fsx_ontap_supported %}
   - MountDir: /fsxontap
     Name: existingontap
     StorageType: FsxOntap
diff --git a/tests/integration-tests/tests/ad_integration/test_ad_integration/test_ad_integration/pcluster.config.update2.yaml b/tests/integration-tests/tests/ad_integration/test_ad_integration/test_ad_integration/pcluster.config.update2.yaml
index a10aa0dd3e..4287588cb6 100644
--- a/tests/integration-tests/tests/ad_integration/test_ad_integration/test_ad_integration/pcluster.config.update2.yaml
+++ b/tests/integration-tests/tests/ad_integration/test_ad_integration/test_ad_integration/pcluster.config.update2.yaml
@@ -33,17 +33,21 @@ SharedStorage:
   - MountDir: /efs
     Name: efs
     StorageType: Efs
-  {% if fsx_supported %}
+  {% if fsx_lustre_supported %}
   - MountDir: /fsxlustre
     Name: fsx
     StorageType: FsxLustre
     FsxLustreSettings:
       StorageCapacity: 2400
+  {% endif %}
+  {% if fsx_openzfs_supported %}
   - MountDir: /fsxopenzfs
     Name: existingopenzfs
     StorageType: FsxOpenZfs
     FsxOpenZfsSettings:
       VolumeId: {{ fsx_open_zfs_volume_id }}
+  {% endif %}
+  {% if fsx_ontap_supported %}
   - MountDir: /fsxontap
     Name: existingontap
     StorageType: FsxOntap
diff --git a/tests/integration-tests/tests/ad_integration/test_ad_integration/test_ad_integration/pcluster.config.yaml b/tests/integration-tests/tests/ad_integration/test_ad_integration/test_ad_integration/pcluster.config.yaml
index 2fa10d1dc5..4be433d7bf 100644
--- a/tests/integration-tests/tests/ad_integration/test_ad_integration/test_ad_integration/pcluster.config.yaml
+++ b/tests/integration-tests/tests/ad_integration/test_ad_integration/test_ad_integration/pcluster.config.yaml
@@ -33,17 +33,21 @@ SharedStorage:
   - MountDir: /efs
     Name: efs
     StorageType: Efs
-  {% if fsx_supported %}
+  {% if fsx_lustre_supported %}
   - MountDir: /fsxlustre
     Name: fsx
     StorageType: FsxLustre
     FsxLustreSettings:
       StorageCapacity: 2400
+  {% endif %}
+  {% if fsx_openzfs_supported %}
   - MountDir: /fsxopenzfs
     Name: existingopenzfs
     StorageType: FsxOpenZfs
     FsxOpenZfsSettings:
       VolumeId: {{ fsx_open_zfs_volume_id }}
+  {% endif %}
+  {% if fsx_ontap_supported %}
   - MountDir: /fsxontap
     Name: existingontap
     StorageType: FsxOntap
diff --git a/tests/integration-tests/tests/ad_integration/test_ad_integration/test_ad_integration_on_login_nodes/pcluster.config.yaml b/tests/integration-tests/tests/ad_integration/test_ad_integration/test_ad_integration_on_login_nodes/pcluster.config.yaml
index f6bb9de49f..9a442da6c6 100644
--- a/tests/integration-tests/tests/ad_integration/test_ad_integration/test_ad_integration_on_login_nodes/pcluster.config.yaml
+++ b/tests/integration-tests/tests/ad_integration/test_ad_integration/test_ad_integration_on_login_nodes/pcluster.config.yaml
@@ -43,17 +43,21 @@ SharedStorage:
   - MountDir: /efs
     Name: efs
     StorageType: Efs
-  {% if fsx_supported %}
+  {% if fsx_lustre_supported %}
   - MountDir: /fsxlustre
     Name: fsx
     StorageType: FsxLustre
     FsxLustreSettings:
       StorageCapacity: 2400
+  {% endif %}
+  {% if fsx_openzfs_supported %}
   - MountDir: /fsxopenzfs
     Name: existingopenzfs
     StorageType: FsxOpenZfs
     FsxOpenZfsSettings:
       VolumeId: {{ fsx_open_zfs_volume_id }}
+  {% endif %}
+  {% if fsx_ontap_supported %}
   - MountDir: /fsxontap
     Name: existingontap
     StorageType: FsxOntap
diff --git a/tests/integration-tests/tests/storage/test_shared_home.py b/tests/integration-tests/tests/storage/test_shared_home.py
index 31969fc45e..3b5d721453 100644
--- a/tests/integration-tests/tests/storage/test_shared_home.py
+++ b/tests/integration-tests/tests/storage/test_shared_home.py
@@ -15,7 +15,7 @@
 import pytest
 from assertpy import assert_that
 from remote_command_executor import RemoteCommandExecutor
-from utils import is_fsx_supported
+from utils import is_fsx_lustre_supported, is_fsx_ontap_supported, is_fsx_openzfs_supported
 
 from tests.storage.storage_common import (
     check_fsx,
@@ -57,8 +57,13 @@ def test_shared_home(
     """Verify the shared /home storage fs is available when set"""
     mount_dir = "/home"
     bucket_name = None
-    if is_fsx_supported(region) or storage_type in ["Efs", "Ebs"]:
-        if storage_type == "FsxOpenZfs":
+
+    fsx_supported = (
+        is_fsx_lustre_supported(region) or is_fsx_ontap_supported(region) or is_fsx_openzfs_supported(region)
+    )
+
+    if fsx_supported or storage_type in ["Efs", "Ebs"]:
+        if is_fsx_openzfs_supported(region) and storage_type == "FsxOpenZfs":
             fsx_open_zfs_root_volume_id = create_fsx_open_zfs(fsx_factory, num=1)[0]
             fsx_open_zfs_volume_id = open_zfs_volume_factory(fsx_open_zfs_root_volume_id, num_volumes=1)[0]
             cluster_config = pcluster_config_reader(
@@ -67,7 +72,7 @@ def test_shared_home(
                 volume_id=fsx_open_zfs_volume_id,
                 shared_storage_type=shared_storage_type,
             )
-        elif storage_type == "FsxOntap":
+        elif is_fsx_ontap_supported(region) and storage_type == "FsxOntap":
             fsx_ontap_fs_id = create_fsx_ontap(fsx_factory, num=1)[0]
             fsx_on_tap_volume_id = svm_factory(fsx_ontap_fs_id, num_volumes=1)[0]
             cluster_config = pcluster_config_reader(
diff --git a/tests/integration-tests/tests/update/test_update.py b/tests/integration-tests/tests/update/test_update.py
index b3bb203610..195056f603 100644
--- a/tests/integration-tests/tests/update/test_update.py
+++ b/tests/integration-tests/tests/update/test_update.py
@@ -15,7 +15,6 @@
 import re
 import time
 from collections import defaultdict
-from datetime import datetime
 
 import boto3
 import pytest
@@ -33,8 +32,10 @@
     generate_stack_name,
     get_arn_partition,
     get_root_volume_id,
-    is_fsx_supported,
-    random_alphanumeric,
+    is_filecache_supported,
+    is_fsx_lustre_supported,
+    is_fsx_ontap_supported,
+    is_fsx_openzfs_supported,
     retrieve_cfn_resources,
     wait_for_computefleet_changed,
 )
@@ -1028,7 +1029,11 @@ def create_stack(vpc_stack, bucket_name, file_cache_path):
     vpc = vpc_stack.cfn_outputs["VpcId"]
     import_path = "s3://{0}".format(bucket_name)
     export_path = "s3://{0}/export_dir".format(bucket_name)
-    fsx_supported = is_fsx_supported(region)
+
+    fsx_lustre_supported = is_fsx_lustre_supported(region)
+    fsx_ontap_supported = is_fsx_ontap_supported(region)
+    fsx_openzfs_supported = is_fsx_openzfs_supported(region)
+    filecache_supported = is_filecache_supported(region)
     params = [
         # Networking
@@ -1042,15 +1047,15 @@ def create_stack(vpc_stack, bucket_name, file_cache_path):
         # EFS
         {"ParameterKey": "CreateEfs", "ParameterValue": "true"},
         # FSxLustre
-        {"ParameterKey": "CreateFsxLustre", "ParameterValue": str(fsx_supported).lower()},
+        {"ParameterKey": "CreateFsxLustre", "ParameterValue": str(fsx_lustre_supported).lower()},
         {"ParameterKey": "FsxLustreImportPath", "ParameterValue": import_path},
         {"ParameterKey": "FsxLustreExportPath", "ParameterValue": export_path},
         # FSxOntap
-        {"ParameterKey": "CreateFsxOntap", "ParameterValue": str(fsx_supported).lower()},
+        {"ParameterKey": "CreateFsxOntap", "ParameterValue": str(fsx_ontap_supported).lower()},
         # FSxOpenZfs
-        {"ParameterKey": "CreateFsxOpenZfs", "ParameterValue": str(fsx_supported).lower()},
+        {"ParameterKey": "CreateFsxOpenZfs", "ParameterValue": str(fsx_openzfs_supported).lower()},
         # FileCache
-        {"ParameterKey": "CreateFileCache", "ParameterValue": str(fsx_supported).lower()},
+        {"ParameterKey": "CreateFileCache", "ParameterValue": str(filecache_supported).lower()},
         {"ParameterKey": "FileCachePath", "ParameterValue": file_cache_path},
         {"ParameterKey": "FileCacheS3BucketName", "ParameterValue": bucket_name},
     ]
@@ -1071,6 +1076,10 @@ def create_stack(vpc_stack, bucket_name, file_cache_path):
     yield create_stack
 
 
+def remove_none_items(items: list):
+    return [item for item in items if item is not None]
+
+
 @pytest.mark.usefixtures("instance")
 def test_dynamic_file_systems_update(
     region,
@@ -1095,30 +1104,21 @@ def test_dynamic_file_systems_update(
     )
     cluster = clusters_factory(init_config_file, wait=False)
 
-    fsx_supported = is_fsx_supported(region)
+    fsx_lustre_supported = is_fsx_lustre_supported(region)
+    fsx_ontap_supported = is_fsx_ontap_supported(region)
+    fsx_openzfs_supported = is_fsx_openzfs_supported(region)
+    filecache_supported = is_filecache_supported(region)
+
     existing_ebs_mount_dir = "/existing_ebs_mount_dir"
     existing_efs_mount_dir = "/existing_efs_mount_dir"
-    existing_fsx_lustre_mount_dir = "/existing_fsx_lustre_mount_dir"
-    existing_fsx_ontap_mount_dir = "/existing_fsx_ontap_mount_dir"
-    existing_fsx_open_zfs_mount_dir = "/existing_fsx_open_zfs_mount_dir"
-    existing_file_cache_mount_dir = "/existing_file_cache_mount_dir"
+    existing_fsx_lustre_mount_dir = "/existing_fsx_lustre_mount_dir" if fsx_lustre_supported else None
+    existing_fsx_ontap_mount_dir = "/existing_fsx_ontap_mount_dir" if fsx_ontap_supported else None
+    existing_fsx_open_zfs_mount_dir = "/existing_fsx_open_zfs_mount_dir" if fsx_openzfs_supported else None
+    existing_file_cache_mount_dir = "/existing_file_cache_mount_dir" if filecache_supported else None
     new_ebs_mount_dir = "/new_ebs_mount_dir"
     new_raid_mount_dir = "/new_raid_dir"
     new_efs_mount_dir = "/new_efs_mount_dir"
-    new_lustre_mount_dir = "/new_lustre_mount_dir"
-    ebs_mount_dirs = [new_ebs_mount_dir, existing_ebs_mount_dir]
-    efs_mount_dirs = [existing_efs_mount_dir, new_efs_mount_dir]
-    fsx_mount_dirs = (
-        [
-            new_lustre_mount_dir,
-            existing_fsx_lustre_mount_dir,
-            existing_fsx_open_zfs_mount_dir,
-            existing_fsx_ontap_mount_dir,
-            existing_file_cache_mount_dir,
-        ]
-        if fsx_supported
-        else []
-    )
+    new_lustre_mount_dir = "/new_lustre_mount_dir" if fsx_lustre_supported else None
 
     bucket_name = s3_bucket_factory()
     bucket = boto3.resource("s3", region_name=region).Bucket(bucket_name)
@@ -1178,7 +1178,6 @@ def test_dynamic_file_systems_update(
         fsx_open_zfs_volume_id=existing_fsx_open_zfs_volume_id,
         existing_file_cache_id=existing_file_cache_id,
         bucket_name=bucket_name,
-        fsx_supported=fsx_supported,
         queue_update_strategy="DRAIN",
         login_nodes_count=1,
     )
@@ -1195,17 +1194,16 @@ def test_dynamic_file_systems_update(
     scheduler_commands.assert_job_state(queue1_job_id, "RUNNING")
 
     # Check that the mounted storage is visible on all cluster nodes right after the update.
-    all_mount_dirs_update_1 = (
-        [existing_efs_mount_dir]
-        + [
+    efs_mount_dirs = [existing_efs_mount_dir]
+    fsx_mount_dirs = remove_none_items(
+        [
             existing_fsx_lustre_mount_dir,
-            existing_fsx_ontap_mount_dir,
             existing_fsx_open_zfs_mount_dir,
+            existing_fsx_ontap_mount_dir,
             existing_file_cache_mount_dir,
         ]
-        if fsx_supported
-        else []
     )
+    all_mount_dirs_update_1 = efs_mount_dirs + fsx_mount_dirs
     _test_shared_storages_mount_on_headnode(
         remote_command_executor,
         cluster,
@@ -1214,17 +1212,8 @@ def test_dynamic_file_systems_update(
         scheduler_commands_factory,
         ebs_mount_dirs=[],
         new_raid_mount_dir=[],
-        efs_mount_dirs=[existing_efs_mount_dir],
-        fsx_mount_dirs=(
-            [
-                existing_fsx_lustre_mount_dir,
-                existing_fsx_open_zfs_mount_dir,
-                existing_fsx_ontap_mount_dir,
-                existing_file_cache_mount_dir,
-            ]
-            if fsx_supported
-            else []
-        ),
+        efs_mount_dirs=efs_mount_dirs,
+        fsx_mount_dirs=fsx_mount_dirs,
         file_cache_path=file_cache_path,
     )
     for mount_dir in all_mount_dirs_update_1:
@@ -1248,7 +1237,6 @@ def test_dynamic_file_systems_update(
         fsx_open_zfs_volume_id=existing_fsx_open_zfs_volume_id,
         existing_file_cache_id=existing_file_cache_id,
         bucket_name=bucket_name,
-        fsx_supported=fsx_supported,
         queue_update_strategy="DRAIN",
         login_nodes_count=0,
     )
@@ -1291,7 +1279,6 @@ def test_dynamic_file_systems_update(
         new_lustre_deletion_policy="Retain",
         new_efs_mount_dir=new_efs_mount_dir,
         new_efs_deletion_policy="Retain",
-        fsx_supported=fsx_supported,
         queue_update_strategy="DRAIN",
         login_nodes_count=0,
     )
@@ -1300,24 +1287,31 @@ def test_dynamic_file_systems_update(
 
     # Retrieve created shared storage ids to remove them at teardown
     logging.info("Retrieve managed storage ids and mark them for deletion on teardown")
+    ebs_mount_dirs = [new_ebs_mount_dir, existing_ebs_mount_dir]
+    fsx_mount_dirs = remove_none_items(
+        [
+            new_lustre_mount_dir,
+            existing_fsx_lustre_mount_dir,
+            existing_fsx_open_zfs_mount_dir,
+            existing_fsx_ontap_mount_dir,
+            existing_file_cache_mount_dir,
+        ]
+    )
     existing_ebs_ids = [existing_ebs_volume_id]
     existing_efs_ids = [existing_efs_id]
-    existing_fsx_ids = (
+    existing_fsx_ids = remove_none_items(
         [
             existing_fsx_lustre_fs_id,
             existing_fsx_ontap_volume_id,
             existing_fsx_open_zfs_volume_id,
             existing_file_cache_id,
         ]
-        if fsx_supported
-        else []
     )
     managed_storage_ids = _retrieve_managed_storage_ids(
         cluster,
         existing_ebs_ids,
         existing_efs_ids,
         existing_fsx_ids,
-        fsx_supported,
     )
     for storage_type in managed_storage_ids:
         for storage_id in managed_storage_ids[storage_type]["ids"]:
@@ -1365,7 +1359,7 @@ def test_dynamic_file_systems_update(
         file_cache_path,
     )
 
-    all_mount_dirs_update_2 = (
+    all_mount_dirs_update_2 = remove_none_items(
         [
             new_ebs_mount_dir,
             new_raid_mount_dir,
@@ -1373,27 +1367,21 @@ def test_dynamic_file_systems_update(
             new_lustre_mount_dir,
             existing_ebs_mount_dir,
             existing_efs_mount_dir,
-        ]
-        + [
             existing_fsx_lustre_mount_dir,
             existing_fsx_ontap_mount_dir,
             existing_fsx_open_zfs_mount_dir,
             existing_file_cache_mount_dir,
         ]
-        if fsx_supported
-        else []
     )
 
-    mount_dirs_requiring_replacement = (
+    mount_dirs_requiring_replacement = remove_none_items(
         [
             new_ebs_mount_dir,
             new_raid_mount_dir,
             new_efs_mount_dir,
             existing_ebs_mount_dir,
+            new_lustre_mount_dir,
         ]
-        + [new_lustre_mount_dir]
-        if fsx_supported
-        else []
     )
 
     logging.info("Checking that previously mounted storage is visible on all compute nodes")
@@ -1490,11 +1478,10 @@ def test_dynamic_file_systems_update_rollback(
     bucket.upload_file(str(test_datadir / "s3_test_file"), "s3_test_file")
     file_cache_path = "/file-cache-path/"
 
-    fsx_supported = is_fsx_supported(region)
     existing_ebs_mount_dir = "/existing_ebs_mount_dir"
     new_ebs_mount_dir = "/new_ebs_mount_dir"
     new_efs_mount_dir = "/new_efs_mount_dir"
-    new_lustre_mount_dir = "/new_lustre_mount_dir"
+    new_lustre_mount_dir = "/new_lustre_mount_dir" if is_fsx_lustre_supported(region) else None
 
     (
         existing_ebs_volume_id,
@@ -1529,7 +1516,6 @@ def test_dynamic_file_systems_update_rollback(
         pcluster_config_reader,
         scheduler_commands,
         region,
-        fsx_supported,
     )
 
 
@@ -1640,7 +1626,7 @@ def test_dynamic_file_systems_update_data_loss(
     assert_file_exists(cluster, file_path)
 
 
-def _retrieve_managed_storage_ids(cluster, existing_ebs_ids, existing_efs_ids, existing_fsx_ids, fsx_supported):
+def _retrieve_managed_storage_ids(cluster, existing_ebs_ids, existing_efs_ids, existing_fsx_ids):
     """Retrieve all the shared storages part of the cluster and exclude provided existing storage ids."""
     managed_ebs_noraid_volume_ids = [
         id for id in cluster.cfn_outputs["EBSIds"].split(",") if id not in existing_ebs_ids
@@ -1648,9 +1634,9 @@ def _retrieve_managed_storage_ids(cluster, existing_ebs_ids, existing_efs_ids, e
     managed_ebs_raid_volume_ids = [id for id in cluster.cfn_outputs["RAIDIds"].split(",") if id not in existing_ebs_ids]
     managed_ebs_volume_ids = managed_ebs_noraid_volume_ids + managed_ebs_raid_volume_ids
     managed_efs_filesystem_ids = [id for id in cluster.cfn_outputs["EFSIds"].split(",") if id not in existing_efs_ids]
-    managed_fsx_filesystem_ids = (
-        [id for id in cluster.cfn_outputs["FSXIds"].split(",") if id not in existing_fsx_ids] if fsx_supported else []
-    )
+    managed_fsx_filesystem_ids = [
+        id for id in cluster.cfn_outputs.get("FSXIds", "").split(",") if id not in existing_fsx_ids
+    ]
     managed_storage = {
         StorageType.STORAGE_EBS: dict(ids=managed_ebs_volume_ids, expected_states=["available"]),
         StorageType.STORAGE_EFS: dict(ids=managed_efs_filesystem_ids, expected_states=["available"]),
@@ -1672,12 +1658,12 @@ def _create_shared_storages_resources(
     storage_stack = external_shared_storage_stack(vpc_stack, bucket_name, file_cache_path)
 
     return (
-        storage_stack.cfn_outputs["EbsId"],
-        storage_stack.cfn_outputs["EfsId"],
-        storage_stack.cfn_outputs["FsxLustreFsId"],
-        storage_stack.cfn_outputs["FsxOntapVolumeId"],
-        storage_stack.cfn_outputs["FsxOpenZfsVolumeId"],
-        storage_stack.cfn_outputs["FileCacheId"],
+        storage_stack.cfn_outputs.get("EbsId"),
+        storage_stack.cfn_outputs.get("EfsId"),
+        storage_stack.cfn_outputs.get("FsxLustreFsId"),
+        storage_stack.cfn_outputs.get("FsxOntapVolumeId"),
+        storage_stack.cfn_outputs.get("FsxOpenZfsVolumeId"),
+        storage_stack.cfn_outputs.get("FileCacheId"),
     )
 
 
@@ -1774,7 +1760,6 @@ def _test_shared_storage_rollback(
     pcluster_config_reader,
     scheduler_commands,
     region,
-    fsx_supported,
 ):
     # update cluster with adding non-existing ebs and skip validator
     problematic_volume_id = "vol-00000000000000000"
@@ -1789,7 +1774,6 @@ def _test_shared_storage_rollback(
         new_ebs_mount_dir=new_ebs_mount_dir,
         new_lustre_mount_dir=new_lustre_mount_dir,
         new_efs_mount_dir=new_efs_mount_dir,
-        fsx_supported=fsx_supported,
        queue_update_strategy="TERMINATE",
         login_nodes_count=0,
     )
@@ -1811,7 +1795,7 @@ def _test_shared_storage_rollback(
 
     # Check shared storages are not on headnode
     ebs_mount_dirs = [existing_ebs_mount_dir, new_ebs_mount_dir, problematic_ebs_mount_dir]
-    fs_mount_dirs = [new_lustre_mount_dir, new_efs_mount_dir] if fsx_supported else [new_efs_mount_dir]
+    fs_mount_dirs = list(filter(lambda item: item is not None, [new_lustre_mount_dir, new_efs_mount_dir]))
     _test_ebs_not_mounted(remote_command_executor, ebs_mount_dirs)
     _test_directory_not_mounted(remote_command_executor, fs_mount_dirs)
@@ -1837,7 +1821,7 @@ def _test_shared_storage_rollback(
     ]
     managed_fsx = (
         [fs.get("fsx_fs_id") for fs in failed_share_storages.get("fsx") if fs.get("mount_dir") == new_lustre_mount_dir]
-        if fsx_supported
+        if new_lustre_mount_dir
         else []
     )
@@ -1850,7 +1834,7 @@ def _test_shared_storage_rollback(
     with pytest.raises(ClientError, match="FileSystemNotFound"):
         boto3.client("efs", region).describe_file_systems(FileSystemId=managed_efs[0])
     # assert the managed FSX is clean up
-    if fsx_supported:
+    if new_lustre_mount_dir:
         logging.info("Checking managed FSX is deleted after stack rollback")
         with pytest.raises(ClientError, match="FileSystemNotFound"):
             boto3.client("fsx", region).describe_file_systems(FileSystemIds=managed_fsx)
diff --git a/tests/integration-tests/tests/update/test_update/test_dynamic_file_systems_update/pcluster.config.update.yaml b/tests/integration-tests/tests/update/test_update/test_dynamic_file_systems_update/pcluster.config.update.yaml
index 25b6bcbf5e..6b65f6c3c8 100644
--- a/tests/integration-tests/tests/update/test_update/test_dynamic_file_systems_update/pcluster.config.update.yaml
+++ b/tests/integration-tests/tests/update/test_update/test_dynamic_file_systems_update/pcluster.config.update.yaml
@@ -100,7 +100,6 @@ SharedStorage:
     EfsSettings:
       FileSystemId: {{ existing_efs_id }}
 {% endif %}
-{% if fsx_supported %}
 {% if new_lustre_mount_dir %}
   - MountDir: {{ new_lustre_mount_dir }}
     Name: manage-fsx
@@ -141,5 +140,4 @@ SharedStorage:
     FileCacheSettings:
       FileCacheId: {{ existing_file_cache_id }}
 {% endif %}
-{% endif %}
diff --git a/tests/integration-tests/tests/update/test_update/test_dynamic_file_systems_update_rollback/pcluster.config.update_rollback.yaml b/tests/integration-tests/tests/update/test_update/test_dynamic_file_systems_update_rollback/pcluster.config.update_rollback.yaml
index 8a968d3f48..1de301f274 100644
--- a/tests/integration-tests/tests/update/test_update/test_dynamic_file_systems_update_rollback/pcluster.config.update_rollback.yaml
+++ b/tests/integration-tests/tests/update/test_update/test_dynamic_file_systems_update_rollback/pcluster.config.update_rollback.yaml
@@ -78,7 +78,7 @@ SharedStorage:
       PerformanceMode: maxIO
       ThroughputMode: provisioned
       ProvisionedThroughput: 200
-  {% if fsx_supported %}
+  {% if new_lustre_mount_dir %}
   - MountDir: {{ new_lustre_mount_dir }}
     Name: {{ new_lustre_mount_dir }}
     StorageType: FsxLustre
diff --git a/tests/integration-tests/utils.py b/tests/integration-tests/utils.py
index 4d198770bd..f955d19608 100644
--- a/tests/integration-tests/utils.py
+++ b/tests/integration-tests/utils.py
@@ -798,7 +798,19 @@ def is_dcv_supported(region: str):
     return "us-iso" not in region
 
 
-def is_fsx_supported(region: str):
+def is_fsx_lustre_supported(region: str):
+    return "us-isob" not in region
+
+
+def is_fsx_ontap_supported(region: str):
+    return "us-iso" not in region
+
+
+def is_fsx_openzfs_supported(region: str):
+    return "us-iso" not in region
+
+
+def is_filecache_supported(region: str):
     return "us-iso" not in region
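
The helpers above encode the region gating as plain substring checks: every "us-isob" region name also contains "us-iso", so FSx for Lustre is now allowed everywhere except the us-isob partition, while FSx for ONTAP, FSx for OpenZFS and File Cache remain blocked in all us-iso* partitions. Below is a minimal, self-contained sketch of how the per-feature flags compose; the is_*_supported helpers and remove_none_items mirror this change, while mount_dirs_for_region is a hypothetical illustration added only for clarity and is not part of the patch.

def is_fsx_lustre_supported(region: str) -> bool:
    # FSx for Lustre is unsupported only in the us-isob partition.
    return "us-isob" not in region


def is_fsx_ontap_supported(region: str) -> bool:
    return "us-iso" not in region


def is_fsx_openzfs_supported(region: str) -> bool:
    return "us-iso" not in region


def remove_none_items(items: list):
    # Same helper as in test_update.py: drop entries for unsupported features.
    return [item for item in items if item is not None]


def mount_dirs_for_region(region: str) -> list:
    # Hypothetical example: build the shared-storage mount list the way the
    # updated tests do, using None placeholders for unsupported storage types.
    return remove_none_items(
        [
            "/shared",
            "/efs",
            "/fsxlustre" if is_fsx_lustre_supported(region) else None,
            "/fsxontap" if is_fsx_ontap_supported(region) else None,
            "/fsxopenzfs" if is_fsx_openzfs_supported(region) else None,
        ]
    )


if __name__ == "__main__":
    # us-isob regions get no FSx mounts, us-iso regions now get Lustre only,
    # and commercial regions get all three.
    assert mount_dirs_for_region("us-isob-east-1") == ["/shared", "/efs"]
    assert mount_dirs_for_region("us-iso-east-1") == ["/shared", "/efs", "/fsxlustre"]
    assert mount_dirs_for_region("us-east-1") == ["/shared", "/efs", "/fsxlustre", "/fsxontap", "/fsxopenzfs"]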