diff --git a/src/aks-preview/HISTORY.rst b/src/aks-preview/HISTORY.rst index f14bafd8566..fae6d0d3784 100644 --- a/src/aks-preview/HISTORY.rst +++ b/src/aks-preview/HISTORY.rst @@ -12,6 +12,11 @@ To release a new version, please select a new version number (usually plus 1 to Pending +++++++ +0.5.164 ++++++++ +* Add option `--enable-azure-container-storage` and supporting options `--storage-pool-name`, `--storage-pool-sku`, `--storage-pool-size`, `--storage-pool-option` for `az aks create` and `az aks update`. `az aks update` also supports the `--azure-container-storage-nodepools` option. +* Add option `--disable-azure-container-storage` to `az aks update`. + 0.5.163 +++++++ * Add `get-upgrades` and `get-revisions` to the `az aks mesh` command. diff --git a/src/aks-preview/azext_aks_preview/_client_factory.py b/src/aks-preview/azext_aks_preview/_client_factory.py index 643e7877248..f8b8124663b 100644 --- a/src/aks-preview/azext_aks_preview/_client_factory.py +++ b/src/aks-preview/azext_aks_preview/_client_factory.py @@ -151,3 +151,7 @@ def get_resource_by_name(cli_ctx, resource_name, resource_type): def get_msi_client(cli_ctx, subscription_id=None): return get_mgmt_service_client(cli_ctx, ManagedServiceIdentityClient, subscription_id=subscription_id) + + +def get_providers_client_factory(cli_ctx, subscription_id=None): + return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES, subscription_id=subscription_id).providers diff --git a/src/aks-preview/azext_aks_preview/_params.py b/src/aks-preview/azext_aks_preview/_params.py index 7e61e112b39..320033cc379 100644 --- a/src/aks-preview/azext_aks_preview/_params.py +++ b/src/aks-preview/azext_aks_preview/_params.py @@ -169,7 +169,21 @@ tags_type, zones_type, ) -from azure.cli.core.profiles import ResourceType +from azext_aks_preview.azurecontainerstorage._consts import ( + CONST_STORAGE_POOL_TYPE_AZURE_DISK, + CONST_STORAGE_POOL_TYPE_EPHEMERAL_DISK, + CONST_STORAGE_POOL_TYPE_ELASTIC_SAN, + CONST_STORAGE_POOL_SKU_PREMIUM_LRS, + CONST_STORAGE_POOL_SKU_STANDARD_LRS, + CONST_STORAGE_POOL_SKU_STANDARDSSD_LRS, + CONST_STORAGE_POOL_SKU_ULTRASSD_LRS, + CONST_STORAGE_POOL_SKU_PREMIUM_ZRS, + CONST_STORAGE_POOL_SKU_PREMIUMV2_LRS, + CONST_STORAGE_POOL_SKU_STANDARDSSD_ZRS, + CONST_STORAGE_POOL_OPTION_NVME, + CONST_STORAGE_POOL_OPTION_SSD, + CONST_STORAGE_POOL_DEFAULT_SIZE, +) from knack.arguments import CLIArgumentType # candidates for enumeration @@ -256,6 +270,28 @@ CONST_AZURE_SERVICE_MESH_INGRESS_MODE_INTERNAL, ] +# azure container storage +storage_pool_types = [ + CONST_STORAGE_POOL_TYPE_AZURE_DISK, + CONST_STORAGE_POOL_TYPE_EPHEMERAL_DISK, + CONST_STORAGE_POOL_TYPE_ELASTIC_SAN, +] + +storage_pool_skus = [ + CONST_STORAGE_POOL_SKU_PREMIUM_LRS, + CONST_STORAGE_POOL_SKU_STANDARD_LRS, + CONST_STORAGE_POOL_SKU_STANDARDSSD_LRS, + CONST_STORAGE_POOL_SKU_ULTRASSD_LRS, + CONST_STORAGE_POOL_SKU_PREMIUM_ZRS, + CONST_STORAGE_POOL_SKU_PREMIUMV2_LRS, + CONST_STORAGE_POOL_SKU_STANDARDSSD_ZRS, +] + +storage_pool_options = [ + CONST_STORAGE_POOL_OPTION_NVME, + CONST_STORAGE_POOL_OPTION_SSD, +] + def load_arguments(self, _): @@ -455,6 +491,15 @@ def load_arguments(self, _): c.argument('grafana_resource_id', validator=validate_grafanaresourceid) c.argument('enable_windows_recording_rules', action='store_true') c.argument('enable_cost_analysis', is_preview=True, action='store_true') + # azure container storage + c.argument('enable_azure_container_storage', arg_type=get_enum_type(storage_pool_types), + help='enable azure container storage 
and define storage pool type') + c.argument('storage_pool_name', help='set storage pool name for azure container storage') + c.argument('storage_pool_size', help='set storage pool size for azure container storage') + c.argument('storage_pool_sku', arg_type=get_enum_type(storage_pool_skus), + help='set azure disk type storage pool sku for azure container storage') + c.argument('storage_pool_option', arg_type=get_enum_type(storage_pool_options), + help='set ephemeral disk storage pool option for azure container storage') with self.argument_context('aks update') as c: # managed cluster paramerters @@ -583,6 +628,19 @@ def load_arguments(self, _): c.argument('enable_network_observability', action='store_true', is_preview=True, help="enable network observability for cluster") c.argument('enable_cost_analysis', is_preview=True, action='store_true') c.argument('disable_cost_analysis', is_preview=True, action='store_true') + # azure container storage + c.argument('enable_azure_container_storage', arg_type=get_enum_type(storage_pool_types), + help='enable azure container storage and define storage pool type') + c.argument('disable_azure_container_storage', action='store_true', + help='Flag to disable azure container storage') + c.argument('storage_pool_name', help='set storage pool name for azure container storage') + c.argument('storage_pool_size', help='set storage pool size for azure container storage') + c.argument('storage_pool_sku', arg_type=get_enum_type(storage_pool_skus), + help='set azure disk type storage pool sku for azure container storage') + c.argument('storage_pool_option', arg_type=get_enum_type(storage_pool_options), + help='set ephemeral disk storage pool option for azure container storage') + c.argument('azure_container_storage_nodepools', + help='define the comma separated nodepool list to install azure container storage') with self.argument_context('aks upgrade') as c: c.argument('kubernetes_version', completer=get_k8s_upgrades_completion_list) diff --git a/src/aks-preview/azext_aks_preview/azurecontainerstorage/__init__.py b/src/aks-preview/azext_aks_preview/azurecontainerstorage/__init__.py new file mode 100644 index 00000000000..944e5654667 --- /dev/null +++ b/src/aks-preview/azext_aks_preview/azurecontainerstorage/__init__.py @@ -0,0 +1,4 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- diff --git a/src/aks-preview/azext_aks_preview/azurecontainerstorage/_consts.py b/src/aks-preview/azext_aks_preview/azurecontainerstorage/_consts.py new file mode 100644 index 00000000000..f4f62233f2a --- /dev/null +++ b/src/aks-preview/azext_aks_preview/azurecontainerstorage/_consts.py @@ -0,0 +1,28 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + +CONST_ACSTOR_K8S_EXTENSION_NAME = "microsoft.azurecontainerstorage" +CONST_EXT_INSTALLATION_NAME = "azurecontainerstorage" +CONST_K8S_EXTENSION_CLIENT_FACTORY_MOD_NAME = "azext_k8s_extension._client_factory" +CONST_K8S_EXTENSION_CUSTOM_MOD_NAME = "azext_k8s_extension.custom" +CONST_K8S_EXTENSION_NAME = "k8s-extension" +CONST_STORAGE_POOL_DEFAULT_SIZE_ESAN = "1Ti" +CONST_STORAGE_POOL_DEFAULT_SIZE = "512Gi" +CONST_STORAGE_POOL_NAME_PREFIX = "storagepool-" +CONST_STORAGE_POOL_OPTION_NVME = "NVMe" +CONST_STORAGE_POOL_OPTION_SSD = "SSD" +CONST_STORAGE_POOL_SKU_PREMIUM_LRS = "Premium_LRS" +CONST_STORAGE_POOL_SKU_PREMIUM_ZRS = "Premium_ZRS" +CONST_STORAGE_POOL_SKU_PREMIUMV2_LRS = "PremiumV2_LRS" +CONST_STORAGE_POOL_SKU_STANDARD_LRS = "Standard_LRS" +CONST_STORAGE_POOL_SKU_STANDARDSSD_LRS = "StandardSSD_LRS" +CONST_STORAGE_POOL_SKU_STANDARDSSD_ZRS = "StandardSSD_ZRS" +CONST_STORAGE_POOL_SKU_ULTRASSD_LRS = "UltraSSD_LRS" +CONST_STORAGE_POOL_TYPE_AZURE_DISK = "azureDisk" +CONST_STORAGE_POOL_TYPE_ELASTIC_SAN = "elasticSan" +CONST_STORAGE_POOL_TYPE_EPHEMERAL_DISK = "ephemeralDisk" + +CONST_STORAGE_POOL_RANDOM_LENGTH = 7 +RP_REGISTRATION_POLLING_INTERVAL_IN_SEC = 5 diff --git a/src/aks-preview/azext_aks_preview/azurecontainerstorage/_helpers.py b/src/aks-preview/azext_aks_preview/azurecontainerstorage/_helpers.py new file mode 100644 index 00000000000..720d0b25bca --- /dev/null +++ b/src/aks-preview/azext_aks_preview/azurecontainerstorage/_helpers.py @@ -0,0 +1,206 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + +from azure.cli.core.azclierror import UnknownError +from azure.cli.command_modules.acs._roleassignments import ( + add_role_assignment, + build_role_scope, + delete_role_assignments, +) +from azext_aks_preview._client_factory import get_providers_client_factory +from azext_aks_preview.azurecontainerstorage._consts import ( + CONST_ACSTOR_K8S_EXTENSION_NAME, + CONST_EXT_INSTALLATION_NAME, + CONST_K8S_EXTENSION_CLIENT_FACTORY_MOD_NAME, + CONST_K8S_EXTENSION_CUSTOM_MOD_NAME, + CONST_K8S_EXTENSION_NAME, + CONST_STORAGE_POOL_NAME_PREFIX, + CONST_STORAGE_POOL_OPTION_NVME, + CONST_STORAGE_POOL_RANDOM_LENGTH, + CONST_STORAGE_POOL_TYPE_ELASTIC_SAN, + CONST_STORAGE_POOL_TYPE_EPHEMERAL_DISK, + RP_REGISTRATION_POLLING_INTERVAL_IN_SEC, +) + +from datetime import datetime +from knack.log import get_logger +import random +import string +import time + +logger = get_logger(__name__) + + +def register_dependent_rps(cmd, subscription_id) -> bool: + required_rp = 'Microsoft.KubernetesConfiguration' + from azure.mgmt.resource.resources.models import ProviderRegistrationRequest, ProviderConsentDefinition + + properties = ProviderRegistrationRequest(third_party_provider_consent=ProviderConsentDefinition(consent_to_authorization=False)) + client = get_providers_client_factory(cmd.cli_ctx) + is_registered = False + try: + is_registered = _is_rp_registered(cmd, required_rp, subscription_id) + if is_registered: + return + client.register(required_rp, properties=properties) + # wait for registration to finish + timeout_secs = 120 + start = datetime.utcnow() + is_registered = _is_rp_registered(cmd, required_rp, subscription_id) + while not is_registered: + is_registered = _is_rp_registered(cmd, required_rp, subscription_id) + time.sleep(RP_REGISTRATION_POLLING_INTERVAL_IN_SEC) + if (datetime.utcnow() - start).seconds >= timeout_secs: + logger.error("Timed out while waiting for the {0} resource provider to be registered.".format(required_rp)) + break + + except Exception as e: + logger.error( + "Installation of Azure Container Storage requires registering to the following resource provider: {0}. " + "We were unable to perform the registration on your behalf due to the following error: {1}\n" + "Please check with your admin on permissions, " + "or try running registration manually with: `az provider register --namespace {0}` command." + .format(required_rp, e.msg) + ) + + return is_registered + + +def should_create_storagepool( + cmd, + subscription_id, + node_resource_group, + kubelet_identity_object_id, + storage_pool_type, + storage_pool_option, + agentpool_details, + nodepool_name, +): + role_assignment_success = perform_role_operations_on_managed_rg(cmd, subscription_id, node_resource_group, kubelet_identity_object_id, True) + return_val = True + + if not role_assignment_success: + msg = "\nUnable to add Role Assignments needed for Elastic SAN storagepools to be functional. " \ + "Please check with your admin on permissions." + if storage_pool_type == CONST_STORAGE_POOL_TYPE_ELASTIC_SAN: + msg += "\nThis command will not create an Elastic SAN storagepool after installation." + return_val = False + msg += "\nGoing ahead with the installation of Azure Container Storage..." 
+ logger.warning(msg) + + if not return_val: + return return_val + + if storage_pool_type == CONST_STORAGE_POOL_TYPE_EPHEMERAL_DISK and \ + storage_pool_option == CONST_STORAGE_POOL_OPTION_NVME: + nodepool_list = nodepool_name.split(',') + for nodepool in nodepool_list: + agentpool_vm = agentpool_details.get(nodepool.lower()) + if agentpool_vm is not None and agentpool_vm.lower().startswith('standard_l'): + break + else: + logger.warning( + "\nNo supporting nodepool found which can support ephemeral NVMe disk " + "so this command will not create an ephemeral NVMe disk storage pool after installation." + "\nGoing ahead with the installation of Azure Container Storage..." + ) + return_val = False + + return return_val + + +def perform_role_operations_on_managed_rg(cmd, subscription_id, node_resource_group, kubelet_identity_object_id, assign): + managed_rg_role_scope = build_role_scope(node_resource_group, None, subscription_id) + roles = ["Reader", "Network Contributor", "Elastic SAN Owner", "Elastic SAN Volume Group Owner"] + result = True + + for role in roles: + try: + if assign: + result = add_role_assignment( + cmd, + role, + kubelet_identity_object_id, + scope=managed_rg_role_scope, + delay=0, + ) + else: + # NOTE: delete_role_assignments accepts cli_ctx + # instead of cmd unlike add_role_assignment. + result = delete_role_assignments( + cmd.cli_ctx, + role, + kubelet_identity_object_id, + scope=managed_rg_role_scope, + delay=0, + ) + + if not result: + break + except Exception as ex: + break + else: + return True + + if not assign: + logger.error("\nUnable to revoke Role Assignments if any, added for Azure Container Storage.") + + return False + + +def generate_random_storage_pool_name(): + random_name = CONST_STORAGE_POOL_NAME_PREFIX + ''.join(random.choices(string.ascii_lowercase, k=CONST_STORAGE_POOL_RANDOM_LENGTH)) + return random_name + + +def get_k8s_extension_module(module_name): + try: + # adding the installed extension in the path + from azure.cli.core.extension.operations import add_extension_to_path + add_extension_to_path(CONST_K8S_EXTENSION_NAME) + # import the extension module + from importlib import import_module + azext_custom = import_module(module_name) + return azext_custom + except ImportError as ie: + raise UnknownError( + "Please add CLI extension `k8s-extension` for performing Azure Container Storage operations.\n" + "Run command `az extension add --name k8s-extension`" + ) + + +def check_if_extension_is_installed(cmd, resource_group, cluster_name) -> bool: + client_factory = get_k8s_extension_module(CONST_K8S_EXTENSION_CLIENT_FACTORY_MOD_NAME) + client = client_factory.cf_k8s_extension_operation(cmd.cli_ctx) + k8s_extension_custom_mod = get_k8s_extension_module(CONST_K8S_EXTENSION_CUSTOM_MOD_NAME) + return_val = True + try: + extension = k8s_extension_custom_mod.show_k8s_extension( + client, + resource_group, + cluster_name, + CONST_EXT_INSTALLATION_NAME, + "managedClusters", + ) + + extension_type = extension.extension_type.lower() + if extension_type != CONST_ACSTOR_K8S_EXTENSION_NAME: + return_val = False + except: + return_val = False + + return return_val + + +def _is_rp_registered(cmd, required_rp, subscription_id): + registered = False + try: + providers_client = get_providers_client_factory(cmd.cli_ctx, subscription_id) + registration_state = getattr(providers_client.get(required_rp), 'registration_state', "NotRegistered") + + registered = (registration_state and registration_state.lower() == 'registered') + except Exception: # pylint: 
disable=broad-except + pass + return registered diff --git a/src/aks-preview/azext_aks_preview/azurecontainerstorage/_validators.py b/src/aks-preview/azext_aks_preview/azurecontainerstorage/_validators.py new file mode 100644 index 00000000000..d372c4f7a8f --- /dev/null +++ b/src/aks-preview/azext_aks_preview/azurecontainerstorage/_validators.py @@ -0,0 +1,183 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +from azext_aks_preview.azurecontainerstorage._consts import ( + CONST_STORAGE_POOL_OPTION_SSD, + CONST_STORAGE_POOL_SKU_PREMIUM_LRS, + CONST_STORAGE_POOL_SKU_PREMIUM_ZRS, + CONST_STORAGE_POOL_TYPE_AZURE_DISK, + CONST_STORAGE_POOL_TYPE_EPHEMERAL_DISK, + CONST_STORAGE_POOL_TYPE_ELASTIC_SAN, +) + +from azure.cli.core.azclierror import ( + ArgumentUsageError, + InvalidArgumentValueError, + MutuallyExclusiveArgumentError, +) + +from knack.log import get_logger +import re + +elastic_san_supported_skus = [ + CONST_STORAGE_POOL_SKU_PREMIUM_LRS, + CONST_STORAGE_POOL_SKU_PREMIUM_ZRS, +] + +logger = get_logger(__name__) + + +def validate_nodepool_names_with_cluster_nodepools(nodepool_names, agentpool_details): + nodepool_list = nodepool_names.split(',') + for nodepool in nodepool_list: + if nodepool not in agentpool_details: + raise InvalidArgumentValueError( + 'Nodepool: {} not found. ' + 'Please provide existing nodepool names in --azure-container-storage-nodepools.' + '\nUse command `az nodepool list` to get the list of nodepools in the cluster.' + '\nAborting installation of Azure Container Storage.' + .format(nodepool) + ) + + +def validate_azure_container_storage_params( + enable_azure_container_storage, + disable_azure_container_storage, + storage_pool_name, + storage_pool_type, + storage_pool_sku, + storage_pool_option, + storage_pool_size, + nodepool_list, +): + if enable_azure_container_storage and disable_azure_container_storage: + raise MutuallyExclusiveArgumentError( + 'Conflicting flags. Cannot set --enable-azure-container-storage ' + 'and --disable-azure-container-storage together.' + ) + + if disable_azure_container_storage: + _validate_disable_azure_container_storage_params( + storage_pool_name, + storage_pool_sku, + storage_pool_option, + storage_pool_size, + nodepool_list, + ) + + elif enable_azure_container_storage: + _validate_enable_azure_container_storage_params( + storage_pool_name, + storage_pool_type, + storage_pool_sku, + storage_pool_option, + storage_pool_size, + ) + + +def _validate_disable_azure_container_storage_params( + storage_pool_name, + storage_pool_sku, + storage_pool_option, + storage_pool_size, + nodepool_list, +): + if storage_pool_name is not None: + raise MutuallyExclusiveArgumentError( + 'Conflicting flags. Cannot define --storage-pool-name value ' + 'when --disable-azure-container-storage is set.' + ) + + if storage_pool_sku is not None: + raise MutuallyExclusiveArgumentError( + 'Conflicting flags. Cannot define --storage-pool-sku value ' + 'when --disable-azure-container-storage is set.' + ) + + if storage_pool_size is not None: + raise MutuallyExclusiveArgumentError( + 'Conflicting flags. Cannot define --storage-pool-size value ' + 'when --disable-azure-container-storage is set.' 
+ ) + + if storage_pool_option is not None: + raise MutuallyExclusiveArgumentError( + 'Conflicting flags. Cannot define --storage-pool-option value ' + 'when --disable-azure-container-storage is set.' + ) + + if nodepool_list is not None: + raise MutuallyExclusiveArgumentError( + 'Conflicting flags. Cannot define --azure-container-storage-nodepools value ' + 'when --disable-azure-container-storage is set.' + ) + + +def _validate_enable_azure_container_storage_params( + storage_pool_name, + storage_pool_type, + storage_pool_sku, + storage_pool_option, + storage_pool_size, +): + if storage_pool_name is not None: + pattern = r'[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*' + is_pool_name_valid = re.fullmatch(pattern, storage_pool_name) + if not is_pool_name_valid: + raise InvalidArgumentValueError( + "Invalid --storage-pool-name value. " + "Accepted values are lowercase alphanumeric characters, " + "'-' or '.', and must start and end with an alphanumeric character.") + + if storage_pool_sku is not None: + if storage_pool_type == CONST_STORAGE_POOL_TYPE_EPHEMERAL_DISK: + raise ArgumentUsageError('Cannot set --storage-pool-sku when --enable-azure-container-storage is ephemeralDisk.') + elif storage_pool_type == CONST_STORAGE_POOL_TYPE_ELASTIC_SAN and \ + storage_pool_sku not in elastic_san_supported_skus: + supported_skus_str = ", ".join(elastic_san_supported_skus) + raise ArgumentUsageError( + 'Invalid --storage-pool-sku value. ' + 'Supported value for --storage-pool-sku are {0} ' + 'when --enable-azure-container-storage is set to elasticSan.' + .format(supported_skus_str) + ) + + if storage_pool_type != CONST_STORAGE_POOL_TYPE_EPHEMERAL_DISK and \ + storage_pool_option is not None: + raise ArgumentUsageError('Cannot set --storage-pool-option when --enable-azure-container-storage is not ephemeralDisk.') + + if storage_pool_type == CONST_STORAGE_POOL_TYPE_EPHEMERAL_DISK and \ + storage_pool_option == CONST_STORAGE_POOL_OPTION_SSD: + raise ArgumentUsageError( + '--storage-pool-option Temp storage (SSD) currently not supported.' + ) + + if storage_pool_size is not None: + pattern = r'^\d+(\.\d+)?[GT]i$' + match = re.match(pattern, storage_pool_size) + if match is None: + raise ArgumentUsageError( + 'Value for --storage-pool-size should be defined ' + 'with size followed by Gi or Ti e.g. 512Gi or 2Ti.' + ) + + else: + if storage_pool_type == CONST_STORAGE_POOL_TYPE_ELASTIC_SAN: + pool_size_qty = float(storage_pool_size[:-2]) + pool_size_unit = storage_pool_size[-2:] + + if ( + (pool_size_unit == "Gi" and pool_size_qty < 1024) or + (pool_size_unit == "Ti" and pool_size_qty < 1) + ): + raise ArgumentUsageError( + 'Value for --storage-pool-size must be at least 1Ti when ' + '--enable-azure-container-storage is elasticSan.') + + elif storage_pool_type == CONST_STORAGE_POOL_TYPE_EPHEMERAL_DISK: + logger.warning( + 'Storage pools using Ephemeral disk use all capacity available on the local device. ' + ' --storage-pool-size will be ignored.' + ) diff --git a/src/aks-preview/azext_aks_preview/azurecontainerstorage/acstor_ops.py b/src/aks-preview/azext_aks_preview/azurecontainerstorage/acstor_ops.py new file mode 100644 index 00000000000..6bc9edfe5be --- /dev/null +++ b/src/aks-preview/azext_aks_preview/azurecontainerstorage/acstor_ops.py @@ -0,0 +1,259 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +from azure.cli.core.azclierror import UnknownError +from azure.cli.core.commands import LongRunningOperation +from azext_aks_preview.azurecontainerstorage._consts import ( + CONST_ACSTOR_K8S_EXTENSION_NAME, + CONST_EXT_INSTALLATION_NAME, + CONST_K8S_EXTENSION_CLIENT_FACTORY_MOD_NAME, + CONST_K8S_EXTENSION_CUSTOM_MOD_NAME, + CONST_STORAGE_POOL_DEFAULT_SIZE, + CONST_STORAGE_POOL_DEFAULT_SIZE_ESAN, + CONST_STORAGE_POOL_OPTION_NVME, + CONST_STORAGE_POOL_OPTION_SSD, + CONST_STORAGE_POOL_SKU_PREMIUM_LRS, + CONST_STORAGE_POOL_TYPE_AZURE_DISK, + CONST_STORAGE_POOL_TYPE_ELASTIC_SAN, + CONST_STORAGE_POOL_TYPE_EPHEMERAL_DISK, +) +from azext_aks_preview.azurecontainerstorage._helpers import ( + check_if_extension_is_installed, + generate_random_storage_pool_name, + get_k8s_extension_module, + perform_role_operations_on_managed_rg, + register_dependent_rps, + should_create_storagepool, +) +from azext_aks_preview.azurecontainerstorage._validators import validate_nodepool_names_with_cluster_nodepools +from knack.log import get_logger +from knack.prompting import prompt_y_n + +logger = get_logger(__name__) + + +def perform_enable_azure_container_storage( + cmd, + subscription_id, + resource_group, + cluster_name, + node_resource_group, + kubelet_identity_object_id, + storage_pool_name, + storage_pool_type, + storage_pool_size, + storage_pool_sku, + storage_pool_option, + nodepool_names, + agentpool_details, + is_cluster_create, +): + # Step 1: Check and register the dependent provider for ManagedClusters i.e. + # Microsoft.KubernetesConfiguration + if not register_dependent_rps(cmd, subscription_id): + return + + # Step 2: Check if extension already installed incase of an update call + if not is_cluster_create and check_if_extension_is_installed(cmd, resource_group, cluster_name): + logger.error( + "Extension type {0} already installed on cluster." + "\nAborting installation of Azure Container Storage." + .format(CONST_ACSTOR_K8S_EXTENSION_NAME) + ) + return + + if nodepool_names is None: + nodepool_names = "nodepool1" + if storage_pool_type == CONST_STORAGE_POOL_TYPE_EPHEMERAL_DISK: + if storage_pool_option is None: + storage_pool_option = CONST_STORAGE_POOL_OPTION_NVME + if storage_pool_option == CONST_STORAGE_POOL_OPTION_SSD: + storage_pool_option = "temp" + + validate_nodepool_names_with_cluster_nodepools(nodepool_names, agentpool_details) + + # Step 3: Validate if storagepool should be created. + # Depends on the following: + # 3a: Grant AKS cluster's node identity the following + # roles on the AKS managed resource group: + # 1. Reader + # 2. Network Contributor + # 3. Elastic SAN Owner + # 4. Elastic SAN Volume Group Owner + # Ensure grant was successful if creation of + # Elastic SAN storagepool is requested. + # 3b: Ensure Ls series nodepool is present if creation + # of Ephemeral NVMe Disk storagepool is requested. 
+ create_storage_pool = should_create_storagepool( + cmd, + subscription_id, + node_resource_group, + kubelet_identity_object_id, + storage_pool_type, + storage_pool_option, + agentpool_details, + nodepool_names, + ) + + # Step 4: Configure the storagepool parameters + config_settings = [] + if create_storage_pool: + if storage_pool_name is None: + storage_pool_name = generate_random_storage_pool_name() + if storage_pool_size is None: + storage_pool_size = CONST_STORAGE_POOL_DEFAULT_SIZE_ESAN if \ + storage_pool_type == CONST_STORAGE_POOL_TYPE_ELASTIC_SAN else \ + CONST_STORAGE_POOL_DEFAULT_SIZE + config_settings.extend( + [ + {"cli.storagePool.create": True}, + {"cli.storagePool.name": storage_pool_name}, + {"cli.storagePool.size": storage_pool_size}, + {"cli.storagePool.type": storage_pool_type}, + {"cli.node.nodepools": nodepool_names}, + ] + ) + + if storage_pool_type == CONST_STORAGE_POOL_TYPE_EPHEMERAL_DISK: + config_settings.append({"cli.storagePool.ephemeralDiskOption": storage_pool_option.lower()}) + else: + if storage_pool_sku is None: + storage_pool_sku = CONST_STORAGE_POOL_SKU_PREMIUM_LRS + config_settings.append({"cli.storagePool.sku": storage_pool_sku}) + else: + config_settings.append({"cli.storagePool.create": False}) + + # Step 5: Install the k8s_extension 'microsoft.azurecontainerstorage' + client_factory = get_k8s_extension_module(CONST_K8S_EXTENSION_CLIENT_FACTORY_MOD_NAME) + client = client_factory.cf_k8s_extension_operation(cmd.cli_ctx) + + k8s_extension_custom_mod = get_k8s_extension_module(CONST_K8S_EXTENSION_CUSTOM_MOD_NAME) + try: + result = k8s_extension_custom_mod.create_k8s_extension( + cmd, + client, + resource_group, + cluster_name, + CONST_EXT_INSTALLATION_NAME, + "managedClusters", + CONST_ACSTOR_K8S_EXTENSION_NAME, + auto_upgrade_minor_version=True, + release_train="stable", + scope="cluster", + release_namespace="acstor", + configuration_settings=config_settings, + ) + + create_result = LongRunningOperation(cmd.cli_ctx)(result) + if create_result.provisioning_state == "Succeeded": + logger.warning("Azure Container Storage successfully installed.") + except Exception as ex: + if is_cluster_create: + logger.error("Azure Container Storage failed to install.\nError: {0}".format(ex.message)) + logger.warning( + "AKS cluster is created. " + "Please run `az aks update` along with `--enable-azure-container-storage` " + "to enable Azure Container Storage." + ) + else: + logger.error("AKS update to enable Azure Container Storage failed.\nError: {0}".format(ex.message)) + + +def perform_disable_azure_container_storage( + cmd, + subscription_id, + resource_group, + cluster_name, + node_resource_group, + kubelet_identity_object_id, + perform_validation, +): + # Step 1: Check if show_k8s_extension returns an extension already installed + if not check_if_extension_is_installed(cmd, resource_group, cluster_name): + raise UnknownError( + "Extension type {0} not installed on cluster." + "\nAborting disabling of Azure Container Storage." 
+ .format(CONST_ACSTOR_K8S_EXTENSION_NAME) + ) + + client_factory = get_k8s_extension_module(CONST_K8S_EXTENSION_CLIENT_FACTORY_MOD_NAME) + client = client_factory.cf_k8s_extension_operation(cmd.cli_ctx) + k8s_extension_custom_mod = get_k8s_extension_module(CONST_K8S_EXTENSION_CUSTOM_MOD_NAME) + no_wait_delete_op = False + # Step 2: Perform validation if accepted by user + if perform_validation: + config_settings = [{"cli.storagePool.uninstallValidation": True}] + try: + update_result = k8s_extension_custom_mod.update_k8s_extension( + cmd, + client, + resource_group, + cluster_name, + CONST_EXT_INSTALLATION_NAME, + "managedClusters", + configuration_settings=config_settings, + yes=True, + no_wait=False, + ) + + update_long_op_result = LongRunningOperation(cmd.cli_ctx)(update_result) + if update_long_op_result.provisioning_state == "Succeeded": + logger.warning("Validation succeeded. Disabling Azure Container Storage...") + + # Since pre-uninstall validation will ensure deletion of storagepools, + # we don't need to wait long while performing the delete operation. + # Setting no_wait_delete_op = True. + no_wait_delete_op = True + except Exception as ex: + config_settings = [{"cli.storagePool.uninstallValidation": False}] + k8s_extension_custom_mod.update_k8s_extension( + cmd, + client, + resource_group, + cluster_name, + CONST_EXT_INSTALLATION_NAME, + "managedClusters", + configuration_settings=config_settings, + yes=True, + no_wait=True, + ) + + if ex.message.__contains__("pre-upgrade hooks failed"): + raise UnknownError( + "Validation failed. " + "Please ensure that storagepools are not being used. " + "Unable to disable Azure Container Storage. " + "Resetting cluster state." + ) + else: + raise UnknownError("Validation failed. Unable to disable Azure Container Storage. Resetting cluster state.") + + # Step 3: If the extension is installed and validation succeeded or skipped, call delete_k8s_extension + try: + delete_op_result = k8s_extension_custom_mod.delete_k8s_extension( + cmd, + client, + resource_group, + cluster_name, + CONST_EXT_INSTALLATION_NAME, + "managedClusters", + yes=True, + no_wait=no_wait_delete_op, + ) + + if not no_wait_delete_op: + LongRunningOperation(cmd.cli_ctx)(delete_op_result) + except Exception as delete_ex: + raise UnknownError("Failure observed while disabling Azure Container Storage.\nError: {0}".format(delete_ex.message)) + + logger.warning("Azure Container Storage has been disabled.") + + # Step 4: Revoke AKS cluster's node identity the following + # roles on the AKS managed resource group: + # 1. Reader + # 2. Network Contributor + # 3. Elastic SAN Owner + # 4. 
Elastic SAN Volume Group Owner + perform_role_operations_on_managed_rg(cmd, subscription_id, node_resource_group, kubelet_identity_object_id, False) diff --git a/src/aks-preview/azext_aks_preview/custom.py b/src/aks-preview/azext_aks_preview/custom.py index d0ae81bc358..455ce5d87d2 100644 --- a/src/aks-preview/azext_aks_preview/custom.py +++ b/src/aks-preview/azext_aks_preview/custom.py @@ -598,6 +598,12 @@ def aks_create( enable_windows_recording_rules=False, # metrics profile enable_cost_analysis=False, + # azure container storage + enable_azure_container_storage=None, + storage_pool_name=None, + storage_pool_size=None, + storage_pool_sku=None, + storage_pool_option=None, ): # DO NOT MOVE: get all the original parameters and save them as a dictionary raw_parameters = locals() @@ -768,6 +774,14 @@ def aks_update( # metrics profile enable_cost_analysis=False, disable_cost_analysis=False, + # azure container storage + enable_azure_container_storage=None, + disable_azure_container_storage=False, + storage_pool_name=None, + storage_pool_size=None, + storage_pool_sku=None, + storage_pool_option=None, + azure_container_storage_nodepools=None, ): # DO NOT MOVE: get all the original parameters and save them as a dictionary raw_parameters = locals() diff --git a/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py b/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py index c1bb3437555..ae5d8994512 100644 --- a/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py +++ b/src/aks-preview/azext_aks_preview/managed_cluster_decorator.py @@ -31,6 +31,10 @@ from azext_aks_preview.azuremonitormetrics.azuremonitorprofile import ( ensure_azure_monitor_profile_prerequisites ) +from azext_aks_preview.azurecontainerstorage.acstor_ops import ( + perform_enable_azure_container_storage, + perform_disable_azure_container_storage, +) from azure.cli.command_modules.acs.managed_cluster_decorator import ( AKSManagedClusterContext, AKSManagedClusterCreateDecorator, @@ -168,6 +172,9 @@ def external_functions(self) -> SimpleNamespace: ] = ensure_azure_monitor_profile_prerequisites # temp workaround for the breaking change caused by default API version bump of the auth SDK external_functions["add_role_assignment"] = add_role_assignment + # azure container storage functions + external_functions["perform_enable_azure_container_storage"] = perform_enable_azure_container_storage + external_functions["perform_disable_azure_container_storage"] = perform_disable_azure_container_storage self.__external_functions = SimpleNamespace(**external_functions) return self.__external_functions @@ -2745,6 +2752,34 @@ def set_up_azure_monitor_profile(self, mc: ManagedCluster) -> ManagedCluster: self.context.set_intermediate("azuremonitormetrics_addon_enabled", True, overwrite_exists=True) return mc + def set_up_azure_container_storage(self, mc: ManagedCluster) -> None: + """Set up azure container storage for the Managed Cluster object + :return: None + """ + self._ensure_mc(mc) + # read the azure container storage values passed + pool_type = self.context.raw_param.get("enable_azure_container_storage") + enable_azure_container_storage = pool_type is not None + if enable_azure_container_storage: + pool_name = self.context.raw_param.get("storage_pool_name") + pool_option = self.context.raw_param.get("storage_pool_option") + pool_sku = self.context.raw_param.get("storage_pool_sku") + pool_size = self.context.raw_param.get("storage_pool_size") + from azext_aks_preview.azurecontainerstorage._validators import 
validate_azure_container_storage_params + validate_azure_container_storage_params( + True, + None, + pool_name, + pool_type, + pool_sku, + pool_option, + pool_size, + None, + ) + + # set intermediates + self.context.set_intermediate("enable_azure_container_storage", True, overwrite_exists=True) + def set_up_auto_upgrade_profile(self, mc: ManagedCluster) -> ManagedCluster: """Set up auto upgrade profile for the ManagedCluster object. :return: the ManagedCluster object @@ -2885,6 +2920,8 @@ def construct_mc_profile_preview(self, bypass_restore_defaults: bool = False) -> mc = self.set_up_azure_monitor_profile(mc) # set up metrics profile mc = self.set_up_metrics_profile(mc) + # set up for azure container storage + self.set_up_azure_container_storage(mc) # DO NOT MOVE: keep this at the bottom, restore defaults mc = self._restore_defaults_in_mc(mc) @@ -2908,6 +2945,7 @@ def check_is_postprocessing_required(self, mc: ManagedCluster) -> bool: need_grant_vnet_permission_to_cluster_identity = self.context.get_intermediate( "need_post_creation_vnet_permission_granting", default_value=False ) + enable_azure_container_storage = self.context.get_intermediate("enable_azure_container_storage", default_value=False) if ( monitoring_addon_enabled or @@ -2915,7 +2953,8 @@ def check_is_postprocessing_required(self, mc: ManagedCluster) -> bool: virtual_node_addon_enabled or azuremonitormetrics_addon_enabled or (enable_managed_identity and attach_acr) or - need_grant_vnet_permission_to_cluster_identity + need_grant_vnet_permission_to_cluster_identity or + enable_azure_container_storage ): return True return False @@ -3042,6 +3081,53 @@ def postprocessing_after_mc_created(self, cluster: ManagedCluster) -> None: True ) + # enable azure container storage + enable_azure_container_storage = self.context.get_intermediate("enable_azure_container_storage") + if enable_azure_container_storage: + if cluster.identity_profile is None or cluster.identity_profile["kubeletidentity"] is None: + logger.warning( + "Unexpected error getting kubelet's identity for the cluster. " + "Unable to perform the azure container storage operation." + ) + return + + # Get the node_resource_group from the cluster object since + # `mc` in `context` still doesn't have the updated node_resource_group. + if cluster.node_resource_group is None: + logger.warning( + "Unexpected error getting cluster's node resource group. " + "Unable to perform the azure container storage operation." 
+ ) + return + + pool_name = self.context.raw_param.get("storage_pool_name") + pool_type = self.context.raw_param.get("enable_azure_container_storage") + pool_option = self.context.raw_param.get("storage_pool_option") + pool_sku = self.context.raw_param.get("storage_pool_sku") + pool_size = self.context.raw_param.get("storage_pool_size") + kubelet_identity_object_id = cluster.identity_profile["kubeletidentity"].object_id + node_resource_group = cluster.node_resource_group + agent_pool_details = {} + for agentpool_profile in cluster.agent_pool_profiles: + agent_pool_details[agentpool_profile.name] = agentpool_profile.vm_size + + self.context.external_functions.perform_enable_azure_container_storage( + self.cmd, + self.context.get_subscription_id(), + self.context.get_resource_group_name(), + self.context.get_name(), + node_resource_group, + kubelet_identity_object_id, + pool_name, + pool_type, + pool_size, + pool_sku, + pool_option, + "nodepool1", + agent_pool_details, + True, + ) + class AKSPreviewManagedClusterUpdateDecorator(AKSManagedClusterUpdateDecorator): def __init__( @@ -3168,6 +3254,50 @@ def update_enable_network_observability_in_network_profile(self, mc: ManagedClus ) return mc + def update_azure_container_storage(self, mc: ManagedCluster) -> None: + """Update azure container storage for the Managed Cluster object + :return: None + """ + self._ensure_mc(mc) + # read the azure container storage values passed + pool_type = self.context.raw_param.get("enable_azure_container_storage") + disable_azure_container_storage = self.context.raw_param.get("disable_azure_container_storage") + enable_azure_container_storage = pool_type is not None + if enable_azure_container_storage or disable_azure_container_storage: + pool_name = self.context.raw_param.get("storage_pool_name") + pool_option = self.context.raw_param.get("storage_pool_option") + pool_sku = self.context.raw_param.get("storage_pool_sku") + pool_size = self.context.raw_param.get("storage_pool_size") + nodepool_list = self.context.raw_param.get("azure_container_storage_nodepools") + from azext_aks_preview.azurecontainerstorage._validators import validate_azure_container_storage_params + validate_azure_container_storage_params( + enable_azure_container_storage, + disable_azure_container_storage, + pool_name, + pool_type, + pool_sku, + pool_option, + pool_size, + nodepool_list, + ) + + if enable_azure_container_storage: + # set intermediates + self.context.set_intermediate("enable_azure_container_storage", True, overwrite_exists=True) + + if disable_azure_container_storage: + pre_uninstall_validate = False + msg = 'Disabling Azure Container Storage will forcefully delete all the storagepools on the cluster and ' \ + 'affect the applications using these storagepools. Forceful deletion of storagepools can also lead to ' \ + 'leaking of storage resources which are being consumed. Do you want to validate whether any of ' \ + 'the storagepools are being used before disabling Azure Container Storage?' + if self.context.get_yes() or prompt_y_n(msg, default="y"): + pre_uninstall_validate = True + + # set intermediate + self.context.set_intermediate("disable_azure_container_storage", True, overwrite_exists=True) + self.context.set_intermediate("pre_uninstall_validate_azure_container_storage", pre_uninstall_validate, overwrite_exists=True) + def update_load_balancer_profile(self, mc: ManagedCluster) -> ManagedCluster: """Update load balancer profile for the ManagedCluster object. 
@@ -3804,5 +3934,82 @@ def update_mc_profile_preview(self) -> ManagedCluster: mc = self.update_k8s_support_plan(mc) # update metrics profile mc = self.update_metrics_profile(mc) + # update azure container storage + self.update_azure_container_storage(mc) return mc + + def check_is_postprocessing_required(self, mc: ManagedCluster) -> bool: + """Helper function to check if postprocessing is required after sending a PUT request to create the cluster. + + :return: bool + """ + postprocessing_required = super().check_is_postprocessing_required(mc) + if not postprocessing_required: + enable_azure_container_storage = self.context.get_intermediate("enable_azure_container_storage", default_value=False) + disable_azure_container_storage = self.context.get_intermediate("disable_azure_container_storage", default_value=False) + + if (enable_azure_container_storage or disable_azure_container_storage): + return True + return postprocessing_required + + def postprocessing_after_mc_created(self, cluster: ManagedCluster) -> None: + """Postprocessing performed after the cluster is created. + + :return: None + """ + super().postprocessing_after_mc_created(cluster) + enable_azure_container_storage = self.context.get_intermediate("enable_azure_container_storage") + disable_azure_container_storage = self.context.get_intermediate("disable_azure_container_storage") + + if enable_azure_container_storage or disable_azure_container_storage: + if cluster.identity_profile is None or cluster.identity_profile["kubeletidentity"] is None: + logger.warning( + "Unexpected error getting kubelet's identity for the cluster." + "Unable to perform azure container storage operation." + ) + return + + # enable azure container storage + if enable_azure_container_storage: + pool_name = self.context.raw_param.get("storage_pool_name") + pool_type = self.context.raw_param.get("enable_azure_container_storage") + pool_option = self.context.raw_param.get("storage_pool_option") + pool_sku = self.context.raw_param.get("storage_pool_sku") + pool_size = self.context.raw_param.get("storage_pool_size") + nodepool_list = self.context.raw_param.get("azure_container_storage_nodepools") + kubelet_identity_object_id = cluster.identity_profile["kubeletidentity"].object_id + agent_pool_details = {} + for agentpool_profile in cluster.agent_pool_profiles: + agent_pool_details[agentpool_profile.name] = agentpool_profile.vm_size + + self.context.external_functions.perform_enable_azure_container_storage( + self.cmd, + self.context.get_subscription_id(), + self.context.get_resource_group_name(), + self.context.get_name(), + self.context.get_node_resource_group(), + kubelet_identity_object_id, + pool_name, + pool_type, + pool_size, + pool_sku, + pool_option, + nodepool_list, + agent_pool_details, + False, + ) + + # disable azure container storage + if disable_azure_container_storage: + kubelet_identity_object_id = cluster.identity_profile["kubeletidentity"].object_id + pre_uninstall_validate = self.context.get_intermediate("pre_uninstall_validate_azure_container_storage") + self.context.external_functions.perform_disable_azure_container_storage( + self.cmd, + self.context.get_subscription_id(), + self.context.get_resource_group_name(), + self.context.get_name(), + self.context.get_node_resource_group(), + kubelet_identity_object_id, + pre_uninstall_validate, + ) diff --git a/src/aks-preview/azext_aks_preview/tests/latest/test_aks_commands.py b/src/aks-preview/azext_aks_preview/tests/latest/test_aks_commands.py index 0c3b65e26f6..029ebe55241 100644 --- 
a/src/aks-preview/azext_aks_preview/tests/latest/test_aks_commands.py +++ b/src/aks-preview/azext_aks_preview/tests/latest/test_aks_commands.py @@ -6720,6 +6720,44 @@ def test_aks_create_with_azuremonitormetrics(self, resource_group, resource_grou self.is_empty(), ]) + # live only due to downloading k8s-extension extension + @live_only() + @AllowLargeResponse(8192) + @AKSCustomResourceGroupPreparer(random_name_length=17, name_prefix='clitest', location='westus2') + def test_aks_create_with_azurecontainerstorage(self, resource_group, resource_group_location): + # reset the count so in replay mode the random names will start with 0 + self.test_resources_count = 0 + # kwargs for string formatting + aks_name = self.create_random_name('cliakstest', 16) + + node_vm_size = 'standard_d4s_v3' + self.kwargs.update({ + 'resource_group': resource_group, + 'name': aks_name, + 'location': resource_group_location, + 'resource_type': 'Microsoft.ContainerService/ManagedClusters', + 'ssh_key_value': self.generate_ssh_keys(), + 'node_vm_size': node_vm_size + }) + + # add k8s-extension extension for azurecontainerstorage operations. + self.cmd('extension add --name k8s-extension') + + create_cmd = 'aks create --resource-group={resource_group} --name={name} --location={location} --ssh-key-value={ssh_key_value} --node-vm-size={node_vm_size} ' \ + '--node-count 3 --enable-managed-identity --enable-azure-container-storage azureDisk --output=json' + + # enabling azurecontainerstorage will not affect any field in the cluster. + # the only check we should perform is to verify that the cluster is provisioned successfully. + self.cmd(create_cmd, checks=[ + self.check('provisioningState', 'Succeeded'), + ]) + + # delete + cmd = 'aks delete --resource-group={resource_group} --name={name} --yes --no-wait' + self.cmd(cmd, checks=[ + self.is_empty(), + ]) + @AllowLargeResponse() @AKSCustomResourceGroupPreparer(random_name_length=17, name_prefix='clitest', location='westus2') def test_aks_update_with_azuremonitormetrics(self, resource_group, resource_group_location): @@ -6762,6 +6800,55 @@ def test_aks_update_with_azuremonitormetrics(self, resource_group, resource_grou self.is_empty(), ]) + # live only due to downloading k8s-extension extension + # @live_only() + # Introduce this test back once v1.0.3-preview version of Azure container storage is released + # @AllowLargeResponse(8192) + # @AKSCustomResourceGroupPreparer(random_name_length=17, name_prefix='clitest', location='westus2') + # def test_aks_update_with_azurecontainerstorage(self, resource_group, resource_group_location): + # aks_name = self.create_random_name('cliakstest', 16) + # node_vm_size = 'standard_d4s_v3' + # self.kwargs.update({ + # 'resource_group': resource_group, + # 'name': aks_name, + # 'location': resource_group_location, + # 'ssh_key_value': self.generate_ssh_keys(), + # 'node_vm_size': node_vm_size, + # }) + + # # add k8s-extension extension for azurecontainerstorage operations. + # self.cmd('extension add --name k8s-extension') + + # # create: without enable-azure-container-storage + # create_cmd = 'aks create --resource-group={resource_group} --name={name} --location={location} --ssh-key-value={ssh_key_value} --node-vm-size={node_vm_size} --node-count 3 --enable-managed-identity --output=json' + # self.cmd(create_cmd, checks=[ + # self.check('provisioningState', 'Succeeded'), + # ]) + + # # enabling or disabling azurecontainerstorage will not affect any field in the cluster. 
+ # # the only check we should perform is to verify that the cluster is provisioned successfully. + + # # update: enable-azure-container-storage + # update_cmd = 'aks update --resource-group={resource_group} --name={name} --yes --output=json ' \ + # '--enable-azure-container-storage azureDisk' + # self.cmd(update_cmd, checks=[ + # self.check('provisioningState', 'Succeeded'), + # ]) + + # # update: disable-azure-container-storage + # update_cmd = 'aks update --resource-group={resource_group} --name={name} --yes --output=json ' \ + # '--disable-azure-container-storage' + # self.cmd(update_cmd, checks=[ + # self.check('provisioningState', 'Succeeded'), + # ]) + + # # delete + # cmd = 'aks delete --resource-group={resource_group} --name={name} --yes --no-wait' + # self.cmd(cmd, checks=[ + # self.is_empty(), + # ]) + + # live only due to workspace is not mocked correctly @AllowLargeResponse() @AKSCustomResourceGroupPreparer(random_name_length=17, name_prefix='clitest', location='westus2') diff --git a/src/aks-preview/azext_aks_preview/tests/latest/test_validators.py b/src/aks-preview/azext_aks_preview/tests/latest/test_validators.py index ec77e53cbf1..9537dde9f90 100644 --- a/src/aks-preview/azext_aks_preview/tests/latest/test_validators.py +++ b/src/aks-preview/azext_aks_preview/tests/latest/test_validators.py @@ -6,9 +6,15 @@ from types import SimpleNamespace from azure.cli.core.util import CLIError -from azure.cli.core.azclierror import InvalidArgumentValueError +from azure.cli.core.azclierror import ( + ArgumentUsageError, + InvalidArgumentValueError, + MutuallyExclusiveArgumentError, +) import azext_aks_preview._validators as validators from azext_aks_preview._consts import ADDONS +import azext_aks_preview.azurecontainerstorage._validators as acstor_validator +import azext_aks_preview.azurecontainerstorage._consts as acstor_consts class TestValidateIPRanges(unittest.TestCase): @@ -615,5 +621,153 @@ def test_valid_start_time(self): validators.validate_start_time(namespace) +class TestValidateAzureContainerStorage(unittest.TestCase): + def test_conflicting_flags_for_enable_disable(self): + err = 'Conflicting flags. Cannot set --enable-azure-container-storage '\ + 'and --disable-azure-container-storage together.' + with self.assertRaises(MutuallyExclusiveArgumentError) as cm: + acstor_validator.validate_azure_container_storage_params(True, True, None, None, None, None, None, None) + self.assertEqual(str(cm.exception), err) + + def test_disable_flag_with_storage_pool_name(self): + storage_pool_name = "pool-name" + err = 'Conflicting flags. Cannot define --storage-pool-name value '\ + 'when --disable-azure-container-storage is set.' + with self.assertRaises(MutuallyExclusiveArgumentError) as cm: + acstor_validator.validate_azure_container_storage_params(None, True, storage_pool_name, None, None, None, None, None) + self.assertEqual(str(cm.exception), err) + + def test_disable_flag_with_storage_pool_sku(self): + storage_pool_sku = acstor_consts.CONST_STORAGE_POOL_SKU_PREMIUM_LRS + err = 'Conflicting flags. Cannot define --storage-pool-sku value ' \ + 'when --disable-azure-container-storage is set.' + with self.assertRaises(MutuallyExclusiveArgumentError) as cm: + acstor_validator.validate_azure_container_storage_params(None, True, None, None, storage_pool_sku, None, None, None) + self.assertEqual(str(cm.exception), err) + + def test_disable_flag_with_storage_pool_size(self): + storage_pool_size = "5Gi" + err = 'Conflicting flags. 
Cannot define --storage-pool-size value ' \ + 'when --disable-azure-container-storage is set.' + with self.assertRaises(MutuallyExclusiveArgumentError) as cm: + acstor_validator.validate_azure_container_storage_params(None, True, None, None, None, None, storage_pool_size, None) + self.assertEqual(str(cm.exception), err) + + def test_disable_flag_with_storage_pool_option(self): + storage_pool_option = acstor_consts.CONST_STORAGE_POOL_OPTION_NVME + err = 'Conflicting flags. Cannot define --storage-pool-option value ' \ + 'when --disable-azure-container-storage is set.' + with self.assertRaises(MutuallyExclusiveArgumentError) as cm: + acstor_validator.validate_azure_container_storage_params(None, True, None, None, None, storage_pool_option, None, None) + self.assertEqual(str(cm.exception), err) + + def test_disable_flag_with_nodepool_list(self): + nodepool_list = "test,test1" + err = 'Conflicting flags. Cannot define --azure-container-storage-nodepools value ' \ + 'when --disable-azure-container-storage is set.' + with self.assertRaises(MutuallyExclusiveArgumentError) as cm: + acstor_validator.validate_azure_container_storage_params(None, True, None, None, None, None, None, nodepool_list) + self.assertEqual(str(cm.exception), err) + + def test_valid_disable(self): + acstor_validator.validate_azure_container_storage_params(None, True, None, None, None, None, None, None) + + def test_enable_with_invalid_storage_pool_name(self): + storage_pool_name = "my_test_pool" + err = "Invalid --storage-pool-name value. " \ + "Accepted values are lowercase alphanumeric characters, " \ + "'-' or '.', and must start and end with an alphanumeric character." + with self.assertRaises(InvalidArgumentValueError) as cm: + acstor_validator.validate_azure_container_storage_params(True, None, storage_pool_name, None, None, None, None, None) + self.assertEqual(str(cm.exception), err) + + def test_enable_with_sku_and_ephemeral_disk_pool(self): + storage_pool_name = "valid-name" + storage_pool_sku = acstor_consts.CONST_STORAGE_POOL_SKU_PREMIUM_LRS + storage_pool_type = acstor_consts.CONST_STORAGE_POOL_TYPE_EPHEMERAL_DISK + err = 'Cannot set --storage-pool-sku when --enable-azure-container-storage is ephemeralDisk.' + with self.assertRaises(ArgumentUsageError) as cm: + acstor_validator.validate_azure_container_storage_params(True, None, storage_pool_name, storage_pool_type, storage_pool_sku, None, None, None) + self.assertEqual(str(cm.exception), err) + + def test_enable_with_sku_and_elastic_san_pool(self): + storage_pool_name = "valid-name" + storage_pool_sku = acstor_consts.CONST_STORAGE_POOL_SKU_PREMIUMV2_LRS + storage_pool_type = acstor_consts.CONST_STORAGE_POOL_TYPE_ELASTIC_SAN + supported_skus = acstor_consts.CONST_STORAGE_POOL_SKU_PREMIUM_LRS + ", " + \ + acstor_consts.CONST_STORAGE_POOL_SKU_PREMIUM_ZRS + err = 'Invalid --storage-pool-sku value. ' \ + 'Supported value for --storage-pool-sku are {0} ' \ + 'when --enable-azure-container-storage is set to elasticSan.' 
\ + .format(supported_skus) + with self.assertRaises(ArgumentUsageError) as cm: + acstor_validator.validate_azure_container_storage_params(True, None, storage_pool_name, storage_pool_type, storage_pool_sku, None, None, None) + self.assertEqual(str(cm.exception), err) + + def test_enable_with_option_and_non_ephemeral_disk_pool(self): + storage_pool_name = "valid-name" + storage_pool_option = acstor_consts.CONST_STORAGE_POOL_OPTION_NVME + storage_pool_type = acstor_consts.CONST_STORAGE_POOL_TYPE_AZURE_DISK + err = 'Cannot set --storage-pool-option when --enable-azure-container-storage is not ephemeralDisk.' + with self.assertRaises(ArgumentUsageError) as cm: + acstor_validator.validate_azure_container_storage_params(True, None, storage_pool_name, storage_pool_type, None, storage_pool_option, None, None) + self.assertEqual(str(cm.exception), err) + + def test_enable_with_ssd_option_and_ephemeral_disk_pool(self): + storage_pool_name = "valid-name" + storage_pool_option = acstor_consts.CONST_STORAGE_POOL_OPTION_SSD + storage_pool_type = acstor_consts.CONST_STORAGE_POOL_TYPE_EPHEMERAL_DISK + err = '--storage-pool-option Temp storage (SSD) currently not supported.' + with self.assertRaises(ArgumentUsageError) as cm: + acstor_validator.validate_azure_container_storage_params(True, None, storage_pool_name, storage_pool_type, None, storage_pool_option, None, None) + self.assertEqual(str(cm.exception), err) + + def test_enable_with_invalid_storage_pool_size(self): + storage_pool_name = "valid-name" + storage_pool_size = "5" + err = 'Value for --storage-pool-size should be defined with size followed by Gi or Ti e.g. 512Gi or 2Ti.' + with self.assertRaises(ArgumentUsageError) as cm: + acstor_validator.validate_azure_container_storage_params(True, None, storage_pool_name, None, None, None, storage_pool_size, None) + self.assertEqual(str(cm.exception), err) + + def test_enable_with_invalid_size_for_esan_storage_pool(self): + storage_pool_name = "valid-name" + storage_pool_size = "512Gi" + storage_pool_type = acstor_consts.CONST_STORAGE_POOL_TYPE_ELASTIC_SAN + err = 'Value for --storage-pool-size must be at least 1Ti when --enable-azure-container-storage is elasticSan.' + with self.assertRaises(ArgumentUsageError) as cm: + acstor_validator.validate_azure_container_storage_params(True, None, storage_pool_name, storage_pool_type, None, None, storage_pool_size, None) + self.assertEqual(str(cm.exception), err) + + def test_valid_enable_for_azure_disk_pool(self): + storage_pool_name = "valid-name" + storage_pool_size = "5Ti" + storage_pool_type = acstor_consts.CONST_STORAGE_POOL_TYPE_AZURE_DISK + storage_pool_sku = acstor_consts.CONST_STORAGE_POOL_SKU_PREMIUM_LRS + acstor_validator.validate_azure_container_storage_params(True, None, storage_pool_name, storage_pool_type, storage_pool_sku, None, storage_pool_size, None) + + def test_valid_enable_for_ephemeral_disk_pool(self): + storage_pool_name = "valid-name" + storage_pool_size = "5Ti" + storage_pool_type = acstor_consts.CONST_STORAGE_POOL_TYPE_EPHEMERAL_DISK + storage_pool_option = acstor_consts.CONST_STORAGE_POOL_OPTION_NVME + acstor_validator.validate_azure_container_storage_params(True, None, storage_pool_name, storage_pool_type, None, storage_pool_option, storage_pool_size, None) + + def test_missing_nodepool_from_cluster_nodepool_list(self): + nodepool_list = "pool1,pool2" + agentpools = {"nodepool1": "NODEPOOL1", "nodepool2": "NODEPOOL2"} + err = 'Nodepool: pool1 not found. Please provide existing nodepool names in --azure-container-storage-nodepools.' 
\ + '\nUse command `az nodepool list` to get the list of nodepools in the cluster.' \ + '\nAborting installation of Azure Container Storage.' + with self.assertRaises(InvalidArgumentValueError) as cm: + acstor_validator.validate_nodepool_names_with_cluster_nodepools(nodepool_list, agentpools) + self.assertEqual(str(cm.exception), err) + + def test_valid_nodepool_list_in_cluster_nodepool(self): + nodepool_list = "nodepool1,nodepool2" + agentpools = {"nodepool1": "NODEPOOL1", "nodepool2": "NODEPOOL2"} + acstor_validator.validate_nodepool_names_with_cluster_nodepools(nodepool_list, agentpools) + + if __name__ == "__main__": unittest.main() diff --git a/src/aks-preview/linter_exclusions.yml b/src/aks-preview/linter_exclusions.yml index edbb9ed7ed6..23a6b48f802 100644 --- a/src/aks-preview/linter_exclusions.yml +++ b/src/aks-preview/linter_exclusions.yml @@ -54,6 +54,9 @@ aks create: enable_azure_monitor_metrics: rule_exclusions: - option_length_too_long + enable_azure_container_storage: + rule_exclusions: + - option_length_too_long aks update: parameters: enable_pod_identity_with_kubenet: @@ -128,6 +131,15 @@ aks update: disable_azure_monitor_metrics: rule_exclusions: - option_length_too_long + enable_azure_container_storage: + rule_exclusions: + - option_length_too_long + disable_azure_container_storage: + rule_exclusions: + - option_length_too_long + azure_container_storage_nodepools: + rule_exclusions: + - option_length_too_long aks delete: parameters: ignore_pod_disruption_budget: diff --git a/src/aks-preview/setup.py b/src/aks-preview/setup.py index 4ba45bb2d1d..c2546eb29d1 100644 --- a/src/aks-preview/setup.py +++ b/src/aks-preview/setup.py @@ -9,7 +9,7 @@ from setuptools import setup, find_packages -VERSION = "0.5.163" +VERSION = "0.5.164" CLASSIFIERS = [ "Development Status :: 4 - Beta",
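
Example usage of the options introduced by this patch. This is a sketch only: the resource group, cluster, and pool names are placeholders, and values follow the rules enforced in _validators.py (for instance, an elasticSan pool must be at least 1Ti and accepts only Premium_LRS or Premium_ZRS skus). The first command mirrors the new test_aks_create_with_azurecontainerstorage test.

    # Enable Azure Container Storage at cluster creation with an azureDisk storage pool
    az aks create -g myResourceGroup -n myCluster --generate-ssh-keys --node-vm-size standard_d4s_v3 --node-count 3 --enable-managed-identity --enable-azure-container-storage azureDisk

    # Enable it on an existing cluster with an elasticSan storage pool on selected (existing) nodepools
    az aks update -g myResourceGroup -n myCluster --enable-azure-container-storage elasticSan --storage-pool-name mypool --storage-pool-size 2Ti --storage-pool-sku Premium_LRS --azure-container-storage-nodepools nodepool1,nodepool2

    # Disable Azure Container Storage (prompts whether to validate that no storagepools are in use before uninstalling)
    az aks update -g myResourceGroup -n myCluster --disable-azure-container-storage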