From 546ae671176320c668361ebab4a47c8711f26a36 Mon Sep 17 00:00:00 2001 From: Wei Shi <19400755+TheOnlyWei@users.noreply.github.com> Date: Tue, 15 Aug 2023 14:10:30 -0700 Subject: [PATCH] enforce valid custom locations SP app object id add .gitignore and test configuration file --- src/connectedk8s/.gitignore | 2 + src/connectedk8s/HISTORY.rst | 6 +- src/connectedk8s/azext_connectedk8s/_utils.py | 7 +- src/connectedk8s/azext_connectedk8s/custom.py | 91 ++++++++---------- .../azext_connectedk8s/tests/latest/README.md | 20 ++++ .../tests/latest/config.json.dist | 6 ++ .../latest/test_connectedk8s_scenario.py | 96 +++++++++++++------ src/connectedk8s/setup.py | 2 +- 8 files changed, 144 insertions(+), 86 deletions(-) create mode 100644 src/connectedk8s/.gitignore create mode 100644 src/connectedk8s/azext_connectedk8s/tests/latest/README.md create mode 100644 src/connectedk8s/azext_connectedk8s/tests/latest/config.json.dist diff --git a/src/connectedk8s/.gitignore b/src/connectedk8s/.gitignore new file mode 100644 index 00000000000..e00574fbdf0 --- /dev/null +++ b/src/connectedk8s/.gitignore @@ -0,0 +1,2 @@ +# Ignore user-specific test configuration +azext_connectedk8s/tests/latest/config.json \ No newline at end of file diff --git a/src/connectedk8s/HISTORY.rst b/src/connectedk8s/HISTORY.rst index 8d33c8594e7..f05c093f592 100644 --- a/src/connectedk8s/HISTORY.rst +++ b/src/connectedk8s/HISTORY.rst @@ -2,6 +2,11 @@ Release History =============== + +1.5.0 +++++++ +* Enforce valid custom locations service principal application object id passed in by the user for enabling custom locations feature. + 1.4.2 ++++++ * Fix reference error. 
@@ -16,7 +21,6 @@ Release History 1.3.20 ++++++ - * Bug fix in parsing logs for outbound connectivity check for troubleshoot command 1.3.19 diff --git a/src/connectedk8s/azext_connectedk8s/_utils.py b/src/connectedk8s/azext_connectedk8s/_utils.py index 8da3ef7971e..86fcbb16fe4 100644 --- a/src/connectedk8s/azext_connectedk8s/_utils.py +++ b/src/connectedk8s/azext_connectedk8s/_utils.py @@ -569,8 +569,8 @@ def cleanup_release_install_namespace_if_exists(): # DO NOT use this method for re-put scenarios. This method involves new NS creation for helm release. For re-put scenarios, brownfield scenario needs to be handled where helm release still stays in default NS def helm_install_release(resource_manager, chart_path, subscription_id, kubernetes_distro, kubernetes_infra, resource_group_name, cluster_name, location, onboarding_tenant_id, http_proxy, https_proxy, no_proxy, proxy_cert, private_key_pem, - kube_config, kube_context, no_wait, values_file, cloud_name, disable_auto_upgrade, enable_custom_locations, - custom_locations_oid, helm_client_location, enable_private_link, arm_metadata, onboarding_timeout="600", + kube_config, kube_context, no_wait, values_file, cloud_name, disable_auto_upgrade, custom_locations_oid, + helm_client_location, enable_private_link, arm_metadata, onboarding_timeout="600", container_log_path=None): cmd_helm_install = [helm_client_location, "upgrade", "--install", "azure-arc", chart_path, @@ -611,7 +611,7 @@ def helm_install_release(resource_manager, chart_path, subscription_id, kubernet ) # Add custom-locations related params - if enable_custom_locations and not enable_private_link: + if custom_locations_oid is not None and not enable_private_link: cmd_helm_install.extend(["--set", "systemDefaultValues.customLocations.enabled=true"]) cmd_helm_install.extend(["--set", "systemDefaultValues.customLocations.oid={}".format(custom_locations_oid)]) # Disable cluster connect if private link is enabled @@ -826,7 +826,6 @@ def 
get_metadata(arm_endpoint, api_version="2022-09-01"): import requests session = requests.Session() metadata_endpoint = arm_endpoint + metadata_url_suffix - print(f"Retrieving ARM metadata from: {metadata_endpoint}") response = session.get(metadata_endpoint) if response.status_code == 200: return response.json() diff --git a/src/connectedk8s/azext_connectedk8s/custom.py b/src/connectedk8s/azext_connectedk8s/custom.py index d106a98b6a0..ab742dceba8 100644 --- a/src/connectedk8s/azext_connectedk8s/custom.py +++ b/src/connectedk8s/azext_connectedk8s/custom.py @@ -404,15 +404,18 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, correlat put_cc_response = create_cc_resource(client, resource_group_name, cluster_name, cc, no_wait) put_cc_response = LongRunningOperation(cmd.cli_ctx)(put_cc_response) print("Azure resource provisioning has finished.") - # Checking if custom locations rp is registered and fetching oid if it is registered - enable_custom_locations, custom_locations_oid = check_cl_registration_and_get_oid(cmd, cl_oid, subscription_id) + + # Checking if custom locations rp is registered and verify passed-in cl_oid. 
+ test_custom_location_requirements(cmd, cl_oid, subscription_id) print("Starting to install Azure arc agents on the Kubernetes cluster.") + # Install azure-arc agents utils.helm_install_release(cmd.cli_ctx.cloud.endpoints.resource_manager, chart_path, subscription_id, kubernetes_distro, kubernetes_infra, resource_group_name, cluster_name, location, onboarding_tenant_id, http_proxy, https_proxy, no_proxy, proxy_cert, private_key_pem, kube_config, - kube_context, no_wait, values_file, azure_cloud, disable_auto_upgrade, enable_custom_locations, - custom_locations_oid, helm_client_location, enable_private_link, arm_metadata, onboarding_timeout, container_log_path) + kube_context, no_wait, values_file, azure_cloud, disable_auto_upgrade, cl_oid, helm_client_location, + enable_private_link, arm_metadata, onboarding_timeout, container_log_path) + return put_cc_response @@ -1396,15 +1399,14 @@ def enable_features(cmd, client, resource_group_name, cluster_name, features, ku azrbac_skip_authz_check = escape_proxy_settings(azrbac_skip_authz_check) if enable_cl: + if cl_oid is None: + raise RequiredArgumentMissingError("Custom locations object id was not passed. Please pass '--custom-locations-oid' to enable custom locations.") subscription_id = os.getenv('AZURE_SUBSCRIPTION_ID') if custom_token_passed is True else get_subscription_id(cmd.cli_ctx) - enable_cl, custom_locations_oid = check_cl_registration_and_get_oid(cmd, cl_oid, subscription_id) - if not enable_cluster_connect and enable_cl: + # The following throws an error if custom location requirements are not met since user expects to use custom locations. 
+ test_custom_location_requirements(cmd, cl_oid, subscription_id) + if not enable_cluster_connect: enable_cluster_connect = True logger.warning("Enabling 'custom-locations' feature will enable 'cluster-connect' feature too.") - if not enable_cl: - features.remove("custom-locations") - if len(features) == 0: - raise ClientRequestError("Failed to enable 'custom-locations' feature.") # Send cloud information to telemetry send_cloud_telemetry(cmd) @@ -1485,7 +1487,7 @@ def enable_features(cmd, client, resource_group_name, cluster_name, features, ku cmd_helm_upgrade.extend(["--set", "systemDefaultValues.clusterconnect-agent.enabled=true"]) if enable_cl: cmd_helm_upgrade.extend(["--set", "systemDefaultValues.customLocations.enabled=true"]) - cmd_helm_upgrade.extend(["--set", "systemDefaultValues.customLocations.oid={}".format(custom_locations_oid)]) + cmd_helm_upgrade.extend(["--set", "systemDefaultValues.customLocations.oid={}".format(cl_oid)]) response_helm_upgrade = Popen(cmd_helm_upgrade, stdout=PIPE, stderr=PIPE) _, error_helm_upgrade = response_helm_upgrade.communicate() @@ -2160,56 +2162,47 @@ def client_side_proxy(cmd, return expiry, clientproxy_process -def check_cl_registration_and_get_oid(cmd, cl_oid, subscription_id): - enable_custom_locations = True - custom_locations_oid = "" +def test_custom_location_requirements(cmd, cl_oid, subscription_id): + # Checking if custom locations rp is registered and verify passed-in cl_oid. + if cl_oid is not None: + if not extended_location_registered(cmd, subscription_id): + raise ResourceNotFoundError("Custom locations object id was passed, but Microsoft.ExtendedLocation is not registered. Please register it and run again.") + if not custom_locations_oid_valid(cmd, cl_oid): + raise InvalidArgumentValueError(f"Error looking up custom locations object id '{cl_oid}'. It might be invalid.") + else: + logger.warning("Won't enable custom locations feature as parameter '--custom-locations-oid' wasn't passed. 
Learn more at https://aka.ms/CustomLocationsObjectID") + + +def extended_location_registered(cmd, subscription_id): try: rp_client = resource_providers_client(cmd.cli_ctx, subscription_id) cl_registration_state = rp_client.get(consts.Custom_Locations_Provider_Namespace).registration_state - if cl_registration_state != "Registered": - enable_custom_locations = False - logger.warning("'Custom-locations' feature couldn't be enabled on this cluster as the pre-requisite registration of 'Microsoft.ExtendedLocation' was not met. More details for enabling this feature later on this cluster can be found here - https://aka.ms/EnableCustomLocations") - else: - custom_locations_oid = get_custom_locations_oid(cmd, cl_oid) - if custom_locations_oid == "": - enable_custom_locations = False except Exception as e: - enable_custom_locations = False - logger.warning("Unable to fetch registration state of 'Microsoft.ExtendedLocation'. Failed to enable 'custom-locations' feature. This is fine if not required. Proceeding with helm install.") + logger.warning("Unable to fetch registration state of 'Microsoft.ExtendedLocation'. Failed to enable 'custom-locations' feature.") telemetry.set_exception(exception=e, fault_type=consts.Custom_Locations_Registration_Check_Fault_Type, summary='Unable to fetch status of Custom Locations RP registration.') - return enable_custom_locations, custom_locations_oid + raise + if cl_registration_state != "Registered": + logger.warning("Custom locations couldn't be enabled on this cluster as 'Microsoft.ExtendedLocation' is not registered. More details for enabling this feature can be found here: https://aka.ms/EnableCustomLocations") + return False + return True -def get_custom_locations_oid(cmd, cl_oid): +def custom_locations_oid_valid(cmd, cl_oid): + if cl_oid is None: + raise RequiredArgumentMissingError("The passed in custom locations object id is None. 
Please pass in the custom locations object id to verify.") try: sp_graph_client = get_graph_client_service_principals(cmd.cli_ctx) - sub_filters = [] - sub_filters.append("displayName eq '{}'".format("Custom Locations RP")) - result = list(sp_graph_client.list(filter=(' and '.join(sub_filters)))) - if len(result) != 0: - if cl_oid is not None and cl_oid != result[0].object_id: - logger.debug("The 'Custom-locations' OID passed is different from the actual OID({}) of the Custom Locations RP app. Proceeding with the correct one...".format(result[0].object_id)) - return result[0].object_id # Using the fetched OID - - if cl_oid is None: - logger.warning("Failed to enable Custom Locations feature on the cluster. Unable to fetch Object ID of Azure AD application used by Azure Arc service. Try enabling the feature by passing the --custom-locations-oid parameter directly. Learn more at https://aka.ms/CustomLocationsObjectID") - telemetry.set_exception(exception='Unable to fetch oid of custom locations app.', fault_type=consts.Custom_Locations_OID_Fetch_Fault_Type, - summary='Unable to fetch oid for custom locations app.') - return "" - else: - return cl_oid + # Do not use display names for look-up as display names are not unique. + result = sp_graph_client.get(cl_oid) + logger.debug(f"Retrieved SP app named '{result.display_name}' for object id {cl_oid}") + return True except Exception as e: - log_string = "Unable to fetch the Object ID of the Azure AD application used by Azure Arc service. " + error = f"Unable to get the custom locations service principal application using the object id {cl_oid}. Please verify the object id is valid." + logger.error(error + " " + str(e)) telemetry.set_exception(exception=e, fault_type=consts.Custom_Locations_OID_Fetch_Fault_Type, - summary='Unable to fetch oid for custom locations app.') - if cl_oid: - log_string += "Proceeding with the Object ID provided to enable the 'custom-locations' feature." 
- logger.warning(log_string) - return cl_oid - log_string += "Unable to enable the 'custom-locations' feature. " + str(e) - logger.warning(log_string) - return "" + summary=error) + return False def troubleshoot(cmd, client, resource_group_name, cluster_name, kube_config=None, kube_context=None, no_wait=False, tags=None): diff --git a/src/connectedk8s/azext_connectedk8s/tests/latest/README.md b/src/connectedk8s/azext_connectedk8s/tests/latest/README.md new file mode 100644 index 00000000000..9c400accc0a --- /dev/null +++ b/src/connectedk8s/azext_connectedk8s/tests/latest/README.md @@ -0,0 +1,20 @@ +# Connectedk8s Testing +Tests need to be configured before running. + +1. Make a copy of `config.json.dist` and rename it to `config.json` (the config.json is git ignored). +1. Fill in the details of the newly created `config.json` file: + - `customLocationsOid`: The custom locations RP service principal object id for enabling the custom locations feature. + - `rbacAppId`: The RBAC service principal app id for testing RBAC feature. + - `rbacAppSecret`: The RBAC service principal secret for testing RBAC feature. 
+ - Querying for apps: Search for required application details via AAD graph with the following `$filter` query in az rest in PowerShell (make sure to fill in the tenant id): + - Get by starts with: + ```powershell + az rest --method get --url "https://graph.windows.net//servicePrincipals?`$filter=startswith(displayName,'Custom Locations')&api-version=1.6" + ``` + - Get by exact value: + ```powershell + az rest --method get --url "https://graph.windows.net//servicePrincipals?`$filter=appId eq ''&api-version=1.6" + ``` + - For more information about AAD graph queries: + - https://learn.microsoft.com/en-us/graph/filter-query-parameter?tabs=http + - https://learn.microsoft.com/en-us/graph/migrate-azure-ad-graph-request-differences \ No newline at end of file diff --git a/src/connectedk8s/azext_connectedk8s/tests/latest/config.json.dist b/src/connectedk8s/azext_connectedk8s/tests/latest/config.json.dist new file mode 100644 index 00000000000..6f25837f47d --- /dev/null +++ b/src/connectedk8s/azext_connectedk8s/tests/latest/config.json.dist @@ -0,0 +1,6 @@ +{ + "customLocationsOid": "", + "rbacAppId": "", + "rbacAppSecret": "", + "location": "eastus2euap" +} \ No newline at end of file diff --git a/src/connectedk8s/azext_connectedk8s/tests/latest/test_connectedk8s_scenario.py b/src/connectedk8s/azext_connectedk8s/tests/latest/test_connectedk8s_scenario.py index e7c881e72ac..4ad7c28363c 100644 --- a/src/connectedk8s/azext_connectedk8s/tests/latest/test_connectedk8s_scenario.py +++ b/src/connectedk8s/azext_connectedk8s/tests/latest/test_connectedk8s_scenario.py @@ -19,10 +19,25 @@ import subprocess from subprocess import Popen, PIPE, run, STDOUT, call, DEVNULL from azure.cli.testsdk import (LiveScenarioTest, ResourceGroupPreparer, live_only) # pylint: disable=import-error +from azure.cli.core.azclierror import RequiredArgumentMissingError TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..')) logger = get_logger(__name__) +# Set up configuration file. 
If configuration file is not found, then auto-populate with fake values where allowed. +CONFIG = {} # dictionary of configurations +config_path = os.path.join(os.path.dirname(__file__), "config.json") +if not os.path.isfile(config_path): + CONFIG["customLocationsOid"] = "" + CONFIG["rbacAppId"] = "fakeRbacAppId" + CONFIG["rbacAppSecret"] = "fakeRbacAppSecret" + CONFIG["location"] = "eastus2euap" +else: + with open(config_path, 'r') as f: + CONFIG = json.load(f) + for key in CONFIG: + if not CONFIG[key]: + raise RequiredArgumentMissingError(f"Missing required configuration in {config_path} file. Make sure all properties are populated.") def _get_test_data_file(filename): # Don't output temporary test data to "**/azext_connectedk8s/tests/latest/data/" as that location @@ -133,7 +148,7 @@ def install_kubectl_client(): class Connectedk8sScenarioTest(LiveScenarioTest): @live_only() - @ResourceGroupPreparer(name_prefix='conk8stest', location='eastus2euap', random_name_length=16) + @ResourceGroupPreparer(name_prefix='conk8stest', location=CONFIG['location'], random_name_length=16) def test_connect(self,resource_group): managed_cluster_name = self.create_random_name(prefix='test-connect', length=24) @@ -142,12 +157,13 @@ def test_connect(self,resource_group): 'rg': resource_group, 'name': self.create_random_name(prefix='cc-', length=12), 'kubeconfig': kubeconfig, - 'managed_cluster_name': managed_cluster_name + 'managed_cluster_name': managed_cluster_name, + 'location': CONFIG['location'] }) self.cmd('aks create -g {rg} -n {managed_cluster_name} --generate-ssh-keys') self.cmd('aks get-credentials -g {rg} -n {managed_cluster_name} -f {kubeconfig} --admin') - self.cmd('connectedk8s connect -g {rg} -n {name} -l eastus --tags foo=doo --kube-config {kubeconfig} --kube-context {managed_cluster_name}-admin', checks=[ + self.cmd('connectedk8s connect -g {rg} -n {name} -l {location} --tags foo=doo --kube-config {kubeconfig} --kube-context {managed_cluster_name}-admin', checks=[ 
self.check('tags.foo', 'doo'), self.check('resourceGroup', '{rg}'), self.check('name', '{name}') @@ -161,7 +177,7 @@ def test_connect(self,resource_group): @live_only() - @ResourceGroupPreparer(name_prefix='conk8stest', location='eastus2euap', random_name_length=16) + @ResourceGroupPreparer(name_prefix='conk8stest', location=CONFIG['location'], random_name_length=16) def test_forcedelete(self,resource_group): managed_cluster_name = self.create_random_name(prefix='test-force-delete', length=24) @@ -170,12 +186,13 @@ def test_forcedelete(self,resource_group): 'rg': resource_group, 'name': self.create_random_name(prefix='cc-', length=12), 'kubeconfig': kubeconfig, - 'managed_cluster_name': managed_cluster_name + 'managed_cluster_name': managed_cluster_name, + 'location': CONFIG['location'] }) self.cmd('aks create -g {rg} -n {managed_cluster_name} --generate-ssh-keys') self.cmd('aks get-credentials -g {rg} -n {managed_cluster_name} -f {kubeconfig} --admin') - self.cmd('connectedk8s connect -g {rg} -n {name} -l eastus --tags foo=doo --kube-config {kubeconfig} --kube-context {managed_cluster_name}-admin', checks=[ + self.cmd('connectedk8s connect -g {rg} -n {name} -l {location} --tags foo=doo --kube-config {kubeconfig} --kube-context {managed_cluster_name}-admin', checks=[ self.check('tags.foo', 'doo'), self.check('name', '{name}') ]) @@ -200,21 +217,33 @@ def test_forcedelete(self,resource_group): @live_only() - @ResourceGroupPreparer(name_prefix='conk8stest', location='eastus2euap', random_name_length=16) + @ResourceGroupPreparer(name_prefix='conk8stest', location=CONFIG['location'], random_name_length=16) def test_enable_disable_features(self,resource_group): managed_cluster_name = self.create_random_name(prefix='test-enable-disable', length=24) - kubeconfig="%s" % (_get_test_data_file(managed_cluster_name + '-config.yaml')) + kubeconfig="%s" % (_get_test_data_file(managed_cluster_name + '-config.yaml')) + + if CONFIG['customLocationsOid'] is None or 
CONFIG['customLocationsOid'] == "": + cli = get_default_cli() + cli.invoke(["ad", "sp", "list", "--filter", "displayName eq 'Custom Locations RP'"]) + if cli.result.exit_code != 0: + raise cli.result.error + CONFIG['customLocationsOid'] = cli.result.result[0]["id"] + self.kwargs.update({ 'rg': resource_group, 'name': self.create_random_name(prefix='cc-', length=12), 'kubeconfig': kubeconfig, - 'managed_cluster_name': managed_cluster_name + 'managed_cluster_name': managed_cluster_name, + 'custom_locations_oid': CONFIG['customLocationsOid'], + 'rbac_app_id': CONFIG['rbacAppId'], + 'rbac_app_secret': CONFIG['rbacAppSecret'], + 'location': CONFIG['location'] }) - + self.cmd('aks create -g {rg} -n {managed_cluster_name} --generate-ssh-keys') self.cmd('aks get-credentials -g {rg} -n {managed_cluster_name} -f {kubeconfig} --admin') - self.cmd('connectedk8s connect -g {rg} -n {name} -l eastus --tags foo=doo --kube-config {kubeconfig} --kube-context {managed_cluster_name}-admin', checks=[ + self.cmd('connectedk8s connect -g {rg} -n {name} -l {location} --tags foo=doo --kube-config {kubeconfig} --kube-context {managed_cluster_name}-admin', checks=[ self.check('tags.foo', 'doo'), self.check('name', '{name}') ]) @@ -236,7 +265,7 @@ def test_enable_disable_features(self,resource_group): changed_cmd = json.loads(cmd_output.communicate()[0].strip()) assert(changed_cmd["systemDefaultValues"]['customLocations']['enabled'] == bool(0)) - self.cmd('connectedk8s enable-features -n {name} -g {rg} --features custom-locations --kube-config {kubeconfig} --kube-context {managed_cluster_name}-admin') + self.cmd('connectedk8s enable-features -n {name} -g {rg} --features custom-locations --kube-config {kubeconfig} --kube-context {managed_cluster_name}-admin --custom-locations-oid {custom_locations_oid}') cmd_output1 = subprocess.Popen(cmd, stdout=PIPE, stderr=PIPE) _, error_helm_delete = cmd_output1.communicate() assert(cmd_output1.returncode == 0) @@ -262,7 +291,7 @@ def 
test_enable_disable_features(self,resource_group): disabled_cmd1 = json.loads(cmd_output1.communicate()[0].strip()) assert(disabled_cmd1["systemDefaultValues"]['clusterconnect-agent']['enabled'] == bool(0)) - self.cmd('connectedk8s enable-features -n {name} -g {rg} --features custom-locations --kube-config {kubeconfig} --kube-context {managed_cluster_name}-admin') + self.cmd('connectedk8s enable-features -n {name} -g {rg} --features custom-locations --kube-config {kubeconfig} --kube-context {managed_cluster_name}-admin --custom-locations-oid {custom_locations_oid}') cmd_output1 = subprocess.Popen(cmd, stdout=PIPE, stderr=PIPE) _, error_helm_delete = cmd_output1.communicate() assert(cmd_output1.returncode == 0) @@ -278,7 +307,7 @@ def test_enable_disable_features(self,resource_group): disabled_cmd1 = json.loads(cmd_output1.communicate()[0].strip()) assert(disabled_cmd1["systemDefaultValues"]['guard']['enabled'] == bool(0)) - self.cmd('az connectedk8s enable-features -n {name} -g {rg} --kube-config {kubeconfig} --kube-context {managed_cluster_name}-admin --features azure-rbac --app-id ffba4043-836e-4dcc-906c-fbf60bf54eef --app-secret="6a6ae7a7-4260-40d3-ba00-af909f2ca8f0"') + self.cmd('az connectedk8s enable-features -n {name} -g {rg} --kube-config {kubeconfig} --kube-context {managed_cluster_name}-admin --features azure-rbac --app-id {rbac_app_id} --app-secret {rbac_app_secret}') # deleting the cluster self.cmd('connectedk8s delete -g {rg} -n {name} --kube-config {kubeconfig} --kube-context {managed_cluster_name}-admin -y') @@ -289,7 +318,7 @@ def test_enable_disable_features(self,resource_group): @live_only() - @ResourceGroupPreparer(name_prefix='conk8stest', location='eastus2euap', random_name_length=16) + @ResourceGroupPreparer(name_prefix='conk8stest', location=CONFIG['location'], random_name_length=16) def test_connectedk8s_list(self,resource_group): managed_cluster_name = self.create_random_name(prefix='first', length=24) @@ -309,12 +338,13 @@ def 
test_connectedk8s_list(self,resource_group): 'kubeconfig': kubeconfig, 'kubeconfigpls': kubeconfigpls, 'managed_cluster_name': managed_cluster_name, - 'managed_cluster_name_second': managed_cluster_name_second + 'managed_cluster_name_second': managed_cluster_name_second, + 'location': CONFIG['location'] }) # create two clusters and then list the cluster names self.cmd('aks create -g {rg} -n {managed_cluster_name} --generate-ssh-keys') self.cmd('aks get-credentials -g {rg} -n {managed_cluster_name} -f {kubeconfig} --admin') - self.cmd('connectedk8s connect -g {rg} -n {name} -l eastus --tags foo=doo --kube-config {kubeconfig} --kube-context {managed_cluster_name}-admin', checks=[ + self.cmd('connectedk8s connect -g {rg} -n {name} -l {location} --tags foo=doo --kube-config {kubeconfig} --kube-context {managed_cluster_name}-admin', checks=[ self.check('tags.foo', 'doo'), self.check('name', '{name}') ]) @@ -326,7 +356,7 @@ def test_connectedk8s_list(self,resource_group): self.cmd('aks create -g {rg} -n {managed_cluster_name_second} --generate-ssh-keys') self.cmd('aks get-credentials -g {rg} -n {managed_cluster_name_second} -f {kubeconfigpls} --admin') - self.cmd('connectedk8s connect -g {rg} -n {name_second} -l eastus --tags foo=doo --kube-config {kubeconfigpls} --kube-context {managed_cluster_name_second}-admin', checks=[ + self.cmd('connectedk8s connect -g {rg} -n {name_second} -l {location} --tags foo=doo --kube-config {kubeconfigpls} --kube-context {managed_cluster_name_second}-admin', checks=[ self.check('tags.foo', 'doo'), self.check('name', '{name_second}') ]) @@ -362,7 +392,7 @@ def test_connectedk8s_list(self,resource_group): @live_only() - @ResourceGroupPreparer(name_prefix='conk8stest', location='eastus2euap', random_name_length=16) + @ResourceGroupPreparer(name_prefix='conk8stest', location=CONFIG['location'], random_name_length=16) def test_upgrade(self,resource_group): managed_cluster_name = self.create_random_name(prefix='test-upgrade', length=24) @@ 
-371,13 +401,14 @@ def test_upgrade(self,resource_group): 'name': self.create_random_name(prefix='cc-', length=12), 'rg': resource_group, 'kubeconfig': kubeconfig, - 'managed_cluster_name': managed_cluster_name + 'managed_cluster_name': managed_cluster_name, + 'location': CONFIG['location'] }) self.cmd('aks create -g {rg} -n {managed_cluster_name} --generate-ssh-keys') self.cmd('aks get-credentials -g {rg} -n {managed_cluster_name} -f {kubeconfig} --admin') - self.cmd('connectedk8s connect -g {rg} -n {name} -l eastus --tags foo=doo --kube-config {kubeconfig} --kube-context {managed_cluster_name}-admin', checks=[ + self.cmd('connectedk8s connect -g {rg} -n {name} -l {location} --tags foo=doo --kube-config {kubeconfig} --kube-context {managed_cluster_name}-admin', checks=[ self.check('tags.foo', 'doo'), self.check('name', '{name}') ]) @@ -403,7 +434,7 @@ def test_upgrade(self,resource_group): assert(updated_cmd1["systemDefaultValues"]['azureArcAgents']['autoUpdate'] == bool(0)) self.cmd('connectedk8s upgrade -g {rg} -n {name} --kube-config {kubeconfig} --kube-context {managed_cluster_name}-admin') - response= requests.post('https://eastus.dp.kubernetesconfiguration.azure.com/azure-arc-k8sagents/GetLatestHelmPackagePath?api-version=2019-11-01-preview&releaseTrain=stable') + response= requests.post('https://{}.dp.kubernetesconfiguration.azure.com/azure-arc-k8sagents/GetLatestHelmPackagePath?api-version=2019-11-01-preview&releaseTrain=stable'.format(CONFIG['location'])) jsonData = json.loads(response.text) repo_path=jsonData['repositoryPath'] index_value = 0 @@ -427,7 +458,7 @@ def test_upgrade(self,resource_group): @live_only() - @ResourceGroupPreparer(name_prefix='conk8stest', location='eastus2euap', random_name_length=16) + @ResourceGroupPreparer(name_prefix='conk8stest', location=CONFIG['location'], random_name_length=16) def test_update(self,resource_group): managed_cluster_name = self.create_random_name(prefix='test-update', length=24) kubeconfig="%s" %
(_get_test_data_file(managed_cluster_name + '-config.yaml')) @@ -435,12 +466,13 @@ def test_update(self,resource_group): 'name': self.create_random_name(prefix='cc-', length=12), 'kubeconfig': kubeconfig, 'rg':resource_group, - 'managed_cluster_name': managed_cluster_name + 'managed_cluster_name': managed_cluster_name, + 'location': CONFIG['location'] }) self.cmd('aks create -g {rg} -n {managed_cluster_name} --generate-ssh-keys') self.cmd('aks get-credentials -g {rg} -n {managed_cluster_name} -f {kubeconfig} --admin') - self.cmd('connectedk8s connect -g {rg} -n {name} -l eastus --tags foo=doo --kube-config {kubeconfig} --kube-context {managed_cluster_name}-admin', checks=[ + self.cmd('connectedk8s connect -g {rg} -n {name} -l {location} --tags foo=doo --kube-config {kubeconfig} --kube-context {managed_cluster_name}-admin', checks=[ self.check('tags.foo', 'doo'), self.check('name', '{name}') ]) @@ -486,7 +518,7 @@ def test_update(self,resource_group): @live_only() - @ResourceGroupPreparer(name_prefix='conk8stest', location='eastus2euap', random_name_length=16) + @ResourceGroupPreparer(name_prefix='conk8stest', location=CONFIG['location'], random_name_length=16) def test_troubleshoot(self,resource_group): managed_cluster_name = self.create_random_name(prefix='test-troubleshoot', length=24) kubeconfig="%s" % (_get_test_data_file(managed_cluster_name + '-config.yaml')) @@ -494,12 +526,13 @@ def test_troubleshoot(self,resource_group): 'name': self.create_random_name(prefix='cc-', length=12), 'kubeconfig': kubeconfig, 'rg':resource_group, - 'managed_cluster_name': managed_cluster_name + 'managed_cluster_name': managed_cluster_name, + 'location': CONFIG['location'] }) self.cmd('aks create -g {rg} -n {managed_cluster_name} --generate-ssh-keys') self.cmd('aks get-credentials -g {rg} -n {managed_cluster_name} -f {kubeconfig} --admin') - self.cmd('connectedk8s connect -g {rg} -n {name} -l eastus --tags foo=doo --kube-config {kubeconfig} --kube-context 
{managed_cluster_name}-admin', checks=[ + self.cmd('connectedk8s connect -g {rg} -n {name} -l {location} --tags foo=doo --kube-config {kubeconfig} --kube-context {managed_cluster_name}-admin', checks=[ self.check('tags.foo', 'doo'), self.check('name', '{name}') ]) @@ -518,7 +551,7 @@ def test_troubleshoot(self,resource_group): os.remove("%s" % (_get_test_data_file(managed_cluster_name + '-config.yaml'))) @live_only() - @ResourceGroupPreparer(name_prefix='conk8stest', location='eastus2euap', random_name_length=16) + @ResourceGroupPreparer(name_prefix='conk8stest', location=CONFIG['location'], random_name_length=16) def test_proxy(self,resource_group): managed_cluster_name = self.create_random_name(prefix='test-proxy', length=24) kubeconfig="%s" % (_get_test_data_file(managed_cluster_name + '-config.yaml')) @@ -529,12 +562,13 @@ def test_proxy(self,resource_group): 'kubeconfig': kubeconfig, 'kubeconfig2': kubeconfig2, 'rg':resource_group, - 'managed_cluster_name': managed_cluster_name + 'managed_cluster_name': managed_cluster_name, + 'location': CONFIG['location'] }) self.cmd('aks create -g {rg} -n {managed_cluster_name} --generate-ssh-keys') self.cmd('aks get-credentials -g {rg} -n {managed_cluster_name} -f {kubeconfig} --admin') - self.cmd('connectedk8s connect -g {rg} -n {name} -l eastus --tags foo=doo --kube-config {kubeconfig} --kube-context {managed_cluster_name}-admin', checks=[ + self.cmd('connectedk8s connect -g {rg} -n {name} -l {location} --tags foo=doo --kube-config {kubeconfig} --kube-context {managed_cluster_name}-admin', checks=[ self.check('tags.foo', 'doo'), self.check('name', '{name}') ]) diff --git a/src/connectedk8s/setup.py b/src/connectedk8s/setup.py index cd996bbebe5..a0cfba5e27c 100644 --- a/src/connectedk8s/setup.py +++ b/src/connectedk8s/setup.py @@ -17,7 +17,7 @@ # TODO: Confirm this is the right version number you want and it matches your # HISTORY.rst entry. 
-VERSION = '1.4.2' +VERSION = '1.5.0' # The full list of classifiers is available at # https://pypi.python.org/pypi?%3Aaction=list_classifiers