diff --git a/src/connectedk8s/HISTORY.rst b/src/connectedk8s/HISTORY.rst index ef982c24ce4..37c4923028c 100644 --- a/src/connectedk8s/HISTORY.rst +++ b/src/connectedk8s/HISTORY.rst @@ -3,6 +3,11 @@ Release History =============== +0.2.3 +++++++ +* `az connectedk8s connect`: Modified CLI params for proxy +* `az connectedk8s update`: Added update command + 0.2.2 ++++++ * `az connectedk8s connect`: Added CLI params to support proxy. @@ -18,4 +23,4 @@ Release History 0.1.5 ++++++ -* Initial release. \ No newline at end of file +* Initial release. diff --git a/src/connectedk8s/azext_connectedk8s/_constants.py b/src/connectedk8s/azext_connectedk8s/_constants.py new file mode 100644 index 00000000000..f8b84651d51 --- /dev/null +++ b/src/connectedk8s/azext_connectedk8s/_constants.py @@ -0,0 +1,36 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + + +Invalid_Location_Fault_Type = 'location-validation-error' +Load_Kubeconfig_Fault_Type = 'kubeconfig-load-error' +Read_ConfigMap_Fault_Type = 'configmap-read-error' +Create_ConnectedCluster_Fault_Type = 'connected-cluster-create-error' +Delete_ConnectedCluster_Fault_Type = 'connected-cluster-delete-error' +Bad_DeleteRequest_Fault_Type = 'bad-delete-request-error' +Cluster_Already_Onboarded_Fault_Type = 'cluster-already-onboarded-error' +Resource_Already_Exists_Fault_Type = 'resource-already-exists-error' +Resource_Does_Not_Exist_Fault_Type = 'resource-does-not-exist-error' +Create_ResourceGroup_Fault_Type = 'resource-group-creation-error' +Add_HelmRepo_Fault_Type = 'helm-repo-add-error' +List_HelmRelease_Fault_Type = 'helm-list-release-error' +KeyPair_Generate_Fault_Type = 'keypair-generation-error' +PublicKey_Export_Fault_Type = 'publickey-export-error' +PrivateKey_Export_Fault_Type = 'privatekey-export-error' +Install_HelmRelease_Fault_Type = 'helm-release-install-error' +Delete_HelmRelease_Fault_Type = 'helm-release-delete-error' +Check_PodStatus_Fault_Type = 'check-pod-status-error' +Kubernetes_Connectivity_FaultType = 'kubernetes-cluster-connection-error' +Helm_Version_Fault_Type = 'helm-not-updated-error' +Check_HelmVersion_Fault_Type = 'helm-version-check-error' +Helm_Installation_Fault_Type = 'helm-not-installed-error' +Check_HelmInstallation_Fault_Type = 'check-helm-installed-error' +Get_HelmRegistery_Path_Fault_Type = 'helm-registry-path-fetch-error' +Pull_HelmChart_Fault_Type = 'helm-chart-pull-error' +Export_HelmChart_Fault_Type = 'helm-chart-export-error' +Get_Kubernetes_Version_Fault_Type = 'kubernetes-get-version-error' +Get_Kubernetes_Distro_Fault_Type = 'kubernetes-get-distribution-error' +Update_Agent_Success = 'Agents for Connected Cluster {} have been updated successfully' +Update_Agent_Failure = 'Error while updating agents. 
Please run \"kubectl get pods -n azure-arc\" to check the pods in case of timeout error. Error: {}' diff --git a/src/connectedk8s/azext_connectedk8s/_help.py b/src/connectedk8s/azext_connectedk8s/_help.py index de251701dce..c1ad0b6f16e 100644 --- a/src/connectedk8s/azext_connectedk8s/_help.py +++ b/src/connectedk8s/azext_connectedk8s/_help.py @@ -21,10 +21,18 @@ - name: Onboard a connected kubernetes cluster by specifying the kubeconfig and kubecontext. text: az connectedk8s connect -g resourceGroupName -n connectedClusterName --kube-config /path/to/kubeconfig --kube-context kubeContextName - name: Onboard a connected kubernetes cluster by specifying the https proxy, http proxy, no proxy settings. - text: az connectedk8s connect -g resourceGroupName -n connectedClusterName --https-proxy https://proxy-url --http-proxy http://proxy-url --no-proxy excludedIP,excludedCIDR,exampleCIDRfollowed,10.0.0.0/24 + text: az connectedk8s connect -g resourceGroupName -n connectedClusterName --proxy-https https://proxy-url --proxy-http http://proxy-url --proxy-skip-range excludedIP,excludedCIDR,exampleCIDRfollowed,10.0.0.0/24 """ +helps['connectedk8s update'] = """ + type: command + short-summary: Update properties of the onboarded agents. + examples: + - name: Update proxy values for the agents + text: az connectedk8s update -g resourceGroupName -n connectedClusterName --proxy-https https://proxy-url --proxy-http http://proxy-url --proxy-skip-range excludedIP,excludedCIDR,exampleCIDRfollowed,10.0.0.0/24 +""" + helps['connectedk8s list'] = """ type: command short-summary: List connected kubernetes clusters. 
diff --git a/src/connectedk8s/azext_connectedk8s/_params.py b/src/connectedk8s/azext_connectedk8s/_params.py index 3c62118b6fd..4197216f036 100644 --- a/src/connectedk8s/azext_connectedk8s/_params.py +++ b/src/connectedk8s/azext_connectedk8s/_params.py @@ -18,9 +18,17 @@ def load_arguments(self, _): c.argument('cluster_name', options_list=['--name', '-n'], help='The name of the connected cluster.') c.argument('kube_config', options_list=['--kube-config'], help='Path to the kube config file.') c.argument('kube_context', options_list=['--kube-context'], help='Kubconfig context from current machine.') - c.argument('https_proxy', options_list=['--https-proxy'], help='Https proxy url to be used.') - c.argument('http_proxy', options_list=['--http-proxy'], help='Http proxy url to be used.') - c.argument('no_proxy', options_list=['--no-proxy'], help='List of urls/CIDRs for which proxy should not to be used.') + c.argument('https_proxy', options_list=['--proxy-https'], help='Https proxy URL to be used.') + c.argument('http_proxy', options_list=['--proxy-http'], help='Http proxy URL to be used.') + c.argument('no_proxy', options_list=['--proxy-skip-range'], help='List of URLs/CIDRs for which proxy should not be used.') + + with self.argument_context('connectedk8s update') as c: + c.argument('cluster_name', options_list=['--name', '-n'], id_part='name', help='The name of the connected cluster.') + c.argument('kube_config', options_list=['--kube-config'], help='Path to the kube config file.') + c.argument('kube_context', options_list=['--kube-context'], help='Kubeconfig context from current machine.') + c.argument('https_proxy', options_list=['--proxy-https'], help='Https proxy URL to be used.') + c.argument('http_proxy', options_list=['--proxy-http'], help='Http proxy URL to be used.') + c.argument('no_proxy', options_list=['--proxy-skip-range'], help='List of URLs/CIDRs for which proxy should not be used.') with self.argument_context('connectedk8s list') as c: pass diff 
--git a/src/connectedk8s/azext_connectedk8s/_utils.py b/src/connectedk8s/azext_connectedk8s/_utils.py new file mode 100644 index 00000000000..576f11959d1 --- /dev/null +++ b/src/connectedk8s/azext_connectedk8s/_utils.py @@ -0,0 +1,109 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +import os +import subprocess +from subprocess import Popen, PIPE +import requests + +from knack.util import CLIError +from azure.cli.core.commands.client_factory import get_subscription_id +from azure.cli.core import telemetry +from azext_connectedk8s._client_factory import _resource_client_factory +import azext_connectedk8s._constants as consts + + +def validate_location(cmd, location): + subscription_id = get_subscription_id(cmd.cli_ctx) + rp_locations = [] + resourceClient = _resource_client_factory(cmd.cli_ctx, subscription_id=subscription_id) + providerDetails = resourceClient.providers.get('Microsoft.Kubernetes') + for resourceTypes in providerDetails.resource_types: + if resourceTypes.resource_type == 'connectedClusters': + rp_locations = [location.replace(" ", "").lower() for location in resourceTypes.locations] + if location.lower() not in rp_locations: + telemetry.set_user_fault() + telemetry.set_exception(exception='Location not supported', fault_type=consts.Invalid_Location_Fault_Type, + summary='Provided location is not supported for creating connected clusters') + raise CLIError("Connected cluster resource creation is supported only in the following locations: " + + ', '.join(map(str, rp_locations)) + + ". 
Use the --location flag to specify one of these locations.") + break + + +def get_chart_path(registry_path, kube_config, kube_context): + # Pulling helm chart from registry + os.environ['HELM_EXPERIMENTAL_OCI'] = '1' + pull_helm_chart(registry_path, kube_config, kube_context) + + # Exporting helm chart + chart_export_path = os.path.join(os.path.expanduser('~'), '.azure', 'AzureArcCharts') + export_helm_chart(registry_path, chart_export_path, kube_config, kube_context) + # Helm Install + helm_chart_path = os.path.join(chart_export_path, 'azure-arc-k8sagents') + chart_path = os.getenv('HELMCHART') if os.getenv('HELMCHART') else helm_chart_path + return chart_path + + +def pull_helm_chart(registry_path, kube_config, kube_context): + cmd_helm_chart_pull = ["helm", "chart", "pull", registry_path, "--kubeconfig", kube_config] + if kube_context: + cmd_helm_chart_pull.extend(["--kube-context", kube_context]) + response_helm_chart_pull = subprocess.Popen(cmd_helm_chart_pull, stdout=PIPE, stderr=PIPE) + _, error_helm_chart_pull = response_helm_chart_pull.communicate() + if response_helm_chart_pull.returncode != 0: + telemetry.set_exception(exception=error_helm_chart_pull.decode("ascii"), fault_type=consts.Pull_HelmChart_Fault_Type, + summary='Unable to pull helm chart from the registry') + raise CLIError("Unable to pull helm chart from the registry '{}': ".format(registry_path) + error_helm_chart_pull.decode("ascii")) + + +def export_helm_chart(registry_path, chart_export_path, kube_config, kube_context): + chart_export_path = os.path.join(os.path.expanduser('~'), '.azure', 'AzureArcCharts') + cmd_helm_chart_export = ["helm", "chart", "export", registry_path, "--destination", chart_export_path, "--kubeconfig", kube_config] + if kube_context: + cmd_helm_chart_export.extend(["--kube-context", kube_context]) + response_helm_chart_export = subprocess.Popen(cmd_helm_chart_export, stdout=PIPE, stderr=PIPE) + _, error_helm_chart_export = response_helm_chart_export.communicate() + 
if response_helm_chart_export.returncode != 0: + telemetry.set_exception(exception=error_helm_chart_export.decode("ascii"), fault_type=consts.Export_HelmChart_Fault_Type, + summary='Unable to export helm chart from the registry') + raise CLIError("Unable to export helm chart from the registry '{}': ".format(registry_path) + error_helm_chart_export.decode("ascii")) + + +def add_helm_repo(kube_config, kube_context): + repo_name = os.getenv('HELMREPONAME') + repo_url = os.getenv('HELMREPOURL') + cmd_helm_repo = ["helm", "repo", "add", repo_name, repo_url, "--kubeconfig", kube_config] + if kube_context: + cmd_helm_repo.extend(["--kube-context", kube_context]) + response_helm_repo = Popen(cmd_helm_repo, stdout=PIPE, stderr=PIPE) + _, error_helm_repo = response_helm_repo.communicate() + if response_helm_repo.returncode != 0: + telemetry.set_exception(exception=error_helm_repo.decode("ascii"), fault_type=consts.Add_HelmRepo_Fault_Type, + summary='Failed to add helm repository') + raise CLIError("Unable to add repository {} to helm: ".format(repo_url) + error_helm_repo.decode("ascii")) + + +def get_helm_registry(profile, location): + cred, _, _ = profile.get_login_credentials( + resource='https://management.core.windows.net/') + token = cred._token_retriever()[2].get('accessToken') # pylint: disable=protected-access + + get_chart_location_url = "https://{}.dp.kubernetesconfiguration.azure.com/{}/GetLatestHelmPackagePath?api-version=2019-11-01-preview".format(location, 'azure-arc-k8sagents') + query_parameters = {} + query_parameters['releaseTrain'] = os.getenv('RELEASETRAIN') if os.getenv('RELEASETRAIN') else 'stable' + header_parameters = {} + header_parameters['Authorization'] = "Bearer {}".format(str(token)) + try: + response = requests.post(get_chart_location_url, params=query_parameters, headers=header_parameters) + except Exception as e: + telemetry.set_exception(exception=e, fault_type=consts.Get_HelmRegistery_Path_Fault_Type, + summary='Error while fetching helm 
chart registry path') + raise CLIError("Error while fetching helm chart registry path: " + str(e)) + if response.status_code == 200: + return response.json().get('repositoryPath') + telemetry.set_exception(exception=str(response.json()), fault_type=consts.Get_HelmRegistery_Path_Fault_Type, + summary='Error while fetching helm chart registry path') + raise CLIError("Error while fetching helm chart registry path: {}".format(str(response.json()))) diff --git a/src/connectedk8s/azext_connectedk8s/commands.py b/src/connectedk8s/azext_connectedk8s/commands.py index f54ca669d95..fd2f71c7389 100644 --- a/src/connectedk8s/azext_connectedk8s/commands.py +++ b/src/connectedk8s/azext_connectedk8s/commands.py @@ -19,6 +19,7 @@ def load_command_table(self, _): with self.command_group('connectedk8s', connectedk8s_sdk, client_factory=cf_connected_cluster) as g: g.custom_command('connect', 'create_connectedk8s', supports_no_wait=True) + g.custom_command('update', 'update_agents') g.custom_command('delete', 'delete_connectedk8s', confirmation=True, supports_no_wait=True) g.custom_command('list', 'list_connectedk8s', table_transformer=connectedk8s_list_table_format) g.custom_show_command('show', 'get_connectedk8s', table_transformer=connectedk8s_show_table_format) diff --git a/src/connectedk8s/azext_connectedk8s/custom.py b/src/connectedk8s/azext_connectedk8s/custom.py index e3cea6ee2cc..796dbd5ec92 100644 --- a/src/connectedk8s/azext_connectedk8s/custom.py +++ b/src/connectedk8s/azext_connectedk8s/custom.py @@ -9,7 +9,6 @@ import subprocess from subprocess import Popen, PIPE from base64 import b64encode -import requests from knack.util import CLIError from knack.log import get_logger @@ -20,6 +19,8 @@ from azext_connectedk8s._client_factory import _graph_client_factory from azext_connectedk8s._client_factory import cf_resource_groups from azext_connectedk8s._client_factory import _resource_client_factory +import azext_connectedk8s._constants as consts +import 
azext_connectedk8s._utils as utils from msrestazure.azure_exceptions import CloudError from kubernetes import client as kube_client, config, watch # pylint: disable=import-error from Crypto.IO import PEM # pylint: disable=import-error @@ -31,41 +32,14 @@ logger = get_logger(__name__) -Invalid_Location_Fault_Type = 'location-validation-error' -Load_Kubeconfig_Fault_Type = 'kubeconfig-load-error' -Read_ConfigMap_Fault_Type = 'configmap-read-error' -Create_ConnectedCluster_Fault_Type = 'connected-cluster-create-error' -Delete_ConnectedCluster_Fault_Type = 'connected-cluster-delete-error' -Bad_DeleteRequest_Fault_Type = 'bad-delete-request-error' -Cluster_Already_Onboarded_Fault_Type = 'cluster-already-onboarded-error' -Resource_Already_Exists_Fault_Type = 'resource-already-exists-error' -Create_ResourceGroup_Fault_Type = 'resource-group-creation-error' -Add_HelmRepo_Fault_Type = 'helm-repo-add-error' -List_HelmRelease_Fault_Type = 'helm-list-release-error' -KeyPair_Generate_Fault_Type = 'keypair-generation-error' -PublicKey_Export_Fault_Type = 'publickey-export-error' -PrivateKey_Export_Fault_Type = 'privatekey-export-error' -Install_HelmRelease_Fault_Type = 'helm-release-install-error' -Delete_HelmRelease_Fault_Type = 'helm-release-delete-error' -Check_PodStatus_Fault_Type = 'check-pod-status-error' -Kubernetes_Connectivity_FaultType = 'kubernetes-cluster-connection-error' -Helm_Version_Fault_Type = 'helm-not-updated-error' -Check_HelmVersion_Fault_Type = 'helm-version-check-error' -Helm_Installation_Fault_Type = 'helm-not-installed-error' -Check_HelmInstallation_Fault_Type = 'check-helm-installed-error' -Get_HelmRegistery_Path_Fault_Type = 'helm-registry-path-fetch-error' -Pull_HelmChart_Fault_Type = 'helm-chart-pull-error' -Export_HelmChart_Fault_Type = 'helm-chart-export-error' -Get_Kubernetes_Version_Fault_Type = 'kubernetes-get-version-error' -Get_Kubernetes_Distro_Fault_Type = 'kubernetes-get-distribution-error' - - # pylint:disable=unused-argument # pylint: 
disable=too-many-locals # pylint: disable=too-many-branches # pylint: disable=too-many-statements # pylint: disable=line-too-long -def create_connectedk8s(cmd, client, resource_group_name, cluster_name, https_proxy=None, http_proxy=None, no_proxy=None, location=None, + + +def create_connectedk8s(cmd, client, resource_group_name, cluster_name, https_proxy="", http_proxy="", no_proxy="", location=None, kube_config=None, kube_context=None, no_wait=False, tags=None): logger.warning("Ensure that you have the latest helm version installed before proceeding.") logger.warning("This operation might take a while...\n") @@ -100,7 +74,7 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, https_pr config.load_kube_config(config_file=kube_config, context=kube_context) except Exception as e: telemetry.set_user_fault() - telemetry.set_exception(exception=e, fault_type=Load_Kubeconfig_Fault_Type, + telemetry.set_exception(exception=e, fault_type=consts.Load_Kubeconfig_Fault_Type, summary='Problem loading the kubeconfig file') raise CLIError("Problem loading the kubeconfig file." 
+ str(e)) configuration = kube_client.Configuration() @@ -128,20 +102,8 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, https_pr telemetry.add_extension_event('connectedk8s', {'Context.Default.AzureCLI.HelmVersion': helm_version}) # Validate location - rp_locations = [] + utils.validate_location(cmd, location) resourceClient = _resource_client_factory(cmd.cli_ctx, subscription_id=subscription_id) - providerDetails = resourceClient.providers.get('Microsoft.Kubernetes') - for resourceTypes in providerDetails.resource_types: - if resourceTypes.resource_type == 'connectedClusters': - rp_locations = [location.replace(" ", "").lower() for location in resourceTypes.locations] - if location.lower() not in rp_locations: - telemetry.set_user_fault() - telemetry.set_exception(exception='Location not supported', fault_type=Invalid_Location_Fault_Type, - summary='Provided location is not supported for creating connected clusters') - raise CLIError("Connected cluster resource creation is supported only in the following locations: " + - ', '.join(map(str, rp_locations)) + - ". 
Use the --location flag to specify one of these locations.") - break # Check Release Existance release_namespace = get_release_namespace(kube_config, kube_context) @@ -151,7 +113,7 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, https_pr try: configmap = api_instance.read_namespaced_config_map('azure-clusterconfig', 'azure-arc') except Exception as e: # pylint: disable=broad-except - telemetry.set_exception(exception=e, fault_type=Read_ConfigMap_Fault_Type, + telemetry.set_exception(exception=e, fault_type=consts.Read_ConfigMap_Fault_Type, summary='Unable to read ConfigMap') raise CLIError("Unable to read ConfigMap 'azure-clusterconfig' in 'azure-arc' namespace: %s\n" % e) configmap_rg_name = configmap.data["AZURE_RESOURCE_GROUP"] @@ -167,12 +129,12 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, https_pr return sdk_no_wait(no_wait, client.create, resource_group_name=resource_group_name, cluster_name=cluster_name, connected_cluster=cc) except CloudError as ex: - telemetry.set_exception(exception=ex, fault_type=Create_ConnectedCluster_Fault_Type, + telemetry.set_exception(exception=ex, fault_type=consts.Create_ConnectedCluster_Fault_Type, summary='Unable to create connected cluster resource') raise CLIError(ex) else: telemetry.set_user_fault() - telemetry.set_exception(exception='The kubernetes cluster is already onboarded', fault_type=Cluster_Already_Onboarded_Fault_Type, + telemetry.set_exception(exception='The kubernetes cluster is already onboarded', fault_type=consts.Cluster_Already_Onboarded_Fault_Type, summary='Kubernetes cluster already onboarded') raise CLIError("The kubernetes cluster you are trying to onboard " + "is already onboarded to the resource group" + @@ -183,7 +145,7 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, https_pr else: if connected_cluster_exists(client, resource_group_name, cluster_name): telemetry.set_user_fault() - telemetry.set_exception(exception='The 
connected cluster resource already exists', fault_type=Resource_Already_Exists_Fault_Type, + telemetry.set_exception(exception='The connected cluster resource already exists', fault_type=consts.Resource_Already_Exists_Fault_Type, summary='Connected cluster resource already exists') raise CLIError("The connected cluster resource {} already exists ".format(cluster_name) + "in the resource group {} ".format(resource_group_name) + @@ -196,62 +158,45 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, https_pr try: resourceClient.resource_groups.create_or_update(resource_group_name, resource_group_params) except Exception as e: - telemetry.set_exception(exception=e, fault_type=Create_ResourceGroup_Fault_Type, + telemetry.set_exception(exception=e, fault_type=consts.Create_ResourceGroup_Fault_Type, summary='Failed to create the resource group') raise CLIError("Failed to create the resource group {} :".format(resource_group_name) + str(e)) # Adding helm repo if os.getenv('HELMREPONAME') and os.getenv('HELMREPOURL'): - repo_name = os.getenv('HELMREPONAME') - repo_url = os.getenv('HELMREPOURL') - cmd_helm_repo = ["helm", "repo", "add", repo_name, repo_url, "--kubeconfig", kube_config] - if kube_context: - cmd_helm_repo.extend(["--kube-context", kube_context]) - response_helm_repo = Popen(cmd_helm_repo, stdout=PIPE, stderr=PIPE) - _, error_helm_repo = response_helm_repo.communicate() - if response_helm_repo.returncode != 0: - telemetry.set_exception(exception=error_helm_repo.decode("ascii"), fault_type=Add_HelmRepo_Fault_Type, - summary='Failed to add helm repository') - raise CLIError("Unable to add repository {} to helm: ".format(repo_url) + error_helm_repo.decode("ascii")) + utils.add_helm_repo(kube_config, kube_context) # Retrieving Helm chart OCI Artifact location - registry_path = os.getenv('HELMREGISTRY') if os.getenv('HELMREGISTRY') else get_helm_registry(profile, location) + registry_path = os.getenv('HELMREGISTRY') if 
os.getenv('HELMREGISTRY') else utils.get_helm_registry(profile, location) # Get azure-arc agent version for telemetry azure_arc_agent_version = registry_path.split(':')[1] telemetry.add_extension_event('connectedk8s', {'Context.Default.AzureCLI.AgentVersion': azure_arc_agent_version}) - # Pulling helm chart from registry - os.environ['HELM_EXPERIMENTAL_OCI'] = '1' - pull_helm_chart(registry_path, kube_config, kube_context) - - # Exporting helm chart - chart_export_path = os.path.join(os.path.expanduser('~'), '.azure', 'AzureArcCharts') - export_helm_chart(registry_path, chart_export_path, kube_config, kube_context) + # Get helm chart path + chart_path = utils.get_chart_path(registry_path, kube_config, kube_context) # Generate public-private key pair try: key_pair = RSA.generate(4096) except Exception as e: - telemetry.set_exception(exception=e, fault_type=KeyPair_Generate_Fault_Type, + telemetry.set_exception(exception=e, fault_type=consts.KeyPair_Generate_Fault_Type, summary='Failed to generate public-private key pair') raise CLIError("Failed to generate public-private key pair. " + str(e)) try: public_key = get_public_key(key_pair) except Exception as e: - telemetry.set_exception(exception=e, fault_type=PublicKey_Export_Fault_Type, + telemetry.set_exception(exception=e, fault_type=consts.PublicKey_Export_Fault_Type, summary='Failed to export public key') raise CLIError("Failed to export public key." + str(e)) try: private_key_pem = get_private_key(key_pair) except Exception as e: - telemetry.set_exception(exception=e, fault_type=PrivateKey_Export_Fault_Type, + telemetry.set_exception(exception=e, fault_type=consts.PrivateKey_Export_Fault_Type, summary='Failed to export private key') raise CLIError("Failed to export private key." 
+ str(e)) # Helm Install - helm_chart_path = os.path.join(chart_export_path, 'azure-arc-k8sagents') - chart_path = os.getenv('HELMCHART') if os.getenv('HELMCHART') else helm_chart_path cmd_helm_install = ["helm", "upgrade", "--install", "azure-arc", chart_path, "--set", "global.subscriptionId={}".format(subscription_id), "--set", "global.kubernetesDistro={}".format(kubernetes_distro), @@ -270,7 +215,7 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, https_pr response_helm_install = Popen(cmd_helm_install, stdout=PIPE, stderr=PIPE) _, error_helm_install = response_helm_install.communicate() if response_helm_install.returncode != 0: - telemetry.set_exception(exception=error_helm_install.decode("ascii"), fault_type=Install_HelmRelease_Fault_Type, + telemetry.set_exception(exception=error_helm_install.decode("ascii"), fault_type=consts.Install_HelmRelease_Fault_Type, summary='Unable to install helm release') raise CLIError("Unable to install helm release: " + error_helm_install.decode("ascii")) @@ -283,7 +228,7 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, https_pr if no_wait: return put_cc_response except CloudError as ex: - telemetry.set_exception(exception=ex, fault_type=Create_ConnectedCluster_Fault_Type, + telemetry.set_exception(exception=ex, fault_type=consts.Create_ConnectedCluster_Fault_Type, summary='Unable to create connected cluster resource') raise CLIError(ex) @@ -295,7 +240,7 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, https_pr try: check_pod_status(pod_dict) except Exception as e: # pylint: disable=broad-except - telemetry.set_exception(exception=e, fault_type=Check_PodStatus_Fault_Type, + telemetry.set_exception(exception=e, fault_type=consts.Check_PodStatus_Fault_Type, summary='Failed to check arc agent pods statuses') logger.warning("Failed to check arc agent pods statuses: %s", e) @@ -331,7 +276,7 @@ def check_kube_connection(configuration): 
api_instance.get_api_resources() except Exception as e: telemetry.set_user_fault() - telemetry.set_exception(exception=e, fault_type=Kubernetes_Connectivity_FaultType, + telemetry.set_exception(exception=e, fault_type=consts.Kubernetes_Connectivity_FaultType, summary='Unable to verify connectivity to the Kubernetes cluster') logger.warning("Unable to verify connectivity to the Kubernetes cluster: %s\n", e) raise CLIError("If you are using AAD Enabled cluster, " + @@ -349,16 +294,16 @@ def check_helm_install(kube_config, kube_context): if response_helm_installed.returncode != 0: if "unknown flag" in error_helm_installed.decode("ascii"): telemetry.set_user_fault() - telemetry.set_exception(exception='Helm 3 not found', fault_type=Helm_Version_Fault_Type, + telemetry.set_exception(exception='Helm 3 not found', fault_type=consts.Helm_Version_Fault_Type, summary='Helm3 not found on the machine') raise CLIError("Please install the latest version of Helm. " + "Learn more at https://aka.ms/arc/k8s/onboarding-helm-install") telemetry.set_user_fault() - telemetry.set_exception(exception=error_helm_installed.decode("ascii"), fault_type=Helm_Installation_Fault_Type, + telemetry.set_exception(exception=error_helm_installed.decode("ascii"), fault_type=consts.Helm_Installation_Fault_Type, summary='Helm3 not installed on the machine') raise CLIError(error_helm_installed.decode("ascii")) except FileNotFoundError as e: - telemetry.set_exception(exception=e, fault_type=Check_HelmInstallation_Fault_Type, + telemetry.set_exception(exception=e, fault_type=consts.Check_HelmInstallation_Fault_Type, summary='Unable to verify helm installation') raise CLIError("Helm is not installed or requires elevated permissions. " + "Ensure that you have the latest version of Helm installed on your machine. 
" + @@ -375,12 +320,12 @@ def check_helm_version(kube_config, kube_context): response_helm_version = Popen(cmd_helm_version, stdout=PIPE, stderr=PIPE) output_helm_version, error_helm_version = response_helm_version.communicate() if response_helm_version.returncode != 0: - telemetry.set_exception(exception=error_helm_version.decode('ascii'), fault_type=Check_HelmVersion_Fault_Type, + telemetry.set_exception(exception=error_helm_version.decode('ascii'), fault_type=consts.Check_HelmVersion_Fault_Type, summary='Unable to determine helm version') raise CLIError("Unable to determine helm version: " + error_helm_version.decode("ascii")) if "v2" in output_helm_version.decode("ascii"): telemetry.set_user_fault() - telemetry.set_exception(exception='Helm 3 not found', fault_type=Helm_Version_Fault_Type, + telemetry.set_exception(exception='Helm 3 not found', fault_type=consts.Helm_Version_Fault_Type, summary='Helm3 not found on the machine') raise CLIError("Helm version 3+ is required. " + "Ensure that you have installed the latest version of Helm. 
" + @@ -407,54 +352,6 @@ def connected_cluster_exists(client, resource_group_name, cluster_name): return True -def get_helm_registry(profile, location): - cred, _, _ = profile.get_login_credentials( - resource='https://management.core.windows.net/') - token = cred._token_retriever()[2].get('accessToken') # pylint: disable=protected-access - - get_chart_location_url = "https://{}.dp.kubernetesconfiguration.azure.com/{}/GetLatestHelmPackagePath?api-version=2019-11-01-preview".format(location, 'azure-arc-k8sagents') - query_parameters = {} - query_parameters['releaseTrain'] = os.getenv('RELEASETRAIN') if os.getenv('RELEASETRAIN') else 'stable' - header_parameters = {} - header_parameters['Authorization'] = "Bearer {}".format(str(token)) - try: - response = requests.post(get_chart_location_url, params=query_parameters, headers=header_parameters) - except Exception as e: - telemetry.set_exception(exception=e, fault_type=Get_HelmRegistery_Path_Fault_Type, - summary='Error while fetching helm chart registry path') - raise CLIError("Error while fetching helm chart registry path: " + str(e)) - if response.status_code == 200: - return response.json().get('repositoryPath') - telemetry.set_exception(exception=str(response.json()), fault_type=Get_HelmRegistery_Path_Fault_Type, - summary='Error while fetching helm chart registry path') - raise CLIError("Error while fetching helm chart registry path: {}".format(str(response.json()))) - - -def pull_helm_chart(registry_path, kube_config, kube_context): - cmd_helm_chart_pull = ["helm", "chart", "pull", registry_path, "--kubeconfig", kube_config] - if kube_context: - cmd_helm_chart_pull.extend(["--kube-context", kube_context]) - response_helm_chart_pull = subprocess.Popen(cmd_helm_chart_pull, stdout=PIPE, stderr=PIPE) - _, error_helm_chart_pull = response_helm_chart_pull.communicate() - if response_helm_chart_pull.returncode != 0: - telemetry.set_exception(exception=error_helm_chart_pull.decode("ascii"), 
fault_type=Pull_HelmChart_Fault_Type, - summary='Unable to pull helm chart from the registry') - raise CLIError("Unable to pull helm chart from the registry '{}': ".format(registry_path) + error_helm_chart_pull.decode("ascii")) - - -def export_helm_chart(registry_path, chart_export_path, kube_config, kube_context): - chart_export_path = os.path.join(os.path.expanduser('~'), '.azure', 'AzureArcCharts') - cmd_helm_chart_export = ["helm", "chart", "export", registry_path, "--destination", chart_export_path, "--kubeconfig", kube_config] - if kube_context: - cmd_helm_chart_export.extend(["--kube-context", kube_context]) - response_helm_chart_export = subprocess.Popen(cmd_helm_chart_export, stdout=PIPE, stderr=PIPE) - _, error_helm_chart_export = response_helm_chart_export.communicate() - if response_helm_chart_export.returncode != 0: - telemetry.set_exception(exception=error_helm_chart_export.decode("ascii"), fault_type=Export_HelmChart_Fault_Type, - summary='Unable to export helm chart from the registry') - raise CLIError("Unable to export helm chart from the registry '{}': ".format(registry_path) + error_helm_chart_export.decode("ascii")) - - def get_public_key(key_pair): pubKey = key_pair.publickey() seq = asn1.DerSequence([pubKey.n, pubKey.e]) @@ -473,7 +370,7 @@ def get_server_version(configuration): api_response = api_instance.get_code() return api_response.git_version except Exception as e: # pylint: disable=broad-except - telemetry.set_exception(exception=e, fault_type=Get_Kubernetes_Version_Fault_Type, + telemetry.set_exception(exception=e, fault_type=consts.Get_Kubernetes_Version_Fault_Type, summary='Unable to fetch kubernetes version') logger.warning("Unable to fetch kubernetes version: %s\n", e) @@ -488,7 +385,7 @@ def get_kubernetes_distro(configuration): return "openshift" return "default" except Exception as e: # pylint: disable=broad-except - telemetry.set_exception(exception=e, fault_type=Get_Kubernetes_Distro_Fault_Type, + 
telemetry.set_exception(exception=e, fault_type=consts.Get_Kubernetes_Distro_Fault_Type, summary='Unable to fetch kubernetes distribution') logger.warning("Exception while trying to fetch kubernetes distribution: %s\n", e) @@ -582,7 +479,7 @@ def delete_connectedk8s(cmd, client, resource_group_name, cluster_name, config.load_kube_config(config_file=kube_config, context=kube_context) except Exception as e: telemetry.set_user_fault() - telemetry.set_exception(exception=e, fault_type=Load_Kubeconfig_Fault_Type, + telemetry.set_exception(exception=e, fault_type=consts.Load_Kubeconfig_Fault_Type, summary='Problem loading the kubeconfig file') raise CLIError("Problem loading the kubeconfig file." + str(e)) configuration = kube_client.Configuration() @@ -609,7 +506,7 @@ def delete_connectedk8s(cmd, client, resource_group_name, cluster_name, try: configmap = api_instance.read_namespaced_config_map('azure-clusterconfig', 'azure-arc') except Exception as e: # pylint: disable=broad-except - telemetry.set_exception(exception=e, fault_type=Read_ConfigMap_Fault_Type, + telemetry.set_exception(exception=e, fault_type=consts.Read_ConfigMap_Fault_Type, summary='Unable to read ConfigMap') raise CLIError("Unable to read ConfigMap 'azure-clusterconfig' in 'azure-arc' namespace: %s\n" % e) @@ -618,7 +515,7 @@ def delete_connectedk8s(cmd, client, resource_group_name, cluster_name, delete_cc_resource(client, resource_group_name, cluster_name, no_wait) else: telemetry.set_user_fault() - telemetry.set_exception(exception='Unable to delete connected cluster', fault_type=Bad_DeleteRequest_Fault_Type, + telemetry.set_exception(exception='Unable to delete connected cluster', fault_type=consts.Bad_DeleteRequest_Fault_Type, summary='The resource cannot be deleted as kubernetes cluster is onboarded with some other resource id') raise CLIError("The current context in the kubeconfig file does not correspond " + "to the connected cluster resource specified. 
Agents installed on this cluster correspond " + @@ -636,7 +533,7 @@ def get_release_namespace(kube_config, kube_context): response_helm_release = Popen(cmd_helm_release, stdout=PIPE, stderr=PIPE) output_helm_release, error_helm_release = response_helm_release.communicate() if response_helm_release.returncode != 0: - telemetry.set_exception(exception=error_helm_release.decode("ascii"), fault_type=List_HelmRelease_Fault_Type, + telemetry.set_exception(exception=error_helm_release.decode("ascii"), fault_type=consts.List_HelmRelease_Fault_Type, summary='Unable to list helm release') raise CLIError("Helm list release failed: " + error_helm_release.decode("ascii")) output_helm_release = output_helm_release.decode("ascii") @@ -653,7 +550,7 @@ def delete_cc_resource(client, resource_group_name, cluster_name, no_wait): resource_group_name=resource_group_name, cluster_name=cluster_name) except CloudError as ex: - telemetry.set_exception(exception=ex, fault_type=Delete_ConnectedCluster_Fault_Type, + telemetry.set_exception(exception=ex, fault_type=consts.Delete_ConnectedCluster_Fault_Type, summary='Unable to create connected cluster resource') raise CLIError(ex) @@ -665,7 +562,7 @@ def delete_arc_agents(release_namespace, kube_config, kube_context, configuratio response_helm_delete = Popen(cmd_helm_delete, stdout=PIPE, stderr=PIPE) _, error_helm_delete = response_helm_delete.communicate() if response_helm_delete.returncode != 0: - telemetry.set_exception(exception=error_helm_delete.decode("ascii"), fault_type=Delete_HelmRelease_Fault_Type, + telemetry.set_exception(exception=error_helm_delete.decode("ascii"), fault_type=consts.Delete_HelmRelease_Fault_Type, summary='Unable to delete helm release') raise CLIError("Error occured while cleaning up arc agents. 
# pylint:disable=unused-argument
# pylint: disable=too-many-locals
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
# pylint: disable=line-too-long


def update_agents(cmd, client, resource_group_name, cluster_name, https_proxy="", http_proxy="", no_proxy="",
                  kube_config=None, kube_context=None, no_wait=False):
    """Update the azure-arc agents on an onboarded Kubernetes cluster.

    Re-runs ``helm upgrade`` on the 'azure-arc' release with the supplied proxy
    settings, reusing all other values from the existing release, pinned to the
    connected cluster resource's agent version when one is recorded.

    Args:
        cmd: CLI command context (provides cli_ctx for the user profile).
        client: connected cluster management client.
        resource_group_name: resource group containing the connected cluster.
        cluster_name: name of the connected cluster resource.
        https_proxy: HTTPS proxy URL(s) to set on the agents.
        http_proxy: HTTP proxy URL(s) to set on the agents.
        no_proxy: comma-separated no-proxy list to set on the agents.
        kube_config: path to the kubeconfig file; None uses the default.
        kube_context: kubeconfig context to use; None uses the current context.
        no_wait: accepted for CLI signature compatibility; unused here.

    Returns:
        str: success message containing the connected cluster name.

    Raises:
        CLIError: on kubeconfig/helm problems, a missing connected cluster
            resource, or a failed ``helm upgrade``.
    """
    logger.warning("Ensure that you have the latest helm version installed before proceeding.")
    logger.warning("This operation might take a while...\n")

    # Setting user profile
    profile = Profile(cli_ctx=cmd.cli_ctx)

    # Setting kubeconfig
    kube_config = set_kube_config(kube_config)

    # Removing quotes from kubeconfig path. This is necessary for windows OS.
    trim_kube_config(kube_config)

    # Escaping comma, forward slash present in the proxy urls, needed for helm params.
    https_proxy = escape_proxy_settings(https_proxy)
    http_proxy = escape_proxy_settings(http_proxy)
    no_proxy = escape_proxy_settings(no_proxy)

    # Loading the kubeconfig file in kubernetes client configuration
    try:
        config.load_kube_config(config_file=kube_config, context=kube_context)
    except Exception as e:
        telemetry.set_user_fault()
        telemetry.set_exception(exception=e, fault_type=consts.Load_Kubeconfig_Fault_Type,
                                summary='Problem loading the kubeconfig file')
        raise CLIError("Problem loading the kubeconfig file." + str(e))
    configuration = kube_client.Configuration()

    # Checking the connection to kubernetes cluster.
    # This check was added to avoid large timeouts when connecting to AAD Enabled AKS clusters
    # if the user had not logged in.
    check_kube_connection(configuration)

    # Get kubernetes cluster info for telemetry
    kubernetes_version = get_server_version(configuration)
    kubernetes_distro = get_kubernetes_distro(configuration)

    kubernetes_properties = {
        'Context.Default.AzureCLI.KubernetesVersion': kubernetes_version,
        'Context.Default.AzureCLI.KubernetesDistro': kubernetes_distro
    }
    telemetry.add_extension_event('connectedk8s', kubernetes_properties)

    # Checking helm installation
    check_helm_install(kube_config, kube_context)

    # Check helm version
    helm_version = check_helm_version(kube_config, kube_context)
    telemetry.add_extension_event('connectedk8s', {'Context.Default.AzureCLI.HelmVersion': helm_version})

    # Check whether Connected Cluster is present
    if not connected_cluster_exists(client, resource_group_name, cluster_name):
        telemetry.set_user_fault()
        telemetry.set_exception(exception='The connected cluster resource does not exist', fault_type=consts.Resource_Does_Not_Exist_Fault_Type,
                                summary='Connected cluster resource does not exist')
        # Fix: the onboarding hint previously ended "connect -n  -g " with no
        # placeholders; spell out the expected arguments for the user.
        raise CLIError("The connected cluster resource {} does not exist ".format(cluster_name) +
                       "in the resource group {}. ".format(resource_group_name) +
                       "Please onboard the connected cluster using: az connectedk8s connect -n <connected-cluster-name> -g <resource-group-name>")

    # Fetch Connected Cluster for agent version
    connected_cluster = get_connectedk8s(cmd, client, resource_group_name, cluster_name)

    # Adding helm repo
    if os.getenv('HELMREPONAME') and os.getenv('HELMREPOURL'):
        utils.add_helm_repo(kube_config, kube_context)

    # Retrieving Helm chart OCI Artifact location; HELMREGISTRY env var
    # overrides the dataplane lookup ('or' evaluates the env var only once,
    # with the same truthiness semantics as the original conditional).
    registry_path = os.getenv('HELMREGISTRY') or utils.get_helm_registry(profile, connected_cluster.location)

    # Registry path is of the form '<repository>:<chart-version>'.
    reg_path_array = registry_path.split(':')
    agent_version = reg_path_array[1]

    # Pin the chart to the agent version recorded on the resource, if any.
    if connected_cluster.agent_version is not None:
        agent_version = connected_cluster.agent_version
        registry_path = reg_path_array[0] + ":" + agent_version

    telemetry.add_extension_event('connectedk8s', {'Context.Default.AzureCLI.AgentVersion': agent_version})

    # Get Helm chart path
    chart_path = utils.get_chart_path(registry_path, kube_config, kube_context)

    cmd_helm_upgrade = ["helm", "upgrade", "azure-arc", chart_path,
                        "--reuse-values",
                        "--set", "global.httpsProxy={}".format(https_proxy),
                        "--set", "global.httpProxy={}".format(http_proxy),
                        "--set", "global.noProxy={}".format(no_proxy),
                        "--wait",
                        "--kubeconfig", kube_config, "--output", "json"]
    if kube_context:
        cmd_helm_upgrade.extend(["--kube-context", kube_context])
    response_helm_upgrade = Popen(cmd_helm_upgrade, stdout=PIPE, stderr=PIPE)
    _, error_helm_upgrade = response_helm_upgrade.communicate()
    if response_helm_upgrade.returncode != 0:
        telemetry.set_exception(exception=error_helm_upgrade.decode("ascii"), fault_type=consts.Install_HelmRelease_Fault_Type,
                                summary='Unable to install helm release')
        raise CLIError(str.format(consts.Update_Agent_Failure, error_helm_upgrade.decode("ascii")))

    return str.format(consts.Update_Agent_Success, connected_cluster.name)