From f25d744cbf15c94635bfe37227f2dafb572507ef Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 27 May 2020 14:18:28 +0200 Subject: [PATCH 01/55] r/kubernetes_cluster: support for load balancer settings Porting over the changes from #5824 X-Committed-With: neil-yechenwei --- .../containers/kubernetes_cluster_resource.go | 44 +- .../resource_arm_container_service.go | 683 ++++++++++++++++++ ...ubernetes_cluster_network_resource_test.go | 83 ++- .../tests/kubernetes_cluster_resource_test.go | 19 +- .../docs/r/kubernetes_cluster.html.markdown | 6 +- 5 files changed, 813 insertions(+), 22 deletions(-) create mode 100644 azurerm/internal/services/containers/resource_arm_container_service.go diff --git a/azurerm/internal/services/containers/kubernetes_cluster_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_resource.go index 8d297911c291..b080d9f2e7a9 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_resource.go @@ -267,6 +267,18 @@ func resourceArmKubernetesCluster() *schema.Resource { Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "outbound_ports_allocated": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + ValidateFunc: validation.IntBetween(0, 64000), + }, + "idle_timeout_in_minutes": { + Type: schema.TypeInt, + Optional: true, + Default: 30, + ValidateFunc: validation.IntBetween(4, 120), + }, "managed_outbound_ip_count": { Type: schema.TypeInt, Optional: true, @@ -1277,9 +1289,15 @@ func expandLoadBalancerProfile(d []interface{}, loadBalancerType string, ipCount config := d[0].(map[string]interface{}) - var managedOutboundIps *containerservice.ManagedClusterLoadBalancerProfileManagedOutboundIPs - var outboundIpPrefixes *containerservice.ManagedClusterLoadBalancerProfileOutboundIPPrefixes - var outboundIps *containerservice.ManagedClusterLoadBalancerProfileOutboundIPs + profile := &containerservice.ManagedClusterLoadBalancerProfile{} + + if port, ok := config["outbound_ports_allocated"].(int); ok { + profile.AllocatedOutboundPorts = utils.Int32(int32(port)) + } + + if idleTimeout, ok := config["idle_timeout_in_minutes"].(int); ok { + profile.IdleTimeoutInMinutes = utils.Int32(int32(idleTimeout)) + } noChangesForLoadBalancerIps := !ipCountChanges && !ipPrefixesChanges && !outboundIpChanges allowToSetIpCount := ipCountChanges || noChangesForLoadBalancerIps @@ -1288,23 +1306,19 @@ func expandLoadBalancerProfile(d []interface{}, loadBalancerType string, ipCount if ipCount := config["managed_outbound_ip_count"]; ipCount != nil && allowToSetIpCount { if c := int32(ipCount.(int)); c > 0 { - managedOutboundIps = &containerservice.ManagedClusterLoadBalancerProfileManagedOutboundIPs{Count: &c} + profile.ManagedOutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileManagedOutboundIPs{Count: &c} } } if ipPrefixes := idsToResourceReferences(config["outbound_ip_prefix_ids"]); ipPrefixes != nil && allowToSetIpPrefixes { - outboundIpPrefixes = &containerservice.ManagedClusterLoadBalancerProfileOutboundIPPrefixes{PublicIPPrefixes: ipPrefixes} + profile.OutboundIPPrefixes = &containerservice.ManagedClusterLoadBalancerProfileOutboundIPPrefixes{PublicIPPrefixes: ipPrefixes} } if outIps := idsToResourceReferences(config["outbound_ip_address_ids"]); outIps != nil && allowToSetOutboundIp { - outboundIps = &containerservice.ManagedClusterLoadBalancerProfileOutboundIPs{PublicIPs: outIps} + profile.OutboundIPs = 
&containerservice.ManagedClusterLoadBalancerProfileOutboundIPs{PublicIPs: outIps} } - return &containerservice.ManagedClusterLoadBalancerProfile{ - ManagedOutboundIPs: managedOutboundIps, - OutboundIPPrefixes: outboundIpPrefixes, - OutboundIPs: outboundIps, - }, nil + return profile, nil } func idsToResourceReferences(set interface{}) *[]containerservice.ResourceReference { @@ -1376,6 +1390,14 @@ func flattenKubernetesClusterNetworkProfile(profile *containerservice.NetworkPro if lbp := profile.LoadBalancerProfile; lbp != nil { lb := make(map[string]interface{}) + if v := lbp.AllocatedOutboundPorts; v != nil { + lb["outbound_ports_allocated"] = v + } + + if v := lbp.IdleTimeoutInMinutes; v != nil { + lb["idle_timeout_in_minutes"] = v + } + if ips := lbp.ManagedOutboundIPs; ips != nil { if count := ips.Count; count != nil { lb["managed_outbound_ip_count"] = count diff --git a/azurerm/internal/services/containers/resource_arm_container_service.go b/azurerm/internal/services/containers/resource_arm_container_service.go new file mode 100644 index 000000000000..e1a896b20304 --- /dev/null +++ b/azurerm/internal/services/containers/resource_arm_container_service.go @@ -0,0 +1,683 @@ +package containers + +import ( + "bytes" + "context" + "fmt" + "log" + "time" + + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceArmContainerService() *schema.Resource { + return &schema.Resource{ + Create: resourceArmContainerServiceCreateUpdate, + Read: resourceArmContainerServiceRead, + Update: resourceArmContainerServiceCreateUpdate, + Delete: resourceArmContainerServiceDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(90 * time.Minute), + Read: schema.DefaultTimeout(5 * time.Minute), + Update: schema.DefaultTimeout(90 * time.Minute), + Delete: schema.DefaultTimeout(90 * time.Minute), + }, + + DeprecationMessage: `Azure Container Service (ACS) has been deprecated in favour of Azure (Managed) Kubernetes Service (AKS). + +Azure will remove support for ACS Clusters on January 31, 2020. In preparation for this, the AzureRM Provider will remove support for the 'azurerm_container_service' resource in the next major version of the AzureRM Provider, which is targeted for Early 2019. + +If you're using ACS with Kubernetes, we'd recommend migrating to AKS / the 'azurerm_kubernetes_cluster' resource. 
+ +More information can be found here: https://azure.microsoft.com/en-us/updates/azure-container-service-will-retire-on-january-31-2020/ +`, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "location": azure.SchemaLocation(), + + "resource_group_name": azure.SchemaResourceGroupName(), + + "orchestration_platform": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: ValidateArmContainerServiceOrchestrationPlatform, + }, + + //lintignore:S018 + "master_profile": { + Type: schema.TypeSet, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + ValidateFunc: ValidateArmContainerServiceMasterProfileCount, + }, + + "dns_prefix": { + Type: schema.TypeString, + Required: true, + }, + + "fqdn": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + Set: resourceAzureRMContainerServiceMasterProfileHash, + }, + + //lintignore:S018 + "linux_profile": { + Type: schema.TypeSet, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "admin_username": { + Type: schema.TypeString, + Required: true, + }, + "ssh_key": { + Type: schema.TypeSet, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key_data": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + }, + Set: resourceAzureRMContainerServiceLinuxProfilesHash, + }, + + //lintignore:S018 + "agent_pool_profile": { + Type: schema.TypeSet, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "count": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + ValidateFunc: ValidateArmContainerServiceAgentPoolProfileCount, + }, + + "dns_prefix": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "fqdn": { + Type: schema.TypeString, + Computed: true, + }, + + "vm_size": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: suppress.CaseDifference, + }, + }, + }, + Set: resourceAzureRMContainerServiceAgentPoolProfilesHash, + }, + + //lintignore:S018 + "service_principal": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_id": { + Type: schema.TypeString, + Required: true, + }, + + "client_secret": { + Type: schema.TypeString, + Required: true, + Sensitive: true, + }, + }, + }, + Set: resourceAzureRMContainerServiceServicePrincipalProfileHash, + }, + + //lintignore:S018 + "diagnostics_profile": { + Type: schema.TypeSet, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + + "storage_uri": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + Set: resourceAzureRMContainerServiceDiagnosticProfilesHash, + }, + + "tags": tags.Schema(), + }, + } +} + +func resourceArmContainerServiceCreateUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*clients.Client) + ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + containerServiceClient := client.Containers.ServicesClient + + log.Printf("[INFO] preparing arguments for Azure ARM Container Service creation.") + + resGroup := d.Get("resource_group_name").(string) + name 
:= d.Get("name").(string) + + if features.ShouldResourcesBeImported() && d.IsNewResource() { + existing, err := containerServiceClient.Get(ctx, resGroup, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Container Service %q (Resource Group %q): %s", name, resGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_container_service", *existing.ID) + } + } + + location := azure.NormalizeLocation(d.Get("location").(string)) + + orchestrationPlatform := d.Get("orchestration_platform").(string) + + masterProfile := expandAzureRmContainerServiceMasterProfile(d) + linuxProfile := expandAzureRmContainerServiceLinuxProfile(d) + agentProfiles := expandAzureRmContainerServiceAgentProfiles(d) + diagnosticsProfile := expandAzureRmContainerServiceDiagnostics(d) + + t := d.Get("tags").(map[string]interface{}) + + parameters := containerservice.ContainerService{ + Name: &name, + Location: &location, + Properties: &containerservice.Properties{ + MasterProfile: &masterProfile, + LinuxProfile: &linuxProfile, + OrchestratorProfile: &containerservice.OrchestratorProfileType{ + OrchestratorType: containerservice.OrchestratorTypes(orchestrationPlatform), + }, + AgentPoolProfiles: &agentProfiles, + DiagnosticsProfile: &diagnosticsProfile, + }, + Tags: tags.Expand(t), + } + + servicePrincipalProfile := expandAzureRmContainerServiceServicePrincipal(d) + if servicePrincipalProfile != nil { + parameters.ServicePrincipalProfile = servicePrincipalProfile + } + + if _, err := containerServiceClient.CreateOrUpdate(ctx, resGroup, name, parameters); err != nil { + return err + } + + read, err := containerServiceClient.Get(ctx, resGroup, name) + if err != nil { + return err + } + + if read.ID == nil { + return fmt.Errorf("Cannot read Container Service %s (resource group %s) ID", name, resGroup) + } + + log.Printf("[DEBUG] Waiting for Container Service (%s) to become available", d.Get("name")) + stateConf := &resource.StateChangeConf{ + Pending: []string{"Updating", "Creating"}, + Target: []string{"Succeeded"}, + Refresh: containerServiceStateRefreshFunc(ctx, client, resGroup, name), + MinTimeout: 15 * time.Second, + } + + if features.SupportsCustomTimeouts() { + if d.IsNewResource() { + stateConf.Timeout = d.Timeout(schema.TimeoutCreate) + } else { + stateConf.Timeout = d.Timeout(schema.TimeoutUpdate) + } + } else { + stateConf.Timeout = 30 * time.Minute + } + + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf("Error waiting for Container Service (%s) to become available: %s", d.Get("name"), err) + } + + d.SetId(*read.ID) + + return resourceArmContainerServiceRead(d, meta) +} + +func resourceArmContainerServiceRead(d *schema.ResourceData, meta interface{}) error { + containerServiceClient := meta.(*clients.Client).Containers.ServicesClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := azure.ParseAzureResourceID(d.Id()) + if err != nil { + return err + } + resGroup := id.ResourceGroup + name := id.Path["containerServices"] + + resp, err := containerServiceClient.Get(ctx, resGroup, name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + d.SetId("") + return nil + } + + return fmt.Errorf("Error making Read request on Azure Container Service %s: %s", name, err) + } + + d.Set("name", resp.Name) + d.Set("resource_group_name", resGroup) + if location := resp.Location; location != nil { + 
d.Set("location", azure.NormalizeLocation(*location)) + } + + d.Set("orchestration_platform", string(resp.Properties.OrchestratorProfile.OrchestratorType)) + + masterProfiles := flattenAzureRmContainerServiceMasterProfile(*resp.Properties.MasterProfile) + d.Set("master_profile", &masterProfiles) + + linuxProfile := flattenAzureRmContainerServiceLinuxProfile(*resp.Properties.LinuxProfile) + d.Set("linux_profile", &linuxProfile) + + agentPoolProfiles := flattenAzureRmContainerServiceAgentPoolProfiles(resp.Properties.AgentPoolProfiles) + d.Set("agent_pool_profile", &agentPoolProfiles) + + servicePrincipal := flattenAzureRmContainerServiceServicePrincipalProfile(resp.Properties.ServicePrincipalProfile) + if servicePrincipal != nil { + d.Set("service_principal", servicePrincipal) + } + + diagnosticProfile := flattenAzureRmContainerServiceDiagnosticsProfile(resp.Properties.DiagnosticsProfile) + if diagnosticProfile != nil { + d.Set("diagnostics_profile", diagnosticProfile) + } + + return tags.FlattenAndSet(d, resp.Tags) +} + +func resourceArmContainerServiceDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*clients.Client) + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + containerServiceClient := client.Containers.ServicesClient + + id, err := azure.ParseAzureResourceID(d.Id()) + if err != nil { + return err + } + resGroup := id.ResourceGroup + name := id.Path["containerServices"] + + future, err := containerServiceClient.Delete(ctx, resGroup, name) + + if err != nil { + return fmt.Errorf("Error issuing Azure ARM delete request of Container Service '%s': %s", name, err) + } + + return future.WaitForCompletionRef(ctx, containerServiceClient.Client) +} + +func flattenAzureRmContainerServiceMasterProfile(profile containerservice.MasterProfile) *schema.Set { + masterProfiles := &schema.Set{ + F: resourceAzureRMContainerServiceMasterProfileHash, + } + + masterProfile := make(map[string]interface{}, 3) + + masterProfile["count"] = int(*profile.Count) + masterProfile["dns_prefix"] = *profile.DNSPrefix + masterProfile["fqdn"] = *profile.Fqdn + + masterProfiles.Add(masterProfile) + + return masterProfiles +} + +func flattenAzureRmContainerServiceLinuxProfile(profile containerservice.LinuxProfile) *schema.Set { + profiles := &schema.Set{ + F: resourceAzureRMContainerServiceLinuxProfilesHash, + } + + values := map[string]interface{}{} + + sshKeys := &schema.Set{ + F: resourceAzureRMContainerServiceLinuxProfilesSSHKeysHash, + } + for _, ssh := range *profile.SSH.PublicKeys { + keys := map[string]interface{}{} + keys["key_data"] = *ssh.KeyData + sshKeys.Add(keys) + } + + values["admin_username"] = *profile.AdminUsername + values["ssh_key"] = sshKeys + profiles.Add(values) + + return profiles +} + +func flattenAzureRmContainerServiceAgentPoolProfiles(profiles *[]containerservice.AgentPoolProfile) *schema.Set { + agentPoolProfiles := &schema.Set{ + F: resourceAzureRMContainerServiceAgentPoolProfilesHash, + } + + for _, profile := range *profiles { + agentPoolProfile := map[string]interface{}{} + agentPoolProfile["count"] = int(*profile.Count) + agentPoolProfile["dns_prefix"] = *profile.DNSPrefix + agentPoolProfile["fqdn"] = *profile.Fqdn + agentPoolProfile["name"] = *profile.Name + agentPoolProfile["vm_size"] = string(profile.VMSize) + agentPoolProfiles.Add(agentPoolProfile) + } + + return agentPoolProfiles +} + +func flattenAzureRmContainerServiceServicePrincipalProfile(profile *containerservice.ServicePrincipalProfile) *schema.Set { + if 
profile == nil { + return nil + } + + servicePrincipalProfiles := &schema.Set{ + F: resourceAzureRMContainerServiceServicePrincipalProfileHash, + } + + values := map[string]interface{}{} + + values["client_id"] = *profile.ClientID + if profile.Secret != nil { + values["client_secret"] = *profile.Secret + } + + servicePrincipalProfiles.Add(values) + + return servicePrincipalProfiles +} + +func flattenAzureRmContainerServiceDiagnosticsProfile(profile *containerservice.DiagnosticsProfile) *schema.Set { + diagnosticProfiles := &schema.Set{ + F: resourceAzureRMContainerServiceDiagnosticProfilesHash, + } + + values := map[string]interface{}{} + + values["enabled"] = *profile.VMDiagnostics.Enabled + if profile.VMDiagnostics.StorageURI != nil { + values["storage_uri"] = *profile.VMDiagnostics.StorageURI + } + diagnosticProfiles.Add(values) + + return diagnosticProfiles +} + +func expandAzureRmContainerServiceDiagnostics(d *schema.ResourceData) containerservice.DiagnosticsProfile { + configs := d.Get("diagnostics_profile").(*schema.Set).List() + + data := configs[0].(map[string]interface{}) + + enabled := data["enabled"].(bool) + + return containerservice.DiagnosticsProfile{ + VMDiagnostics: &containerservice.VMDiagnostics{ + Enabled: &enabled, + }, + } +} + +func expandAzureRmContainerServiceLinuxProfile(d *schema.ResourceData) containerservice.LinuxProfile { + profiles := d.Get("linux_profile").(*schema.Set).List() + config := profiles[0].(map[string]interface{}) + + adminUsername := config["admin_username"].(string) + + linuxKeys := config["ssh_key"].(*schema.Set).List() + sshPublicKeys := make([]containerservice.SSHPublicKey, 0) + + key := linuxKeys[0].(map[string]interface{}) + keyData := key["key_data"].(string) + + sshPublicKey := containerservice.SSHPublicKey{ + KeyData: &keyData, + } + + sshPublicKeys = append(sshPublicKeys, sshPublicKey) + + profile := containerservice.LinuxProfile{ + AdminUsername: &adminUsername, + SSH: &containerservice.SSHConfiguration{ + PublicKeys: &sshPublicKeys, + }, + } + + return profile +} + +func expandAzureRmContainerServiceMasterProfile(d *schema.ResourceData) containerservice.MasterProfile { + configs := d.Get("master_profile").(*schema.Set).List() + config := configs[0].(map[string]interface{}) + + count := int32(config["count"].(int)) + dnsPrefix := config["dns_prefix"].(string) + + profile := containerservice.MasterProfile{ + Count: &count, + DNSPrefix: &dnsPrefix, + } + + return profile +} + +func expandAzureRmContainerServiceServicePrincipal(d *schema.ResourceData) *containerservice.ServicePrincipalProfile { + value, exists := d.GetOk("service_principal") + if !exists { + return nil + } + + configs := value.(*schema.Set).List() + + config := configs[0].(map[string]interface{}) + + clientId := config["client_id"].(string) + clientSecret := config["client_secret"].(string) + + principal := containerservice.ServicePrincipalProfile{ + ClientID: &clientId, + Secret: &clientSecret, + } + + return &principal +} + +func expandAzureRmContainerServiceAgentProfiles(d *schema.ResourceData) []containerservice.AgentPoolProfile { + configs := d.Get("agent_pool_profile").(*schema.Set).List() + config := configs[0].(map[string]interface{}) + profiles := make([]containerservice.AgentPoolProfile, 0, len(configs)) + + name := config["name"].(string) + count := int32(config["count"].(int)) + dnsPrefix := config["dns_prefix"].(string) + vmSize := config["vm_size"].(string) + + profile := containerservice.AgentPoolProfile{ + Name: &name, + Count: &count, + VMSize: 
containerservice.VMSizeTypes(vmSize), + DNSPrefix: &dnsPrefix, + } + + profiles = append(profiles, profile) + + return profiles +} + +func containerServiceStateRefreshFunc(ctx context.Context, client *clients.Client, resourceGroupName string, containerServiceName string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + res, err := client.Containers.ServicesClient.Get(ctx, resourceGroupName, containerServiceName) + if err != nil { + return nil, "", fmt.Errorf("Error issuing read request in containerServiceStateRefreshFunc to Azure ARM for Container Service '%s' (RG: '%s'): %s", containerServiceName, resourceGroupName, err) + } + + return res, *res.Properties.ProvisioningState, nil + } +} + +func resourceAzureRMContainerServiceMasterProfileHash(v interface{}) int { + var buf bytes.Buffer + + if m, ok := v.(map[string]interface{}); ok { + buf.WriteString(fmt.Sprintf("%d-", m["count"].(int))) + buf.WriteString(fmt.Sprintf("%s-", m["dns_prefix"].(string))) + } + + return hashcode.String(buf.String()) +} + +func resourceAzureRMContainerServiceLinuxProfilesHash(v interface{}) int { + var buf bytes.Buffer + + if m, ok := v.(map[string]interface{}); ok { + buf.WriteString(fmt.Sprintf("%s-", m["admin_username"].(string))) + } + + return hashcode.String(buf.String()) +} + +func resourceAzureRMContainerServiceLinuxProfilesSSHKeysHash(v interface{}) int { + var buf bytes.Buffer + + if m, ok := v.(map[string]interface{}); ok { + buf.WriteString(fmt.Sprintf("%s-", m["key_data"].(string))) + } + + return hashcode.String(buf.String()) +} + +func resourceAzureRMContainerServiceAgentPoolProfilesHash(v interface{}) int { + var buf bytes.Buffer + + if m, ok := v.(map[string]interface{}); ok { + buf.WriteString(fmt.Sprintf("%d-", m["count"].(int))) + buf.WriteString(fmt.Sprintf("%s-", m["dns_prefix"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["vm_size"].(string))) + } + + return hashcode.String(buf.String()) +} + +func resourceAzureRMContainerServiceServicePrincipalProfileHash(v interface{}) int { + var buf bytes.Buffer + + if m, ok := v.(map[string]interface{}); ok { + buf.WriteString(fmt.Sprintf("%s-", m["client_id"].(string))) + } + + return hashcode.String(buf.String()) +} + +func resourceAzureRMContainerServiceDiagnosticProfilesHash(v interface{}) int { + var buf bytes.Buffer + + if m, ok := v.(map[string]interface{}); ok { + buf.WriteString(fmt.Sprintf("%t", m["enabled"].(bool))) + } + + return hashcode.String(buf.String()) +} + +func ValidateArmContainerServiceOrchestrationPlatform(v interface{}, _ string) (warnings []string, errors []error) { + value := v.(string) + capacities := map[string]bool{ + "DCOS": true, + "Kubernetes": true, + "Swarm": true, + } + + if !capacities[value] { + errors = append(errors, fmt.Errorf("Container Service: Orchestration Platgorm can only be DCOS / Kubernetes / Swarm")) + } + return warnings, errors +} + +func ValidateArmContainerServiceMasterProfileCount(v interface{}, _ string) (warnings []string, errors []error) { + value := v.(int) + capacities := map[int]bool{ + 1: true, + 3: true, + 5: true, + } + + if !capacities[value] { + errors = append(errors, fmt.Errorf("The number of master nodes must be 1, 3 or 5.")) + } + return warnings, errors +} + +func ValidateArmContainerServiceAgentPoolProfileCount(v interface{}, _ string) (warnings []string, errors []error) { + value := v.(int) + if value > 100 || 0 >= value { + errors = append(errors, fmt.Errorf("The Count for an Agent Pool 
Profile can only be between 1 and 100.")) + } + return warnings, errors +} diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_network_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_network_resource_test.go index e158519e2060..010bd93b964d 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_network_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_network_resource_test.go @@ -2,6 +2,7 @@ package tests import ( "fmt" + "os" "regexp" "testing" @@ -451,6 +452,8 @@ func testAccAzureRMKubernetesCluster_standardLoadBalancerProfile(t *testing.T) { resource.TestCheckResourceAttr(data.ResourceName, "network_profile.0.load_balancer_sku", "Standard"), resource.TestCheckResourceAttr(data.ResourceName, "network_profile.0.load_balancer_profile.0.managed_outbound_ip_count", "3"), resource.TestCheckResourceAttr(data.ResourceName, "network_profile.0.load_balancer_profile.0.effective_outbound_ips.#", "3"), + resource.TestCheckResourceAttr(data.ResourceName, "network_profile.0.load_balancer_profile.0.idle_timeout_in_minutes", "30"), + resource.TestCheckResourceAttr(data.ResourceName, "network_profile.0.load_balancer_profile.0.outbound_ports_allocated", "0"), ), }, data.ImportStep(), @@ -485,6 +488,34 @@ func testAccAzureRMKubernetesCluster_standardLoadBalancerProfileComplete(t *test }) } +func TestAccAzureRMKubernetesCluster_standardLoadBalancerProfileWithPortAndTimeout(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccAzureRMKubernetesCluster_standardLoadBalancerProfileWithPortAndTimeout(t) +} + +func testAccAzureRMKubernetesCluster_standardLoadBalancerProfileWithPortAndTimeout(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + PreventPostDestroyRefresh: true, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesCluster_standardLoadBalancerProfileWithPortAndTimeoutConfig(data, clientId, clientSecret), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(data.ResourceName), + resource.TestCheckResourceAttr(data.ResourceName, "network_profile.0.load_balancer_profile.0.outbound_ports_allocated", "8000"), + resource.TestCheckResourceAttr(data.ResourceName, "network_profile.0.load_balancer_profile.0.idle_timeout_in_minutes", "10"), + ), + }, + }, + }) +} + func TestAccAzureRMKubernetesCluster_basicLoadBalancerProfile(t *testing.T) { checkIfShouldRunTestsIndividually(t) testAccAzureRMKubernetesCluster_basicLoadBalancerProfile(t) @@ -1387,7 +1418,57 @@ resource "azurerm_kubernetes_cluster" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, currentKubernetesVersion, data.RandomInteger) } -func testAccAzureRMKubernetesCluster_basicLoadBalancerProfileConfig(data acceptance.TestData) string { +func testAccAzureRMKubernetesCluster_standardLoadBalancerProfileWithPortAndTimeoutConfig(data acceptance.TestData, clientId string, clientSecret string) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-aks-%d" + location = "%s" +} + +resource 
"azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + kubernetes_version = "%s" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } + + network_profile { + network_plugin = "kubenet" + load_balancer_sku = "Standard" + load_balancer_profile { + managed_outbound_ip_count = 2 + outbound_ports_allocated = 8000 + idle_timeout_in_minutes = 10 + } + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, currentKubernetesVersion, data.RandomInteger, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_basicLoadBalancerProfileConfig(data acceptance.TestData, clientId string, clientSecret string) string { return fmt.Sprintf(` provider "azurerm" { features {} diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go index 67968d3b51f9..8f7704aa7e51 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go @@ -40,23 +40,24 @@ func TestAccAzureRMKubernetes_all(t *testing.T) { "servicePrincipal": testAccAzureRMKubernetesCluster_servicePrincipal, }, "network": { - "advancedNetworkingKubenet": testAccAzureRMKubernetesCluster_advancedNetworkingKubenet, - "advancedNetworkingKubenetComplete": testAccAzureRMKubernetesCluster_advancedNetworkingKubenetComplete, - "advancedNetworkingAzure": testAccAzureRMKubernetesCluster_advancedNetworkingAzure, - "advancedNetworkingAzureComplete": testAccAzureRMKubernetesCluster_advancedNetworkingAzureComplete, - "advancedNetworkingAzureCalicoPolicy": testAccAzureRMKubernetesCluster_advancedNetworkingAzureCalicoPolicy, - "advancedNetworkingAzureCalicoPolicyComplete": testAccAzureRMKubernetesCluster_advancedNetworkingAzureCalicoPolicyComplete, - "advancedNetworkingAzureNPMPolicy": testAccAzureRMKubernetesCluster_advancedNetworkingAzureNPMPolicy, - "advancedNetworkingAzureNPMPolicyComplete": testAccAzureRMKubernetesCluster_advancedNetworkingAzureNPMPolicyComplete, "enableNodePublicIP": testAccAzureRMKubernetesCluster_enableNodePublicIP, "internalNetwork": testAccAzureRMKubernetesCluster_internalNetwork, - "basicLoadBalancerProfile": testAccAzureRMKubernetesCluster_basicLoadBalancerProfile, "changingLoadBalancerProfile": testAccAzureRMKubernetesCluster_changingLoadBalancerProfile, "prefixedLoadBalancerProfile": testAccAzureRMKubernetesCluster_prefixedLoadBalancerProfile, "standardLoadBalancer": testAccAzureRMKubernetesCluster_standardLoadBalancer, "standardLoadBalancerComplete": testAccAzureRMKubernetesCluster_standardLoadBalancerComplete, "standardLoadBalancerProfile": testAccAzureRMKubernetesCluster_standardLoadBalancerProfile, 
"standardLoadBalancerProfileComplete": testAccAzureRMKubernetesCluster_standardLoadBalancerProfileComplete, + "advancedNetworkingKubenet": testAccAzureRMKubernetesCluster_advancedNetworkingKubenet, + "advancedNetworkingKubenetComplete": testAccAzureRMKubernetesCluster_advancedNetworkingKubenetComplete, + "advancedNetworkingAzure": testAccAzureRMKubernetesCluster_advancedNetworkingAzure, + "advancedNetworkingAzureComplete": testAccAzureRMKubernetesCluster_advancedNetworkingAzureComplete, + "advancedNetworkingAzureCalicoPolicy": testAccAzureRMKubernetesCluster_advancedNetworkingAzureCalicoPolicy, + "advancedNetworkingAzureCalicoPolicyComplete": testAccAzureRMKubernetesCluster_advancedNetworkingAzureCalicoPolicyComplete, + "advancedNetworkingAzureNPMPolicy": testAccAzureRMKubernetesCluster_advancedNetworkingAzureNPMPolicy, + "advancedNetworkingAzureNPMPolicyComplete": testAccAzureRMKubernetesCluster_advancedNetworkingAzureNPMPolicyComplete, + "basicLoadBalancerProfile": testAccAzureRMKubernetesCluster_basicLoadBalancerProfile, + "standardLoadBalancerProfileWithPortAndTimeout": testAccAzureRMKubernetesCluster_standardLoadBalancerProfileWithPortAndTimeout, }, "nodePool": { "autoScale": testAccAzureRMKubernetesClusterNodePool_autoScale, diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index c27b5ff5a4ac..9fbdd45512a0 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -293,7 +293,11 @@ A `load_balancer_profile` block supports the following: ~> **NOTE:** These options are mutually exclusive. Note that when specifying `outbound_ip_address_ids` ([azurerm_public_ip](/docs/providers/azurerm/r/public_ip.html)) the SKU must be `Standard`. -* `managed_outbound_ip_count` - (Optional) Count of desired managed outbound IPs for the cluster load balancer. Must be in the range of [1, 100]. +* `outbound_ports_allocated` - (Optional) Number of desired SNAT port for each VM in the clusters load balancer. Must be between `0` and `64000` inclusive. Defaults to `0`. + +* `idle_timeout_in_minutes` - (Optional) Desired outbound flow idle timeout in minutes for the cluster load balancer. Must be between `4` and `120` inclusive. Defaults to `30`. + +* `managed_outbound_ip_count` - (Optional) Count of desired managed outbound IPs for the cluster load balancer. Must be between `1` and `100` inclusive. -> **NOTE** User has to explicitly set `managed_outbound_ip_count` to empty slice (`[]`) to remove it. 
From 5c0da259414eb3d4a06ebac699b04aa708a01f10 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 27 May 2020 14:55:10 +0200 Subject: [PATCH 02/55] d/kubernetes_cluster: making all read-only properties read-only --- .../services/containers/kubernetes_cluster_data_source.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_data_source.go b/azurerm/internal/services/containers/kubernetes_cluster_data_source.go index 603fc0f2331a..2975ca1c4119 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_data_source.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_data_source.go @@ -201,13 +201,13 @@ func dataSourceArmKubernetesCluster() *schema.Resource { "node_taints": { Type: schema.TypeList, - Optional: true, + Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, "enable_node_public_ip": { Type: schema.TypeBool, - Optional: true, + Computed: true, }, }, }, @@ -234,14 +234,12 @@ func dataSourceArmKubernetesCluster() *schema.Resource { "private_link_enabled": { Type: schema.TypeBool, Computed: true, - Optional: true, ConflictsWith: []string{"private_cluster_enabled"}, Deprecated: "Deprecated in favor of `private_cluster_enabled`", // TODO -- remove this in next major version }, "private_cluster_enabled": { Type: schema.TypeBool, - Optional: true, Computed: true, // TODO -- remove this when deprecation resolves ConflictsWith: []string{"private_link_enabled"}, }, From 5f013596c5a452a7d531ea8403be7278c1a75ca4 Mon Sep 17 00:00:00 2001 From: Thibault Cohen Date: Mon, 9 Mar 2020 17:07:50 -0400 Subject: [PATCH 03/55] Add Orchestrator version for Kubernetes Cluster and Node Pool --- .../kubernetes_cluster_data_source.go | 9 +++ .../kubernetes_cluster_node_pool_resource.go | 20 +++++++ .../containers/kubernetes_cluster_resource.go | 42 +++++++------- .../containers/kubernetes_nodepool.go | 16 ++++++ ...ubernetes_cluster_network_resource_test.go | 55 ++++++++++--------- ...ernetes_cluster_node_pool_resource_test.go | 5 ++ .../docs/d/kubernetes_cluster.html.markdown | 2 + .../docs/r/kubernetes_cluster.html.markdown | 2 + ...kubernetes_cluster_node_pool.html.markdown | 2 + 9 files changed, 107 insertions(+), 46 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_data_source.go b/azurerm/internal/services/containers/kubernetes_cluster_data_source.go index 2975ca1c4119..736a6a1b63cf 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_data_source.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_data_source.go @@ -186,6 +186,11 @@ func dataSourceArmKubernetesCluster() *schema.Resource { Computed: true, }, + "orchestrator_version": { + Type: schema.TypeString, + Computed: true, + }, + "max_pods": { Type: schema.TypeInt, Computed: true, @@ -862,6 +867,10 @@ func flattenKubernetesClusterDataSourceAgentPoolProfiles(input *[]containerservi agentPoolProfile["os_type"] = string(profile.OsType) } + if *profile.OrchestratorVersion != "" { + agentPoolProfile["orchestrator_version"] = *profile.OrchestratorVersion + } + if profile.MaxPods != nil { agentPoolProfile["max_pods"] = int(*profile.MaxPods) } diff --git a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go index 8fed035f8d90..dcb95a333543 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go +++ 
b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go @@ -150,6 +150,13 @@ func resourceArmKubernetesClusterNodePool() *schema.Resource { ForceNew: true, ValidateFunc: azure.ValidateResourceID, }, + + "orchestrator_version": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringIsNotEmpty, + }, }, } } @@ -226,6 +233,11 @@ func resourceArmKubernetesClusterNodePoolCreate(d *schema.ResourceData, meta int Count: utils.Int32(int32(count)), } + orchestratorVersion := d.Get("orchestrator_version").(string) + if orchestratorVersion != "" { + profile.OrchestratorVersion = utils.String(orchestratorVersion) + } + availabilityZonesRaw := d.Get("availability_zones").([]interface{}) if availabilityZones := utils.ExpandStringSlice(availabilityZonesRaw); len(*availabilityZones) > 0 { profile.AvailabilityZones = availabilityZones @@ -383,6 +395,10 @@ func resourceArmKubernetesClusterNodePoolUpdate(d *schema.ResourceData, meta int props.Tags = tags.Expand(t) } + if d.HasChange("orchestrator_version") { + props.OrchestratorVersion = utils.String(d.Get("orchestrator_version").(string)) + } + // validate the auto-scale fields are both set/unset to prevent a continual diff maxCount := 0 if props.MaxCount != nil { @@ -514,6 +530,10 @@ func resourceArmKubernetesClusterNodePoolRead(d *schema.ResourceData, meta inter d.Set("os_type", string(props.OsType)) d.Set("vnet_subnet_id", props.VnetSubnetID) d.Set("vm_size", string(props.VMSize)) + + if props.OrchestratorVersion != nil { + d.Set("orchestrator_version", props.OrchestratorVersion) + } } return tags.FlattenAndSet(d, resp.Tags) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_resource.go index b080d9f2e7a9..0bfb817a3cee 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_resource.go @@ -869,27 +869,6 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} log.Printf("[DEBUG] Updated the Kubernetes Cluster %q (Resource Group %q)..", id.Name, id.ResourceGroup) } - // update the node pool using the separate API - if d.HasChange("default_node_pool") { - log.Printf("[DEBUG] Updating of Default Node Pool..") - - agentProfiles, err := ExpandDefaultNodePool(d) - if err != nil { - return fmt.Errorf("expanding `default_node_pool`: %+v", err) - } - - agentProfile := ConvertDefaultNodePoolToAgentPool(agentProfiles) - agentPool, err := nodePoolsClient.CreateOrUpdate(ctx, id.ResourceGroup, id.Name, *agentProfile.Name, agentProfile) - if err != nil { - return fmt.Errorf("updating Default Node Pool %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) - } - - if err := agentPool.WaitForCompletionRef(ctx, nodePoolsClient.Client); err != nil { - return fmt.Errorf("waiting for update of Default Node Pool %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) - } - log.Printf("[DEBUG] Updated Default Node Pool.") - } - // then roll the version of Kubernetes if necessary if d.HasChange("kubernetes_version") { existing, err = clusterClient.Get(ctx, id.ResourceGroup, id.Name) @@ -916,6 +895,27 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} log.Printf("[DEBUG] Upgraded the version of Kubernetes to %q..", kubernetesVersion) } + // update the node pool using the separate API + if d.HasChange("default_node_pool") { + log.Printf("[DEBUG] Updating of Default Node 
Pool..") + + agentProfiles, err := ExpandDefaultNodePool(d) + if err != nil { + return fmt.Errorf("expanding `default_node_pool`: %+v", err) + } + + agentProfile := ConvertDefaultNodePoolToAgentPool(agentProfiles) + agentPool, err := nodePoolsClient.CreateOrUpdate(ctx, id.ResourceGroup, id.Name, *agentProfile.Name, agentProfile) + if err != nil { + return fmt.Errorf("updating Default Node Pool %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) + } + + if err := agentPool.WaitForCompletionRef(ctx, nodePoolsClient.Client); err != nil { + return fmt.Errorf("waiting for update of Default Node Pool %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) + } + log.Printf("[DEBUG] Updated Default Node Pool.") + } + d.Partial(false) return resourceArmKubernetesClusterRead(d, meta) diff --git a/azurerm/internal/services/containers/kubernetes_nodepool.go b/azurerm/internal/services/containers/kubernetes_nodepool.go index 790c8b6de5a8..52b3f80c9144 100644 --- a/azurerm/internal/services/containers/kubernetes_nodepool.go +++ b/azurerm/internal/services/containers/kubernetes_nodepool.go @@ -124,6 +124,12 @@ func SchemaDefaultNodePool() *schema.Schema { ForceNew: true, ValidateFunc: azure.ValidateResourceID, }, + "orchestrator_version": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringIsNotEmpty, + }, }, }, } @@ -208,6 +214,10 @@ func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedC profile.VnetSubnetID = utils.String(vnetSubnetID) } + if orchestratorVersion := raw["orchestrator_version"].(string); orchestratorVersion != "" { + profile.OrchestratorVersion = utils.String(orchestratorVersion) + } + count := raw["node_count"].(int) maxCount := raw["max_count"].(int) minCount := raw["min_count"].(int) @@ -333,6 +343,11 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro vnetSubnetId = *agentPool.VnetSubnetID } + orchestratorVersion := "" + if agentPool.OrchestratorVersion != nil { + orchestratorVersion = *agentPool.OrchestratorVersion + } + return &[]interface{}{ map[string]interface{}{ "availability_zones": availabilityZones, @@ -349,6 +364,7 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro "tags": tags.Flatten(agentPool.Tags), "type": string(agentPool.Type), "vm_size": string(agentPool.VMSize), + "orchestrator_version": orchestratorVersion, "vnet_subnet_id": vnetSubnetId, }, }, nil diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_network_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_network_resource_test.go index 010bd93b964d..d56c00ac145d 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_network_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_network_resource_test.go @@ -1188,10 +1188,11 @@ resource "azurerm_kubernetes_cluster" "test" { } default_node_pool { - name = "default" - node_count = 2 - vm_size = "Standard_DS2_v2" - vnet_subnet_id = azurerm_subnet.test.id + name = "default" + node_count = 2 + vm_size = "Standard_DS2_v2" + vnet_subnet_id = azurerm_subnet.test.id + orchestrator_version = "%s" } identity { @@ -1203,7 +1204,7 @@ resource "azurerm_kubernetes_cluster" "test" { load_balancer_sku = "Standard" } } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, currentKubernetesVersion, data.RandomInteger) +`, data.RandomInteger, 
data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, currentKubernetesVersion, data.RandomInteger, currentKubernetesVersion) } func testAccAzureRMKubernetesCluster_standardLoadBalancerCompleteConfig(data acceptance.TestData) string { @@ -1265,10 +1266,11 @@ resource "azurerm_kubernetes_cluster" "test" { } default_node_pool { - name = "default" - node_count = 2 - vm_size = "Standard_DS2_v2" - vnet_subnet_id = azurerm_subnet.test.id + name = "default" + node_count = 2 + vm_size = "Standard_DS2_v2" + vnet_subnet_id = azurerm_subnet.test.id + orchestrator_version = "%s" } identity { @@ -1283,7 +1285,7 @@ resource "azurerm_kubernetes_cluster" "test" { load_balancer_sku = "Standard" } } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, currentKubernetesVersion, data.RandomInteger) +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, currentKubernetesVersion, data.RandomInteger, currentKubernetesVersion) } func testAccAzureRMKubernetesCluster_standardLoadBalancerProfileConfig(data acceptance.TestData) string { @@ -1327,10 +1329,11 @@ resource "azurerm_kubernetes_cluster" "test" { } default_node_pool { - name = "default" - node_count = 2 - vm_size = "Standard_DS2_v2" - vnet_subnet_id = azurerm_subnet.test.id + name = "default" + node_count = 2 + vm_size = "Standard_DS2_v2" + vnet_subnet_id = azurerm_subnet.test.id + orchestrator_version = "%s" } identity { @@ -1345,7 +1348,7 @@ resource "azurerm_kubernetes_cluster" "test" { } } } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, currentKubernetesVersion, data.RandomInteger) +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, currentKubernetesVersion, data.RandomInteger, currentKubernetesVersion) } func testAccAzureRMKubernetesCluster_standardLoadBalancerProfileCompleteConfig(data acceptance.TestData) string { @@ -1509,10 +1512,11 @@ resource "azurerm_kubernetes_cluster" "test" { } default_node_pool { - name = "default" - node_count = 2 - vm_size = "Standard_DS2_v2" - vnet_subnet_id = azurerm_subnet.test.id + name = "default" + node_count = 2 + vm_size = "Standard_DS2_v2" + vnet_subnet_id = azurerm_subnet.test.id + orchestrator_version = "%s" } identity { @@ -1527,7 +1531,7 @@ resource "azurerm_kubernetes_cluster" "test" { } } } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, currentKubernetesVersion, data.RandomInteger) +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, currentKubernetesVersion, data.RandomInteger, currentKubernetesVersion) } func testAccAzureRMKubernetesCluster_prefixedLoadBalancerProfileConfig(data acceptance.TestData) string { @@ -1578,10 +1582,11 @@ resource "azurerm_kubernetes_cluster" "test" { } default_node_pool { - name = "default" - node_count = 2 - vm_size = "Standard_DS2_v2" - vnet_subnet_id = azurerm_subnet.test.id + name = "default" + node_count = 2 + vm_size = "Standard_DS2_v2" + vnet_subnet_id = azurerm_subnet.test.id + orchestrator_version = "%s" } identity { @@ -1596,7 +1601,7 @@ resource "azurerm_kubernetes_cluster" "test" { } } 
} -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, currentKubernetesVersion, data.RandomInteger) +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, currentKubernetesVersion, data.RandomInteger, currentKubernetesVersion) } func testAccAzureRMKubernetesCluster_changingLoadBalancerProfileConfigIPPrefix(data acceptance.TestData) string { diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go index 287c4ca48562..eea1555c30be 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go @@ -666,6 +666,11 @@ func testCheckAzureRMKubernetesNodePoolExists(resourceName string) resource.Test return fmt.Errorf("Error parsing kubernetes cluster id: %+v", err) } + OrchestratorVersion := rs.Primary.Attributes["orchestrator_version"] + if OrchestratorVersion == "" { + return fmt.Errorf("Error parsing orchestrator version") + } + agentPool, err := client.Get(ctx, parsedK8sId.ResourceGroup, parsedK8sId.Name, name) if err != nil { return fmt.Errorf("Bad: Get on kubernetesClustersClient: %+v", err) diff --git a/website/docs/d/kubernetes_cluster.html.markdown b/website/docs/d/kubernetes_cluster.html.markdown index 1f5f4a1c8588..9ca4dff14065 100644 --- a/website/docs/d/kubernetes_cluster.html.markdown +++ b/website/docs/d/kubernetes_cluster.html.markdown @@ -124,6 +124,8 @@ A `agent_pool_profile` block exports the following: * `tags` - A mapping of tags to assign to the resource. +* `orchestrator_version` - Kubernetes version used for the Agents. + * `vm_size` - The size of each VM in the Agent Pool (e.g. `Standard_F1`). * `vnet_subnet_id` - The ID of the Subnet where the Agents in the Pool are provisioned. diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 9fbdd45512a0..2ee8d38ec3b2 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -231,6 +231,8 @@ If `enable_auto_scaling` is set to `false`, then the following fields can also b -> **NOTE:** If `enable_auto_scaling` is set to `false` both `min_count` and `max_count` fields need to be set to `null` or omitted from the configuration. +* `orchestrator_version` - (Optional) Version of Kubernetes used for the Agents. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade) + --- A `http_application_routing` block supports the following: diff --git a/website/docs/r/kubernetes_cluster_node_pool.html.markdown b/website/docs/r/kubernetes_cluster_node_pool.html.markdown index a88df2846cab..723a2a470840 100644 --- a/website/docs/r/kubernetes_cluster_node_pool.html.markdown +++ b/website/docs/r/kubernetes_cluster_node_pool.html.markdown @@ -88,6 +88,8 @@ The following arguments are supported: ~> At this time there's a bug in the AKS API where Tags for a Node Pool are not stored in the correct case - you [may wish to use Terraform's `ignore_changes` functionality to ignore changes to the casing](https://www.terraform.io/docs/configuration/resources.html#ignore_changes) until this is fixed in the AKS API. 
+* `orchestrator_version` - (Optional) Version of Kubernetes used for the Agents. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade) + * `vnet_subnet_id` - (Optional) The ID of the Subnet where this Node Pool should exist. -> **NOTE:** At this time the `vnet_subnet_id` must be the same for all node pools in the cluster From 2c7d8310e653d9f4c6659f0a954e80482504a5ef Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 27 May 2020 15:19:48 +0200 Subject: [PATCH 04/55] tests: splitting the test dictionaries amongst the files to reduce conflicts going forward --- ...kubernetes_cluster_addons_resource_test.go | 10 ++ .../kubernetes_cluster_auth_resource_test.go | 9 ++ .../kubernetes_cluster_data_source_test.go | 25 ++++ ...ubernetes_cluster_network_resource_test.go | 21 ++++ ...ernetes_cluster_node_pool_resource_test.go | 22 ++++ .../kubernetes_cluster_other_resource_test.go | 17 +++ .../tests/kubernetes_cluster_resource_test.go | 116 ++---------------- ...ubernetes_cluster_scaling_resource_test.go | 12 ++ 8 files changed, 123 insertions(+), 109 deletions(-) diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go index 208aca3acf30..b1038b219140 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go @@ -8,6 +8,16 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" ) +var kubernetesAddOnTests = map[string]func(t *testing.T){ + "addonProfileAciConnectorLinux": testAccAzureRMKubernetesCluster_addonProfileAciConnectorLinux, + "addonProfileAciConnectorLinuxDisabled": testAccAzureRMKubernetesCluster_addonProfileAciConnectorLinuxDisabled, + "addonProfileAzurePolicy": testAccAzureRMKubernetesCluster_addonProfileAzurePolicy, + "addonProfileKubeDashboard": testAccAzureRMKubernetesCluster_addonProfileKubeDashboard, + "addonProfileOMS": testAccAzureRMKubernetesCluster_addonProfileOMS, + "addonProfileOMSToggle": testAccAzureRMKubernetesCluster_addonProfileOMSToggle, + "addonProfileRouting": testAccAzureRMKubernetesCluster_addonProfileRouting, +} + func TestAccAzureRMKubernetesCluster_addonProfileAciConnectorLinux(t *testing.T) { checkIfShouldRunTestsIndividually(t) testAccAzureRMKubernetesCluster_addonProfileAciConnectorLinux(t) diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_auth_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_auth_resource_test.go index 611dfb9e4bd9..9268846f86c4 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_auth_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_auth_resource_test.go @@ -8,6 +8,15 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" ) +var kubernetesAuthTests = map[string]func(t *testing.T){ + "apiServerAuthorizedIPRanges": testAccAzureRMKubernetesCluster_apiServerAuthorizedIPRanges, + "enablePodSecurityPolicy": testAccAzureRMKubernetesCluster_enablePodSecurityPolicy, + "managedClusterIdentity": testAccAzureRMKubernetesCluster_managedClusterIdentity, + "roleBasedAccessControl": testAccAzureRMKubernetesCluster_roleBasedAccessControl, + "roleBasedAccessControlAAD": testAccAzureRMKubernetesCluster_roleBasedAccessControlAAD, + 
"servicePrincipal": testAccAzureRMKubernetesCluster_servicePrincipal, +} + func TestAccAzureRMKubernetesCluster_apiServerAuthorizedIPRanges(t *testing.T) { checkIfShouldRunTestsIndividually(t) testAccAzureRMKubernetesCluster_apiServerAuthorizedIPRanges(t) diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_data_source_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_data_source_test.go index cc7924e88a58..da57819618df 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_data_source_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_data_source_test.go @@ -9,6 +9,31 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" ) +var kubernetesDataSourceTests = map[string]func(t *testing.T) { + "basic": testAccDataSourceAzureRMKubernetesCluster_basic, + "roleBasedAccessControl": testAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControl, + "roleBasedAccessControlAAD": testAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControlAAD, + "internalNetwork": testAccDataSourceAzureRMKubernetesCluster_internalNetwork, + "advancedNetworkingAzure": testAccDataSourceAzureRMKubernetesCluster_advancedNetworkingAzure, + "advancedNetworkingAzureCalicoPolicy": testAccDataSourceAzureRMKubernetesCluster_advancedNetworkingAzureCalicoPolicy, + "advancedNetworkingAzureNPMPolicy": testAccDataSourceAzureRMKubernetesCluster_advancedNetworkingAzureNPMPolicy, + "advancedNetworkingAzureComplete": testAccDataSourceAzureRMKubernetesCluster_advancedNetworkingAzureComplete, + "advancedNetworkingAzureCalicoPolicyComplete": testAccDataSourceAzureRMKubernetesCluster_advancedNetworkingAzureCalicoPolicyComplete, + "advancedNetworkingAzureNPMPolicyComplete": testAccDataSourceAzureRMKubernetesCluster_advancedNetworkingAzureNPMPolicyComplete, + "advancedNetworkingKubenet": testAccDataSourceAzureRMKubernetesCluster_advancedNetworkingKubenet, + "advancedNetworkingKubenetComplete": testAccDataSourceAzureRMKubernetesCluster_advancedNetworkingKubenetComplete, + "addOnProfileOMS": testAccDataSourceAzureRMKubernetesCluster_addOnProfileOMS, + "addOnProfileKubeDashboard": testAccDataSourceAzureRMKubernetesCluster_addOnProfileKubeDashboard, + "addOnProfileAzurePolicy": testAccDataSourceAzureRMKubernetesCluster_addOnProfileAzurePolicy, + "addOnProfileRouting": testAccDataSourceAzureRMKubernetesCluster_addOnProfileRouting, + "autoscalingNoAvailabilityZones": testAccDataSourceAzureRMKubernetesCluster_autoscalingNoAvailabilityZones, + "autoscalingWithAvailabilityZones": testAccDataSourceAzureRMKubernetesCluster_autoscalingWithAvailabilityZones, + "nodeLabels": testAccDataSourceAzureRMKubernetesCluster_nodeLabels, + "nodeTaints": testAccDataSourceAzureRMKubernetesCluster_nodeTaints, + "enableNodePublicIP": testAccDataSourceAzureRMKubernetesCluster_enableNodePublicIP, + "privateCluster": testAccDataSourceAzureRMKubernetesCluster_privateCluster, +} + func TestAccDataSourceAzureRMKubernetesCluster_basic(t *testing.T) { checkIfShouldRunTestsIndividually(t) testAccDataSourceAzureRMKubernetesCluster_basic(t) diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_network_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_network_resource_test.go index d56c00ac145d..23a54bdde6b8 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_network_resource_test.go +++ 
b/azurerm/internal/services/containers/tests/kubernetes_cluster_network_resource_test.go @@ -10,6 +10,27 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" ) +var kubernetesNetworkAuthTests = map[string]func(t *testing.T){ + "enableNodePublicIP": testAccAzureRMKubernetesCluster_enableNodePublicIP, + "internalNetwork": testAccAzureRMKubernetesCluster_internalNetwork, + "changingLoadBalancerProfile": testAccAzureRMKubernetesCluster_changingLoadBalancerProfile, + "prefixedLoadBalancerProfile": testAccAzureRMKubernetesCluster_prefixedLoadBalancerProfile, + "standardLoadBalancer": testAccAzureRMKubernetesCluster_standardLoadBalancer, + "standardLoadBalancerComplete": testAccAzureRMKubernetesCluster_standardLoadBalancerComplete, + "standardLoadBalancerProfile": testAccAzureRMKubernetesCluster_standardLoadBalancerProfile, + "standardLoadBalancerProfileComplete": testAccAzureRMKubernetesCluster_standardLoadBalancerProfileComplete, + "advancedNetworkingKubenet": testAccAzureRMKubernetesCluster_advancedNetworkingKubenet, + "advancedNetworkingKubenetComplete": testAccAzureRMKubernetesCluster_advancedNetworkingKubenetComplete, + "advancedNetworkingAzure": testAccAzureRMKubernetesCluster_advancedNetworkingAzure, + "advancedNetworkingAzureComplete": testAccAzureRMKubernetesCluster_advancedNetworkingAzureComplete, + "advancedNetworkingAzureCalicoPolicy": testAccAzureRMKubernetesCluster_advancedNetworkingAzureCalicoPolicy, + "advancedNetworkingAzureCalicoPolicyComplete": testAccAzureRMKubernetesCluster_advancedNetworkingAzureCalicoPolicyComplete, + "advancedNetworkingAzureNPMPolicy": testAccAzureRMKubernetesCluster_advancedNetworkingAzureNPMPolicy, + "advancedNetworkingAzureNPMPolicyComplete": testAccAzureRMKubernetesCluster_advancedNetworkingAzureNPMPolicyComplete, + "basicLoadBalancerProfile": testAccAzureRMKubernetesCluster_basicLoadBalancerProfile, + "standardLoadBalancerProfileWithPortAndTimeout": testAccAzureRMKubernetesCluster_standardLoadBalancerProfileWithPortAndTimeout, +} + func TestAccAzureRMKubernetesCluster_advancedNetworkingKubenet(t *testing.T) { checkIfShouldRunTestsIndividually(t) testAccAzureRMKubernetesCluster_advancedNetworkingKubenet(t) diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go index eea1555c30be..566cf4686e0d 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go @@ -16,6 +16,28 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) +var kubernetesNodePoolTests = map[string]func(t *testing.T){ + "autoScale": testAccAzureRMKubernetesClusterNodePool_autoScale, + "autoScaleUpdate": testAccAzureRMKubernetesClusterNodePool_autoScaleUpdate, + "availabilityZones": testAccAzureRMKubernetesClusterNodePool_availabilityZones, + "errorForAvailabilitySet": testAccAzureRMKubernetesClusterNodePool_errorForAvailabilitySet, + "multiplePools": testAccAzureRMKubernetesClusterNodePool_multiplePools, + "manualScale": testAccAzureRMKubernetesClusterNodePool_manualScale, + "manualScaleMultiplePools": testAccAzureRMKubernetesClusterNodePool_manualScaleMultiplePools, + "manualScaleMultiplePoolsUpdate": testAccAzureRMKubernetesClusterNodePool_manualScaleMultiplePoolsUpdate, + "manualScaleUpdate": 
testAccAzureRMKubernetesClusterNodePool_manualScaleUpdate, + "manualScaleVMSku": testAccAzureRMKubernetesClusterNodePool_manualScaleVMSku, + "nodeLabels": testAccAzureRMKubernetesClusterNodePool_nodeLabels, + "nodePublicIP": testAccAzureRMKubernetesClusterNodePool_nodePublicIP, + "nodeTaints": testAccAzureRMKubernetesClusterNodePool_nodeTaints, + "requiresImport": testAccAzureRMKubernetesClusterNodePool_requiresImport, + "osDiskSizeGB": testAccAzureRMKubernetesClusterNodePool_osDiskSizeGB, + "virtualNetworkAutomatic": testAccAzureRMKubernetesClusterNodePool_virtualNetworkAutomatic, + "virtualNetworkManual": testAccAzureRMKubernetesClusterNodePool_virtualNetworkManual, + "windows": testAccAzureRMKubernetesClusterNodePool_windows, + "windowsAndLinux": testAccAzureRMKubernetesClusterNodePool_windowsAndLinux, +} + func TestAccAzureRMKubernetesClusterNodePool_autoScale(t *testing.T) { checkIfShouldRunTestsIndividually(t) testAccAzureRMKubernetesClusterNodePool_autoScale(t) diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_other_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_other_resource_test.go index 47f02e40fa45..e7fea21e2160 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_other_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_other_resource_test.go @@ -10,6 +10,23 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" ) +var kubernetesOtherTests = map[string]func(t *testing.T) { + "basicAvailabilitySet": testAccAzureRMKubernetesCluster_basicAvailabilitySet, + "basicVMSS": testAccAzureRMKubernetesCluster_basicVMSS, + "requiresImport": testAccAzureRMKubernetesCluster_requiresImport, + "linuxProfile": testAccAzureRMKubernetesCluster_linuxProfile, + "nodeLabels": testAccAzureRMKubernetesCluster_nodeLabels, + "nodeTaints": testAccAzureRMKubernetesCluster_nodeTaints, + "nodeResourceGroup": testAccAzureRMKubernetesCluster_nodeResourceGroup, + "upgradeConfig": testAccAzureRMKubernetesCluster_upgrade, + "tags": testAccAzureRMKubernetesCluster_tags, + "windowsProfile": testAccAzureRMKubernetesCluster_windowsProfile, + "outboundTypeLoadBalancer": testAccAzureRMKubernetesCluster_outboundTypeLoadBalancer, + "outboundTypeUserDefinedRouting": testAccAzureRMKubernetesCluster_outboundTypeUserDefinedRouting, + "privateClusterOn": testAccAzureRMKubernetesCluster_privateClusterOn, + "privateClusterOff": testAccAzureRMKubernetesCluster_privateClusterOff, +} + func TestAccAzureRMKubernetesCluster_basicAvailabilitySet(t *testing.T) { checkIfShouldRunTestsIndividually(t) testAccAzureRMKubernetesCluster_basicAvailabilitySet(t) diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go index 8f7704aa7e51..874c1f52c3bf 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go @@ -22,115 +22,13 @@ func TestAccAzureRMKubernetes_all(t *testing.T) { // NOTE: this is a combined test rather than separate split out tests to // ease the load on the kubernetes api testCases := map[string]map[string]func(t *testing.T){ - "clusterAddOn": { - "addonProfileAciConnectorLinux": testAccAzureRMKubernetesCluster_addonProfileAciConnectorLinux, - "addonProfileAciConnectorLinuxDisabled": 
testAccAzureRMKubernetesCluster_addonProfileAciConnectorLinuxDisabled, - "addonProfileAzurePolicy": testAccAzureRMKubernetesCluster_addonProfileAzurePolicy, - "addonProfileKubeDashboard": testAccAzureRMKubernetesCluster_addonProfileKubeDashboard, - "addonProfileOMS": testAccAzureRMKubernetesCluster_addonProfileOMS, - "addonProfileOMSToggle": testAccAzureRMKubernetesCluster_addonProfileOMSToggle, - "addonProfileRouting": testAccAzureRMKubernetesCluster_addonProfileRouting, - }, - "auth": { - "apiServerAuthorizedIPRanges": testAccAzureRMKubernetesCluster_apiServerAuthorizedIPRanges, - "enablePodSecurityPolicy": testAccAzureRMKubernetesCluster_enablePodSecurityPolicy, - "managedClusterIdentity": testAccAzureRMKubernetesCluster_managedClusterIdentity, - "roleBasedAccessControl": testAccAzureRMKubernetesCluster_roleBasedAccessControl, - "roleBasedAccessControlAAD": testAccAzureRMKubernetesCluster_roleBasedAccessControlAAD, - "servicePrincipal": testAccAzureRMKubernetesCluster_servicePrincipal, - }, - "network": { - "enableNodePublicIP": testAccAzureRMKubernetesCluster_enableNodePublicIP, - "internalNetwork": testAccAzureRMKubernetesCluster_internalNetwork, - "changingLoadBalancerProfile": testAccAzureRMKubernetesCluster_changingLoadBalancerProfile, - "prefixedLoadBalancerProfile": testAccAzureRMKubernetesCluster_prefixedLoadBalancerProfile, - "standardLoadBalancer": testAccAzureRMKubernetesCluster_standardLoadBalancer, - "standardLoadBalancerComplete": testAccAzureRMKubernetesCluster_standardLoadBalancerComplete, - "standardLoadBalancerProfile": testAccAzureRMKubernetesCluster_standardLoadBalancerProfile, - "standardLoadBalancerProfileComplete": testAccAzureRMKubernetesCluster_standardLoadBalancerProfileComplete, - "advancedNetworkingKubenet": testAccAzureRMKubernetesCluster_advancedNetworkingKubenet, - "advancedNetworkingKubenetComplete": testAccAzureRMKubernetesCluster_advancedNetworkingKubenetComplete, - "advancedNetworkingAzure": testAccAzureRMKubernetesCluster_advancedNetworkingAzure, - "advancedNetworkingAzureComplete": testAccAzureRMKubernetesCluster_advancedNetworkingAzureComplete, - "advancedNetworkingAzureCalicoPolicy": testAccAzureRMKubernetesCluster_advancedNetworkingAzureCalicoPolicy, - "advancedNetworkingAzureCalicoPolicyComplete": testAccAzureRMKubernetesCluster_advancedNetworkingAzureCalicoPolicyComplete, - "advancedNetworkingAzureNPMPolicy": testAccAzureRMKubernetesCluster_advancedNetworkingAzureNPMPolicy, - "advancedNetworkingAzureNPMPolicyComplete": testAccAzureRMKubernetesCluster_advancedNetworkingAzureNPMPolicyComplete, - "basicLoadBalancerProfile": testAccAzureRMKubernetesCluster_basicLoadBalancerProfile, - "standardLoadBalancerProfileWithPortAndTimeout": testAccAzureRMKubernetesCluster_standardLoadBalancerProfileWithPortAndTimeout, - }, - "nodePool": { - "autoScale": testAccAzureRMKubernetesClusterNodePool_autoScale, - "autoScaleUpdate": testAccAzureRMKubernetesClusterNodePool_autoScaleUpdate, - "availabilityZones": testAccAzureRMKubernetesClusterNodePool_availabilityZones, - "errorForAvailabilitySet": testAccAzureRMKubernetesClusterNodePool_errorForAvailabilitySet, - "multiplePools": testAccAzureRMKubernetesClusterNodePool_multiplePools, - "manualScale": testAccAzureRMKubernetesClusterNodePool_manualScale, - "manualScaleMultiplePools": testAccAzureRMKubernetesClusterNodePool_manualScaleMultiplePools, - "manualScaleMultiplePoolsUpdate": testAccAzureRMKubernetesClusterNodePool_manualScaleMultiplePoolsUpdate, - "manualScaleUpdate": 
testAccAzureRMKubernetesClusterNodePool_manualScaleUpdate, - "manualScaleVMSku": testAccAzureRMKubernetesClusterNodePool_manualScaleVMSku, - "nodeLabels": testAccAzureRMKubernetesClusterNodePool_nodeLabels, - "nodePublicIP": testAccAzureRMKubernetesClusterNodePool_nodePublicIP, - "nodeTaints": testAccAzureRMKubernetesClusterNodePool_nodeTaints, - "requiresImport": testAccAzureRMKubernetesClusterNodePool_requiresImport, - "osDiskSizeGB": testAccAzureRMKubernetesClusterNodePool_osDiskSizeGB, - "virtualNetworkAutomatic": testAccAzureRMKubernetesClusterNodePool_virtualNetworkAutomatic, - "virtualNetworkManual": testAccAzureRMKubernetesClusterNodePool_virtualNetworkManual, - "windows": testAccAzureRMKubernetesClusterNodePool_windows, - "windowsAndLinux": testAccAzureRMKubernetesClusterNodePool_windowsAndLinux, - }, - "other": { - "basicAvailabilitySet": testAccAzureRMKubernetesCluster_basicAvailabilitySet, - "basicVMSS": testAccAzureRMKubernetesCluster_basicVMSS, - "requiresImport": testAccAzureRMKubernetesCluster_requiresImport, - "linuxProfile": testAccAzureRMKubernetesCluster_linuxProfile, - "nodeLabels": testAccAzureRMKubernetesCluster_nodeLabels, - "nodeTaints": testAccAzureRMKubernetesCluster_nodeTaints, - "nodeResourceGroup": testAccAzureRMKubernetesCluster_nodeResourceGroup, - "upgradeConfig": testAccAzureRMKubernetesCluster_upgrade, - "tags": testAccAzureRMKubernetesCluster_tags, - "windowsProfile": testAccAzureRMKubernetesCluster_windowsProfile, - "outboundTypeLoadBalancer": testAccAzureRMKubernetesCluster_outboundTypeLoadBalancer, - "outboundTypeUserDefinedRouting": testAccAzureRMKubernetesCluster_outboundTypeUserDefinedRouting, - "privateClusterOn": testAccAzureRMKubernetesCluster_privateClusterOn, - "privateClusterOff": testAccAzureRMKubernetesCluster_privateClusterOff, - }, - "scaling": { - "addAgent": testAccAzureRMKubernetesCluster_addAgent, - "manualScaleIgnoreChanges": testAccAzureRMKubernetesCluster_manualScaleIgnoreChanges, - "removeAgent": testAccAzureRMKubernetesCluster_removeAgent, - "autoScalingEnabledError": testAccAzureRMKubernetesCluster_autoScalingError, - "autoScalingEnabledErrorMax": testAccAzureRMKubernetesCluster_autoScalingErrorMax, - "autoScalingEnabledErrorMin": testAccAzureRMKubernetesCluster_autoScalingErrorMin, - "autoScalingNodeCountUnset": testAccAzureRMKubernetesCluster_autoScalingNodeCountUnset, - "autoScalingNoAvailabilityZones": testAccAzureRMKubernetesCluster_autoScalingNoAvailabilityZones, - "autoScalingWithAvailabilityZones": testAccAzureRMKubernetesCluster_autoScalingWithAvailabilityZones, - }, - "datasource": { - "basic": testAccDataSourceAzureRMKubernetesCluster_basic, - "roleBasedAccessControl": testAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControl, - "roleBasedAccessControlAAD": testAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControlAAD, - "internalNetwork": testAccDataSourceAzureRMKubernetesCluster_internalNetwork, - "advancedNetworkingAzure": testAccDataSourceAzureRMKubernetesCluster_advancedNetworkingAzure, - "advancedNetworkingAzureCalicoPolicy": testAccDataSourceAzureRMKubernetesCluster_advancedNetworkingAzureCalicoPolicy, - "advancedNetworkingAzureNPMPolicy": testAccDataSourceAzureRMKubernetesCluster_advancedNetworkingAzureNPMPolicy, - "advancedNetworkingAzureComplete": testAccDataSourceAzureRMKubernetesCluster_advancedNetworkingAzureComplete, - "advancedNetworkingAzureCalicoPolicyComplete": testAccDataSourceAzureRMKubernetesCluster_advancedNetworkingAzureCalicoPolicyComplete, - 
"advancedNetworkingAzureNPMPolicyComplete": testAccDataSourceAzureRMKubernetesCluster_advancedNetworkingAzureNPMPolicyComplete, - "advancedNetworkingKubenet": testAccDataSourceAzureRMKubernetesCluster_advancedNetworkingKubenet, - "advancedNetworkingKubenetComplete": testAccDataSourceAzureRMKubernetesCluster_advancedNetworkingKubenetComplete, - "addOnProfileOMS": testAccDataSourceAzureRMKubernetesCluster_addOnProfileOMS, - "addOnProfileKubeDashboard": testAccDataSourceAzureRMKubernetesCluster_addOnProfileKubeDashboard, - "addOnProfileAzurePolicy": testAccDataSourceAzureRMKubernetesCluster_addOnProfileAzurePolicy, - "addOnProfileRouting": testAccDataSourceAzureRMKubernetesCluster_addOnProfileRouting, - "autoscalingNoAvailabilityZones": testAccDataSourceAzureRMKubernetesCluster_autoscalingNoAvailabilityZones, - "autoscalingWithAvailabilityZones": testAccDataSourceAzureRMKubernetesCluster_autoscalingWithAvailabilityZones, - "nodeLabels": testAccDataSourceAzureRMKubernetesCluster_nodeLabels, - "nodeTaints": testAccDataSourceAzureRMKubernetesCluster_nodeTaints, - "enableNodePublicIP": testAccDataSourceAzureRMKubernetesCluster_enableNodePublicIP, - "privateCluster": testAccDataSourceAzureRMKubernetesCluster_privateCluster, - }, + "auth": kubernetesAuthTests, + "clusterAddOn": kubernetesAddOnTests, + "datasource": kubernetesDataSourceTests, + "network": kubernetesNetworkAuthTests, + "nodePool": kubernetesNodePoolTests, + "other": kubernetesOtherTests, + "scaling": kubernetesScalingTests, } for group, m := range testCases { diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go index fac436260526..6cca8a17a752 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go @@ -8,6 +8,18 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" ) +var kubernetesScalingTests = map[string]func(t *testing.T) { + "addAgent": testAccAzureRMKubernetesCluster_addAgent, + "manualScaleIgnoreChanges": testAccAzureRMKubernetesCluster_manualScaleIgnoreChanges, + "removeAgent": testAccAzureRMKubernetesCluster_removeAgent, + "autoScalingEnabledError": testAccAzureRMKubernetesCluster_autoScalingError, + "autoScalingEnabledErrorMax": testAccAzureRMKubernetesCluster_autoScalingErrorMax, + "autoScalingEnabledErrorMin": testAccAzureRMKubernetesCluster_autoScalingErrorMin, + "autoScalingNodeCountUnset": testAccAzureRMKubernetesCluster_autoScalingNodeCountUnset, + "autoScalingNoAvailabilityZones": testAccAzureRMKubernetesCluster_autoScalingNoAvailabilityZones, + "autoScalingWithAvailabilityZones": testAccAzureRMKubernetesCluster_autoScalingWithAvailabilityZones, +} + func TestAccAzureRMKubernetesCluster_addAgent(t *testing.T) { checkIfShouldRunTestsIndividually(t) testAccAzureRMKubernetesCluster_addAgent(t) From a5299f396add5a7f95d1eeccd0bb2eb1fc4ca038 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 27 May 2020 16:22:03 +0200 Subject: [PATCH 05/55] r/kubernetes_cluster: comments from PR review --- ...ubernetes_cluster_network_resource_test.go | 69 +++++++++---------- ...ernetes_cluster_node_pool_resource_test.go | 5 -- 2 files changed, 32 insertions(+), 42 deletions(-) diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_network_resource_test.go 
b/azurerm/internal/services/containers/tests/kubernetes_cluster_network_resource_test.go index 23a54bdde6b8..81784ae013f7 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_network_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_network_resource_test.go @@ -11,14 +11,14 @@ import ( ) var kubernetesNetworkAuthTests = map[string]func(t *testing.T){ - "enableNodePublicIP": testAccAzureRMKubernetesCluster_enableNodePublicIP, - "internalNetwork": testAccAzureRMKubernetesCluster_internalNetwork, - "changingLoadBalancerProfile": testAccAzureRMKubernetesCluster_changingLoadBalancerProfile, - "prefixedLoadBalancerProfile": testAccAzureRMKubernetesCluster_prefixedLoadBalancerProfile, - "standardLoadBalancer": testAccAzureRMKubernetesCluster_standardLoadBalancer, - "standardLoadBalancerComplete": testAccAzureRMKubernetesCluster_standardLoadBalancerComplete, - "standardLoadBalancerProfile": testAccAzureRMKubernetesCluster_standardLoadBalancerProfile, - "standardLoadBalancerProfileComplete": testAccAzureRMKubernetesCluster_standardLoadBalancerProfileComplete, + "enableNodePublicIP": testAccAzureRMKubernetesCluster_enableNodePublicIP, + "internalNetwork": testAccAzureRMKubernetesCluster_internalNetwork, + "changingLoadBalancerProfile": testAccAzureRMKubernetesCluster_changingLoadBalancerProfile, + "prefixedLoadBalancerProfile": testAccAzureRMKubernetesCluster_prefixedLoadBalancerProfile, + "standardLoadBalancer": testAccAzureRMKubernetesCluster_standardLoadBalancer, + "standardLoadBalancerComplete": testAccAzureRMKubernetesCluster_standardLoadBalancerComplete, + "standardLoadBalancerProfile": testAccAzureRMKubernetesCluster_standardLoadBalancerProfile, + "standardLoadBalancerProfileComplete": testAccAzureRMKubernetesCluster_standardLoadBalancerProfileComplete, "advancedNetworkingKubenet": testAccAzureRMKubernetesCluster_advancedNetworkingKubenet, "advancedNetworkingKubenetComplete": testAccAzureRMKubernetesCluster_advancedNetworkingKubenetComplete, "advancedNetworkingAzure": testAccAzureRMKubernetesCluster_advancedNetworkingAzure, @@ -1209,11 +1209,10 @@ resource "azurerm_kubernetes_cluster" "test" { } default_node_pool { - name = "default" - node_count = 2 - vm_size = "Standard_DS2_v2" - vnet_subnet_id = azurerm_subnet.test.id - orchestrator_version = "%s" + name = "default" + node_count = 2 + vm_size = "Standard_DS2_v2" + vnet_subnet_id = azurerm_subnet.test.id } identity { @@ -1225,7 +1224,7 @@ resource "azurerm_kubernetes_cluster" "test" { load_balancer_sku = "Standard" } } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, currentKubernetesVersion, data.RandomInteger, currentKubernetesVersion) +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, currentKubernetesVersion, data.RandomInteger) } func testAccAzureRMKubernetesCluster_standardLoadBalancerCompleteConfig(data acceptance.TestData) string { @@ -1287,11 +1286,10 @@ resource "azurerm_kubernetes_cluster" "test" { } default_node_pool { - name = "default" - node_count = 2 - vm_size = "Standard_DS2_v2" - vnet_subnet_id = azurerm_subnet.test.id - orchestrator_version = "%s" + name = "default" + node_count = 2 + vm_size = "Standard_DS2_v2" + vnet_subnet_id = azurerm_subnet.test.id } identity { @@ -1350,11 +1348,10 @@ resource "azurerm_kubernetes_cluster" "test" { } default_node_pool { - name = "default" - node_count = 2 - vm_size = 
"Standard_DS2_v2" - vnet_subnet_id = azurerm_subnet.test.id - orchestrator_version = "%s" + name = "default" + node_count = 2 + vm_size = "Standard_DS2_v2" + vnet_subnet_id = azurerm_subnet.test.id } identity { @@ -1369,7 +1366,7 @@ resource "azurerm_kubernetes_cluster" "test" { } } } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, currentKubernetesVersion, data.RandomInteger, currentKubernetesVersion) +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, currentKubernetesVersion, data.RandomInteger) } func testAccAzureRMKubernetesCluster_standardLoadBalancerProfileCompleteConfig(data acceptance.TestData) string { @@ -1533,11 +1530,10 @@ resource "azurerm_kubernetes_cluster" "test" { } default_node_pool { - name = "default" - node_count = 2 - vm_size = "Standard_DS2_v2" - vnet_subnet_id = azurerm_subnet.test.id - orchestrator_version = "%s" + name = "default" + node_count = 2 + vm_size = "Standard_DS2_v2" + vnet_subnet_id = azurerm_subnet.test.id } identity { @@ -1552,7 +1548,7 @@ resource "azurerm_kubernetes_cluster" "test" { } } } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, currentKubernetesVersion, data.RandomInteger, currentKubernetesVersion) +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, currentKubernetesVersion, data.RandomInteger) } func testAccAzureRMKubernetesCluster_prefixedLoadBalancerProfileConfig(data acceptance.TestData) string { @@ -1603,11 +1599,10 @@ resource "azurerm_kubernetes_cluster" "test" { } default_node_pool { - name = "default" - node_count = 2 - vm_size = "Standard_DS2_v2" - vnet_subnet_id = azurerm_subnet.test.id - orchestrator_version = "%s" + name = "default" + node_count = 2 + vm_size = "Standard_DS2_v2" + vnet_subnet_id = azurerm_subnet.test.id } identity { @@ -1622,7 +1617,7 @@ resource "azurerm_kubernetes_cluster" "test" { } } } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, currentKubernetesVersion, data.RandomInteger, currentKubernetesVersion) +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, currentKubernetesVersion, data.RandomInteger) } func testAccAzureRMKubernetesCluster_changingLoadBalancerProfileConfigIPPrefix(data acceptance.TestData) string { diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go index 566cf4686e0d..c19dd452bd73 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go @@ -688,11 +688,6 @@ func testCheckAzureRMKubernetesNodePoolExists(resourceName string) resource.Test return fmt.Errorf("Error parsing kubernetes cluster id: %+v", err) } - OrchestratorVersion := rs.Primary.Attributes["orchestrator_version"] - if OrchestratorVersion == "" { - return fmt.Errorf("Error parsing orchestrator version") - } - agentPool, err := client.Get(ctx, parsedK8sId.ResourceGroup, parsedK8sId.Name, name) if err != nil { return fmt.Errorf("Bad: Get on 
kubernetesClustersClient: %+v", err) From c86ac069658b03d8a4e06229124d1c0de7a346b0 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 28 May 2020 12:27:15 +0200 Subject: [PATCH 06/55] r/kubernetes_cluster_(node_pool): validating the orchestrator version prior to deployment This raises an error with more information about which Kubernetes Versions are supported by the Kubernetes Cluster - and prompts a user to upgrade the Kubernetes Cluster first if necessary. This commit also adds acceptance tests to confirm the upgrade scenarios --- .../kubernetes_cluster_node_pool_resource.go | 30 +- .../containers/kubernetes_cluster_resource.go | 20 +- .../containers/kubernetes_cluster_validate.go | 73 ++++ .../containers/kubernetes_nodepool.go | 1 - .../tests/kubernetes_cluster_resource_test.go | 1 + .../tests/kubernetes_cluster_upgrade_test.go | 328 ++++++++++++++++++ 6 files changed, 437 insertions(+), 16 deletions(-) create mode 100644 azurerm/internal/services/containers/tests/kubernetes_cluster_upgrade_test.go diff --git a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go index dcb95a333543..bbb06ae2acba 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go @@ -162,8 +162,9 @@ func resourceArmKubernetesClusterNodePool() *schema.Resource { } func resourceArmKubernetesClusterNodePoolCreate(d *schema.ResourceData, meta interface{}) error { - clustersClient := meta.(*clients.Client).Containers.KubernetesClustersClient - poolsClient := meta.(*clients.Client).Containers.AgentPoolsClient + containersClient := meta.(*clients.Client).Containers + clustersClient := containersClient.KubernetesClustersClient + poolsClient := containersClient.AgentPoolsClient ctx, cancel := timeouts.ForCreate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -235,6 +236,10 @@ func resourceArmKubernetesClusterNodePoolCreate(d *schema.ResourceData, meta int orchestratorVersion := d.Get("orchestrator_version").(string) if orchestratorVersion != "" { + if err := validateNodePoolSupportsVersion(ctx, containersClient, resourceGroup, clusterName, name, orchestratorVersion); err != nil { + return err + } + profile.OrchestratorVersion = utils.String(orchestratorVersion) } @@ -322,7 +327,8 @@ func resourceArmKubernetesClusterNodePoolCreate(d *schema.ResourceData, meta int } func resourceArmKubernetesClusterNodePoolUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clients.Client).Containers.AgentPoolsClient + containersClient := meta.(*clients.Client).Containers + client := containersClient.AgentPoolsClient ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() @@ -390,15 +396,20 @@ func resourceArmKubernetesClusterNodePoolUpdate(d *schema.ResourceData, meta int props.NodeTaints = nodeTaints } + if d.HasChange("orchestrator_version") { + orchestratorVersion := d.Get("orchestrator_version").(string) + if err := validateNodePoolSupportsVersion(ctx, containersClient, id.ResourceGroup, id.ClusterName, id.Name, orchestratorVersion); err != nil { + return err + } + + props.OrchestratorVersion = utils.String(orchestratorVersion) + } + if d.HasChange("tags") { t := d.Get("tags").(map[string]interface{}) props.Tags = tags.Expand(t) } - if d.HasChange("orchestrator_version") { - props.OrchestratorVersion = 
utils.String(d.Get("orchestrator_version").(string)) - } - // validate the auto-scale fields are both set/unset to prevent a continual diff maxCount := 0 if props.MaxCount != nil { @@ -522,6 +533,7 @@ func resourceArmKubernetesClusterNodePoolRead(d *schema.ResourceData, meta inter return fmt.Errorf("setting `node_taints`: %+v", err) } + d.Set("orchestrator_version", props.OrchestratorVersion) osDiskSizeGB := 0 if props.OsDiskSizeGB != nil { osDiskSizeGB = int(*props.OsDiskSizeGB) @@ -530,10 +542,6 @@ func resourceArmKubernetesClusterNodePoolRead(d *schema.ResourceData, meta inter d.Set("os_type", string(props.OsType)) d.Set("vnet_subnet_id", props.VnetSubnetID) d.Set("vm_size", string(props.VMSize)) - - if props.OrchestratorVersion != nil { - d.Set("orchestrator_version", props.OrchestratorVersion) - } } return tags.FlattenAndSet(d, resp.Tags) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_resource.go index 0bfb817a3cee..3683d1b86c79 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_resource.go @@ -575,6 +575,8 @@ func resourceArmKubernetesClusterCreate(d *schema.ResourceData, meta interface{} linuxProfileRaw := d.Get("linux_profile").([]interface{}) linuxProfile := expandKubernetesClusterLinuxProfile(linuxProfileRaw) + // NOTE: we /could/ validate the default node pool version here - but since the entire cluster deployment + // will fail here this should be fine to omit for the Create agentProfiles, err := ExpandDefaultNodePool(d) if err != nil { return fmt.Errorf("expanding `default_node_pool`: %+v", err) @@ -686,9 +688,10 @@ func resourceArmKubernetesClusterCreate(d *schema.ResourceData, meta interface{} } func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{}) error { - nodePoolsClient := meta.(*clients.Client).Containers.AgentPoolsClient - clusterClient := meta.(*clients.Client).Containers.KubernetesClustersClient - env := meta.(*clients.Client).Containers.Environment + containersClient := meta.(*clients.Client).Containers + nodePoolsClient := containersClient.AgentPoolsClient + clusterClient := containersClient.KubernetesClustersClient + env := containersClient.Environment ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d) defer cancel() tenantId := meta.(*clients.Client).Account.TenantId @@ -905,7 +908,16 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} } agentProfile := ConvertDefaultNodePoolToAgentPool(agentProfiles) - agentPool, err := nodePoolsClient.CreateOrUpdate(ctx, id.ResourceGroup, id.Name, *agentProfile.Name, agentProfile) + nodePoolName := *agentProfile.Name + + // if a users specified a version - confirm that version is supported on the cluster + if nodePoolVersion := agentProfile.ManagedClusterAgentPoolProfileProperties.OrchestratorVersion; nodePoolVersion != nil { + if err := validateNodePoolSupportsVersion(ctx, containersClient, id.ResourceGroup, id.Name, nodePoolName, *nodePoolVersion); err != nil { + return err + } + } + + agentPool, err := nodePoolsClient.CreateOrUpdate(ctx, id.ResourceGroup, id.Name, nodePoolName, agentProfile) if err != nil { return fmt.Errorf("updating Default Node Pool %q (Resource Group %q): %+v", id.Name, id.ResourceGroup, err) } diff --git a/azurerm/internal/services/containers/kubernetes_cluster_validate.go b/azurerm/internal/services/containers/kubernetes_cluster_validate.go 
index e70c9af2a9b5..902aae968cd8 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_validate.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_validate.go @@ -1,11 +1,15 @@ package containers import ( + "context" "fmt" + "net/http" "strings" "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/client" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) func validateKubernetesCluster(d *schema.ResourceData, cluster *containerservice.ManagedCluster, resourceGroup, name string) error { @@ -217,3 +221,72 @@ Principal or Managed Identity for Cluster Authentication - but not both. In order to create this Kubernetes Cluster, please remove either the 'identity' block or the 'service_principal' block. `) + +// returned when the Control Plane for the AKS Cluster must be upgraded in order to deploy this version to the Node Pool +var clusterControlPlaneMustBeUpgradedError = func(resourceGroup, clusterName, nodePoolName string, clusterVersion *string, desiredNodePoolVersion string, availableVersions []string) error { + versions := make([]string, 0) + for _, version := range availableVersions { + versions = append(versions, fmt.Sprintf(" * %s", version)) + } + versionsList := strings.Join(versions, "\n") + clusterVersionDetails := "We were unable to determine the version of Kubernetes available on the Kubernetes Cluster." + if clusterVersion != nil { + clusterVersionDetails = fmt.Sprintf("The Kubernetes Cluster is running version %q.", *clusterVersion) + } + + return fmt.Errorf(` +The Kubernetes/Orchestrator Version %q is not available for Node Pool %q. + +Please confirm that this version is supported by the Kubernetes Cluster %q +(Resource Group %q) - which may need to be upgraded first. + +%s + +The supported Orchestrator Versions for this Node Pool/supported by this Kubernetes Cluster are: +%s + +Node Pools cannot use a version of Kubernetes that is not supported on the Control Plane. More +details can be found at https://aka.ms/version-skew-policy. 
+`, desiredNodePoolVersion, nodePoolName, clusterName, resourceGroup, clusterVersionDetails, versionsList) +} + +func validateNodePoolSupportsVersion(ctx context.Context, client *client.Client, resourceGroup, clusterName, nodePoolName, desiredNodePoolVersion string) error { + // confirm the version being used is >= the version of the control plane + versions, err := client.AgentPoolsClient.GetAvailableAgentPoolVersions(ctx, resourceGroup, clusterName) + if err != nil { + return fmt.Errorf("retrieving Available Agent Pool Versions for Kubernetes Cluster %q (Resource Group %q): %+v", clusterName, resourceGroup, err) + } + versionExists := false + supportedVersions := make([]string, 0) + if versions.AgentPoolAvailableVersionsProperties != nil && versions.AgentPoolAvailableVersionsProperties.AgentPoolVersions != nil { + for _, version := range *versions.AgentPoolAvailableVersionsProperties.AgentPoolVersions { + if version.KubernetesVersion == nil { + continue + } + + supportedVersions = append(supportedVersions, *version.KubernetesVersion) + if *version.KubernetesVersion == desiredNodePoolVersion { + versionExists = true + } + } + } + + if !versionExists { + cluster, err := client.KubernetesClustersClient.Get(ctx, resourceGroup, clusterName) + if err != nil { + if !utils.ResponseWasStatusCode(cluster.Response, http.StatusUnauthorized) { + return fmt.Errorf("retrieving Kubernetes Cluster %q (Resource Group %q): %+v", clusterName, resourceGroup, err) + } + } + + // nilable since a user may not necessarily have access, and this is trying to be helpful + var clusterVersion *string + if props := cluster.ManagedClusterProperties; props != nil { + clusterVersion = props.KubernetesVersion + } + + return clusterControlPlaneMustBeUpgradedError(resourceGroup, clusterName, nodePoolName, clusterVersion, desiredNodePoolVersion, supportedVersions) + } + + return nil +} diff --git a/azurerm/internal/services/containers/kubernetes_nodepool.go b/azurerm/internal/services/containers/kubernetes_nodepool.go index 52b3f80c9144..f1dcc5d92816 100644 --- a/azurerm/internal/services/containers/kubernetes_nodepool.go +++ b/azurerm/internal/services/containers/kubernetes_nodepool.go @@ -189,7 +189,6 @@ func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedC OsType: containerservice.Linux, // // TODO: support these in time - // OrchestratorVersion: nil, // ScaleSetEvictionPolicy: "", // ScaleSetPriority: "", } diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go index 874c1f52c3bf..a4477229c8ba 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go @@ -29,6 +29,7 @@ func TestAccAzureRMKubernetes_all(t *testing.T) { "nodePool": kubernetesNodePoolTests, "other": kubernetesOtherTests, "scaling": kubernetesScalingTests, + "upgrade": kubernetesUpgradeTests, } for group, m := range testCases { diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_upgrade_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_upgrade_test.go new file mode 100644 index 000000000000..891ccadbb663 --- /dev/null +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_upgrade_test.go @@ -0,0 +1,328 @@ +package tests + +import ( + "fmt" + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + 
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" +) + +var kubernetesUpgradeTests = map[string]func(t *testing.T){ + "upgradeControlPlane": testAccAzureRMKubernetesCluster_upgradeControlPlane, + "upgradeControlPlaneAndDefaultNodePoolTogether": testAccAzureRMKubernetesCluster_upgradeControlPlaneAndDefaultNodePoolTogether, + "upgradeControlPlaneAndDefaultNodePoolTwoPhase": testAccAzureRMKubernetesCluster_upgradeControlPlaneAndDefaultNodePoolTwoPhase, + "upgradeNodePoolBeforeControlPlaneFails": testAccAzureRMKubernetesCluster_upgradeNodePoolBeforeControlPlaneFails, + "upgradeCustomNodePoolAfterControlPlane": testAccAzureRMKubernetesCluster_upgradeCustomNodePoolAfterControlPlane, + "upgradeCustomNodePoolBeforeControlPlaneFails": testAccAzureRMKubernetesCluster_upgradeCustomNodePoolBeforeControlPlaneFails, +} + +func TestAccAzureRMKubernetesCluster_upgradeControlPlane(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccAzureRMKubernetesCluster_upgradeControlPlane(t) +} + +func testAccAzureRMKubernetesCluster_upgradeControlPlane(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesCluster_upgradeControlPlaneConfig(data, olderKubernetesVersion), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(data.ResourceName), + resource.TestCheckResourceAttr(data.ResourceName, "kubernetes_version", olderKubernetesVersion), + resource.TestCheckResourceAttr(data.ResourceName, "default_node_pool.0.orchestrator_version", olderKubernetesVersion), + ), + }, + data.ImportStep(), + { + Config: testAccAzureRMKubernetesCluster_upgradeControlPlaneConfig(data, currentKubernetesVersion), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(data.ResourceName), + // the control plane should have been upgraded but the default node pool shouldn't have been + // TODO: confirm if we can roll the default node pool if the value is unset in the config + resource.TestCheckResourceAttr(data.ResourceName, "kubernetes_version", currentKubernetesVersion), + resource.TestCheckResourceAttr(data.ResourceName, "default_node_pool.0.orchestrator_version", olderKubernetesVersion), + ), + }, + data.ImportStep(), + }, + }) +} + +func TestAccAzureRMKubernetesCluster_upgradeControlPlaneAndDefaultNodePoolTogether(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccAzureRMKubernetesCluster_upgradeControlPlaneAndDefaultNodePoolTogether(t) +} + +func testAccAzureRMKubernetesCluster_upgradeControlPlaneAndDefaultNodePoolTogether(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesCluster_upgradeControlPlaneDefaultNodePoolConfig(data, olderKubernetesVersion, olderKubernetesVersion), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(data.ResourceName), + resource.TestCheckResourceAttr(data.ResourceName, "kubernetes_version", olderKubernetesVersion), + resource.TestCheckResourceAttr(data.ResourceName, 
"default_node_pool.0.orchestrator_version", olderKubernetesVersion), + ), + }, + data.ImportStep(), + { + Config: testAccAzureRMKubernetesCluster_upgradeControlPlaneDefaultNodePoolConfig(data, currentKubernetesVersion, currentKubernetesVersion), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(data.ResourceName), + resource.TestCheckResourceAttr(data.ResourceName, "kubernetes_version", currentKubernetesVersion), + resource.TestCheckResourceAttr(data.ResourceName, "default_node_pool.0.orchestrator_version", currentKubernetesVersion), + ), + }, + data.ImportStep(), + }, + }) +} + +func TestAccAzureRMKubernetesCluster_upgradeControlPlaneAndDefaultNodePoolTwoPhase(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccAzureRMKubernetesCluster_upgradeControlPlaneAndDefaultNodePoolTwoPhase(t) +} + +func testAccAzureRMKubernetesCluster_upgradeControlPlaneAndDefaultNodePoolTwoPhase(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesCluster_upgradeControlPlaneDefaultNodePoolConfig(data, olderKubernetesVersion, olderKubernetesVersion), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(data.ResourceName), + resource.TestCheckResourceAttr(data.ResourceName, "kubernetes_version", olderKubernetesVersion), + resource.TestCheckResourceAttr(data.ResourceName, "default_node_pool.0.orchestrator_version", olderKubernetesVersion), + ), + }, + data.ImportStep(), + { + Config: testAccAzureRMKubernetesCluster_upgradeControlPlaneDefaultNodePoolConfig(data, currentKubernetesVersion, olderKubernetesVersion), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(data.ResourceName), + resource.TestCheckResourceAttr(data.ResourceName, "kubernetes_version", currentKubernetesVersion), + resource.TestCheckResourceAttr(data.ResourceName, "default_node_pool.0.orchestrator_version", olderKubernetesVersion), + ), + }, + data.ImportStep(), + { + Config: testAccAzureRMKubernetesCluster_upgradeControlPlaneDefaultNodePoolConfig(data, currentKubernetesVersion, currentKubernetesVersion), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(data.ResourceName), + resource.TestCheckResourceAttr(data.ResourceName, "kubernetes_version", currentKubernetesVersion), + resource.TestCheckResourceAttr(data.ResourceName, "default_node_pool.0.orchestrator_version", currentKubernetesVersion), + ), + }, + data.ImportStep(), + }, + }) +} + +func TestAccAzureRMKubernetesCluster_upgradeNodePoolBeforeControlPlaneFails(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccAzureRMKubernetesCluster_upgradeNodePoolBeforeControlPlaneFails(t) +} + +func testAccAzureRMKubernetesCluster_upgradeNodePoolBeforeControlPlaneFails(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesCluster_upgradeControlPlaneDefaultNodePoolConfig(data, olderKubernetesVersion, olderKubernetesVersion), + Check: resource.ComposeTestCheckFunc( + 
testCheckAzureRMKubernetesClusterExists(data.ResourceName), + resource.TestCheckResourceAttr(data.ResourceName, "kubernetes_version", olderKubernetesVersion), + resource.TestCheckResourceAttr(data.ResourceName, "default_node_pool.0.orchestrator_version", olderKubernetesVersion), + ), + }, + data.ImportStep(), + { + Config: testAccAzureRMKubernetesCluster_upgradeControlPlaneDefaultNodePoolConfig(data, olderKubernetesVersion, currentKubernetesVersion), + ExpectError: regexp.MustCompile("Node Pools cannot use a version of Kubernetes that is not supported on the Control Plane."), + }, + }, + }) +} + +func TestAccAzureRMKubernetesCluster_upgradeCustomNodePoolAfterControlPlane(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccAzureRMKubernetesCluster_upgradeCustomNodePoolAfterControlPlane(t) +} + +func testAccAzureRMKubernetesCluster_upgradeCustomNodePoolAfterControlPlane(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") + nodePoolName := "azurerm_kubernetes_cluster_node_pool.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + // all on the older version + Config: testAccAzureRMKubernetesCluster_upgradeVersionsConfig(data, olderKubernetesVersion, olderKubernetesVersion, olderKubernetesVersion), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(data.ResourceName), + resource.TestCheckResourceAttr(data.ResourceName, "kubernetes_version", olderKubernetesVersion), + resource.TestCheckResourceAttr(data.ResourceName, "default_node_pool.0.orchestrator_version", olderKubernetesVersion), + resource.TestCheckResourceAttr(nodePoolName, "orchestrator_version", olderKubernetesVersion), + ), + }, + data.ImportStep(), + { + // upgrade the control plane + Config: testAccAzureRMKubernetesCluster_upgradeVersionsConfig(data, currentKubernetesVersion, olderKubernetesVersion, olderKubernetesVersion), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(data.ResourceName), + resource.TestCheckResourceAttr(data.ResourceName, "kubernetes_version", currentKubernetesVersion), + resource.TestCheckResourceAttr(data.ResourceName, "default_node_pool.0.orchestrator_version", olderKubernetesVersion), + resource.TestCheckResourceAttr(nodePoolName, "orchestrator_version", olderKubernetesVersion), + ), + }, + data.ImportStep(), + { + // upgrade the node pool + Config: testAccAzureRMKubernetesCluster_upgradeVersionsConfig(data, currentKubernetesVersion, olderKubernetesVersion, currentKubernetesVersion), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(data.ResourceName), + resource.TestCheckResourceAttr(data.ResourceName, "kubernetes_version", currentKubernetesVersion), + resource.TestCheckResourceAttr(data.ResourceName, "default_node_pool.0.orchestrator_version", olderKubernetesVersion), + resource.TestCheckResourceAttr(nodePoolName, "orchestrator_version", currentKubernetesVersion), + ), + }, + data.ImportStep(), + }, + }) +} + +func TestAccAzureRMKubernetesCluster_upgradeCustomNodePoolBeforeControlPlaneFails(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccAzureRMKubernetesCluster_upgradeCustomNodePoolBeforeControlPlaneFails(t) +} + +func testAccAzureRMKubernetesCluster_upgradeCustomNodePoolBeforeControlPlaneFails(t *testing.T) { + data := acceptance.BuildTestData(t, 
"azurerm_kubernetes_cluster", "test") + nodePoolName := "azurerm_kubernetes_cluster_node_pool.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + // all on the older version + Config: testAccAzureRMKubernetesCluster_upgradeVersionsConfig(data, olderKubernetesVersion, olderKubernetesVersion, olderKubernetesVersion), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(data.ResourceName), + resource.TestCheckResourceAttr(data.ResourceName, "kubernetes_version", olderKubernetesVersion), + resource.TestCheckResourceAttr(data.ResourceName, "default_node_pool.0.orchestrator_version", olderKubernetesVersion), + resource.TestCheckResourceAttr(nodePoolName, "orchestrator_version", olderKubernetesVersion), + ), + }, + data.ImportStep(), + { + // upgrade the node pool + Config: testAccAzureRMKubernetesCluster_upgradeVersionsConfig(data, olderKubernetesVersion, olderKubernetesVersion, currentKubernetesVersion), + ExpectError: regexp.MustCompile("Node Pools cannot use a version of Kubernetes that is not supported on the Control Plane."), + }, + }, + }) +} + +func testAccAzureRMKubernetesCluster_upgradeControlPlaneConfig(data acceptance.TestData, controlPlaneVersion string) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-aks-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + kubernetes_version = %q + + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + } + + identity { + type = "SystemAssigned" + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, controlPlaneVersion) +} + +func testAccAzureRMKubernetesCluster_upgradeControlPlaneDefaultNodePoolConfig(data acceptance.TestData, controlPlaneVersion, defaultNodePoolVersion string) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-aks-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + kubernetes_version = %q + + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + orchestrator_version = %q + } + + identity { + type = "SystemAssigned" + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, controlPlaneVersion, defaultNodePoolVersion) +} + +func testAccAzureRMKubernetesCluster_upgradeVersionsConfig(data acceptance.TestData, controlPlaneVersion, defaultNodePoolVersion, customNodePoolVersion string) string { + template := testAccAzureRMKubernetesCluster_upgradeControlPlaneDefaultNodePoolConfig(data, controlPlaneVersion, defaultNodePoolVersion) + return fmt.Sprintf(` +%s + +resource "azurerm_kubernetes_cluster_node_pool" "test" { + name = "internal" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_DS2_v2" + node_count = 1 + orchestrator_version = %q +} +`, template, customNodePoolVersion) +} From 
3ef2bdc03e28bf23ef5541d65680ac811015adf5 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 28 May 2020 12:28:52 +0200 Subject: [PATCH 07/55] r/kubernetes_cluster: mapping `spot_max_price` for the default node pool --- azurerm/internal/services/containers/kubernetes_nodepool.go | 1 + 1 file changed, 1 insertion(+) diff --git a/azurerm/internal/services/containers/kubernetes_nodepool.go b/azurerm/internal/services/containers/kubernetes_nodepool.go index f1dcc5d92816..d1947ce1d373 100644 --- a/azurerm/internal/services/containers/kubernetes_nodepool.go +++ b/azurerm/internal/services/containers/kubernetes_nodepool.go @@ -155,6 +155,7 @@ func ConvertDefaultNodePoolToAgentPool(input *[]containerservice.ManagedClusterA EnableNodePublicIP: defaultCluster.EnableNodePublicIP, ScaleSetPriority: defaultCluster.ScaleSetPriority, ScaleSetEvictionPolicy: defaultCluster.ScaleSetEvictionPolicy, + SpotMaxPrice: defaultCluster.SpotMaxPrice, NodeLabels: defaultCluster.NodeLabels, NodeTaints: defaultCluster.NodeTaints, Tags: defaultCluster.Tags, From 84cd429f0938c2b764b1f679e542d2844c446fa0 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 28 May 2020 12:30:43 +0200 Subject: [PATCH 08/55] r/kubernetes_cluster_(node_pool): adding a note covering the version constraints for AKS --- website/docs/r/kubernetes_cluster.html.markdown | 2 ++ website/docs/r/kubernetes_cluster_node_pool.html.markdown | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 2ee8d38ec3b2..37a6f9b224df 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -233,6 +233,8 @@ If `enable_auto_scaling` is set to `false`, then the following fields can also b * `orchestrator_version` - (Optional) Version of Kubernetes used for the Agents. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade) +-> **Note:** This version must be supported by the Kubernetes Cluster - as such the version of Kubernetes used on the Cluster/Control Plane may need to be upgraded first. + --- A `http_application_routing` block supports the following: diff --git a/website/docs/r/kubernetes_cluster_node_pool.html.markdown b/website/docs/r/kubernetes_cluster_node_pool.html.markdown index 723a2a470840..eebe1a764068 100644 --- a/website/docs/r/kubernetes_cluster_node_pool.html.markdown +++ b/website/docs/r/kubernetes_cluster_node_pool.html.markdown @@ -90,6 +90,8 @@ The following arguments are supported: * `orchestrator_version` - (Optional) Version of Kubernetes used for the Agents. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade) +-> **Note:** This version must be supported by the Kubernetes Cluster - as such the version of Kubernetes used on the Cluster/Control Plane may need to be upgraded first. + * `vnet_subnet_id` - (Optional) The ID of the Subnet where this Node Pool should exist. -> **NOTE:** At this time the `vnet_subnet_id` must be the same for all node pools in the cluster @@ -120,8 +122,6 @@ The following attributes are exported: ## Timeouts - - The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: * `create` - (Defaults to 60 minutes) Used when creating the Kubernetes Cluster Node Pool. 
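The documentation notes added in this patch describe the ordering rule that the new validateNodePoolSupportsVersion check enforces: a node pool may only use an `orchestrator_version` that the control plane already supports, so the cluster's `kubernetes_version` may need to be raised first. As a rough illustration of the two-phase upgrade flow exercised by the acceptance tests above (the resource names and version numbers below are placeholders for illustration only, not values taken from this patch series):

resource "azurerm_kubernetes_cluster" "example" {
  # name, location, resource_group_name and dns_prefix omitted for brevity
  # NOTE: the version numbers below are placeholders, not values from this patch

  # Phase 1: upgrade the control plane on its own
  kubernetes_version = "1.16.9"

  default_node_pool {
    name       = "default"
    node_count = 1
    vm_size    = "Standard_DS2_v2"

    # Phase 2: once the control plane is on the newer version, raise this in a
    # follow-up apply. Bumping it ahead of the control plane now fails early with
    # the "Node Pools cannot use a version of Kubernetes that is not supported on
    # the Control Plane" error introduced in this series.
    orchestrator_version = "1.15.11"
  }

  identity {
    type = "SystemAssigned"
  }
}

The same ordering applies to separately-managed `azurerm_kubernetes_cluster_node_pool` resources, which is what the upgradeCustomNodePool acceptance tests above cover.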
From fb3ec286478336d3d4e53006c4c9e0d39142cb7b Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 28 May 2020 12:33:38 +0200 Subject: [PATCH 09/55] r/kubernetes_cluster: Azure Policy is not supported in US Government. Fixes #6702. --- azurerm/internal/services/containers/kubernetes_addons.go | 1 + website/docs/r/kubernetes_cluster.html.markdown | 2 ++ 2 files changed, 3 insertions(+) diff --git a/azurerm/internal/services/containers/kubernetes_addons.go b/azurerm/internal/services/containers/kubernetes_addons.go index becc0910065b..28d9e64b9409 100644 --- a/azurerm/internal/services/containers/kubernetes_addons.go +++ b/azurerm/internal/services/containers/kubernetes_addons.go @@ -32,6 +32,7 @@ var unsupportedAddonsForEnvironment = map[string][]string{ httpApplicationRoutingKey, // https://github.com/terraform-providers/terraform-provider-azurerm/issues/5960 }, azure.USGovernmentCloud.Name: { + azurePolicyKey, // https://github.com/terraform-providers/terraform-provider-azurerm/issues/6702 httpApplicationRoutingKey, // https://github.com/terraform-providers/terraform-provider-azurerm/issues/5960 }, } diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 37a6f9b224df..78c797b0d3df 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -148,6 +148,8 @@ A `addon_profile` block supports the following: * `azure_policy` - (Optional) A `azure_policy` block as defined below. For more details please visit [Understand Azure Policy for Azure Kubernetes Service](https://docs.microsoft.com/en-ie/azure/governance/policy/concepts/rego-for-aks) +-> **NOTE:** At this time Azure Policy is not supported in Azure US Government. + -> **NOTE**: Azure Policy for Azure Kubernetes Service is currently in preview and not available to subscriptions that have not [opted-in](https://docs.microsoft.com/en-us/azure/governance/policy/concepts/rego-for-aks?toc=/azure/aks/toc.json) to join `Azure Policy` preview. * `http_application_routing` - (Optional) A `http_application_routing` block as defined below. 
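To illustrate the effect of the unsupportedAddonsForEnvironment entry added above: when the provider targets the Azure US Government environment, enabling the Azure Policy add-on is expected to be rejected by the provider with an explicit error instead of surfacing as a deployment failure from the API. The snippet below is only a sketch; the resource names are placeholders and the provider `environment` value shown is illustrative rather than taken from this patch.

provider "azurerm" {
  # illustrative; use the value your provider version expects for Azure US Government
  environment = "usgovernment"
  features {}
}

resource "azurerm_kubernetes_cluster" "example" {
  # other required arguments omitted for brevity

  addon_profile {
    azure_policy {
      # Not available in Azure US Government, so this configuration is now
      # rejected by the provider's add-on validation.
      enabled = true
    }
  }
}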
From 6d2256ceb824e521d7515d387970f70e56305490 Mon Sep 17 00:00:00 2001 From: Aris van Ommeren Date: Fri, 24 Apr 2020 21:21:40 +0200 Subject: [PATCH 10/55] init kubernetes_cluster auto_scale_profile --- .../containers/kubernetes_cluster_resource.go | 176 ++++++++++++++++++ ...ubernetes_cluster_scaling_resource_test.go | 86 +++++++++ .../services/containers/validate/duration.go | 20 ++ .../docs/d/kubernetes_cluster.html.markdown | 22 +++ 4 files changed, 304 insertions(+) create mode 100644 azurerm/internal/services/containers/validate/duration.go diff --git a/azurerm/internal/services/containers/kubernetes_cluster_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_resource.go index 3683d1b86c79..20aa7da1c4a0 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_resource.go @@ -71,6 +71,67 @@ func resourceArmKubernetesCluster() *schema.Resource { "default_node_pool": SchemaDefaultNodePool(), + "auto_scaler_profile": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "scan_interval": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: containerValidate.Duration, + }, + "scale_down_delay_after_add": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: containerValidate.Duration, + }, + "scale_down_delay_after_delete": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: containerValidate.Duration, + }, + "scale_down_delay_after_failure": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: containerValidate.Duration, + }, + "scale_down_unneeded": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: containerValidate.Duration, + }, + "scale_down_unready": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: containerValidate.Duration, + }, + "scale_down_utilization_treshold": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "max_graceful_termination_sec": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + // "balance_similar_node_groups": { + // Type: schema.TypeBool, + // Required: true, + // }, + }, + }, + }, + // Optional "addon_profile": schemaKubernetesAddOnProfiles(), @@ -622,6 +683,9 @@ func resourceArmKubernetesClusterCreate(d *schema.ResourceData, meta interface{} enablePodSecurityPolicy := d.Get("enable_pod_security_policy").(bool) + autoScalerProfileRaw := d.Get("auto_scaler_profile").([]interface{}) + autoScalerProfile := expandKubernetesClusterAutoScalerProfile(autoScalerProfileRaw) + parameters := containerservice.ManagedCluster{ Name: &name, Location: &location, @@ -630,6 +694,7 @@ func resourceArmKubernetesClusterCreate(d *schema.ResourceData, meta interface{} AadProfile: azureADProfile, AddonProfiles: *addonProfiles, AgentPoolProfiles: agentProfiles, + AutoScalerProfile: autoScalerProfile, DNSPrefix: utils.String(dnsPrefix), EnableRBAC: utils.Bool(rbacEnabled), KubernetesVersion: utils.String(kubernetesVersion), @@ -810,6 +875,15 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} } } + if d.HasChange("auto_scaler_profile") { + updateCluster = true + autoScalerProfileRaw := d.Get("auto_scaler_profile").(*schema.Set).List() + + autoScalerProfile := expandKubernetesClusterAutoScalerProfile(autoScalerProfileRaw) + + 
existing.ManagedClusterProperties.AutoScalerProfile = autoScalerProfile + } + if d.HasChange("enable_pod_security_policy") { updateCluster = true enablePodSecurityPolicy := d.Get("enable_pod_security_policy").(bool) @@ -989,6 +1063,11 @@ func resourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{}) return fmt.Errorf("setting `addon_profile`: %+v", err) } + autoScalerProfile := flattenKubernetesClusterAutoScalerProfile(props.AutoScalerProfile) + if err := d.Set("auto_scaler_profile", autoScalerProfile); err != nil { + return fmt.Errorf("setting `auto_scaler_profile`: %+v", err) + } + flattenedDefaultNodePool, err := FlattenDefaultNodePool(props.AgentPoolProfiles, d) if err != nil { return fmt.Errorf("flattening `default_node_pool`: %+v", err) @@ -1648,3 +1727,100 @@ func flattenKubernetesClusterManagedClusterIdentity(input *containerservice.Mana return []interface{}{identity} } + +func flattenKubernetesClusterAutoScalerProfile(profile *containerservice.ManagedClusterPropertiesAutoScalerProfile) []interface{} { + if profile == nil { + return []interface{}{} + } + + scanInterval := "" + if profile.ScanInterval != nil { + scanInterval = *profile.ScanInterval + } + + scaleDownDelayAfterAdd := "" + if profile.ScaleDownDelayAfterAdd != nil { + scaleDownDelayAfterAdd = *profile.ScaleDownDelayAfterAdd + } + + scaleDownDelayAfterDelete := "" + if profile.ScaleDownDelayAfterDelete != nil { + scaleDownDelayAfterDelete = *profile.ScaleDownDelayAfterDelete + } + + scaleDownDelayAfterFailure := "" + if profile.ScaleDownDelayAfterFailure != nil { + scaleDownDelayAfterFailure = *profile.ScaleDownDelayAfterFailure + } + + scaleDownUnneededTime := "" + if profile.ScaleDownUnneededTime != nil { + scaleDownUnneededTime = *profile.ScaleDownUnneededTime + } + + scaleDownUnreadyTime := "" + if profile.ScaleDownUnreadyTime != nil { + scaleDownUnreadyTime = *profile.ScaleDownUnreadyTime + } + + scaleDownUtilizationThreshold := "" + if profile.ScaleDownUtilizationThreshold != nil { + scaleDownUtilizationThreshold = *profile.ScaleDownUtilizationThreshold + } + + maxGracefulTerminationSec := "" + if profile.MaxGracefulTerminationSec != nil { + maxGracefulTerminationSec = *profile.MaxGracefulTerminationSec + } + + // balanceSimilarNodeGroups := false + // if profile.BalanceSimilarNodeGroups != nil { + // balanceSimilarNodeGroups = *profile.BalanceSimilarNodeGroups + // } + + return []interface{}{ + map[string]interface{}{ + "scan_interval": scanInterval, + "scale_down_delay_after_add": scaleDownDelayAfterAdd, + "scale_down_delay_after_delete": scaleDownDelayAfterDelete, + "scale_down_delay_after_failure": scaleDownDelayAfterFailure, + "scale_down_unneeded": scaleDownUnneededTime, + "scale_down_unready": scaleDownUnreadyTime, + "scale_down_utilization_treshold": scaleDownUtilizationThreshold, + "max_graceful_termination_sec": maxGracefulTerminationSec, + // "balance_similar_node_groups": balanceSimilarNodeGroups, + }, + } +} + +func expandKubernetesClusterAutoScalerProfile(input []interface{}) *containerservice.ManagedClusterPropertiesAutoScalerProfile { + if len(input) == 0 { + return nil + } + + config := input[0].(map[string]interface{}) + + // balanceSimilarNodeGroups := config["balance_similar_node_groups"].(bool) + scanInterval := config["scan_interval"].(string) + scaleDownDelayAfterAdd := config["scale_down_delay_after_add"].(string) + scaleDownDelayAfterDelete := config["scale_down_delay_after_delete"].(string) + scaleDownDelayAfterFailure := config["scale_down_delay_after_failure"].(string) + 
scaleDownUnneededTime := config["scale_down_unneeded"].(string) + scaleDownUnreadyTime := config["scale_down_unready"].(string) + scaleDownUtilizationThreshold := config["scale_down_utilization_treshold"].(string) + maxGracefulTerminationSec := config["max_graceful_termination_sec"].(string) + + autoScalerProfile := containerservice.ManagedClusterPropertiesAutoScalerProfile{ + // BalanceSimilarNodeGroups: utils.Bool(balanceSimilarNodeGroups), + ScanInterval: utils.String(scanInterval), + ScaleDownDelayAfterAdd: utils.String(scaleDownDelayAfterAdd), + ScaleDownDelayAfterDelete: utils.String(scaleDownDelayAfterDelete), + ScaleDownDelayAfterFailure: utils.String(scaleDownDelayAfterFailure), + ScaleDownUnneededTime: utils.String(scaleDownUnneededTime), + ScaleDownUnreadyTime: utils.String(scaleDownUnreadyTime), + ScaleDownUtilizationThreshold: utils.String(scaleDownUtilizationThreshold), + MaxGracefulTerminationSec: utils.String(maxGracefulTerminationSec), + } + + return &autoScalerProfile +} diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go index 6cca8a17a752..e3a2814ea5e2 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go @@ -132,6 +132,14 @@ func testAccAzureRMKubernetesCluster_autoScalingNodeCountUnset(t *testing.T) { resource.TestCheckResourceAttr(data.ResourceName, "default_node_pool.0.min_count", "2"), resource.TestCheckResourceAttr(data.ResourceName, "default_node_pool.0.max_count", "4"), resource.TestCheckResourceAttr(data.ResourceName, "default_node_pool.0.enable_auto_scaling", "true"), + resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.max_graceful_termination_sec", "600"), + resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_delay_after_add", "10m"), + resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_delay_after_delete", "10s"), + resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_delay_after_failure", "3m"), + resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_unneeded", "10m"), + resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_unready", "20m"), + resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_utilization_treshold", "0.5"), + resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scan_interval", "10s"), ), }, data.ImportStep(), @@ -198,6 +206,39 @@ func testAccAzureRMKubernetesCluster_autoScalingWithAvailabilityZones(t *testing }) } +func TestAccAzureRMKubernetesCluster_autoScalingProfile(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccAzureRMKubernetesCluster_autoScalingProfile(t) +} + +func testAccAzureRMKubernetesCluster_autoScalingProfile(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesCluster_autoScalingProfileConfig(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(data.ResourceName), + 
resource.TestCheckResourceAttr(data.ResourceName, "default_node_pool.0.enable_auto_scaling", "true"), + resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.max_graceful_termination_sec", "15"), + resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_delay_after_add", "10m"), + resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_delay_after_delete", "10s"), + resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_delay_after_failure", "15m"), + resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_unneeded", "15m"), + resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_unready", "15m"), + resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_utilization_treshold", "0.5"), + resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scan_interval", "10s"), + ), + }, + data.ImportStep(), + }, + }) +} + func testAccAzureRMKubernetesCluster_addAgentConfig(data acceptance.TestData, numberOfAgents int) string { return fmt.Sprintf(` provider "azurerm" { @@ -411,3 +452,48 @@ resource "azurerm_kubernetes_cluster" "test" { } `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, olderKubernetesVersion) } + +func testAccAzureRMKubernetesCluster_autoScalingProfileConfig(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-aks-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + kubernetes_version = "%s" + + default_node_pool { + name = "default" + enable_auto_scaling = true + min_count = 2 + max_count = 4 + vm_size = "Standard_DS2_v2" + } + + auto_scaler_profile { + scan_interval = "10s" + scale_down_delay_after_add = "10m" + scale_down_delay_after_delete = "10s" + scale_down_delay_after_failure = "15m" + scale_down_unneeded = "15m" + scale_down_unready = "15m" + scale_down_utilization_treshold = "0.5" + max_graceful_termination_sec = 15 + // balance_similar_node_groups = "true" + } + + identity { + type = "SystemAssigned" + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, currentKubernetesVersion) +} diff --git a/azurerm/internal/services/containers/validate/duration.go b/azurerm/internal/services/containers/validate/duration.go new file mode 100644 index 000000000000..01c2dc0028ec --- /dev/null +++ b/azurerm/internal/services/containers/validate/duration.go @@ -0,0 +1,20 @@ +package validate + +import ( + "fmt" + "time" +) + +func Duration(i interface{}, k string) (warnings []string, errors []error) { + value := i.(string) + duration, err := time.ParseDuration(value) + if err != nil { + errors = append(errors, fmt.Errorf( + "%q cannot be parsed as a duration: %s", k, err)) + } + if duration < 0 { + errors = append(errors, fmt.Errorf( + "%q must be greater than zero", k)) + } + return warnings, errors +} diff --git a/website/docs/d/kubernetes_cluster.html.markdown b/website/docs/d/kubernetes_cluster.html.markdown index 9ca4dff14065..3fd3465997fd 100644 --- a/website/docs/d/kubernetes_cluster.html.markdown +++ b/website/docs/d/kubernetes_cluster.html.markdown @@ -143,6 +143,28 @@ A `azure_active_directory` block exports the 
following: --- +A `auto_scaler_profile` block exports the following: + +* `scan_interval` - (Defaults to `10s`) How often the AKS cluster is reevaluated for scale up or down. + +* `scale_down_delay_after_add` - (Defaults to `10m`) How long after the scale up of AKS nodes the scale down evaluation resumes. + +* `scale_down_delay_after_delete` - (Defaults to `scan_interval`) How long after node deletion that scale down evaluation resumes. + +* `scale_down_delay_after_failure` - (Defaults to `3m`) How long after scale down failure that scale down evaluation resumes. + +* `scale_down_unneeded` - (Defaults to `10m`) How long a node should be unneeded before it is eligible for scale down. + +* `scale_down_unready` - (Defaults to `20m`) How long an unready node should be unneeded before it is eligible for scale down. + +* `scale_down_utilization_treshold` - (Defaults to `0.5`) Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down + +* `max_graceful_termination_sec` - (Defaults to `600`) Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node. + + + +--- + A `http_application_routing` block exports the following: * `enabled` - Is HTTP Application Routing Enabled? From 0188f09d3541bf17a2eb874a89d1855c5d8be337 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 29 May 2020 13:05:45 +0200 Subject: [PATCH 11/55] r/kubernetes_cluster: fixing pr comments --- .../containers/kubernetes_cluster_resource.go | 77 +++++++++---------- ...ubernetes_cluster_scaling_resource_test.go | 22 +++--- .../docs/d/kubernetes_cluster.html.markdown | 23 ------ .../docs/r/kubernetes_cluster.html.markdown | 28 ++++++- 4 files changed, 75 insertions(+), 75 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_resource.go index 20aa7da1c4a0..1c22a1f24c7d 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_resource.go @@ -78,6 +78,16 @@ func resourceArmKubernetesCluster() *schema.Resource { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + // "balance_similar_node_groups": { + // Type: schema.TypeBool, + // Optional: true, + // Default: false, + // }, + "max_graceful_termination_sec": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, "scan_interval": { Type: schema.TypeString, Optional: true, @@ -114,20 +124,11 @@ func resourceArmKubernetesCluster() *schema.Resource { Computed: true, ValidateFunc: containerValidate.Duration, }, - "scale_down_utilization_treshold": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "max_graceful_termination_sec": { + "scale_down_utilization_threshold": { Type: schema.TypeString, Optional: true, Computed: true, }, - // "balance_similar_node_groups": { - // Type: schema.TypeBool, - // Required: true, - // }, }, }, }, @@ -877,10 +878,9 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} if d.HasChange("auto_scaler_profile") { updateCluster = true - autoScalerProfileRaw := d.Get("auto_scaler_profile").(*schema.Set).List() + autoScalerProfileRaw := d.Get("auto_scaler_profile").([]interface{}) autoScalerProfile := expandKubernetesClusterAutoScalerProfile(autoScalerProfileRaw) - existing.ManagedClusterProperties.AutoScalerProfile = autoScalerProfile } @@ -1733,9 +1733,14 @@ func 
flattenKubernetesClusterAutoScalerProfile(profile *containerservice.Managed return []interface{}{} } - scanInterval := "" - if profile.ScanInterval != nil { - scanInterval = *profile.ScanInterval + // balanceSimilarNodeGroups := false + // if profile.BalanceSimilarNodeGroups != nil { + // balanceSimilarNodeGroups = *profile.BalanceSimilarNodeGroups + // } + + maxGracefulTerminationSec := "" + if profile.MaxGracefulTerminationSec != nil { + maxGracefulTerminationSec = *profile.MaxGracefulTerminationSec } scaleDownDelayAfterAdd := "" @@ -1768,27 +1773,22 @@ func flattenKubernetesClusterAutoScalerProfile(profile *containerservice.Managed scaleDownUtilizationThreshold = *profile.ScaleDownUtilizationThreshold } - maxGracefulTerminationSec := "" - if profile.MaxGracefulTerminationSec != nil { - maxGracefulTerminationSec = *profile.MaxGracefulTerminationSec + scanInterval := "" + if profile.ScanInterval != nil { + scanInterval = *profile.ScanInterval } - // balanceSimilarNodeGroups := false - // if profile.BalanceSimilarNodeGroups != nil { - // balanceSimilarNodeGroups = *profile.BalanceSimilarNodeGroups - // } - return []interface{}{ map[string]interface{}{ - "scan_interval": scanInterval, - "scale_down_delay_after_add": scaleDownDelayAfterAdd, - "scale_down_delay_after_delete": scaleDownDelayAfterDelete, - "scale_down_delay_after_failure": scaleDownDelayAfterFailure, - "scale_down_unneeded": scaleDownUnneededTime, - "scale_down_unready": scaleDownUnreadyTime, - "scale_down_utilization_treshold": scaleDownUtilizationThreshold, - "max_graceful_termination_sec": maxGracefulTerminationSec, // "balance_similar_node_groups": balanceSimilarNodeGroups, + "max_graceful_termination_sec": maxGracefulTerminationSec, + "scale_down_delay_after_add": scaleDownDelayAfterAdd, + "scale_down_delay_after_delete": scaleDownDelayAfterDelete, + "scale_down_delay_after_failure": scaleDownDelayAfterFailure, + "scale_down_unneeded": scaleDownUnneededTime, + "scale_down_unready": scaleDownUnreadyTime, + "scale_down_utilization_threshold": scaleDownUtilizationThreshold, + "scan_interval": scanInterval, }, } } @@ -1800,27 +1800,26 @@ func expandKubernetesClusterAutoScalerProfile(input []interface{}) *containerser config := input[0].(map[string]interface{}) + // TODO: re-enable once the sdk's been upgraded // balanceSimilarNodeGroups := config["balance_similar_node_groups"].(bool) - scanInterval := config["scan_interval"].(string) + maxGracefulTerminationSec := config["max_graceful_termination_sec"].(string) scaleDownDelayAfterAdd := config["scale_down_delay_after_add"].(string) scaleDownDelayAfterDelete := config["scale_down_delay_after_delete"].(string) scaleDownDelayAfterFailure := config["scale_down_delay_after_failure"].(string) scaleDownUnneededTime := config["scale_down_unneeded"].(string) scaleDownUnreadyTime := config["scale_down_unready"].(string) - scaleDownUtilizationThreshold := config["scale_down_utilization_treshold"].(string) - maxGracefulTerminationSec := config["max_graceful_termination_sec"].(string) + scaleDownUtilizationThreshold := config["scale_down_utilization_threshold"].(string) + scanInterval := config["scan_interval"].(string) - autoScalerProfile := containerservice.ManagedClusterPropertiesAutoScalerProfile{ + return &containerservice.ManagedClusterPropertiesAutoScalerProfile{ // BalanceSimilarNodeGroups: utils.Bool(balanceSimilarNodeGroups), - ScanInterval: utils.String(scanInterval), + MaxGracefulTerminationSec: utils.String(maxGracefulTerminationSec), ScaleDownDelayAfterAdd: 
utils.String(scaleDownDelayAfterAdd), ScaleDownDelayAfterDelete: utils.String(scaleDownDelayAfterDelete), ScaleDownDelayAfterFailure: utils.String(scaleDownDelayAfterFailure), ScaleDownUnneededTime: utils.String(scaleDownUnneededTime), ScaleDownUnreadyTime: utils.String(scaleDownUnreadyTime), ScaleDownUtilizationThreshold: utils.String(scaleDownUtilizationThreshold), - MaxGracefulTerminationSec: utils.String(maxGracefulTerminationSec), + ScanInterval: utils.String(scanInterval), } - - return &autoScalerProfile } diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go index e3a2814ea5e2..d630c0ff204e 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go @@ -138,7 +138,7 @@ func testAccAzureRMKubernetesCluster_autoScalingNodeCountUnset(t *testing.T) { resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_delay_after_failure", "3m"), resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_unneeded", "10m"), resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_unready", "20m"), - resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_utilization_treshold", "0.5"), + resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_utilization_threshold", "0.5"), resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scan_interval", "10s"), ), }, @@ -230,7 +230,7 @@ func testAccAzureRMKubernetesCluster_autoScalingProfile(t *testing.T) { resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_delay_after_failure", "15m"), resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_unneeded", "15m"), resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_unready", "15m"), - resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_utilization_treshold", "0.5"), + resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_utilization_threshold", "0.5"), resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scan_interval", "10s"), ), }, @@ -480,15 +480,15 @@ resource "azurerm_kubernetes_cluster" "test" { } auto_scaler_profile { - scan_interval = "10s" - scale_down_delay_after_add = "10m" - scale_down_delay_after_delete = "10s" - scale_down_delay_after_failure = "15m" - scale_down_unneeded = "15m" - scale_down_unready = "15m" - scale_down_utilization_treshold = "0.5" - max_graceful_termination_sec = 15 - // balance_similar_node_groups = "true" + # balance_similar_node_groups = "true" + max_graceful_termination_sec = 15 + scan_interval = "10s" + scale_down_delay_after_add = "10m" + scale_down_delay_after_delete = "10s" + scale_down_delay_after_failure = "15m" + scale_down_unneeded = "15m" + scale_down_unready = "15m" + scale_down_utilization_threshold = "0.5" } identity { diff --git a/website/docs/d/kubernetes_cluster.html.markdown b/website/docs/d/kubernetes_cluster.html.markdown index 3fd3465997fd..379afe69f072 100644 --- a/website/docs/d/kubernetes_cluster.html.markdown +++ b/website/docs/d/kubernetes_cluster.html.markdown @@ -130,7 +130,6 @@ A `agent_pool_profile` block exports the following: * `vnet_subnet_id` - The ID of the 
Subnet where the Agents in the Pool are provisioned. - --- A `azure_active_directory` block exports the following: @@ -143,28 +142,6 @@ A `azure_active_directory` block exports the following: --- -A `auto_scaler_profile` block exports the following: - -* `scan_interval` - (Defaults to `10s`) How often the AKS cluster is reevaluated for scale up or down. - -* `scale_down_delay_after_add` - (Defaults to `10m`) How long after the scale up of AKS nodes the scale down evaluation resumes. - -* `scale_down_delay_after_delete` - (Defaults to `scan_interval`) How long after node deletion that scale down evaluation resumes. - -* `scale_down_delay_after_failure` - (Defaults to `3m`) How long after scale down failure that scale down evaluation resumes. - -* `scale_down_unneeded` - (Defaults to `10m`) How long a node should be unneeded before it is eligible for scale down. - -* `scale_down_unready` - (Defaults to `20m`) How long an unready node should be unneeded before it is eligible for scale down. - -* `scale_down_utilization_treshold` - (Defaults to `0.5`) Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down - -* `max_graceful_termination_sec` - (Defaults to `600`) Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node. - - - ---- - A `http_application_routing` block exports the following: * `enabled` - Is HTTP Application Routing Enabled? diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 78c797b0d3df..226a19c98c7e 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -70,7 +70,7 @@ The following arguments are supported: -> **NOTE:** The `dns_prefix` must contain between 3 and 45 characters, and can contain only letters, numbers, and hyphens. It must start with a letter and must end with a letter or a number. -In addition, one of either `identity` or `service_principal` must be specified. +In addition, one of either `identity` or `service_principal` blocks must be specified. --- @@ -78,6 +78,8 @@ In addition, one of either `identity` or `service_principal` must be specified. * `api_server_authorized_ip_ranges` - (Optional) The IP ranges to whitelist for incoming traffic to the masters. +* `auto_scaler_profile` - (Optional) A `auto_scaler_profile` block as defined below. + * `enable_pod_security_policy` - (Optional) Whether Pod Security Policies are enabled. Note that this also requires role based access control to be enabled. -> **NOTE:** Support for `enable_pod_security_policy` is currently in Preview on an opt-in basis. To use it, enable feature `PodSecurityPolicyPreview` for `namespace Microsoft.ContainerService`. For an example of how to enable a Preview feature, please visit [Register scale set feature provider](https://docs.microsoft.com/en-us/azure/aks/cluster-autoscaler#register-scale-set-feature-provider). @@ -160,6 +162,29 @@ A `addon_profile` block supports the following: * `oms_agent` - (Optional) A `oms_agent` block as defined below. For more details, please visit [How to onboard Azure Monitor for containers](https://docs.microsoft.com/en-us/azure/monitoring/monitoring-container-insights-onboard). + +--- + +A `auto_scaler_profile` block supports the following: + +* `balance_similar_node_groups` - Detect similar node groups and balance the number of nodes between them. Defaults to `false`. 
+ +* `max_graceful_termination_sec` - Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node. Defaults to `600`. + +* `scale_down_delay_after_add` - How long after the scale up of AKS nodes the scale down evaluation resumes. Defaults to `10m`. + +* `scale_down_delay_after_delete` - How long after node deletion that scale down evaluation resumes. Defaults to the value used for `scan_interval`. + +* `scale_down_delay_after_failure` - How long after scale down failure that scale down evaluation resumes. Defaults to `3m`. + +* `scan_interval` - How often the AKS Cluster should be re-evaluated for scale up/down. Defaults to `10s`. + +* `scale_down_unneeded` - How long a node should be unneeded before it is eligible for scale down. Defaults to `10m`. + +* `scale_down_unready` - How long an unready node should be unneeded before it is eligible for scale down. Defaults to `20m`. + +* `scale_down_utilization_threshold` - Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down. Defaults to `0.5`. + --- A `azure_active_directory` block supports the following: @@ -172,7 +197,6 @@ A `azure_active_directory` block supports the following: * `tenant_id` - (Optional) The Tenant ID used for Azure Active Directory Application. If this isn't specified the Tenant ID of the current Subscription is used. - --- A `azure_policy` block supports the following: From e222692352517133f336632e7aaa85faf57373df Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 29 May 2020 13:32:12 +0200 Subject: [PATCH 12/55] r/kubernetes_cluster: grouping optional fields --- .../containers/kubernetes_cluster_resource.go | 34 +++++++++---------- .../docs/r/kubernetes_cluster.html.markdown | 4 +-- 2 files changed, 18 insertions(+), 20 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_resource.go index 1c22a1f24c7d..fca4ff249639 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_resource.go @@ -71,6 +71,18 @@ func resourceArmKubernetesCluster() *schema.Resource { "default_node_pool": SchemaDefaultNodePool(), + // Optional + "addon_profile": schemaKubernetesAddOnProfiles(), + + "api_server_authorized_ip_ranges": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validate.CIDR, + }, + }, + "auto_scaler_profile": { Type: schema.TypeList, Optional: true, @@ -133,23 +145,6 @@ func resourceArmKubernetesCluster() *schema.Resource { }, }, - // Optional - "addon_profile": schemaKubernetesAddOnProfiles(), - - "api_server_authorized_ip_ranges": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validate.CIDR, - }, - }, - - "private_fqdn": { - Type: schema.TypeString, - Computed: true, - }, - "enable_pod_security_policy": { Type: schema.TypeBool, Optional: true, @@ -392,6 +387,11 @@ func resourceArmKubernetesCluster() *schema.Resource { ForceNew: true, }, + "private_fqdn": { + Type: schema.TypeString, + Computed: true, + }, + "private_link_enabled": { Type: schema.TypeBool, Optional: true, diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 226a19c98c7e..5ea0fe0b7efc 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ 
b/website/docs/r/kubernetes_cluster.html.markdown @@ -62,9 +62,7 @@ The following arguments are supported: * `resource_group_name` - (Required) Specifies the Resource Group where the Managed Kubernetes Cluster should exist. Changing this forces a new resource to be created. -* `default_node_pool` - (Optional) A `default_node_pool` block as defined below. - --> **NOTE:** The `default_node_pool` block will become required in 2.0 +* `default_node_pool` - (Required) A `default_node_pool` block as defined below. * `dns_prefix` - (Required) DNS prefix specified when creating the managed cluster. Changing this forces a new resource to be created. From 0af18ac8f8514fe3ffb7084b9df49c8192cb3812 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 29 May 2020 13:35:14 +0200 Subject: [PATCH 13/55] dependencies: updating to use 2020-03 of the containerservice api --- .../services/containers/client/client.go | 2 +- .../services/containers/kubernetes_addons.go | 2 +- .../kubernetes_cluster_data_source.go | 2 +- .../kubernetes_cluster_node_pool_resource.go | 2 +- .../containers/kubernetes_cluster_resource.go | 2 +- .../containers/kubernetes_cluster_validate.go | 2 +- .../containers/kubernetes_nodepool.go | 2 +- .../resource_arm_container_service.go | 2 +- .../containerservice/agentpools.go | 14 +-- .../containerservice/client.go | 0 .../containerservice/containerservices.go | 0 .../containerservice/managedclusters.go | 37 ++++---- .../containerservice/models.go | 86 ++++++++++++++++++- .../openshiftmanagedclusters.go | 0 .../containerservice/operations.go | 2 +- .../containerservice/version.go | 2 +- vendor/modules.txt | 2 +- 17 files changed, 116 insertions(+), 43 deletions(-) rename vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/{2020-02-01 => 2020-03-01}/containerservice/agentpools.go (99%) rename vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/{2020-02-01 => 2020-03-01}/containerservice/client.go (100%) rename vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/{2020-02-01 => 2020-03-01}/containerservice/containerservices.go (100%) rename vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/{2020-02-01 => 2020-03-01}/containerservice/managedclusters.go (98%) rename vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/{2020-02-01 => 2020-03-01}/containerservice/models.go (98%) rename vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/{2020-02-01 => 2020-03-01}/containerservice/openshiftmanagedclusters.go (100%) rename vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/{2020-02-01 => 2020-03-01}/containerservice/operations.go (99%) rename vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/{2020-02-01 => 2020-03-01}/containerservice/version.go (99%) diff --git a/azurerm/internal/services/containers/client/client.go b/azurerm/internal/services/containers/client/client.go index 3efd11999271..5e2cbf13e905 100644 --- a/azurerm/internal/services/containers/client/client.go +++ b/azurerm/internal/services/containers/client/client.go @@ -3,7 +3,7 @@ package client import ( "github.com/Azure/azure-sdk-for-go/services/containerinstance/mgmt/2018-10-01/containerinstance" "github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2018-09-01/containerregistry" - "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice" + 
"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice" "github.com/Azure/go-autorest/autorest/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/common" ) diff --git a/azurerm/internal/services/containers/kubernetes_addons.go b/azurerm/internal/services/containers/kubernetes_addons.go index 28d9e64b9409..8bde80eb4efd 100644 --- a/azurerm/internal/services/containers/kubernetes_addons.go +++ b/azurerm/internal/services/containers/kubernetes_addons.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice" + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice" "github.com/Azure/go-autorest/autorest/azure" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/helper/validation" diff --git a/azurerm/internal/services/containers/kubernetes_cluster_data_source.go b/azurerm/internal/services/containers/kubernetes_cluster_data_source.go index 736a6a1b63cf..b44fd61f4eae 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_data_source.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_data_source.go @@ -5,7 +5,7 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice" + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/kubernetes" diff --git a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go index bbb06ae2acba..2d718f8ab6cc 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice" + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" diff --git a/azurerm/internal/services/containers/kubernetes_cluster_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_resource.go index fca4ff249639..e0866940441d 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_resource.go @@ -6,7 +6,7 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice" + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" diff --git a/azurerm/internal/services/containers/kubernetes_cluster_validate.go b/azurerm/internal/services/containers/kubernetes_cluster_validate.go index 902aae968cd8..83b9430de483 100644 --- 
a/azurerm/internal/services/containers/kubernetes_cluster_validate.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_validate.go @@ -6,7 +6,7 @@ import ( "net/http" "strings" - "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice" + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/client" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" diff --git a/azurerm/internal/services/containers/kubernetes_nodepool.go b/azurerm/internal/services/containers/kubernetes_nodepool.go index d1947ce1d373..e78dc8bd6078 100644 --- a/azurerm/internal/services/containers/kubernetes_nodepool.go +++ b/azurerm/internal/services/containers/kubernetes_nodepool.go @@ -3,7 +3,7 @@ package containers import ( "fmt" - "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice" + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/helper/validation" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" diff --git a/azurerm/internal/services/containers/resource_arm_container_service.go b/azurerm/internal/services/containers/resource_arm_container_service.go index e1a896b20304..2ddbb4eea738 100644 --- a/azurerm/internal/services/containers/resource_arm_container_service.go +++ b/azurerm/internal/services/containers/resource_arm_container_service.go @@ -7,7 +7,7 @@ import ( "log" "time" - "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice" + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice" "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice/agentpools.go b/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice/agentpools.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice/agentpools.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice/agentpools.go index a7e112f0d182..37ff511cff7a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice/agentpools.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice/agentpools.go @@ -70,7 +70,7 @@ func (client AgentPoolsClient) CreateOrUpdate(ctx context.Context, resourceGroup Constraints: []validation.Constraint{{Target: "parameters.ManagedClusterAgentPoolProfileProperties", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.ManagedClusterAgentPoolProfileProperties.Count", Name: validation.Null, Rule: true, Chain: []validation.Constraint{{Target: "parameters.ManagedClusterAgentPoolProfileProperties.Count", Name: validation.InclusiveMaximum, Rule: int64(100), Chain: nil}, - {Target: "parameters.ManagedClusterAgentPoolProfileProperties.Count", Name: 
validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, + {Target: "parameters.ManagedClusterAgentPoolProfileProperties.Count", Name: validation.InclusiveMinimum, Rule: int64(0), Chain: nil}, }}, }}}}}); err != nil { return result, validation.NewError("containerservice.AgentPoolsClient", "CreateOrUpdate", err.Error()) @@ -100,7 +100,7 @@ func (client AgentPoolsClient) CreateOrUpdatePreparer(ctx context.Context, resou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-02-01" + const APIVersion = "2020-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -190,7 +190,7 @@ func (client AgentPoolsClient) DeletePreparer(ctx context.Context, resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-02-01" + const APIVersion = "2020-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -283,7 +283,7 @@ func (client AgentPoolsClient) GetPreparer(ctx context.Context, resourceGroupNam "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-02-01" + const APIVersion = "2020-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -369,7 +369,7 @@ func (client AgentPoolsClient) GetAvailableAgentPoolVersionsPreparer(ctx context "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-02-01" + const APIVersion = "2020-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -458,7 +458,7 @@ func (client AgentPoolsClient) GetUpgradeProfilePreparer(ctx context.Context, re "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-02-01" + const APIVersion = "2020-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -546,7 +546,7 @@ func (client AgentPoolsClient) ListPreparer(ctx context.Context, resourceGroupNa "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-02-01" + const APIVersion = "2020-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice/client.go similarity index 100% rename from vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice/client.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice/client.go diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice/containerservices.go b/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice/containerservices.go similarity index 100% rename from vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice/containerservices.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice/containerservices.go diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice/managedclusters.go b/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice/managedclusters.go similarity index 98% rename from 
vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice/managedclusters.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice/managedclusters.go index b64582bb359e..730430139951 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice/managedclusters.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice/managedclusters.go @@ -106,10 +106,6 @@ func (client ManagedClustersClient) CreateOrUpdate(ctx context.Context, resource }}, }}, }}, - {Target: "parameters.ManagedClusterProperties.AadProfile", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.ManagedClusterProperties.AadProfile.ClientAppID", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.ManagedClusterProperties.AadProfile.ServerAppID", Name: validation.Null, Rule: true, Chain: nil}, - }}, }}}}}); err != nil { return result, validation.NewError("containerservice.ManagedClustersClient", "CreateOrUpdate", err.Error()) } @@ -137,7 +133,7 @@ func (client ManagedClustersClient) CreateOrUpdatePreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-02-01" + const APIVersion = "2020-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -225,7 +221,7 @@ func (client ManagedClustersClient) DeletePreparer(ctx context.Context, resource "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-02-01" + const APIVersion = "2020-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -316,7 +312,7 @@ func (client ManagedClustersClient) GetPreparer(ctx context.Context, resourceGro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-02-01" + const APIVersion = "2020-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -405,7 +401,7 @@ func (client ManagedClustersClient) GetAccessProfilePreparer(ctx context.Context "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-02-01" + const APIVersion = "2020-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -492,7 +488,7 @@ func (client ManagedClustersClient) GetUpgradeProfilePreparer(ctx context.Contex "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-02-01" + const APIVersion = "2020-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -565,7 +561,7 @@ func (client ManagedClustersClient) ListPreparer(ctx context.Context) (*http.Req "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-02-01" + const APIVersion = "2020-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -684,7 +680,7 @@ func (client ManagedClustersClient) ListByResourceGroupPreparer(ctx context.Cont "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-02-01" + const APIVersion = "2020-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -808,7 +804,7 @@ func (client ManagedClustersClient) ListClusterAdminCredentialsPreparer(ctx cont "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-02-01" + const APIVersion = 
"2020-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -895,7 +891,7 @@ func (client ManagedClustersClient) ListClusterMonitoringUserCredentialsPreparer "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-02-01" + const APIVersion = "2020-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -982,7 +978,7 @@ func (client ManagedClustersClient) ListClusterUserCredentialsPreparer(ctx conte "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-02-01" + const APIVersion = "2020-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1036,10 +1032,7 @@ func (client ManagedClustersClient) ResetAADProfile(ctx context.Context, resourc {TargetValue: resourceName, Constraints: []validation.Constraint{{Target: "resourceName", Name: validation.MaxLength, Rule: 63, Chain: nil}, {Target: "resourceName", Name: validation.MinLength, Rule: 1, Chain: nil}, - {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.ClientAppID", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.ServerAppID", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + {Target: "resourceName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$`, Chain: nil}}}}); err != nil { return result, validation.NewError("containerservice.ManagedClustersClient", "ResetAADProfile", err.Error()) } @@ -1066,7 +1059,7 @@ func (client ManagedClustersClient) ResetAADProfilePreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-02-01" + const APIVersion = "2020-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1156,7 +1149,7 @@ func (client ManagedClustersClient) ResetServicePrincipalProfilePreparer(ctx con "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-02-01" + const APIVersion = "2020-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1243,7 +1236,7 @@ func (client ManagedClustersClient) RotateClusterCertificatesPreparer(ctx contex "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-02-01" + const APIVersion = "2020-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1329,7 +1322,7 @@ func (client ManagedClustersClient) UpdateTagsPreparer(ctx context.Context, reso "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2020-02-01" + const APIVersion = "2020-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice/models.go similarity index 98% rename from vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice/models.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice/models.go index 8b7e5cdcf6ca..6de54428e10a 100644 --- 
a/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice/models.go @@ -28,7 +28,22 @@ import ( ) // The package's fully qualified name. -const fqdn = "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice" +const fqdn = "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice" + +// AgentPoolMode enumerates the values for agent pool mode. +type AgentPoolMode string + +const ( + // System ... + System AgentPoolMode = "System" + // User ... + User AgentPoolMode = "User" +) + +// PossibleAgentPoolModeValues returns an array of possible values for the AgentPoolMode const type. +func PossibleAgentPoolModeValues() []AgentPoolMode { + return []AgentPoolMode{System, User} +} // AgentPoolType enumerates the values for agent pool type. type AgentPoolType string @@ -75,6 +90,34 @@ func PossibleLoadBalancerSkuValues() []LoadBalancerSku { return []LoadBalancerSku{Basic, Standard} } +// ManagedClusterSKUName enumerates the values for managed cluster sku name. +type ManagedClusterSKUName string + +const ( + // ManagedClusterSKUNameBasic ... + ManagedClusterSKUNameBasic ManagedClusterSKUName = "Basic" +) + +// PossibleManagedClusterSKUNameValues returns an array of possible values for the ManagedClusterSKUName const type. +func PossibleManagedClusterSKUNameValues() []ManagedClusterSKUName { + return []ManagedClusterSKUName{ManagedClusterSKUNameBasic} +} + +// ManagedClusterSKUTier enumerates the values for managed cluster sku tier. +type ManagedClusterSKUTier string + +const ( + // Free ... + Free ManagedClusterSKUTier = "Free" + // Paid ... + Paid ManagedClusterSKUTier = "Paid" +) + +// PossibleManagedClusterSKUTierValues returns an array of possible values for the ManagedClusterSKUTier const type. +func PossibleManagedClusterSKUTierValues() []ManagedClusterSKUTier { + return []ManagedClusterSKUTier{Free, Paid} +} + // NetworkMode enumerates the values for network mode. type NetworkMode string @@ -1534,6 +1577,8 @@ type ManagedCluster struct { *ManagedClusterProperties `json:"properties,omitempty"` // Identity - The identity of the managed cluster, if configured. Identity *ManagedClusterIdentity `json:"identity,omitempty"` + // Sku - The managed cluster SKU. + Sku *ManagedClusterSKU `json:"sku,omitempty"` // ID - READ-ONLY; Resource Id ID *string `json:"id,omitempty"` // Name - READ-ONLY; Resource name @@ -1555,6 +1600,9 @@ func (mc ManagedCluster) MarshalJSON() ([]byte, error) { if mc.Identity != nil { objectMap["identity"] = mc.Identity } + if mc.Sku != nil { + objectMap["sku"] = mc.Sku + } if mc.Location != nil { objectMap["location"] = mc.Location } @@ -1591,6 +1639,15 @@ func (mc *ManagedCluster) UnmarshalJSON(body []byte) error { } mc.Identity = &identity } + case "sku": + if v != nil { + var sku ManagedClusterSKU + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + mc.Sku = &sku + } case "id": if v != nil { var ID string @@ -1644,6 +1701,10 @@ func (mc *ManagedCluster) UnmarshalJSON(body []byte) error { // ManagedClusterAADProfile aADProfile specifies attributes for Azure Active Directory integration. type ManagedClusterAADProfile struct { + // Managed - Whether to enable managed AAD. + Managed *bool `json:"managed,omitempty"` + // AdminGroupObjectIDs - AAD group object IDs that will have admin role of the cluster. 
+ AdminGroupObjectIDs *[]string `json:"adminGroupObjectIDs,omitempty"` // ClientAppID - The client AAD application ID. ClientAppID *string `json:"clientAppID,omitempty"` // ServerAppID - The server AAD application ID. @@ -1791,7 +1852,7 @@ type ManagedClusterAddonProfileIdentity struct { type ManagedClusterAgentPoolProfile struct { // Name - Unique name of the agent pool profile in the context of the subscription and resource group. Name *string `json:"name,omitempty"` - // Count - Number of agents (VMs) to host docker containers. Allowed values must be in the range of 1 to 100 (inclusive). The default value is 1. + // Count - Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 100 (inclusive). The default value is 1. Count *int32 `json:"count,omitempty"` // VMSize - Size of agent VMs. Possible values include: 'VMSizeTypesStandardA1', 'VMSizeTypesStandardA10', 'VMSizeTypesStandardA11', 'VMSizeTypesStandardA1V2', 'VMSizeTypesStandardA2', 'VMSizeTypesStandardA2V2', 'VMSizeTypesStandardA2mV2', 'VMSizeTypesStandardA3', 'VMSizeTypesStandardA4', 'VMSizeTypesStandardA4V2', 'VMSizeTypesStandardA4mV2', 'VMSizeTypesStandardA5', 'VMSizeTypesStandardA6', 'VMSizeTypesStandardA7', 'VMSizeTypesStandardA8', 'VMSizeTypesStandardA8V2', 'VMSizeTypesStandardA8mV2', 'VMSizeTypesStandardA9', 'VMSizeTypesStandardB2ms', 'VMSizeTypesStandardB2s', 'VMSizeTypesStandardB4ms', 'VMSizeTypesStandardB8ms', 'VMSizeTypesStandardD1', 'VMSizeTypesStandardD11', 'VMSizeTypesStandardD11V2', 'VMSizeTypesStandardD11V2Promo', 'VMSizeTypesStandardD12', 'VMSizeTypesStandardD12V2', 'VMSizeTypesStandardD12V2Promo', 'VMSizeTypesStandardD13', 'VMSizeTypesStandardD13V2', 'VMSizeTypesStandardD13V2Promo', 'VMSizeTypesStandardD14', 'VMSizeTypesStandardD14V2', 'VMSizeTypesStandardD14V2Promo', 'VMSizeTypesStandardD15V2', 'VMSizeTypesStandardD16V3', 'VMSizeTypesStandardD16sV3', 'VMSizeTypesStandardD1V2', 'VMSizeTypesStandardD2', 'VMSizeTypesStandardD2V2', 'VMSizeTypesStandardD2V2Promo', 'VMSizeTypesStandardD2V3', 'VMSizeTypesStandardD2sV3', 'VMSizeTypesStandardD3', 'VMSizeTypesStandardD32V3', 'VMSizeTypesStandardD32sV3', 'VMSizeTypesStandardD3V2', 'VMSizeTypesStandardD3V2Promo', 'VMSizeTypesStandardD4', 'VMSizeTypesStandardD4V2', 'VMSizeTypesStandardD4V2Promo', 'VMSizeTypesStandardD4V3', 'VMSizeTypesStandardD4sV3', 'VMSizeTypesStandardD5V2', 'VMSizeTypesStandardD5V2Promo', 'VMSizeTypesStandardD64V3', 'VMSizeTypesStandardD64sV3', 'VMSizeTypesStandardD8V3', 'VMSizeTypesStandardD8sV3', 'VMSizeTypesStandardDS1', 'VMSizeTypesStandardDS11', 'VMSizeTypesStandardDS11V2', 'VMSizeTypesStandardDS11V2Promo', 'VMSizeTypesStandardDS12', 'VMSizeTypesStandardDS12V2', 'VMSizeTypesStandardDS12V2Promo', 'VMSizeTypesStandardDS13', 'VMSizeTypesStandardDS132V2', 'VMSizeTypesStandardDS134V2', 'VMSizeTypesStandardDS13V2', 'VMSizeTypesStandardDS13V2Promo', 'VMSizeTypesStandardDS14', 'VMSizeTypesStandardDS144V2', 'VMSizeTypesStandardDS148V2', 'VMSizeTypesStandardDS14V2', 'VMSizeTypesStandardDS14V2Promo', 'VMSizeTypesStandardDS15V2', 'VMSizeTypesStandardDS1V2', 'VMSizeTypesStandardDS2', 'VMSizeTypesStandardDS2V2', 'VMSizeTypesStandardDS2V2Promo', 'VMSizeTypesStandardDS3', 'VMSizeTypesStandardDS3V2', 'VMSizeTypesStandardDS3V2Promo', 'VMSizeTypesStandardDS4', 'VMSizeTypesStandardDS4V2', 'VMSizeTypesStandardDS4V2Promo', 'VMSizeTypesStandardDS5V2', 'VMSizeTypesStandardDS5V2Promo', 'VMSizeTypesStandardE16V3', 'VMSizeTypesStandardE16sV3', 'VMSizeTypesStandardE2V3', 'VMSizeTypesStandardE2sV3', 'VMSizeTypesStandardE3216sV3', 
'VMSizeTypesStandardE328sV3', 'VMSizeTypesStandardE32V3', 'VMSizeTypesStandardE32sV3', 'VMSizeTypesStandardE4V3', 'VMSizeTypesStandardE4sV3', 'VMSizeTypesStandardE6416sV3', 'VMSizeTypesStandardE6432sV3', 'VMSizeTypesStandardE64V3', 'VMSizeTypesStandardE64sV3', 'VMSizeTypesStandardE8V3', 'VMSizeTypesStandardE8sV3', 'VMSizeTypesStandardF1', 'VMSizeTypesStandardF16', 'VMSizeTypesStandardF16s', 'VMSizeTypesStandardF16sV2', 'VMSizeTypesStandardF1s', 'VMSizeTypesStandardF2', 'VMSizeTypesStandardF2s', 'VMSizeTypesStandardF2sV2', 'VMSizeTypesStandardF32sV2', 'VMSizeTypesStandardF4', 'VMSizeTypesStandardF4s', 'VMSizeTypesStandardF4sV2', 'VMSizeTypesStandardF64sV2', 'VMSizeTypesStandardF72sV2', 'VMSizeTypesStandardF8', 'VMSizeTypesStandardF8s', 'VMSizeTypesStandardF8sV2', 'VMSizeTypesStandardG1', 'VMSizeTypesStandardG2', 'VMSizeTypesStandardG3', 'VMSizeTypesStandardG4', 'VMSizeTypesStandardG5', 'VMSizeTypesStandardGS1', 'VMSizeTypesStandardGS2', 'VMSizeTypesStandardGS3', 'VMSizeTypesStandardGS4', 'VMSizeTypesStandardGS44', 'VMSizeTypesStandardGS48', 'VMSizeTypesStandardGS5', 'VMSizeTypesStandardGS516', 'VMSizeTypesStandardGS58', 'VMSizeTypesStandardH16', 'VMSizeTypesStandardH16m', 'VMSizeTypesStandardH16mr', 'VMSizeTypesStandardH16r', 'VMSizeTypesStandardH8', 'VMSizeTypesStandardH8m', 'VMSizeTypesStandardL16s', 'VMSizeTypesStandardL32s', 'VMSizeTypesStandardL4s', 'VMSizeTypesStandardL8s', 'VMSizeTypesStandardM12832ms', 'VMSizeTypesStandardM12864ms', 'VMSizeTypesStandardM128ms', 'VMSizeTypesStandardM128s', 'VMSizeTypesStandardM6416ms', 'VMSizeTypesStandardM6432ms', 'VMSizeTypesStandardM64ms', 'VMSizeTypesStandardM64s', 'VMSizeTypesStandardNC12', 'VMSizeTypesStandardNC12sV2', 'VMSizeTypesStandardNC12sV3', 'VMSizeTypesStandardNC24', 'VMSizeTypesStandardNC24r', 'VMSizeTypesStandardNC24rsV2', 'VMSizeTypesStandardNC24rsV3', 'VMSizeTypesStandardNC24sV2', 'VMSizeTypesStandardNC24sV3', 'VMSizeTypesStandardNC6', 'VMSizeTypesStandardNC6sV2', 'VMSizeTypesStandardNC6sV3', 'VMSizeTypesStandardND12s', 'VMSizeTypesStandardND24rs', 'VMSizeTypesStandardND24s', 'VMSizeTypesStandardND6s', 'VMSizeTypesStandardNV12', 'VMSizeTypesStandardNV24', 'VMSizeTypesStandardNV6' VMSize VMSizeTypes `json:"vmSize,omitempty"` @@ -1811,6 +1872,8 @@ type ManagedClusterAgentPoolProfile struct { EnableAutoScaling *bool `json:"enableAutoScaling,omitempty"` // Type - AgentPoolType represents types of an agent pool. Possible values include: 'VirtualMachineScaleSets', 'AvailabilitySet' Type AgentPoolType `json:"type,omitempty"` + // Mode - AgentPoolMode represents mode of an agent pool. Possible values include: 'System', 'User' + Mode AgentPoolMode `json:"mode,omitempty"` // OrchestratorVersion - Version of orchestrator specified when creating the managed cluster. OrchestratorVersion *string `json:"orchestratorVersion,omitempty"` // ProvisioningState - READ-ONLY; The current deployment or provisioning state, which only appears in the response. @@ -1869,6 +1932,9 @@ func (mcapp ManagedClusterAgentPoolProfile) MarshalJSON() ([]byte, error) { if mcapp.Type != "" { objectMap["type"] = mcapp.Type } + if mcapp.Mode != "" { + objectMap["mode"] = mcapp.Mode + } if mcapp.OrchestratorVersion != nil { objectMap["orchestratorVersion"] = mcapp.OrchestratorVersion } @@ -1901,7 +1967,7 @@ func (mcapp ManagedClusterAgentPoolProfile) MarshalJSON() ([]byte, error) { // ManagedClusterAgentPoolProfileProperties properties for the container service agent pool profile. 
type ManagedClusterAgentPoolProfileProperties struct { - // Count - Number of agents (VMs) to host docker containers. Allowed values must be in the range of 1 to 100 (inclusive). The default value is 1. + // Count - Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 100 (inclusive). The default value is 1. Count *int32 `json:"count,omitempty"` // VMSize - Size of agent VMs. Possible values include: 'VMSizeTypesStandardA1', 'VMSizeTypesStandardA10', 'VMSizeTypesStandardA11', 'VMSizeTypesStandardA1V2', 'VMSizeTypesStandardA2', 'VMSizeTypesStandardA2V2', 'VMSizeTypesStandardA2mV2', 'VMSizeTypesStandardA3', 'VMSizeTypesStandardA4', 'VMSizeTypesStandardA4V2', 'VMSizeTypesStandardA4mV2', 'VMSizeTypesStandardA5', 'VMSizeTypesStandardA6', 'VMSizeTypesStandardA7', 'VMSizeTypesStandardA8', 'VMSizeTypesStandardA8V2', 'VMSizeTypesStandardA8mV2', 'VMSizeTypesStandardA9', 'VMSizeTypesStandardB2ms', 'VMSizeTypesStandardB2s', 'VMSizeTypesStandardB4ms', 'VMSizeTypesStandardB8ms', 'VMSizeTypesStandardD1', 'VMSizeTypesStandardD11', 'VMSizeTypesStandardD11V2', 'VMSizeTypesStandardD11V2Promo', 'VMSizeTypesStandardD12', 'VMSizeTypesStandardD12V2', 'VMSizeTypesStandardD12V2Promo', 'VMSizeTypesStandardD13', 'VMSizeTypesStandardD13V2', 'VMSizeTypesStandardD13V2Promo', 'VMSizeTypesStandardD14', 'VMSizeTypesStandardD14V2', 'VMSizeTypesStandardD14V2Promo', 'VMSizeTypesStandardD15V2', 'VMSizeTypesStandardD16V3', 'VMSizeTypesStandardD16sV3', 'VMSizeTypesStandardD1V2', 'VMSizeTypesStandardD2', 'VMSizeTypesStandardD2V2', 'VMSizeTypesStandardD2V2Promo', 'VMSizeTypesStandardD2V3', 'VMSizeTypesStandardD2sV3', 'VMSizeTypesStandardD3', 'VMSizeTypesStandardD32V3', 'VMSizeTypesStandardD32sV3', 'VMSizeTypesStandardD3V2', 'VMSizeTypesStandardD3V2Promo', 'VMSizeTypesStandardD4', 'VMSizeTypesStandardD4V2', 'VMSizeTypesStandardD4V2Promo', 'VMSizeTypesStandardD4V3', 'VMSizeTypesStandardD4sV3', 'VMSizeTypesStandardD5V2', 'VMSizeTypesStandardD5V2Promo', 'VMSizeTypesStandardD64V3', 'VMSizeTypesStandardD64sV3', 'VMSizeTypesStandardD8V3', 'VMSizeTypesStandardD8sV3', 'VMSizeTypesStandardDS1', 'VMSizeTypesStandardDS11', 'VMSizeTypesStandardDS11V2', 'VMSizeTypesStandardDS11V2Promo', 'VMSizeTypesStandardDS12', 'VMSizeTypesStandardDS12V2', 'VMSizeTypesStandardDS12V2Promo', 'VMSizeTypesStandardDS13', 'VMSizeTypesStandardDS132V2', 'VMSizeTypesStandardDS134V2', 'VMSizeTypesStandardDS13V2', 'VMSizeTypesStandardDS13V2Promo', 'VMSizeTypesStandardDS14', 'VMSizeTypesStandardDS144V2', 'VMSizeTypesStandardDS148V2', 'VMSizeTypesStandardDS14V2', 'VMSizeTypesStandardDS14V2Promo', 'VMSizeTypesStandardDS15V2', 'VMSizeTypesStandardDS1V2', 'VMSizeTypesStandardDS2', 'VMSizeTypesStandardDS2V2', 'VMSizeTypesStandardDS2V2Promo', 'VMSizeTypesStandardDS3', 'VMSizeTypesStandardDS3V2', 'VMSizeTypesStandardDS3V2Promo', 'VMSizeTypesStandardDS4', 'VMSizeTypesStandardDS4V2', 'VMSizeTypesStandardDS4V2Promo', 'VMSizeTypesStandardDS5V2', 'VMSizeTypesStandardDS5V2Promo', 'VMSizeTypesStandardE16V3', 'VMSizeTypesStandardE16sV3', 'VMSizeTypesStandardE2V3', 'VMSizeTypesStandardE2sV3', 'VMSizeTypesStandardE3216sV3', 'VMSizeTypesStandardE328sV3', 'VMSizeTypesStandardE32V3', 'VMSizeTypesStandardE32sV3', 'VMSizeTypesStandardE4V3', 'VMSizeTypesStandardE4sV3', 'VMSizeTypesStandardE6416sV3', 'VMSizeTypesStandardE6432sV3', 'VMSizeTypesStandardE64V3', 'VMSizeTypesStandardE64sV3', 'VMSizeTypesStandardE8V3', 'VMSizeTypesStandardE8sV3', 'VMSizeTypesStandardF1', 'VMSizeTypesStandardF16', 'VMSizeTypesStandardF16s', 'VMSizeTypesStandardF16sV2', 
'VMSizeTypesStandardF1s', 'VMSizeTypesStandardF2', 'VMSizeTypesStandardF2s', 'VMSizeTypesStandardF2sV2', 'VMSizeTypesStandardF32sV2', 'VMSizeTypesStandardF4', 'VMSizeTypesStandardF4s', 'VMSizeTypesStandardF4sV2', 'VMSizeTypesStandardF64sV2', 'VMSizeTypesStandardF72sV2', 'VMSizeTypesStandardF8', 'VMSizeTypesStandardF8s', 'VMSizeTypesStandardF8sV2', 'VMSizeTypesStandardG1', 'VMSizeTypesStandardG2', 'VMSizeTypesStandardG3', 'VMSizeTypesStandardG4', 'VMSizeTypesStandardG5', 'VMSizeTypesStandardGS1', 'VMSizeTypesStandardGS2', 'VMSizeTypesStandardGS3', 'VMSizeTypesStandardGS4', 'VMSizeTypesStandardGS44', 'VMSizeTypesStandardGS48', 'VMSizeTypesStandardGS5', 'VMSizeTypesStandardGS516', 'VMSizeTypesStandardGS58', 'VMSizeTypesStandardH16', 'VMSizeTypesStandardH16m', 'VMSizeTypesStandardH16mr', 'VMSizeTypesStandardH16r', 'VMSizeTypesStandardH8', 'VMSizeTypesStandardH8m', 'VMSizeTypesStandardL16s', 'VMSizeTypesStandardL32s', 'VMSizeTypesStandardL4s', 'VMSizeTypesStandardL8s', 'VMSizeTypesStandardM12832ms', 'VMSizeTypesStandardM12864ms', 'VMSizeTypesStandardM128ms', 'VMSizeTypesStandardM128s', 'VMSizeTypesStandardM6416ms', 'VMSizeTypesStandardM6432ms', 'VMSizeTypesStandardM64ms', 'VMSizeTypesStandardM64s', 'VMSizeTypesStandardNC12', 'VMSizeTypesStandardNC12sV2', 'VMSizeTypesStandardNC12sV3', 'VMSizeTypesStandardNC24', 'VMSizeTypesStandardNC24r', 'VMSizeTypesStandardNC24rsV2', 'VMSizeTypesStandardNC24rsV3', 'VMSizeTypesStandardNC24sV2', 'VMSizeTypesStandardNC24sV3', 'VMSizeTypesStandardNC6', 'VMSizeTypesStandardNC6sV2', 'VMSizeTypesStandardNC6sV3', 'VMSizeTypesStandardND12s', 'VMSizeTypesStandardND24rs', 'VMSizeTypesStandardND24s', 'VMSizeTypesStandardND6s', 'VMSizeTypesStandardNV12', 'VMSizeTypesStandardNV24', 'VMSizeTypesStandardNV6' VMSize VMSizeTypes `json:"vmSize,omitempty"` @@ -1921,6 +1987,8 @@ type ManagedClusterAgentPoolProfileProperties struct { EnableAutoScaling *bool `json:"enableAutoScaling,omitempty"` // Type - AgentPoolType represents types of an agent pool. Possible values include: 'VirtualMachineScaleSets', 'AvailabilitySet' Type AgentPoolType `json:"type,omitempty"` + // Mode - AgentPoolMode represents mode of an agent pool. Possible values include: 'System', 'User' + Mode AgentPoolMode `json:"mode,omitempty"` // OrchestratorVersion - Version of orchestrator specified when creating the managed cluster. OrchestratorVersion *string `json:"orchestratorVersion,omitempty"` // ProvisioningState - READ-ONLY; The current deployment or provisioning state, which only appears in the response. @@ -1976,6 +2044,9 @@ func (mcappp ManagedClusterAgentPoolProfileProperties) MarshalJSON() ([]byte, er if mcappp.Type != "" { objectMap["type"] = mcappp.Type } + if mcappp.Mode != "" { + objectMap["mode"] = mcappp.Mode + } if mcappp.OrchestratorVersion != nil { objectMap["orchestratorVersion"] = mcappp.OrchestratorVersion } @@ -2328,6 +2399,7 @@ func (mcp ManagedClusterProperties) MarshalJSON() ([]byte, error) { // ManagedClusterPropertiesAutoScalerProfile parameters to be applied to the cluster-autoscaler when // enabled type ManagedClusterPropertiesAutoScalerProfile struct { + BalanceSimilarNodeGroups *string `json:"balance-similar-node-groups,omitempty"` ScanInterval *string `json:"scan-interval,omitempty"` ScaleDownDelayAfterAdd *string `json:"scale-down-delay-after-add,omitempty"` ScaleDownDelayAfterDelete *string `json:"scale-down-delay-after-delete,omitempty"` @@ -2409,6 +2481,14 @@ type ManagedClusterServicePrincipalProfile struct { Secret *string `json:"secret,omitempty"` } +// ManagedClusterSKU ... 
+type ManagedClusterSKU struct { + // Name - Name of a managed cluster SKU. Possible values include: 'ManagedClusterSKUNameBasic' + Name ManagedClusterSKUName `json:"name,omitempty"` + // Tier - Tier of a managed cluster SKU. Possible values include: 'Paid', 'Free' + Tier ManagedClusterSKUTier `json:"tier,omitempty"` +} + // ManagedClustersResetAADProfileFuture an abstraction for monitoring and retrieving the results of a // long-running operation. type ManagedClustersResetAADProfileFuture struct { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice/openshiftmanagedclusters.go b/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice/openshiftmanagedclusters.go similarity index 100% rename from vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice/openshiftmanagedclusters.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice/openshiftmanagedclusters.go diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice/operations.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice/operations.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice/operations.go index f8ca1cc7e919..cd09b8abfe44 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice/operations.go @@ -76,7 +76,7 @@ func (client OperationsClient) List(ctx context.Context) (result OperationListRe // ListPreparer prepares the List request. func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) { - const APIVersion = "2020-02-01" + const APIVersion = "2020-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice/version.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice/version.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice/version.go index c045340ce797..d24586c9ba1d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice/version.go @@ -21,7 +21,7 @@ import "github.com/Azure/azure-sdk-for-go/version" // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return "Azure-SDK-For-Go/" + Version() + " containerservice/2020-02-01" + return "Azure-SDK-For-Go/" + Version() + " containerservice/2020-03-01" } // Version returns the semantic version (see http://semver.org) of the client. 
diff --git a/vendor/modules.txt b/vendor/modules.txt index a17a03465fda..2337523f984d 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -20,7 +20,7 @@ github.com/Azure/azure-sdk-for-go/services/cognitiveservices/mgmt/2017-04-18/cog github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute github.com/Azure/azure-sdk-for-go/services/containerinstance/mgmt/2018-10-01/containerinstance github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2018-09-01/containerregistry -github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice +github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice github.com/Azure/azure-sdk-for-go/services/cosmos-db/mgmt/2015-04-08/documentdb github.com/Azure/azure-sdk-for-go/services/costmanagement/mgmt/2019-10-01/costmanagement github.com/Azure/azure-sdk-for-go/services/databricks/mgmt/2018-04-01/databricks From 5348f2a15456acffa19dde96b39487b12263b322 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 29 May 2020 13:56:01 +0200 Subject: [PATCH 14/55] r/kubernetes_cluster: support for the Paid SKU (a.k.a. Uptime SLA) Fixes #6912 --- .../containers/kubernetes_cluster_resource.go | 25 ++++++++ .../containers/kubernetes_nodepool.go | 6 ++ .../kubernetes_cluster_other_resource_test.go | 61 ++++++++++++++++++- .../docs/r/kubernetes_cluster.html.markdown | 4 ++ 4 files changed, 95 insertions(+), 1 deletion(-) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_resource.go index e0866940441d..cdae5ea49254 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_resource.go @@ -482,6 +482,21 @@ func resourceArmKubernetesCluster() *schema.Resource { }, }, + "sku_tier": { + Type: schema.TypeString, + Optional: true, + // @tombuildsstuff (2020-05-29) - Preview limitations: + // * Currently, cannot convert as existing cluster to enable the Uptime SLA. + // * Currently, there is no way to remove Uptime SLA from an AKS cluster after creation with it enabled. + // * Private clusters aren't currently supported. 
+ ForceNew: true, + Default: string(containerservice.Free), + ValidateFunc: validation.StringInSlice([]string{ + string(containerservice.Free), + string(containerservice.Paid), + }, false), + }, + "tags": tags.Schema(), "windows_profile": { @@ -690,6 +705,10 @@ func resourceArmKubernetesClusterCreate(d *schema.ResourceData, meta interface{} parameters := containerservice.ManagedCluster{ Name: &name, Location: &location, + Sku: &containerservice.ManagedClusterSKU{ + Name: containerservice.ManagedClusterSKUNameBasic, // the only possible value at this point + Tier: containerservice.ManagedClusterSKUTier(d.Get("sku_tier").(string)), + }, ManagedClusterProperties: &containerservice.ManagedClusterProperties{ APIServerAccessProfile: &apiAccessProfile, AadProfile: azureADProfile, @@ -1039,6 +1058,12 @@ func resourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{}) d.Set("location", azure.NormalizeLocation(*location)) } + skuTier := string(containerservice.Free) + if resp.Sku != nil && resp.Sku.Tier != "" { + skuTier = string(resp.Sku.Tier) + } + d.Set("sku_tier", skuTier) + if props := resp.ManagedClusterProperties; props != nil { d.Set("dns_prefix", props.DNSPrefix) d.Set("fqdn", props.Fqdn) diff --git a/azurerm/internal/services/containers/kubernetes_nodepool.go b/azurerm/internal/services/containers/kubernetes_nodepool.go index e78dc8bd6078..9696e30fb3e0 100644 --- a/azurerm/internal/services/containers/kubernetes_nodepool.go +++ b/azurerm/internal/services/containers/kubernetes_nodepool.go @@ -156,6 +156,7 @@ func ConvertDefaultNodePoolToAgentPool(input *[]containerservice.ManagedClusterA ScaleSetPriority: defaultCluster.ScaleSetPriority, ScaleSetEvictionPolicy: defaultCluster.ScaleSetEvictionPolicy, SpotMaxPrice: defaultCluster.SpotMaxPrice, + Mode: defaultCluster.Mode, NodeLabels: defaultCluster.NodeLabels, NodeTaints: defaultCluster.NodeTaints, Tags: defaultCluster.Tags, @@ -189,6 +190,11 @@ func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedC // Windows agents can be configured via the separate node pool resource OsType: containerservice.Linux, + // without this set the API returns: + // Code="MustDefineAtLeastOneSystemPool" Message="Must define at least one system pool." 
+ // since this is the "default" node pool we can assume this is a system node pool + Mode: containerservice.System, + // // TODO: support these in time // ScaleSetEvictionPolicy: "", // ScaleSetPriority: "", diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_other_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_other_resource_test.go index e7fea21e2160..7cd0ecf593f1 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_other_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_other_resource_test.go @@ -10,7 +10,7 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" ) -var kubernetesOtherTests = map[string]func(t *testing.T) { +var kubernetesOtherTests = map[string]func(t *testing.T){ "basicAvailabilitySet": testAccAzureRMKubernetesCluster_basicAvailabilitySet, "basicVMSS": testAccAzureRMKubernetesCluster_basicVMSS, "requiresImport": testAccAzureRMKubernetesCluster_requiresImport, @@ -18,6 +18,7 @@ var kubernetesOtherTests = map[string]func(t *testing.T) { "nodeLabels": testAccAzureRMKubernetesCluster_nodeLabels, "nodeTaints": testAccAzureRMKubernetesCluster_nodeTaints, "nodeResourceGroup": testAccAzureRMKubernetesCluster_nodeResourceGroup, + "paidSku": testAccAzureRMKubernetesCluster_paidSku, "upgradeConfig": testAccAzureRMKubernetesCluster_upgrade, "tags": testAccAzureRMKubernetesCluster_tags, "windowsProfile": testAccAzureRMKubernetesCluster_windowsProfile, @@ -326,6 +327,30 @@ func testAccAzureRMKubernetesCluster_nodeResourceGroup(t *testing.T) { }) } +func TestAccAzureRMKubernetesCluster_paidSku(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccAzureRMKubernetesCluster_paidSku(t) +} + +func testAccAzureRMKubernetesCluster_paidSku(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesCluster_paidSkuConfig(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(data.ResourceName), + ), + }, + data.ImportStep(), + }, + }) +} + func TestAccAzureRMKubernetesCluster_upgrade(t *testing.T) { checkIfShouldRunTestsIndividually(t) testAccAzureRMKubernetesCluster_upgrade(t) @@ -776,6 +801,40 @@ resource "azurerm_kubernetes_cluster" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger) } +func testAccAzureRMKubernetesCluster_paidSkuConfig(data acceptance.TestData) string { + // @tombuildsstuff (2020-05-29) - this is only supported in a handful of regions + // whilst in Preview - hard-coding for now + location := "westus2" // TODO: data.Locations.Primary + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-aks-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + sku_tier = "Paid" + + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + } + + identity { + type = "SystemAssigned" + } +} +`, data.RandomInteger, location, data.RandomInteger, 
data.RandomInteger) +} + func testAccAzureRMKubernetesCluster_tagsConfig(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 5ea0fe0b7efc..164613e68794 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -108,6 +108,10 @@ In addition, one of either `identity` or `service_principal` blocks must be spec -> **NOTE:** One of either `identity` or `service_principal` must be specified. +* `sku_tier` - (Optional) The SKU Tier that should be used for this Kubernetes Cluster. Changing this forces a new resource to be created. Possible values are `Free` and `Paid` (which includes the Uptime SLA). Defaults to `Free`. + +~> **Note:** This functionality is in Preview and has [several limitations](https://docs.microsoft.com/en-us/azure/aks/uptime-sla). + * `tags` - (Optional) A mapping of tags to assign to the resource. * `windows_profile` - (Optional) A `windows_profile` block as defined below. From 7b59502927df8ada84209d4763fec3568c78c678 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 29 May 2020 15:00:38 +0200 Subject: [PATCH 15/55] fixing broken tests from merge conflicts --- .../tests/kubernetes_cluster_network_resource_test.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_network_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_network_resource_test.go index 81784ae013f7..899cc287f4d2 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_network_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_network_resource_test.go @@ -1275,7 +1275,6 @@ resource "azurerm_kubernetes_cluster" "test" { location = azurerm_resource_group.test.location resource_group_name = azurerm_resource_group.test.name dns_prefix = "acctestaks%d" - kubernetes_version = "%s" linux_profile { admin_username = "acctestuser%d" @@ -1304,7 +1303,7 @@ resource "azurerm_kubernetes_cluster" "test" { load_balancer_sku = "Standard" } } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, currentKubernetesVersion, data.RandomInteger, currentKubernetesVersion) +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger) } func testAccAzureRMKubernetesCluster_standardLoadBalancerProfileConfig(data acceptance.TestData) string { @@ -1489,7 +1488,7 @@ resource "azurerm_kubernetes_cluster" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, currentKubernetesVersion, data.RandomInteger, clientId, clientSecret) } -func testAccAzureRMKubernetesCluster_basicLoadBalancerProfileConfig(data acceptance.TestData, clientId string, clientSecret string) string { +func testAccAzureRMKubernetesCluster_basicLoadBalancerProfileConfig(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { features {} From 2101cdfba8a0138aebefda089ed39ac0a1b41d92 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 29 May 2020 15:11:20 +0200 Subject: [PATCH 16/55] r/kubernetes_cluster: exposing the `balance_similar_node_groups` field now it's available --- 
.../containers/kubernetes_cluster_resource.go | 28 ++++++++++--------- ...ubernetes_cluster_scaling_resource_test.go | 22 +++++++-------- 2 files changed, 26 insertions(+), 24 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_resource.go index cdae5ea49254..8f9789109754 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_resource.go @@ -3,6 +3,7 @@ package containers import ( "fmt" "log" + "strconv" "strings" "time" @@ -90,11 +91,11 @@ func resourceArmKubernetesCluster() *schema.Resource { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - // "balance_similar_node_groups": { - // Type: schema.TypeBool, - // Optional: true, - // Default: false, - // }, + "balance_similar_node_groups": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, "max_graceful_termination_sec": { Type: schema.TypeString, Optional: true, @@ -1758,10 +1759,12 @@ func flattenKubernetesClusterAutoScalerProfile(profile *containerservice.Managed return []interface{}{} } - // balanceSimilarNodeGroups := false - // if profile.BalanceSimilarNodeGroups != nil { - // balanceSimilarNodeGroups = *profile.BalanceSimilarNodeGroups - // } + balanceSimilarNodeGroups := false + if profile.BalanceSimilarNodeGroups != nil { + // @tombuildsstuff: presumably this'll get converted to a Boolean at some point + // at any rate we should use the proper type users expect here + balanceSimilarNodeGroups = strings.EqualFold(*profile.BalanceSimilarNodeGroups, "true") + } maxGracefulTerminationSec := "" if profile.MaxGracefulTerminationSec != nil { @@ -1805,7 +1808,7 @@ func flattenKubernetesClusterAutoScalerProfile(profile *containerservice.Managed return []interface{}{ map[string]interface{}{ - // "balance_similar_node_groups": balanceSimilarNodeGroups, + "balance_similar_node_groups": balanceSimilarNodeGroups, "max_graceful_termination_sec": maxGracefulTerminationSec, "scale_down_delay_after_add": scaleDownDelayAfterAdd, "scale_down_delay_after_delete": scaleDownDelayAfterDelete, @@ -1825,8 +1828,7 @@ func expandKubernetesClusterAutoScalerProfile(input []interface{}) *containerser config := input[0].(map[string]interface{}) - // TODO: re-enable once the sdk's been upgraded - // balanceSimilarNodeGroups := config["balance_similar_node_groups"].(bool) + balanceSimilarNodeGroups := config["balance_similar_node_groups"].(bool) maxGracefulTerminationSec := config["max_graceful_termination_sec"].(string) scaleDownDelayAfterAdd := config["scale_down_delay_after_add"].(string) scaleDownDelayAfterDelete := config["scale_down_delay_after_delete"].(string) @@ -1837,7 +1839,7 @@ func expandKubernetesClusterAutoScalerProfile(input []interface{}) *containerser scanInterval := config["scan_interval"].(string) return &containerservice.ManagedClusterPropertiesAutoScalerProfile{ - // BalanceSimilarNodeGroups: utils.Bool(balanceSimilarNodeGroups), + BalanceSimilarNodeGroups: utils.String(strconv.FormatBool(balanceSimilarNodeGroups)), MaxGracefulTerminationSec: utils.String(maxGracefulTerminationSec), ScaleDownDelayAfterAdd: utils.String(scaleDownDelayAfterAdd), ScaleDownDelayAfterDelete: utils.String(scaleDownDelayAfterDelete), diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go index 
d630c0ff204e..08292ca31b10 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go @@ -8,16 +8,16 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" ) -var kubernetesScalingTests = map[string]func(t *testing.T) { - "addAgent": testAccAzureRMKubernetesCluster_addAgent, - "manualScaleIgnoreChanges": testAccAzureRMKubernetesCluster_manualScaleIgnoreChanges, - "removeAgent": testAccAzureRMKubernetesCluster_removeAgent, - "autoScalingEnabledError": testAccAzureRMKubernetesCluster_autoScalingError, - "autoScalingEnabledErrorMax": testAccAzureRMKubernetesCluster_autoScalingErrorMax, - "autoScalingEnabledErrorMin": testAccAzureRMKubernetesCluster_autoScalingErrorMin, - "autoScalingNodeCountUnset": testAccAzureRMKubernetesCluster_autoScalingNodeCountUnset, - "autoScalingNoAvailabilityZones": testAccAzureRMKubernetesCluster_autoScalingNoAvailabilityZones, - "autoScalingWithAvailabilityZones": testAccAzureRMKubernetesCluster_autoScalingWithAvailabilityZones, +var kubernetesScalingTests = map[string]func(t *testing.T){ + "addAgent": testAccAzureRMKubernetesCluster_addAgent, + "manualScaleIgnoreChanges": testAccAzureRMKubernetesCluster_manualScaleIgnoreChanges, + "removeAgent": testAccAzureRMKubernetesCluster_removeAgent, + "autoScalingEnabledError": testAccAzureRMKubernetesCluster_autoScalingError, + "autoScalingEnabledErrorMax": testAccAzureRMKubernetesCluster_autoScalingErrorMax, + "autoScalingEnabledErrorMin": testAccAzureRMKubernetesCluster_autoScalingErrorMin, + "autoScalingNodeCountUnset": testAccAzureRMKubernetesCluster_autoScalingNodeCountUnset, + "autoScalingNoAvailabilityZones": testAccAzureRMKubernetesCluster_autoScalingNoAvailabilityZones, + "autoScalingWithAvailabilityZones": testAccAzureRMKubernetesCluster_autoScalingWithAvailabilityZones, } func TestAccAzureRMKubernetesCluster_addAgent(t *testing.T) { @@ -480,7 +480,7 @@ resource "azurerm_kubernetes_cluster" "test" { } auto_scaler_profile { - # balance_similar_node_groups = "true" + balance_similar_node_groups = true max_graceful_termination_sec = 15 scan_interval = "10s" scale_down_delay_after_add = "10m" From ccd95cd3c23b8ab08a640554858a2026b89c42c2 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 29 May 2020 15:59:28 +0200 Subject: [PATCH 17/55] r/kubernetes_cluster_node_pool: support for configuring `mode` This allows for setting `mode` to `System` (or `User`) for definining secondary System node pools. 
Fixes #6058 --- .../kubernetes_cluster_node_pool_resource.go | 40 ++++++++++++----- ...ernetes_cluster_node_pool_resource_test.go | 44 +++++++++++++++++++ ...kubernetes_cluster_node_pool.html.markdown | 2 + 3 files changed, 76 insertions(+), 10 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go index 2d718f8ab6cc..05f0183d68de 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go @@ -55,9 +55,10 @@ func resourceArmKubernetesClusterNodePool() *schema.Resource { }, "node_count": { - Type: schema.TypeInt, - Optional: true, - Computed: true, + Type: schema.TypeInt, + Optional: true, + Computed: true, + // TODO: this can go to 0 after the next version of the Azure SDK ValidateFunc: validation.IntBetween(1, 100), }, @@ -103,6 +104,17 @@ func resourceArmKubernetesClusterNodePool() *schema.Resource { ForceNew: true, }, + "mode": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: string(containerservice.User), + ValidateFunc: validation.StringInSlice([]string{ + string(containerservice.System), + string(containerservice.User), + }, false), + }, + "min_count": { Type: schema.TypeInt, Optional: true, @@ -125,6 +137,13 @@ func resourceArmKubernetesClusterNodePool() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, }, + "orchestrator_version": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + "os_disk_size_gb": { Type: schema.TypeInt, Optional: true, @@ -150,13 +169,6 @@ func resourceArmKubernetesClusterNodePool() *schema.Resource { ForceNew: true, ValidateFunc: azure.ValidateResourceID, }, - - "orchestrator_version": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringIsNotEmpty, - }, }, } } @@ -218,6 +230,7 @@ func resourceArmKubernetesClusterNodePoolCreate(d *schema.ResourceData, meta int count := d.Get("node_count").(int) enableAutoScaling := d.Get("enable_auto_scaling").(bool) + mode := containerservice.AgentPoolMode(d.Get("mode").(string)) osType := d.Get("os_type").(string) t := d.Get("tags").(map[string]interface{}) vmSize := d.Get("vm_size").(string) @@ -226,6 +239,7 @@ func resourceArmKubernetesClusterNodePoolCreate(d *schema.ResourceData, meta int OsType: containerservice.OSType(osType), EnableAutoScaling: utils.Bool(enableAutoScaling), EnableNodePublicIP: utils.Bool(d.Get("enable_node_public_ip").(bool)), + Mode: mode, Tags: tags.Expand(t), Type: containerservice.VirtualMachineScaleSets, VMSize: containerservice.VMSizeTypes(vmSize), @@ -519,6 +533,12 @@ func resourceArmKubernetesClusterNodePoolRead(d *schema.ResourceData, meta inter } d.Set("min_count", minCount) + mode := string(containerservice.User) + if props.Mode != "" { + mode = string(props.Mode) + } + d.Set("mode", mode) + count := 0 if props.Count != nil { count = int(*props.Count) diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go index c19dd452bd73..3dd62bada21d 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go @@ -32,6 +32,7 @@ var 
kubernetesNodePoolTests = map[string]func(t *testing.T){ "nodeTaints": testAccAzureRMKubernetesClusterNodePool_nodeTaints, "requiresImport": testAccAzureRMKubernetesClusterNodePool_requiresImport, "osDiskSizeGB": testAccAzureRMKubernetesClusterNodePool_osDiskSizeGB, + "modeSystem": testAccAzureRMKubernetesClusterNodePool_modeSystem, "virtualNetworkAutomatic": testAccAzureRMKubernetesClusterNodePool_virtualNetworkAutomatic, "virtualNetworkManual": testAccAzureRMKubernetesClusterNodePool_virtualNetworkManual, "windows": testAccAzureRMKubernetesClusterNodePool_windows, @@ -396,6 +397,30 @@ func testAccAzureRMKubernetesClusterNodePool_manualScaleVMSku(t *testing.T) { }) } +func TestAccAzureRMKubernetesClusterNodePool_modeSystem(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccAzureRMKubernetesClusterNodePool_modeSystem(t) +} + +func testAccAzureRMKubernetesClusterNodePool_modeSystem(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesClusterNodePool_modeSystemConfig(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(data.ResourceName), + ), + }, + data.ImportStep(), + }, + }) +} + func TestAccAzureRMKubernetesClusterNodePool_nodeLabels(t *testing.T) { checkIfShouldRunTestsIndividually(t) testAccAzureRMKubernetesClusterNodePool_nodeLabels(t) @@ -1086,6 +1111,25 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" { `, template, sku) } +func testAccAzureRMKubernetesClusterNodePool_modeSystemConfig(data acceptance.TestData) string { + template := testAccAzureRMKubernetesClusterNodePool_templateConfig(data) + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +%s + +resource "azurerm_kubernetes_cluster_node_pool" "test" { + name = "internal" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_DS2_v2" + node_count = 1 + mode = "System" +} +`, template) +} + func testAccAzureRMKubernetesClusterNodePool_multiplePoolsConfig(data acceptance.TestData, numberOfAgents int) string { template := testAccAzureRMKubernetesClusterNodePool_templateConfig(data) return fmt.Sprintf(` diff --git a/website/docs/r/kubernetes_cluster_node_pool.html.markdown b/website/docs/r/kubernetes_cluster_node_pool.html.markdown index eebe1a764068..1c010bb2c618 100644 --- a/website/docs/r/kubernetes_cluster_node_pool.html.markdown +++ b/website/docs/r/kubernetes_cluster_node_pool.html.markdown @@ -76,6 +76,8 @@ The following arguments are supported: * `max_pods` - (Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created. +* `mode` - (Optional) Should this Node Pool be used for System or User resources? Possible values are `System` and `User`. Defaults to `User`. + * `node_labels` - (Optional) A map of Kubernetes labels which should be applied to nodes in this Node Pool. * `node_taints` - (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g `key=value:NoSchedule`). 
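As a quick illustration of the `mode` argument introduced in this patch — a sketch mirroring the `modeSystem` acceptance test config above, not part of the diff itself; the `azurerm_kubernetes_cluster.example` resource and the VM size are assumed to be defined elsewhere in the configuration:

```hcl
# Adds a secondary System node pool to an existing cluster via the new
# `mode` argument; when omitted, `mode` defaults to "User".
resource "azurerm_kubernetes_cluster_node_pool" "internal" {
  name                  = "internal"
  kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
  vm_size               = "Standard_DS2_v2"
  node_count            = 1
  mode                  = "System"
}
```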
From 8a217b87c9d89d0ed3ff351a21b14941d526ace4 Mon Sep 17 00:00:00 2001 From: Jose Luis Pedrosa Date: Wed, 27 May 2020 18:34:27 +0100 Subject: [PATCH 18/55] Add support for encrypted disks in aks Added disk_encryption_set to kubernetes_cluster resource and data --- .../kubernetes_cluster_data_source.go | 7 + .../containers/kubernetes_cluster_resource.go | 17 ++ .../kubernetes_cluster_other_resource_test.go | 149 ++++++++++++++++++ .../docs/d/kubernetes_cluster.html.markdown | 4 +- .../docs/r/kubernetes_cluster.html.markdown | 2 + 5 files changed, 178 insertions(+), 1 deletion(-) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_data_source.go b/azurerm/internal/services/containers/kubernetes_cluster_data_source.go index b44fd61f4eae..5e8f45833fc0 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_data_source.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_data_source.go @@ -236,6 +236,12 @@ func dataSourceArmKubernetesCluster() *schema.Resource { }, }, + "disk_encryption_set": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "private_link_enabled": { Type: schema.TypeBool, Computed: true, @@ -556,6 +562,7 @@ func dataSourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{} if props := resp.ManagedClusterProperties; props != nil { d.Set("dns_prefix", props.DNSPrefix) d.Set("fqdn", props.Fqdn) + d.Set("disk_encryption_set", props.DiskEncryptionSetID) d.Set("private_fqdn", props.PrivateFQDN) d.Set("kubernetes_version", props.KubernetesVersion) d.Set("node_resource_group", props.NodeResourceGroup) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_resource.go index 8f9789109754..6f6401e77897 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_resource.go @@ -393,6 +393,14 @@ func resourceArmKubernetesCluster() *schema.Resource { Computed: true, }, + "disk_encryption_set": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: azure.ValidateResourceID, + }, + "private_link_enabled": { Type: schema.TypeBool, Optional: true, @@ -649,6 +657,7 @@ func resourceArmKubernetesClusterCreate(d *schema.ResourceData, meta interface{} location := azure.NormalizeLocation(d.Get("location").(string)) dnsPrefix := d.Get("dns_prefix").(string) kubernetesVersion := d.Get("kubernetes_version").(string) + diskEncryptionSet := d.Get("disk_encryption_set").(string) linuxProfileRaw := d.Get("linux_profile").([]interface{}) linuxProfile := expandKubernetesClusterLinuxProfile(linuxProfileRaw) @@ -717,6 +726,7 @@ func resourceArmKubernetesClusterCreate(d *schema.ResourceData, meta interface{} AgentPoolProfiles: agentProfiles, AutoScalerProfile: autoScalerProfile, DNSPrefix: utils.String(dnsPrefix), + DiskEncryptionSetID: utils.String(diskEncryptionSet), EnableRBAC: utils.Bool(rbacEnabled), KubernetesVersion: utils.String(kubernetesVersion), LinuxProfile: linuxProfile, @@ -904,6 +914,12 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} existing.ManagedClusterProperties.AutoScalerProfile = autoScalerProfile } + if d.HasChange("disk_encryption_set") { + updateCluster = true + diskEncryptionSet := d.Get("disk_encryption_set").(string) + existing.ManagedClusterProperties.DiskEncryptionSetID = utils.String(diskEncryptionSet) + } + if d.HasChange("enable_pod_security_policy") { 
updateCluster = true enablePodSecurityPolicy := d.Get("enable_pod_security_policy").(bool) @@ -1069,6 +1085,7 @@ func resourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{}) d.Set("dns_prefix", props.DNSPrefix) d.Set("fqdn", props.Fqdn) d.Set("private_fqdn", props.PrivateFQDN) + d.Set("disk_encryption_set", props.DiskEncryptionSetID) d.Set("kubernetes_version", props.KubernetesVersion) d.Set("node_resource_group", props.NodeResourceGroup) d.Set("enable_pod_security_policy", props.EnablePodSecurityPolicy) diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_other_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_other_resource_test.go index 7cd0ecf593f1..f589c6bd9be6 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_other_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_other_resource_test.go @@ -448,6 +448,33 @@ func testAccAzureRMKubernetesCluster_windowsProfile(t *testing.T) { }) } +func TestAccAzureRMKubernetesCluster_diskEncryption(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccAzureRMKubernetesCluster_diskEncryption(t) +} + +func testAccAzureRMKubernetesCluster_diskEncryption(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesCluster_diskEncryptionConfig(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(data.ResourceName), + resource.TestCheckResourceAttrSet(data.ResourceName, "disk_encryption_set"), + ), + }, + data.ImportStep( + "windows_profile.0.admin_password", + ), + }, + }) +} + func testAccAzureRMKubernetesCluster_basicAvailabilitySetConfig(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { @@ -993,3 +1020,125 @@ resource "azurerm_kubernetes_cluster" "test" { } `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger) } + +func testAccAzureRMKubernetesCluster_diskEncryptionConfig(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +data "azurerm_client_config" "current" {} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-aks-%d" + location = "%s" +} + +resource "azurerm_key_vault" "test" { + name = "acctestkeyvault%s" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + tenant_id = data.azurerm_client_config.current.tenant_id + sku_name = "premium" + enabled_for_disk_encryption = true + soft_delete_enabled = true + purge_protection_enabled = true + + access_policy { + tenant_id = data.azurerm_client_config.current.tenant_id + object_id = data.azurerm_client_config.current.object_id + + key_permissions = [ + "get", + "create", + "delete" + ] + } +} + +resource "azurerm_key_vault_key" "test" { + name = "destestkey" + key_vault_id = azurerm_key_vault.test.id + key_type = "RSA" + key_size = 2048 + + key_opts = [ + "decrypt", + "encrypt", + "sign", + "unwrapKey", + "verify", + "wrapKey", + ] +} + +resource "azurerm_disk_encryption_set" "test" { + name = "acctestDES-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + key_vault_key_id = 
azurerm_key_vault_key.test.id + + identity { + type = "SystemAssigned" + } +} + +resource "azurerm_key_vault_access_policy" "disk-encryption-perm" { + key_vault_id = azurerm_key_vault.test.id + + tenant_id = azurerm_disk_encryption_set.test.identity.0.tenant_id + object_id = azurerm_disk_encryption_set.test.identity.0.principal_id + + key_permissions = [ + "get", + "wrapkey", + "unwrapkey", + ] +} + +resource "azurerm_role_assignment" "disk-encryption-read-keyvault" { + scope = azurerm_key_vault.test.id + role_definition_name = "Reader" + principal_id = azurerm_disk_encryption_set.test.identity.0.principal_id +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + disk_encryption_set = azurerm_disk_encryption_set.test.id + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + default_node_pool { + name = "np" + node_count = 3 + vm_size = "Standard_DS2_v2" + } + + identity { + type = "SystemAssigned" + } + + network_profile { + network_plugin = "azure" + network_policy = "azure" + dns_service_ip = "10.10.0.10" + docker_bridge_cidr = "172.18.0.1/16" + service_cidr = "10.10.0.0/16" + } + + depends_on = [ + azurerm_key_vault_access_policy.disk-encryption-perm + ] + +} +`, data.RandomInteger, data.Locations.Primary, data.RandomString, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger) +} diff --git a/website/docs/d/kubernetes_cluster.html.markdown b/website/docs/d/kubernetes_cluster.html.markdown index 379afe69f072..e91d74ab0576 100644 --- a/website/docs/d/kubernetes_cluster.html.markdown +++ b/website/docs/d/kubernetes_cluster.html.markdown @@ -66,6 +66,8 @@ The following attributes are exported: * `location` - The Azure Region in which the managed Kubernetes Cluster exists. +* `disk_encryption_set` - The id of the disk encruption set used for the nodes and volumes (only present if disk encryption enabled), See:[Documentation](https://docs.microsoft.com/en-us/azure/aks/azure-disk-customer-managed-keys) + * `linux_profile` - A `linux_profile` block as documented below. * `windows_profile` - A `windows_profile` block as documented below. @@ -80,7 +82,7 @@ The following attributes are exported: * `identity` - A `identity` block as documented below. -* `kubelet_identity` - A `kubelet_identity` block as documented below. +* `kubelet_identity` - A `kubelet_identity` block as documented below. * `tags` - A mapping of tags assigned to this resource. diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 164613e68794..5eb554d26e33 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -114,6 +114,8 @@ In addition, one of either `identity` or `service_principal` blocks must be spec * `tags` - (Optional) A mapping of tags to assign to the resource. +* `disk_encryption_set` - (Optional) Id of the disk encryption set used for the nodes. 
See:[Documentation](https://docs.microsoft.com/en-us/azure/aks/azure-disk-customer-managed-keys) + * `windows_profile` - (Optional) A `windows_profile` block as defined below. From 9b433d878c805ffea874dc8a450b19be9a7bb5d1 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 29 May 2020 17:33:20 +0200 Subject: [PATCH 19/55] d|r/kubernetes_cluster: comments from the pr --- .../kubernetes_cluster_data_source.go | 5 ++-- .../containers/kubernetes_cluster_resource.go | 28 ++++++++----------- .../kubernetes_cluster_other_resource_test.go | 13 +++++---- .../docs/d/kubernetes_cluster.html.markdown | 2 +- .../docs/r/kubernetes_cluster.html.markdown | 5 ++-- 5 files changed, 24 insertions(+), 29 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_data_source.go b/azurerm/internal/services/containers/kubernetes_cluster_data_source.go index 5e8f45833fc0..128ef8938ec5 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_data_source.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_data_source.go @@ -236,9 +236,8 @@ func dataSourceArmKubernetesCluster() *schema.Resource { }, }, - "disk_encryption_set": { + "disk_encryption_set_id": { Type: schema.TypeString, - Optional: true, Computed: true, }, @@ -562,7 +561,7 @@ func dataSourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{} if props := resp.ManagedClusterProperties; props != nil { d.Set("dns_prefix", props.DNSPrefix) d.Set("fqdn", props.Fqdn) - d.Set("disk_encryption_set", props.DiskEncryptionSetID) + d.Set("disk_encryption_set_id", props.DiskEncryptionSetID) d.Set("private_fqdn", props.PrivateFQDN) d.Set("kubernetes_version", props.KubernetesVersion) d.Set("node_resource_group", props.NodeResourceGroup) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_resource.go index 6f6401e77897..a2df83b8e032 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_resource.go @@ -17,6 +17,7 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features" + computeValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/parse" containerValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" @@ -146,6 +147,13 @@ func resourceArmKubernetesCluster() *schema.Resource { }, }, + "disk_encryption_set_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: computeValidate.DiskEncryptionSetID, + }, + "enable_pod_security_policy": { Type: schema.TypeBool, Optional: true, @@ -393,14 +401,6 @@ func resourceArmKubernetesCluster() *schema.Resource { Computed: true, }, - "disk_encryption_set": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: azure.ValidateResourceID, - }, - "private_link_enabled": { Type: schema.TypeBool, Optional: true, @@ -657,7 +657,6 @@ func resourceArmKubernetesClusterCreate(d *schema.ResourceData, meta interface{} location := 
azure.NormalizeLocation(d.Get("location").(string)) dnsPrefix := d.Get("dns_prefix").(string) kubernetesVersion := d.Get("kubernetes_version").(string) - diskEncryptionSet := d.Get("disk_encryption_set").(string) linuxProfileRaw := d.Get("linux_profile").([]interface{}) linuxProfile := expandKubernetesClusterLinuxProfile(linuxProfileRaw) @@ -726,7 +725,6 @@ func resourceArmKubernetesClusterCreate(d *schema.ResourceData, meta interface{} AgentPoolProfiles: agentProfiles, AutoScalerProfile: autoScalerProfile, DNSPrefix: utils.String(dnsPrefix), - DiskEncryptionSetID: utils.String(diskEncryptionSet), EnableRBAC: utils.Bool(rbacEnabled), KubernetesVersion: utils.String(kubernetesVersion), LinuxProfile: linuxProfile, @@ -760,6 +758,10 @@ func resourceArmKubernetesClusterCreate(d *schema.ResourceData, meta interface{} } } + if v, ok := d.GetOk("disk_encryption_set_id"); ok && v.(string) != "" { + parameters.ManagedClusterProperties.DiskEncryptionSetID = utils.String(v.(string)) + } + future, err := client.CreateOrUpdate(ctx, resGroup, name, parameters) if err != nil { return fmt.Errorf("creating Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resGroup, err) @@ -914,12 +916,6 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} existing.ManagedClusterProperties.AutoScalerProfile = autoScalerProfile } - if d.HasChange("disk_encryption_set") { - updateCluster = true - diskEncryptionSet := d.Get("disk_encryption_set").(string) - existing.ManagedClusterProperties.DiskEncryptionSetID = utils.String(diskEncryptionSet) - } - if d.HasChange("enable_pod_security_policy") { updateCluster = true enablePodSecurityPolicy := d.Get("enable_pod_security_policy").(bool) diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_other_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_other_resource_test.go index f589c6bd9be6..0233ff1fd9ab 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_other_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_other_resource_test.go @@ -1103,11 +1103,11 @@ resource "azurerm_role_assignment" "disk-encryption-read-keyvault" { } resource "azurerm_kubernetes_cluster" "test" { - name = "acctestaks%d" - location = azurerm_resource_group.test.location - resource_group_name = azurerm_resource_group.test.name - dns_prefix = "acctestaks%d" - disk_encryption_set = azurerm_disk_encryption_set.test.id + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + disk_encryption_set_id = azurerm_disk_encryption_set.test.id linux_profile { admin_username = "acctestuser%d" @@ -1136,7 +1136,8 @@ resource "azurerm_kubernetes_cluster" "test" { } depends_on = [ - azurerm_key_vault_access_policy.disk-encryption-perm + azurerm_key_vault_access_policy.disk-encryption-perm, + azurerm_role_assignment.disk-encryption-read-keyvault ] } diff --git a/website/docs/d/kubernetes_cluster.html.markdown b/website/docs/d/kubernetes_cluster.html.markdown index e91d74ab0576..066ff8dc03de 100644 --- a/website/docs/d/kubernetes_cluster.html.markdown +++ b/website/docs/d/kubernetes_cluster.html.markdown @@ -66,7 +66,7 @@ The following attributes are exported: * `location` - The Azure Region in which the managed Kubernetes Cluster exists. 
-* `disk_encryption_set` - The id of the disk encruption set used for the nodes and volumes (only present if disk encryption enabled), See:[Documentation](https://docs.microsoft.com/en-us/azure/aks/azure-disk-customer-managed-keys) +* `disk_encryption_set_id` - The ID of the Disk Encryption Set used for the Nodes and Volumes. * `linux_profile` - A `linux_profile` block as documented below. diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 5eb554d26e33..754466cdeee3 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -82,6 +82,8 @@ In addition, one of either `identity` or `service_principal` blocks must be spec -> **NOTE:** Support for `enable_pod_security_policy` is currently in Preview on an opt-in basis. To use it, enable feature `PodSecurityPolicyPreview` for `namespace Microsoft.ContainerService`. For an example of how to enable a Preview feature, please visit [Register scale set feature provider](https://docs.microsoft.com/en-us/azure/aks/cluster-autoscaler#register-scale-set-feature-provider). +* `disk_encryption_set_id` - (Optional) The ID of the Disk Encryption Set which should be used for the Nodes and Volumes. More information [can be found in the documentation](https://docs.microsoft.com/en-us/azure/aks/azure-disk-customer-managed-keys). + * `identity` - (Optional) A `identity` block as defined below. Changing this forces a new resource to be created. -> **NOTE:** One of either `identity` or `service_principal` must be specified. @@ -114,11 +116,8 @@ In addition, one of either `identity` or `service_principal` blocks must be spec * `tags` - (Optional) A mapping of tags to assign to the resource. -* `disk_encryption_set` - (Optional) Id of the disk encryption set used for the nodes. See:[Documentation](https://docs.microsoft.com/en-us/azure/aks/azure-disk-customer-managed-keys) - * `windows_profile` - (Optional) A `windows_profile` block as defined below. 
- --- A `aci_connector_linux` block supports the following: From 41171531a445ebf78e2be18fe77020500f19fcef Mon Sep 17 00:00:00 2001 From: Jose Luis Pedrosa Date: Mon, 18 May 2020 14:31:16 +0100 Subject: [PATCH 20/55] Enable AKS AAD integration v2 + SDK version bump --- .../kubernetes_cluster_data_source.go | 37 +++++++- .../containers/kubernetes_cluster_resource.go | 90 ++++++++++++++---- .../kubernetes_cluster_auth_resource_test.go | 92 +++++++++++++++++++ .../docs/d/kubernetes_cluster.html.markdown | 4 + .../docs/r/kubernetes_cluster.html.markdown | 14 ++- 5 files changed, 213 insertions(+), 24 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_data_source.go b/azurerm/internal/services/containers/kubernetes_cluster_data_source.go index 128ef8938ec5..2e93033f4785 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_data_source.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_data_source.go @@ -489,11 +489,24 @@ func dataSourceArmKubernetesCluster() *schema.Resource { Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "admin_group_object_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "client_app_id": { Type: schema.TypeString, Computed: true, }, + "managed": { + Type: schema.TypeBool, + Computed: true, + }, + "server_app_id": { Type: schema.TypeString, Computed: true, @@ -656,21 +669,35 @@ func flattenKubernetesClusterDataSourceRoleBasedAccessControl(input *containerse results := make([]interface{}, 0) if profile := input.AadProfile; profile != nil { - output := make(map[string]interface{}) + adminGroupObjectIds := utils.FlattenStringSlice(profile.AdminGroupObjectIDs) + clientAppId := "" if profile.ClientAppID != nil { - output["client_app_id"] = *profile.ClientAppID + clientAppId = *profile.ClientAppID } + managed := false + if profile.Managed != nil { + managed = *profile.Managed + } + + serverAppId := "" if profile.ServerAppID != nil { - output["server_app_id"] = *profile.ServerAppID + serverAppId = *profile.ServerAppID } + tenantId := "" if profile.TenantID != nil { - output["tenant_id"] = *profile.TenantID + tenantId = *profile.TenantID } - results = append(results, output) + results = append(results, map[string]interface{}{ + "admin_group_object_ids": adminGroupObjectIds, + "client_app_id": clientAppId, + "managed": managed, + "server_app_id": serverAppId, + "tenant_id": tenantId, + }) } return []interface{}{ diff --git a/azurerm/internal/services/containers/kubernetes_cluster_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_resource.go index a2df83b8e032..6f445bf9222a 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_resource.go @@ -438,19 +438,19 @@ func resourceArmKubernetesCluster() *schema.Resource { Schema: map[string]*schema.Schema{ "client_app_id": { Type: schema.TypeString, - Required: true, + Optional: true, ValidateFunc: validation.IsUUID, }, "server_app_id": { Type: schema.TypeString, - Required: true, + Optional: true, ValidateFunc: validation.IsUUID, }, "server_app_secret": { Type: schema.TypeString, - Required: true, + Optional: true, Sensitive: true, ValidateFunc: validation.StringIsNotEmpty, }, @@ -462,6 +462,21 @@ func resourceArmKubernetesCluster() *schema.Resource { // OrEmpty since this can be sourced from the client config if it's not specified ValidateFunc: validation.Any(validation.IsUUID, 
validation.StringIsEmpty), }, + + "managed": { + Type: schema.TypeBool, + Optional: true, + }, + + "admin_group_object_ids": { + Type: schema.TypeSet, + Optional: true, + ConfigMode: schema.SchemaConfigModeAttr, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.IsUUID, + }, + }, }, }, }, @@ -681,7 +696,10 @@ func resourceArmKubernetesClusterCreate(d *schema.ResourceData, meta interface{} } rbacRaw := d.Get("role_based_access_control").([]interface{}) - rbacEnabled, azureADProfile := expandKubernetesClusterRoleBasedAccessControl(rbacRaw, tenantId) + rbacEnabled, azureADProfile, err := expandKubernetesClusterRoleBasedAccessControl(rbacRaw, tenantId) + if err != nil { + return err + } t := d.Get("tags").(map[string]interface{}) @@ -860,7 +878,11 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} return fmt.Errorf("reading current state of RBAC Enabled, expected bool got %+v", props.EnableRBAC) } rbacRaw := d.Get("role_based_access_control").([]interface{}) - rbacEnabled, azureADProfile := expandKubernetesClusterRoleBasedAccessControl(rbacRaw, tenantId) + rbacEnabled, azureADProfile, err := expandKubernetesClusterRoleBasedAccessControl(rbacRaw, tenantId) + if err != nil { + return err + } + // changing rbacEnabled must still force cluster recreation if *props.EnableRBAC == rbacEnabled { props.AadProfile = azureADProfile @@ -1565,9 +1587,9 @@ func flattenKubernetesClusterNetworkProfile(profile *containerservice.NetworkPro } } -func expandKubernetesClusterRoleBasedAccessControl(input []interface{}, providerTenantId string) (bool, *containerservice.ManagedClusterAADProfile) { +func expandKubernetesClusterRoleBasedAccessControl(input []interface{}, providerTenantId string) (bool, *containerservice.ManagedClusterAADProfile, error) { if len(input) == 0 { - return false, nil + return false, nil, nil } val := input[0].(map[string]interface{}) @@ -1576,6 +1598,7 @@ func expandKubernetesClusterRoleBasedAccessControl(input []interface{}, provider azureADsRaw := val["azure_active_directory"].([]interface{}) var aad *containerservice.ManagedClusterAADProfile + if len(azureADsRaw) > 0 { azureAdRaw := azureADsRaw[0].(map[string]interface{}) @@ -1583,20 +1606,44 @@ func expandKubernetesClusterRoleBasedAccessControl(input []interface{}, provider serverAppId := azureAdRaw["server_app_id"].(string) serverAppSecret := azureAdRaw["server_app_secret"].(string) tenantId := azureAdRaw["tenant_id"].(string) + managed := azureAdRaw["managed"].(bool) + adminGroupObjectIdsRaw := azureAdRaw["admin_group_object_ids"].(*schema.Set).List() + adminGroupObjectIds := utils.ExpandStringSlice(adminGroupObjectIdsRaw) if tenantId == "" { tenantId = providerTenantId } - aad = &containerservice.ManagedClusterAADProfile{ - ClientAppID: utils.String(clientAppId), - ServerAppID: utils.String(serverAppId), - ServerAppSecret: utils.String(serverAppSecret), - TenantID: utils.String(tenantId), + if managed { + aad = &containerservice.ManagedClusterAADProfile{ + TenantID: utils.String(tenantId), + Managed: utils.Bool(managed), + AdminGroupObjectIDs: adminGroupObjectIds, + } + + if clientAppId != "" || serverAppId != "" || serverAppSecret != "" { + return false, nil, fmt.Errorf("Can't specify client_app_id or server_app_id or server_app_secret when using managed aad rbac (managed = true)") + } + } else { + aad = &containerservice.ManagedClusterAADProfile{ + ClientAppID: utils.String(clientAppId), + ServerAppID: utils.String(serverAppId), + ServerAppSecret: 
utils.String(serverAppSecret), + TenantID: utils.String(tenantId), + Managed: utils.Bool(managed), + } + + if len(*adminGroupObjectIds) > 0 { + return false, nil, fmt.Errorf("Can't specify admin_group_object_ids when using managed aad rbac (managed = false)") + } + + if clientAppId == "" || serverAppId == "" || serverAppSecret == "" { + return false, nil, fmt.Errorf("You must specify client_app_id and server_app_id and server_app_secret when using managed aad rbac (managed = false)") + } } } - return rbacEnabled, aad + return rbacEnabled, aad, nil } func expandKubernetesClusterManagedClusterIdentity(input []interface{}) *containerservice.ManagedClusterIdentity { @@ -1621,11 +1668,18 @@ func flattenKubernetesClusterRoleBasedAccessControl(input *containerservice.Mana results := make([]interface{}, 0) if profile := input.AadProfile; profile != nil { + adminGroupObjectIds := utils.FlattenStringSlice(profile.AdminGroupObjectIDs) + clientAppId := "" if profile.ClientAppID != nil { clientAppId = *profile.ClientAppID } + managed := false + if profile.Managed != nil { + managed = *profile.Managed + } + serverAppId := "" if profile.ServerAppID != nil { serverAppId = *profile.ServerAppID @@ -1654,10 +1708,12 @@ func flattenKubernetesClusterRoleBasedAccessControl(input *containerservice.Mana } results = append(results, map[string]interface{}{ - "client_app_id": clientAppId, - "server_app_id": serverAppId, - "server_app_secret": serverAppSecret, - "tenant_id": tenantId, + "admin_group_object_ids": schema.NewSet(schema.HashString, adminGroupObjectIds), + "client_app_id": clientAppId, + "managed": managed, + "server_app_id": serverAppId, + "server_app_secret": serverAppSecret, + "tenant_id": tenantId, }) } diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_auth_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_auth_resource_test.go index 9268846f86c4..697d31ebd237 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_auth_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_auth_resource_test.go @@ -185,6 +185,51 @@ func testAccAzureRMKubernetesCluster_roleBasedAccessControlAAD(t *testing.T) { }) } +func TestAccAzureRMKubernetesCluster_roleBasedAccessControlAADManaged(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccAzureRMKubernetesCluster_roleBasedAccessControlAADManaged(t) +} + +func testAccAzureRMKubernetesCluster_roleBasedAccessControlAADManaged(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") + clientData := data.Client() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesCluster_roleBasedAccessControlAADManagedConfig(data, ""), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(data.ResourceName), + resource.TestCheckResourceAttr(data.ResourceName, "role_based_access_control.#", "1"), + resource.TestCheckResourceAttr(data.ResourceName, "role_based_access_control.0.enabled", "true"), + resource.TestCheckResourceAttr(data.ResourceName, "role_based_access_control.0.azure_active_directory.#", "1"), + resource.TestCheckResourceAttrSet(data.ResourceName, "role_based_access_control.0.azure_active_directory.0.tenant_id"), + resource.TestCheckResourceAttrSet(data.ResourceName, 
"role_based_access_control.0.azure_active_directory.0.managed"), + resource.TestCheckResourceAttr(data.ResourceName, "kube_admin_config.#", "1"), + resource.TestCheckResourceAttrSet(data.ResourceName, "kube_admin_config_raw"), + ), + }, + data.ImportStep( + "role_based_access_control.0.azure_active_directory.0.server_app_secret", + ), + { + // should be no changes since the default for Tenant ID comes from the Provider block + Config: testAccAzureRMKubernetesCluster_roleBasedAccessControlAADManagedConfig(data, clientData.TenantID), + PlanOnly: true, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(data.ResourceName), + ), + }, + data.ImportStep( + "role_based_access_control.0.azure_active_directory.0.server_app_secret", + ), + }, + }) +} + func TestAccAzureRMKubernetesCluster_servicePrincipal(t *testing.T) { checkIfShouldRunTestsIndividually(t) testAccAzureRMKubernetesCluster_servicePrincipal(t) @@ -488,6 +533,53 @@ resource "azurerm_kubernetes_cluster" "test" { `, tenantId, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, clientId, clientSecret, clientId) } +func testAccAzureRMKubernetesCluster_roleBasedAccessControlAADManagedConfig(data acceptance.TestData, tenantId string) string { + return fmt.Sprintf(` +variable "tenant_id" { + default = "%s" +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-aks-%d" + location = "%s" +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + dns_prefix = "acctestaks%d" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + } + + identity { + type = "SystemAssigned" + } + + role_based_access_control { + enabled = true + + azure_active_directory { + tenant_id = var.tenant_id + managed = true + } + } +} +`, tenantId, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger) +} + func testAccAzureRMKubernetesCluster_servicePrincipalConfig(data acceptance.TestData, clientId, clientSecret string) string { return fmt.Sprintf(` provider "azurerm" { diff --git a/website/docs/d/kubernetes_cluster.html.markdown b/website/docs/d/kubernetes_cluster.html.markdown index 066ff8dc03de..9775e86aa05d 100644 --- a/website/docs/d/kubernetes_cluster.html.markdown +++ b/website/docs/d/kubernetes_cluster.html.markdown @@ -136,8 +136,12 @@ A `agent_pool_profile` block exports the following: A `azure_active_directory` block exports the following: +* `admin_group_object_ids` - The list of Object IDs of Azure Active Directory Groups which have Admin Role on the Cluster (when using a Managed integration). + * `client_app_id` - The Client ID of an Azure Active Directory Application. +* `managed` - Is the Azure Active Directory Integration managed (also known as AAD Integration V2)? + * `server_app_id` - The Server ID of an Azure Active Directory Application. 
* `tenant_id` - The Tenant ID used for Azure Active Directory Application. diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 754466cdeee3..0c8e281d642f 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -192,14 +192,24 @@ A `auto_scaler_profile` block supports the following: A `azure_active_directory` block supports the following: +* `managed` - Is the Azure Active Directory integration Managed, meaning that Azure will create/manage the Service Principal used for integration. + +~> **Note:** Managed Azure Active Directory Integrations is in Preview and needs to be enabled prior to use. More information, including how to enable the Preview feature - [can be found in the Managed Azure Active Directory Integration Documentation](https://docs.microsoft.com/en-us/azure/aks/azure-ad-v2). + +* `tenant_id` - (Optional) The Tenant ID used for Azure Active Directory Application. If this isn't specified the Tenant ID of the current Subscription is used. + +When `managed` is set to `true` the following properties can be specified: + +* `admin_group_object_ids` - (Optional) A list of Object IDs of Azure Active Directory Groups which should have Admin Role on the Cluster. + +When `managed` is set to `false` the following properties can be specified: + * `client_app_id` - (Required) The Client ID of an Azure Active Directory Application. * `server_app_id` - (Required) The Server ID of an Azure Active Directory Application. * `server_app_secret` - (Required) The Server Secret of an Azure Active Directory Application. -* `tenant_id` - (Optional) The Tenant ID used for Azure Active Directory Application. If this isn't specified the Tenant ID of the current Subscription is used. 
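For reference, a minimal sketch of the managed (v2) integration described above — resource names and the group object ID are placeholders, and the expand logic above rejects the v1 `client_app_id` / `server_app_id` / `server_app_secret` arguments in this mode:

```hcl
resource "azurerm_kubernetes_cluster" "example" {
  name                = "example-aks"
  location            = azurerm_resource_group.example.location
  resource_group_name = azurerm_resource_group.example.name
  dns_prefix          = "exampleaks"

  default_node_pool {
    name       = "default"
    node_count = 1
    vm_size    = "Standard_DS2_v2"
  }

  identity {
    type = "SystemAssigned"
  }

  role_based_access_control {
    enabled = true

    azure_active_directory {
      # v2 / managed integration: AKS manages the AAD applications, so the
      # v1 client/server application arguments must not be set here.
      managed                = true
      admin_group_object_ids = ["00000000-0000-0000-0000-000000000000"]
      # tenant_id is optional and defaults to the provider's Tenant ID when omitted.
    }
  }
}
```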
- --- A `azure_policy` block supports the following: From 94c85d2ba2d016b9074cffed70733ef8cbe41b60 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Tue, 2 Jun 2020 08:46:56 +0200 Subject: [PATCH 21/55] r/kubernetes_cluster_node_pool: allowing updating of the `mode` field --- .../kubernetes_cluster_node_pool_resource.go | 8 ++- ...ernetes_cluster_node_pool_resource_test.go | 58 +++++++++++++++++++ 2 files changed, 64 insertions(+), 2 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go index 05f0183d68de..abbbfbe161e8 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go @@ -107,8 +107,8 @@ func resourceArmKubernetesClusterNodePool() *schema.Resource { "mode": { Type: schema.TypeString, Optional: true, - ForceNew: true, - Default: string(containerservice.User), + //ForceNew: true, + Default: string(containerservice.User), ValidateFunc: validation.StringInSlice([]string{ string(containerservice.System), string(containerservice.User), @@ -396,6 +396,10 @@ func resourceArmKubernetesClusterNodePoolUpdate(d *schema.ResourceData, meta int props.MaxCount = utils.Int32(int32(d.Get("max_count").(int))) } + if d.HasChange("mode") { + props.Mode = containerservice.AgentPoolMode(d.Get("mode").(string)) + } + if d.HasChange("min_count") { props.MinCount = utils.Int32(int32(d.Get("min_count").(int))) } diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go index 3dd62bada21d..e81e879a9051 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go @@ -33,6 +33,7 @@ var kubernetesNodePoolTests = map[string]func(t *testing.T){ "requiresImport": testAccAzureRMKubernetesClusterNodePool_requiresImport, "osDiskSizeGB": testAccAzureRMKubernetesClusterNodePool_osDiskSizeGB, "modeSystem": testAccAzureRMKubernetesClusterNodePool_modeSystem, + "modeUpdate": testAccAzureRMKubernetesClusterNodePool_modeUpdate, "virtualNetworkAutomatic": testAccAzureRMKubernetesClusterNodePool_virtualNetworkAutomatic, "virtualNetworkManual": testAccAzureRMKubernetesClusterNodePool_virtualNetworkManual, "windows": testAccAzureRMKubernetesClusterNodePool_windows, @@ -421,6 +422,44 @@ func testAccAzureRMKubernetesClusterNodePool_modeSystem(t *testing.T) { }) } +func TestAccAzureRMKubernetesClusterNodePool_modeUpdate(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccAzureRMKubernetesClusterNodePool_modeUpdate(t) +} + +func testAccAzureRMKubernetesClusterNodePool_modeUpdate(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesClusterNodePool_modeUserConfig(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(data.ResourceName), + ), + }, + data.ImportStep(), + { + Config: testAccAzureRMKubernetesClusterNodePool_modeSystemConfig(data), + Check: 
resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(data.ResourceName), + ), + }, + data.ImportStep(), + { + Config: testAccAzureRMKubernetesClusterNodePool_modeUserConfig(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(data.ResourceName), + ), + }, + data.ImportStep(), + }, + }) +} + func TestAccAzureRMKubernetesClusterNodePool_nodeLabels(t *testing.T) { checkIfShouldRunTestsIndividually(t) testAccAzureRMKubernetesClusterNodePool_nodeLabels(t) @@ -1130,6 +1169,25 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" { `, template) } +func testAccAzureRMKubernetesClusterNodePool_modeUserConfig(data acceptance.TestData) string { + template := testAccAzureRMKubernetesClusterNodePool_templateConfig(data) + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +%s + +resource "azurerm_kubernetes_cluster_node_pool" "test" { + name = "internal" + kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_DS2_v2" + node_count = 1 + mode = "User" +} +`, template) +} + func testAccAzureRMKubernetesClusterNodePool_multiplePoolsConfig(data acceptance.TestData, numberOfAgents int) string { template := testAccAzureRMKubernetesClusterNodePool_templateConfig(data) return fmt.Sprintf(` From d0a603f800823a719354219104441cd9bdea5c95 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Tue, 2 Jun 2020 14:08:35 +0200 Subject: [PATCH 22/55] r/kubernetes_cluster: support for v2 of the azure policy addon Fixes #6994 --- .../services/containers/kubernetes_addons.go | 24 ++++++++++++++++++- .../kubernetes_cluster_data_source.go | 10 ++++++++ ...kubernetes_cluster_addons_resource_test.go | 20 +++++++++++++--- .../kubernetes_cluster_data_source_test.go | 5 ++-- .../docs/d/kubernetes_cluster.html.markdown | 2 ++ .../docs/r/kubernetes_cluster.html.markdown | 6 ++++- 6 files changed, 60 insertions(+), 7 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_addons.go b/azurerm/internal/services/containers/kubernetes_addons.go index 8bde80eb4efd..ad0b0fd7420a 100644 --- a/azurerm/internal/services/containers/kubernetes_addons.go +++ b/azurerm/internal/services/containers/kubernetes_addons.go @@ -75,6 +75,18 @@ func schemaKubernetesAddOnProfiles() *schema.Schema { Type: schema.TypeBool, Required: true, }, + + "version": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + // NOTE: v1 will be removed "after Spring 2020" - https://github.com/terraform-providers/terraform-provider-azurerm/issues/6994 + // The current cluster uses policy add-on V1. Please migrate to V2 by disabling the add-on, and re-enabling it. + // Azure Policy will not support V1 after spring 2020. 
V2 is a breaking change, so please read carefully on the instruction and impact at: https://aka.ms/akspolicydoc + "v1", + "v2", + }, false), + }, }, }, }, @@ -232,10 +244,13 @@ func expandKubernetesAddOnProfiles(input []interface{}, env azure.Environment) ( if len(azurePolicy) > 0 && azurePolicy[0] != nil { value := azurePolicy[0].(map[string]interface{}) enabled := value["enabled"].(bool) + version := value["version"].(string) addonProfiles[azurePolicyKey] = &containerservice.ManagedClusterAddonProfile{ Enabled: utils.Bool(enabled), - Config: nil, + Config: map[string]*string{ + "version": utils.String(version), + }, } } @@ -309,8 +324,15 @@ func flattenKubernetesAddOnProfiles(profile map[string]*containerservice.Managed enabled = *enabledVal } + // not returned for v1 + version := "v1" + if versionVal, ok := azurePolicy.Config["version"]; ok && *versionVal != "" { + version = *versionVal + } + azurePolicies = append(azurePolicies, map[string]interface{}{ "enabled": enabled, + "version": version, }) } diff --git a/azurerm/internal/services/containers/kubernetes_cluster_data_source.go b/azurerm/internal/services/containers/kubernetes_cluster_data_source.go index 2e93033f4785..0eff5231c2cd 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_data_source.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_data_source.go @@ -114,6 +114,10 @@ func dataSourceArmKubernetesCluster() *schema.Resource { Type: schema.TypeBool, Computed: true, }, + "version": { + Type: schema.TypeString, + Computed: true, + }, }, }, }, @@ -808,8 +812,14 @@ func flattenKubernetesClusterDataSourceAddonProfiles(profile map[string]*contain enabled = *enabledVal } + version := "v1" + if versionVal, ok := azurePolicy.Config["version"]; ok && *versionVal != "" { + version = *versionVal + } + output := map[string]interface{}{ "enabled": enabled, + "version": version, } azurePolicies = append(azurePolicies, output) } diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go index b1038b219140..3e3203ca05cb 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go @@ -12,6 +12,8 @@ var kubernetesAddOnTests = map[string]func(t *testing.T){ "addonProfileAciConnectorLinux": testAccAzureRMKubernetesCluster_addonProfileAciConnectorLinux, "addonProfileAciConnectorLinuxDisabled": testAccAzureRMKubernetesCluster_addonProfileAciConnectorLinuxDisabled, "addonProfileAzurePolicy": testAccAzureRMKubernetesCluster_addonProfileAzurePolicy, + "addonProfileAzurePolicyV1ToV2": testAccAzureRMKubernetesCluster_addonProfileAzurePolicyV1ToV2, + "addonProfileAzurePolicyV2": testAccAzureRMKubernetesCluster_addonProfileAzurePolicyV2, "addonProfileKubeDashboard": testAccAzureRMKubernetesCluster_addonProfileKubeDashboard, "addonProfileOMS": testAccAzureRMKubernetesCluster_addonProfileOMS, "addonProfileOMSToggle": testAccAzureRMKubernetesCluster_addonProfileOMSToggle, @@ -89,11 +91,22 @@ func testAccAzureRMKubernetesCluster_addonProfileAzurePolicy(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data), + Config: testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data, "v1"), Check: resource.ComposeTestCheckFunc( 
testCheckAzureRMKubernetesClusterExists(data.ResourceName), resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.#", "1"), resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.0.enabled", "true"), + resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.0.version", "v1"), + ), + }, + data.ImportStep(), + { + Config: testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data, "v2"), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(data.ResourceName), + resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.#", "1"), + resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.0.enabled", "true"), + resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.0.version", "v2"), ), }, data.ImportStep(), @@ -382,7 +395,7 @@ resource "azurerm_kubernetes_cluster" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger) } -func testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data acceptance.TestData) string { +func testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data acceptance.TestData, version string) string { return fmt.Sprintf(` provider "azurerm" { features {} @@ -416,6 +429,7 @@ resource "azurerm_kubernetes_cluster" "test" { addon_profile { azure_policy { enabled = true + version = "%s" } } @@ -423,7 +437,7 @@ resource "azurerm_kubernetes_cluster" "test" { type = "SystemAssigned" } } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger) +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, version) } func testAccAzureRMKubernetesCluster_addonProfileKubeDashboardConfig(data acceptance.TestData) string { diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_data_source_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_data_source_test.go index da57819618df..6dfb2b04dba2 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_data_source_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_data_source_test.go @@ -9,7 +9,7 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" ) -var kubernetesDataSourceTests = map[string]func(t *testing.T) { +var kubernetesDataSourceTests = map[string]func(t *testing.T){ "basic": testAccDataSourceAzureRMKubernetesCluster_basic, "roleBasedAccessControl": testAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControl, "roleBasedAccessControlAAD": testAccDataSourceAzureRMKubernetesCluster_roleBasedAccessControlAAD, @@ -500,6 +500,7 @@ func testAccDataSourceAzureRMKubernetesCluster_addOnProfileAzurePolicy(t *testin testCheckAzureRMKubernetesClusterExists(data.ResourceName), resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.#", "1"), resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.0.enabled", "true"), + resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.0.version", "v2"), ), }, }, @@ -833,7 +834,7 @@ data "azurerm_kubernetes_cluster" "test" { } func testAccDataSourceAzureRMKubernetesCluster_addOnProfileAzurePolicyConfig(data acceptance.TestData) string { - r := testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data) + r := 
testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data, "v2") return fmt.Sprintf(` %s diff --git a/website/docs/d/kubernetes_cluster.html.markdown b/website/docs/d/kubernetes_cluster.html.markdown index 9775e86aa05d..b629c9a68757 100644 --- a/website/docs/d/kubernetes_cluster.html.markdown +++ b/website/docs/d/kubernetes_cluster.html.markdown @@ -246,6 +246,8 @@ A `azure_policy` block supports the following: * `enabled` - Is Azure Policy for Kubernetes enabled? +* `version`- The version of Azure Policy being used. + --- A `role_based_access_control` block exports the following: diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 0c8e281d642f..588ee8b0a205 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -155,7 +155,7 @@ A `addon_profile` block supports the following: -> **NOTE:** At this time Azure Policy is not supported in Azure US Government. --> **NOTE**: Azure Policy for Azure Kubernetes Service is currently in preview and not available to subscriptions that have not [opted-in](https://docs.microsoft.com/en-us/azure/governance/policy/concepts/rego-for-aks?toc=/azure/aks/toc.json) to join `Azure Policy` preview. +~> **Note:** Azure Policy is in Public Preview - more information and details on how to opt into the Preview [can be found in this article](https://docs.microsoft.com/en-gb/azure/governance/policy/concepts/policy-for-kubernetes). * `http_application_routing` - (Optional) A `http_application_routing` block as defined below. @@ -216,6 +216,10 @@ A `azure_policy` block supports the following: * `enabled` - (Required) Is the Azure Policy for Kubernetes Add On enabled? +* `version` - (Required) The Version of Azure Policy which should be installed on this Kubernetes Cluster. Possible values are `v1` and `v2`. + +!> **Note:** Support for `v1` is in Private Preview will be removed by AKS "after Spring 2020". 
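For reference, a minimal sketch of enabling the add-on with the new `version` argument, mirroring the acceptance test configuration above — resource names are placeholders:

```hcl
resource "azurerm_kubernetes_cluster" "example" {
  name                = "example-aks"
  location            = azurerm_resource_group.example.location
  resource_group_name = azurerm_resource_group.example.name
  dns_prefix          = "exampleaks"

  default_node_pool {
    name       = "default"
    node_count = 1
    vm_size    = "Standard_DS2_v2"
  }

  identity {
    type = "SystemAssigned"
  }

  addon_profile {
    azure_policy {
      enabled = true
      version = "v2" # "v1" is still accepted but is being retired by AKS
    }
  }
}
```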
+ --- A `default_node_pool` block supports the following: From 3151a8f69bfaa088716921dbe7bf56e84f380e02 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Tue, 2 Jun 2020 14:46:16 +0200 Subject: [PATCH 23/55] r/kubernetes_cluster: `kubernetes_dashboard` is not supported in US Government --- azurerm/internal/services/containers/kubernetes_addons.go | 1 + 1 file changed, 1 insertion(+) diff --git a/azurerm/internal/services/containers/kubernetes_addons.go b/azurerm/internal/services/containers/kubernetes_addons.go index ad0b0fd7420a..b48f633260a6 100644 --- a/azurerm/internal/services/containers/kubernetes_addons.go +++ b/azurerm/internal/services/containers/kubernetes_addons.go @@ -34,6 +34,7 @@ var unsupportedAddonsForEnvironment = map[string][]string{ azure.USGovernmentCloud.Name: { azurePolicyKey, // https://github.com/terraform-providers/terraform-provider-azurerm/issues/6702 httpApplicationRoutingKey, // https://github.com/terraform-providers/terraform-provider-azurerm/issues/5960 + kubernetesDashboardKey, // https://github.com/terraform-providers/terraform-provider-azurerm/issues/7136 }, } From 77a6311cdf6881fe1865552be233879c3d29deef Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Tue, 2 Jun 2020 14:47:59 +0200 Subject: [PATCH 24/55] r/kubernetes_cluster: azure policy is not supported in China Fixes #6462 --- azurerm/internal/services/containers/kubernetes_addons.go | 1 + 1 file changed, 1 insertion(+) diff --git a/azurerm/internal/services/containers/kubernetes_addons.go b/azurerm/internal/services/containers/kubernetes_addons.go index b48f633260a6..22d3a6311d4f 100644 --- a/azurerm/internal/services/containers/kubernetes_addons.go +++ b/azurerm/internal/services/containers/kubernetes_addons.go @@ -29,6 +29,7 @@ const ( var unsupportedAddonsForEnvironment = map[string][]string{ azure.ChinaCloud.Name: { aciConnectorKey, // https://github.com/terraform-providers/terraform-provider-azurerm/issues/5510 + azurePolicyKey, // https://github.com/terraform-providers/terraform-provider-azurerm/issues/6462 httpApplicationRoutingKey, // https://github.com/terraform-providers/terraform-provider-azurerm/issues/5960 }, azure.USGovernmentCloud.Name: { From c73f55dc96b2931d56bffc3b7b73fa6a636d1ac8 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 3 Jun 2020 10:29:01 +0200 Subject: [PATCH 25/55] r/kubernetes_cluster: raising an error when trying to upgrade the kubernetes version for spot node pools --- .../kubernetes_cluster_node_pool_resource.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go index abbbfbe161e8..c67eebb98a8f 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go @@ -3,6 +3,7 @@ package containers import ( "fmt" "log" + "strings" "time" "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice" @@ -357,7 +358,7 @@ func resourceArmKubernetesClusterNodePoolUpdate(d *schema.ResourceData, meta int existing, err := client.Get(ctx, id.ResourceGroup, id.ClusterName, id.Name) if err != nil { if utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("[DEBUG] Node Pool %q was not found in Managed Kubernetes Cluster %q / Resource Group %q!", id.Name, id.ClusterName, id.ResourceGroup) + return fmt.Errorf("Node Pool %q was not found 
in Managed Kubernetes Cluster %q / Resource Group %q!", id.Name, id.ClusterName, id.ResourceGroup) } return fmt.Errorf("retrieving Node Pool %q (Managed Kubernetes Cluster %q / Resource Group %q): %+v", id.Name, id.ClusterName, id.ResourceGroup, err) @@ -415,6 +416,16 @@ func resourceArmKubernetesClusterNodePoolUpdate(d *schema.ResourceData, meta int } if d.HasChange("orchestrator_version") { + // Spot Node pool's can't be updated - Azure Docs: https://docs.microsoft.com/en-us/azure/aks/spot-node-pool + // > You can't upgrade a spot node pool since spot node pools can't guarantee cordon and drain. + // > You must replace your existing spot node pool with a new one to do operations such as upgrading + // > the Kubernetes version. To replace a spot node pool, create a new spot node pool with a different + // > version of Kubernetes, wait until its status is Ready, then remove the old node pool. + if strings.EqualFold(string(props.ScaleSetPriority), string(containerservice.Spot)) { + // ^ the Scale Set Priority isn't returned when Regular + return fmt.Errorf("the Orchestrator Version cannot be updated when using a Spot Node Pool") + } + orchestratorVersion := d.Get("orchestrator_version").(string) if err := validateNodePoolSupportsVersion(ctx, containersClient, id.ResourceGroup, id.ClusterName, id.Name, orchestratorVersion); err != nil { return err From b65213696729d0bad3e7da2380f7f75a5b0f862c Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 3 Jun 2020 11:47:35 +0200 Subject: [PATCH 26/55] r/kubernetes_cluster: support for spot node pools --- .../kubernetes_cluster_node_pool_resource.go | 78 ++++++++++++++++--- ...ernetes_cluster_node_pool_resource_test.go | 60 +++++++++++++- ...kubernetes_cluster_node_pool.html.markdown | 23 +++++- 3 files changed, 144 insertions(+), 17 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go index c67eebb98a8f..59382de756ea 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go @@ -91,6 +91,16 @@ func resourceArmKubernetesClusterNodePool() *schema.Resource { Optional: true, }, + "eviction_policy": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + string(containerservice.Delete), + string(containerservice.Deallocate), + }, false), + }, + "max_count": { Type: schema.TypeInt, Optional: true, @@ -164,6 +174,25 @@ func resourceArmKubernetesClusterNodePool() *schema.Resource { }, false), }, + "priority": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: string(containerservice.Regular), + ValidateFunc: validation.StringInSlice([]string{ + string(containerservice.Regular), + string(containerservice.Spot), + }, false), + }, + + "spot_max_price": { + Type: schema.TypeFloat, + Optional: true, + ForceNew: true, + Default: -1.0, + // TODO: validation function + }, + "vnet_subnet_id": { Type: schema.TypeString, Optional: true, @@ -216,23 +245,24 @@ func resourceArmKubernetesClusterNodePoolCreate(d *schema.ResourceData, meta int return fmt.Errorf("The Default Node Pool for Kubernetes Cluster %q (Resource Group %q) must be a VirtualMachineScaleSet to attach multiple node pools!", clusterName, resourceGroup) } - if d.IsNewResource() { - existing, err := poolsClient.Get(ctx, resourceGroup, clusterName, name) - if err != nil { - if 
!utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("checking for presence of existing Agent Pool %q (Kubernetes Cluster %q / Resource Group %q): %s", name, clusterName, resourceGroup, err) - } + existing, err := poolsClient.Get(ctx, resourceGroup, clusterName, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("checking for presence of existing Agent Pool %q (Kubernetes Cluster %q / Resource Group %q): %s", name, clusterName, resourceGroup, err) } + } - if existing.ID != nil && *existing.ID != "" { - return tf.ImportAsExistsError("azurerm_kubernetes_cluster_node_pool", *existing.ID) - } + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_kubernetes_cluster_node_pool", *existing.ID) } count := d.Get("node_count").(int) enableAutoScaling := d.Get("enable_auto_scaling").(bool) + evictionPolicy := d.Get("eviction_policy").(string) mode := containerservice.AgentPoolMode(d.Get("mode").(string)) osType := d.Get("os_type").(string) + priority := d.Get("priority").(string) + spotMaxPrice := d.Get("spot_max_price").(float64) t := d.Get("tags").(map[string]interface{}) vmSize := d.Get("vm_size").(string) @@ -241,6 +271,7 @@ func resourceArmKubernetesClusterNodePoolCreate(d *schema.ResourceData, meta int EnableAutoScaling: utils.Bool(enableAutoScaling), EnableNodePublicIP: utils.Bool(d.Get("enable_node_public_ip").(bool)), Mode: mode, + ScaleSetPriority: containerservice.ScaleSetPriority(priority), Tags: tags.Expand(t), Type: containerservice.VirtualMachineScaleSets, VMSize: containerservice.VMSizeTypes(vmSize), @@ -249,6 +280,15 @@ func resourceArmKubernetesClusterNodePoolCreate(d *schema.ResourceData, meta int Count: utils.Int32(int32(count)), } + if priority == string(containerservice.Spot) { + profile.ScaleSetEvictionPolicy = containerservice.ScaleSetEvictionPolicy(evictionPolicy) + profile.SpotMaxPrice = utils.Float(spotMaxPrice) + } else if evictionPolicy != "" { + return fmt.Errorf("`eviction_policy` can only be set when `priority` is set to `Spot`") + } else if spotMaxPrice != -1.0 { + return fmt.Errorf("`spot_max_price` can only be set when `priority` is set to `Spot`") + } + orchestratorVersion := d.Get("orchestrator_version").(string) if orchestratorVersion != "" { if err := validateNodePoolSupportsVersion(ctx, containersClient, resourceGroup, clusterName, name, orchestratorVersion); err != nil { @@ -530,6 +570,12 @@ func resourceArmKubernetesClusterNodePoolRead(d *schema.ResourceData, meta inter d.Set("enable_auto_scaling", props.EnableAutoScaling) d.Set("enable_node_public_ip", props.EnableNodePublicIP) + evictionPolicy := "" + if props.ScaleSetEvictionPolicy != "" { + evictionPolicy = string(props.ScaleSetEvictionPolicy) + } + d.Set("eviction_policy", evictionPolicy) + maxCount := 0 if props.MaxCount != nil { maxCount = int(*props.MaxCount) @@ -575,6 +621,20 @@ func resourceArmKubernetesClusterNodePoolRead(d *schema.ResourceData, meta inter } d.Set("os_disk_size_gb", osDiskSizeGB) d.Set("os_type", string(props.OsType)) + + // not returned from the API if not Spot + priority := string(containerservice.Regular) + if props.ScaleSetPriority != "" { + priority = string(props.ScaleSetPriority) + } + d.Set("priority", priority) + + spotMaxPrice := -1.0 + if props.SpotMaxPrice != nil { + spotMaxPrice = *props.SpotMaxPrice + } + d.Set("spot_max_price", spotMaxPrice) + d.Set("vnet_subnet_id", props.VnetSubnetID) d.Set("vm_size", string(props.VMSize)) } diff --git 
a/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go index e81e879a9051..62d0319d5e95 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go @@ -31,6 +31,7 @@ var kubernetesNodePoolTests = map[string]func(t *testing.T){ "nodePublicIP": testAccAzureRMKubernetesClusterNodePool_nodePublicIP, "nodeTaints": testAccAzureRMKubernetesClusterNodePool_nodeTaints, "requiresImport": testAccAzureRMKubernetesClusterNodePool_requiresImport, + "spot": testAccAzureRMKubernetesClusterNodePool_spot, "osDiskSizeGB": testAccAzureRMKubernetesClusterNodePool_osDiskSizeGB, "modeSystem": testAccAzureRMKubernetesClusterNodePool_modeSystem, "modeUpdate": testAccAzureRMKubernetesClusterNodePool_modeUpdate, @@ -546,6 +547,30 @@ func testAccAzureRMKubernetesClusterNodePool_nodeTaints(t *testing.T) { }) } +func TestAccAzureRMKubernetesClusterNodePool_osDiskSizeGB(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccAzureRMKubernetesClusterNodePool_osDiskSizeGB(t) +} + +func testAccAzureRMKubernetesClusterNodePool_osDiskSizeGB(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesClusterNodePool_osDiskSizeGBConfig(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(data.ResourceName), + ), + }, + data.ImportStep(), + }, + }) +} + func TestAccAzureRMKubernetesClusterNodePool_requiresImport(t *testing.T) { checkIfShouldRunTestsIndividually(t) testAccAzureRMKubernetesClusterNodePool_requiresImport(t) @@ -573,12 +598,12 @@ func testAccAzureRMKubernetesClusterNodePool_requiresImport(t *testing.T) { }) } -func TestAccAzureRMKubernetesClusterNodePool_osDiskSizeGB(t *testing.T) { +func TestAccAzureRMKubernetesClusterNodePool_spot(t *testing.T) { checkIfShouldRunTestsIndividually(t) - testAccAzureRMKubernetesClusterNodePool_osDiskSizeGB(t) + testAccAzureRMKubernetesClusterNodePool_spot(t) } -func testAccAzureRMKubernetesClusterNodePool_osDiskSizeGB(t *testing.T) { +func testAccAzureRMKubernetesClusterNodePool_spot(t *testing.T) { data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test") resource.ParallelTest(t, resource.TestCase{ @@ -587,7 +612,7 @@ func testAccAzureRMKubernetesClusterNodePool_osDiskSizeGB(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMKubernetesClusterNodePool_osDiskSizeGBConfig(data), + Config: testAccAzureRMKubernetesClusterNodePool_spotConfig(data), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesNodePoolExists(data.ResourceName), ), @@ -1314,6 +1339,33 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" { `, template) } +func testAccAzureRMKubernetesClusterNodePool_spotConfig(data acceptance.TestData) string { + template := testAccAzureRMKubernetesClusterNodePool_templateConfig(data) + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +%s + +resource "azurerm_kubernetes_cluster_node_pool" "test" { + name = "internal" + 
kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id + vm_size = "Standard_DS2_v2" + node_count = 1 + priority = "Spot" + eviction_policy = "Delete" + spot_max_price = 0.5 # high, but this is a maximum (we pay less) so ensures this won't fail + node_labels = [ + "kubernetes.azure.com/scalesetpriority" = "spot" + ] + node_taints = [ + "kubernetes.azure.com/scalesetpriority=spot:NoSchedule" + ] +} +`, template) +} + func testAccAzureRMKubernetesClusterNodePool_virtualNetworkAutomaticConfig(data acceptance.TestData) string { template := testAccAzureRMKubernetesClusterNodePool_templateVirtualNetworkConfig(data) return fmt.Sprintf(` diff --git a/website/docs/r/kubernetes_cluster_node_pool.html.markdown b/website/docs/r/kubernetes_cluster_node_pool.html.markdown index 1c010bb2c618..b9daaa747ce7 100644 --- a/website/docs/r/kubernetes_cluster_node_pool.html.markdown +++ b/website/docs/r/kubernetes_cluster_node_pool.html.markdown @@ -14,6 +14,9 @@ Manages a Node Pool within a Kubernetes Cluster ## Example Usage +This example provisions a basic Kubernetes Node Pool. Other examples of the `azurerm_kubernetes_cluster_node_pool` resource can be found in [the `./examples/kubernetes` directory within the Github Repository](https://github.com/terraform-providers/terraform-provider-azurerm/tree/master/examples/kubernetes) + + ```hcl resource "azurerm_resource_group" "example" { name = "example-resources" @@ -74,6 +77,10 @@ The following arguments are supported: * `enable_node_public_ip` - (Optional) Should each node have a Public IP Address? Defaults to `false`. +* `eviction_policy` - (Optional) The Eviction Policy which should be used for Virtual Machines within the Virtual Machine Scale Set powering this Node Pool. Possible values are `Deallocate` and `Delete`. Changing this forces a new resource to be created. + +-> **Note:** An Eviction Policy can only be configured when `priority` is set to `Spot`. + * `max_pods` - (Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created. * `mode` - (Optional) Should this Node Pool be used for System or User resources? Possible values are `System` and `User`. Defaults to `User`. @@ -82,17 +89,25 @@ The following arguments are supported: * `node_taints` - (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g `key=value:NoSchedule`). +* `orchestrator_version` - (Optional) Version of Kubernetes used for the Agents. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade) + +-> **Note:** This version must be supported by the Kubernetes Cluster - as such the version of Kubernetes used on the Cluster/Control Plane may need to be upgraded first. + * `os_disk_size_gb` - (Optional) The Agent Operating System disk size in GB. Changing this forces a new resource to be created. * `os_type` - (Optional) The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are `Linux` and `Windows`. Defaults to `Linux`. -* `tags` - (Optional) A mapping of tags to assign to the resource. +* `priority` - (Optional) The Priority for Virtual Machines within the Virtual Machine Scale Set that powers this Node Pool. Possible values are `Regular` and `Spot`. Defaults to `Regular`. Changing this forces a new resource to be created. 
-~> At this time there's a bug in the AKS API where Tags for a Node Pool are not stored in the correct case - you [may wish to use Terraform's `ignore_changes` functionality to ignore changes to the casing](https://www.terraform.io/docs/configuration/resources.html#ignore_changes) until this is fixed in the AKS API. +-> **Note:** When setting `priority` to Spot - you must configure an `eviction_policy`, `spot_max_price` and add the applicable `node_labels` and `node_taints` [as per the Azure Documentation](https://docs.microsoft.com/en-us/azure/aks/spot-node-pool). -* `orchestrator_version` - (Optional) Version of Kubernetes used for the Agents. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade) +* `spot_max_price` - (Optional) The maximum price you're willing to pay in USD per Virtual Machine. Valid values are -1 (the current Virtual Machine price) or a positive value with up to five decimal places. Changing this forces a new resource to be created. --> **Note:** This version must be supported by the Kubernetes Cluster - as such the version of Kubernetes used on the Cluster/Control Plane may need to be upgraded first. +~> **Note:** This field can only be configured when `priority` is set to `Spot`. + +* `tags` - (Optional) A mapping of tags to assign to the resource. + +~> At this time there's a bug in the AKS API where Tags for a Node Pool are not stored in the correct case - you [may wish to use Terraform's `ignore_changes` functionality to ignore changes to the casing](https://www.terraform.io/docs/configuration/resources.html#ignore_changes) until this is fixed in the AKS API. * `vnet_subnet_id` - (Optional) The ID of the Subnet where this Node Pool should exist. From ed061efbc402e6af7687937db3331b474071a2e0 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 3 Jun 2020 12:15:17 +0200 Subject: [PATCH 27/55] validating the `spot_max_price` and `max_bid_price` fields --- ...inux_virtual_machine_scale_set_resource.go | 7 +++-- .../compute/validate/spot_max_price.go | 28 +++++++++++++++++++ ...dows_virtual_machine_scale_set_resource.go | 8 ++++-- .../kubernetes_cluster_node_pool_resource.go | 11 ++++---- 4 files changed, 43 insertions(+), 11 deletions(-) create mode 100644 azurerm/internal/services/compute/validate/spot_max_price.go diff --git a/azurerm/internal/services/compute/linux_virtual_machine_scale_set_resource.go b/azurerm/internal/services/compute/linux_virtual_machine_scale_set_resource.go index 7d5abf9d8de1..6d154acebdbd 100644 --- a/azurerm/internal/services/compute/linux_virtual_machine_scale_set_resource.go +++ b/azurerm/internal/services/compute/linux_virtual_machine_scale_set_resource.go @@ -147,9 +147,10 @@ func resourceArmLinuxVirtualMachineScaleSet() *schema.Resource { "identity": VirtualMachineScaleSetIdentitySchema(), "max_bid_price": { - Type: schema.TypeFloat, - Optional: true, - Default: -1, + Type: schema.TypeFloat, + Optional: true, + Default: -1, + ValidateFunc: validate.SpotMaxPrice, }, "overprovision": { diff --git a/azurerm/internal/services/compute/validate/spot_max_price.go b/azurerm/internal/services/compute/validate/spot_max_price.go new file mode 100644 index 000000000000..de2911c8b9ff --- /dev/null +++ b/azurerm/internal/services/compute/validate/spot_max_price.go @@ -0,0 +1,28 @@ +package validate + +import ( + "fmt" +) + +// SpotMaxPrice validates the price provided is a valid Spot Price for the Compute +// API (and downstream API's which use this like AKS) +func SpotMaxPrice(i 
interface{}, k string) (warnings []string, errors []error) { + v, ok := i.(float64) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %q to be float", k)) + return + } + + // either -1 (the current VM price) + if v == -1.0 { + return + } + + // at least 0.00001 + if v < 0.00001 { + errors = append(errors, fmt.Errorf("expected %q to be > 0.00001 but got %.5f", k, v)) + return + } + + return +} diff --git a/azurerm/internal/services/compute/windows_virtual_machine_scale_set_resource.go b/azurerm/internal/services/compute/windows_virtual_machine_scale_set_resource.go index ee3b8fc9e3eb..30bebdc27af9 100644 --- a/azurerm/internal/services/compute/windows_virtual_machine_scale_set_resource.go +++ b/azurerm/internal/services/compute/windows_virtual_machine_scale_set_resource.go @@ -15,6 +15,7 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/parse" + computeValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/base64" azSchema "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/schema" @@ -159,9 +160,10 @@ func resourceArmWindowsVirtualMachineScaleSet() *schema.Resource { }, "max_bid_price": { - Type: schema.TypeFloat, - Optional: true, - Default: -1, + Type: schema.TypeFloat, + Optional: true, + Default: -1, + ValidateFunc: computeValidate.SpotMaxPrice, }, "overprovision": { diff --git a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go index 59382de756ea..2f03fabfed7b 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go @@ -13,6 +13,7 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + computeValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/parse" containerValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/validate" "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" @@ -186,11 +187,11 @@ func resourceArmKubernetesClusterNodePool() *schema.Resource { }, "spot_max_price": { - Type: schema.TypeFloat, - Optional: true, - ForceNew: true, - Default: -1.0, - // TODO: validation function + Type: schema.TypeFloat, + Optional: true, + ForceNew: true, + Default: -1.0, + ValidateFunc: computeValidate.SpotMaxPrice, }, "vnet_subnet_id": { From e71e9aed6eb8c4d9ea42f32561e2a78fda9f76bf Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 3 Jun 2020 12:54:07 +0200 Subject: [PATCH 28/55] r/kubernetes_cluster_node_pool: fixing the test --- .../tests/kubernetes_cluster_node_pool_resource_test.go | 6 +++--- 
website/docs/r/kubernetes_cluster_node_pool.html.markdown | 2 ++ 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go index 62d0319d5e95..fea508a5a2ef 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go @@ -1356,9 +1356,9 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" { priority = "Spot" eviction_policy = "Delete" spot_max_price = 0.5 # high, but this is a maximum (we pay less) so ensures this won't fail - node_labels = [ - "kubernetes.azure.com/scalesetpriority" = "spot" - ] + node_labels = { + "kubernetes.azure.com/scalesetpriority" = "spot" + } node_taints = [ "kubernetes.azure.com/scalesetpriority=spot:NoSchedule" ] diff --git a/website/docs/r/kubernetes_cluster_node_pool.html.markdown b/website/docs/r/kubernetes_cluster_node_pool.html.markdown index b9daaa747ce7..ac5a56ba111a 100644 --- a/website/docs/r/kubernetes_cluster_node_pool.html.markdown +++ b/website/docs/r/kubernetes_cluster_node_pool.html.markdown @@ -101,6 +101,8 @@ The following arguments are supported: -> **Note:** When setting `priority` to Spot - you must configure an `eviction_policy`, `spot_max_price` and add the applicable `node_labels` and `node_taints` [as per the Azure Documentation](https://docs.microsoft.com/en-us/azure/aks/spot-node-pool). +~> **Note:** Spot Node Pools are in Preview and must be opted-into - [more information on how to opt into this Preview can be found in the AKS Documentation](https://docs.microsoft.com/en-us/azure/aks/spot-node-pool). + * `spot_max_price` - (Optional) The maximum price you're willing to pay in USD per Virtual Machine. Valid values are -1 (the current Virtual Machine price) or a positive value with up to five decimal places. Changing this forces a new resource to be created. ~> **Note:** This field can only be configured when `priority` is set to `Spot`. 
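For reference, a minimal sketch of a Spot node pool combining the arguments above, mirroring the corrected acceptance test configuration — the parent cluster reference and the maximum price are placeholders:

```hcl
resource "azurerm_kubernetes_cluster_node_pool" "spot" {
  name                  = "spot"
  kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
  vm_size               = "Standard_DS2_v2"
  node_count            = 1

  priority        = "Spot"
  eviction_policy = "Delete" # only valid when priority is "Spot"
  spot_max_price  = 0.5      # -1 means "pay up to the current on-demand price"

  # labels/taints recommended by the AKS documentation for Spot pools
  node_labels = {
    "kubernetes.azure.com/scalesetpriority" = "spot"
  }
  node_taints = [
    "kubernetes.azure.com/scalesetpriority=spot:NoSchedule",
  ]
}
```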
From 1e186411625f9cee86e7b89aef5cf9e8218393e1 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 3 Jun 2020 12:55:37 +0200 Subject: [PATCH 29/55] r/kubernetes_cluster_node_pool: changing node_labels/node_taints forces a new resource --- .../kubernetes_cluster_node_pool_resource.go | 11 ++++------- .../docs/r/kubernetes_cluster_node_pool.html.markdown | 4 ++-- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go index 2f03fabfed7b..484b7ec14b1e 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go @@ -146,7 +146,10 @@ func resourceArmKubernetesClusterNodePool() *schema.Resource { "node_taints": { Type: schema.TypeList, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, }, "orchestrator_version": { @@ -450,12 +453,6 @@ func resourceArmKubernetesClusterNodePoolUpdate(d *schema.ResourceData, meta int props.Count = utils.Int32(int32(d.Get("node_count").(int))) } - if d.HasChange("node_taints") { - nodeTaintsRaw := d.Get("node_taints").([]interface{}) - nodeTaints := utils.ExpandStringSlice(nodeTaintsRaw) - props.NodeTaints = nodeTaints - } - if d.HasChange("orchestrator_version") { // Spot Node pool's can't be updated - Azure Docs: https://docs.microsoft.com/en-us/azure/aks/spot-node-pool // > You can't upgrade a spot node pool since spot node pools can't guarantee cordon and drain. diff --git a/website/docs/r/kubernetes_cluster_node_pool.html.markdown b/website/docs/r/kubernetes_cluster_node_pool.html.markdown index ac5a56ba111a..9fc2d98e2d66 100644 --- a/website/docs/r/kubernetes_cluster_node_pool.html.markdown +++ b/website/docs/r/kubernetes_cluster_node_pool.html.markdown @@ -85,9 +85,9 @@ The following arguments are supported: * `mode` - (Optional) Should this Node Pool be used for System or User resources? Possible values are `System` and `User`. Defaults to `User`. -* `node_labels` - (Optional) A map of Kubernetes labels which should be applied to nodes in this Node Pool. +* `node_labels` - (Optional) A map of Kubernetes labels which should be applied to nodes in this Node Pool. Changing this forces a new resource to be created. -* `node_taints` - (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g `key=value:NoSchedule`). +* `node_taints` - (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g `key=value:NoSchedule`). Changing this forces a new resource to be created. * `orchestrator_version` - (Optional) Version of Kubernetes used for the Agents. 
If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade) From 8fded1e4a55d0a660504d586461668ee3f275f6c Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 3 Jun 2020 12:56:59 +0200 Subject: [PATCH 30/55] r/kubernetes_cluster: node_labels and node_taints within the default_node_pool are ForceNew which matches the behaviour of the azure api --- azurerm/internal/services/containers/kubernetes_nodepool.go | 5 ++++- website/docs/r/kubernetes_cluster.html.markdown | 4 ++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_nodepool.go b/azurerm/internal/services/containers/kubernetes_nodepool.go index 9696e30fb3e0..4bc76daa7a3e 100644 --- a/azurerm/internal/services/containers/kubernetes_nodepool.go +++ b/azurerm/internal/services/containers/kubernetes_nodepool.go @@ -104,8 +104,11 @@ func SchemaDefaultNodePool() *schema.Schema { "node_taints": { Type: schema.TypeList, + ForceNew: true, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, }, "tags": tags.Schema(), diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index 588ee8b0a205..e9f44075ebe5 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -242,9 +242,9 @@ A `default_node_pool` block supports the following: * `max_pods` - (Optional) The maximum number of pods that can run on each agent. Changing this forces a new resource to be created. -* `node_labels` - (Optional) A map of Kubernetes labels which should be applied to nodes in the Default Node Pool. +* `node_labels` - (Optional) A map of Kubernetes labels which should be applied to nodes in the Default Node Pool. Changing this forces a new resource to be created. -* `node_taints` - (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g `key=value:NoSchedule`). +* `node_taints` - (Optional) A list of Kubernetes taints which should be applied to nodes in the agent pool (e.g `key=value:NoSchedule`). Changing this forces a new resource to be created. * `os_disk_size_gb` - (Optional) The size of the OS Disk which should be used for each agent in the Node Pool. Changing this forces a new resource to be created. 
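To make the ForceNew change above concrete: with these two patches applied, labels and taints on the default node pool become create-time settings. The sketch below is illustrative only (resource names, location and VM sizes are assumptions, not taken from the patch); changing `node_labels` or `node_taints` after the first apply now plans a replacement instead of an in-place update.

```hcl
# Illustrative sketch only: default_node_pool labels/taints are set at creation;
# editing them later forces the resource to be recreated after this change.
resource "azurerm_kubernetes_cluster" "example" {
  name                = "example-aks"
  location            = "West Europe"
  resource_group_name = "example-resources"
  dns_prefix          = "exampleaks"

  default_node_pool {
    name       = "default"
    node_count = 1
    vm_size    = "Standard_DS2_v2"

    node_labels = {
      "workload" = "general"
    }
    node_taints = [
      "dedicated=general:NoSchedule"
    ]
  }

  identity {
    type = "SystemAssigned"
  }
}
```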
From 086f9cc851c426d32ffeeb86bf6a014b54d1a90f Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 3 Jun 2020 13:21:50 +0200 Subject: [PATCH 31/55] r/kubernetes_cluster: test for azure policy v1 to v2 is combined --- .../containers/tests/kubernetes_cluster_addons_resource_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go index 3e3203ca05cb..f31c1818aea0 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go @@ -12,8 +12,6 @@ var kubernetesAddOnTests = map[string]func(t *testing.T){ "addonProfileAciConnectorLinux": testAccAzureRMKubernetesCluster_addonProfileAciConnectorLinux, "addonProfileAciConnectorLinuxDisabled": testAccAzureRMKubernetesCluster_addonProfileAciConnectorLinuxDisabled, "addonProfileAzurePolicy": testAccAzureRMKubernetesCluster_addonProfileAzurePolicy, - "addonProfileAzurePolicyV1ToV2": testAccAzureRMKubernetesCluster_addonProfileAzurePolicyV1ToV2, - "addonProfileAzurePolicyV2": testAccAzureRMKubernetesCluster_addonProfileAzurePolicyV2, "addonProfileKubeDashboard": testAccAzureRMKubernetesCluster_addonProfileKubeDashboard, "addonProfileOMS": testAccAzureRMKubernetesCluster_addonProfileOMS, "addonProfileOMSToggle": testAccAzureRMKubernetesCluster_addonProfileOMSToggle, From a214236738ada3abc11ac7cdbd9af4c543678db5 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 3 Jun 2020 16:11:12 +0200 Subject: [PATCH 32/55] r/kubernetes_cluster: updating the field name to match the schema --- .../internal/services/containers/kubernetes_cluster_resource.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_resource.go index 6f445bf9222a..907a04bb0c2b 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_resource.go @@ -1103,7 +1103,7 @@ func resourceArmKubernetesClusterRead(d *schema.ResourceData, meta interface{}) d.Set("dns_prefix", props.DNSPrefix) d.Set("fqdn", props.Fqdn) d.Set("private_fqdn", props.PrivateFQDN) - d.Set("disk_encryption_set", props.DiskEncryptionSetID) + d.Set("disk_encryption_set_id", props.DiskEncryptionSetID) d.Set("kubernetes_version", props.KubernetesVersion) d.Set("node_resource_group", props.NodeResourceGroup) d.Set("enable_pod_security_policy", props.EnablePodSecurityPolicy) From b7d9f14f8331b693b7869de53b06e41bb9ce4eab Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 4 Jun 2020 08:08:57 +0200 Subject: [PATCH 33/55] r/kubernetes_cluster: fixing the test TestAccAzureRMKubernetesCluster_addonProfileAzurePolicy --- ...kubernetes_cluster_addons_resource_test.go | 34 ++++++++++++++++--- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go index f31c1818aea0..53296d2f9e3e 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go @@ -89,7 +89,8 @@ func 
testAccAzureRMKubernetesCluster_addonProfileAzurePolicy(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data, "v1"), + // Enable with V1 + Config: testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data, true, "v1"), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(data.ResourceName), resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.#", "1"), @@ -99,7 +100,19 @@ func testAccAzureRMKubernetesCluster_addonProfileAzurePolicy(t *testing.T) { }, data.ImportStep(), { - Config: testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data, "v2"), + // Disable it + Config: testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data, false, "v1"), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(data.ResourceName), + resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.#", "1"), + resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.0.enabled", "false"), + resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.0.version", "v1"), + ), + }, + data.ImportStep(), + { + // Enable with V2 + Config: testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data, true, "v2"), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(data.ResourceName), resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.#", "1"), @@ -108,6 +121,17 @@ func testAccAzureRMKubernetesCluster_addonProfileAzurePolicy(t *testing.T) { ), }, data.ImportStep(), + { + // Disable with V2 + Config: testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data, false, "v2"), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(data.ResourceName), + resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.#", "1"), + resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.0.enabled", "false"), + resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.0.version", "v2"), + ), + }, + data.ImportStep(), }, }) } @@ -393,7 +417,7 @@ resource "azurerm_kubernetes_cluster" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger) } -func testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data acceptance.TestData, version string) string { +func testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data acceptance.TestData, enabled bool, version string) string { return fmt.Sprintf(` provider "azurerm" { features {} @@ -426,7 +450,7 @@ resource "azurerm_kubernetes_cluster" "test" { addon_profile { azure_policy { - enabled = true + enabled = %t version = "%s" } } @@ -435,7 +459,7 @@ resource "azurerm_kubernetes_cluster" "test" { type = "SystemAssigned" } } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, version) +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, enabled, version) } func testAccAzureRMKubernetesCluster_addonProfileKubeDashboardConfig(data acceptance.TestData) string { From 58771090fbaf544ed9f6136a65d33f02fd5609d0 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 4 Jun 2020 09:19:13 +0200 Subject: [PATCH 34/55] 
r/kubernetes_cluster: updating the versions being used for testing --- .../containers/tests/kubernetes_cluster_resource_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go index a4477229c8ba..a03b6ee3b2c0 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go @@ -12,8 +12,8 @@ import ( "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" ) -var olderKubernetesVersion = "1.15.10" -var currentKubernetesVersion = "1.16.7" +var olderKubernetesVersion = "1.16.9" +var currentKubernetesVersion = "1.17.5" func TestAccAzureRMKubernetes_all(t *testing.T) { // we can conditionally run tests tests individually, or combined From 4955cf5f68ebc29f2a49c8ebc9706b672e931f96 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 4 Jun 2020 09:21:47 +0200 Subject: [PATCH 35/55] r/kubernetes_cluster: fixing the test TestAccAzureRMKubernetesCluster_diskEncryption --- .../containers/tests/kubernetes_cluster_other_resource_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_other_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_other_resource_test.go index 0233ff1fd9ab..c5e71afbab72 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_other_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_other_resource_test.go @@ -465,7 +465,7 @@ func testAccAzureRMKubernetesCluster_diskEncryption(t *testing.T) { Config: testAccAzureRMKubernetesCluster_diskEncryptionConfig(data), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(data.ResourceName), - resource.TestCheckResourceAttrSet(data.ResourceName, "disk_encryption_set"), + resource.TestCheckResourceAttrSet(data.ResourceName, "disk_encryption_set_id"), ), }, data.ImportStep( From d00392d461c435711c472f4447ba9ee030806307 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 4 Jun 2020 09:35:26 +0200 Subject: [PATCH 36/55] r/kubernetes_cluster: always setting the `idle_timeout_in_minutes` field --- .../services/containers/kubernetes_cluster_resource.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_resource.go index 907a04bb0c2b..19693bad83c4 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_resource.go @@ -1441,16 +1441,14 @@ func expandLoadBalancerProfile(d []interface{}, loadBalancerType string, ipCount config := d[0].(map[string]interface{}) - profile := &containerservice.ManagedClusterLoadBalancerProfile{} + profile := &containerservice.ManagedClusterLoadBalancerProfile{ + IdleTimeoutInMinutes: utils.Int32(int32(config["idle_timeout_in_minutes"].(int))), + } if port, ok := config["outbound_ports_allocated"].(int); ok { profile.AllocatedOutboundPorts = utils.Int32(int32(port)) } - if idleTimeout, ok := config["idle_timeout_in_minutes"].(int); ok { - profile.IdleTimeoutInMinutes = utils.Int32(int32(idleTimeout)) - } - noChangesForLoadBalancerIps := !ipCountChanges && !ipPrefixesChanges && !outboundIpChanges 
allowToSetIpCount := ipCountChanges || noChangesForLoadBalancerIps allowToSetIpPrefixes := ipPrefixesChanges || noChangesForLoadBalancerIps From 82dedbb1211386eed7975f852885554e7d1b54f5 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 4 Jun 2020 10:18:47 +0200 Subject: [PATCH 37/55] d/kubernetes_cluster: fixing a test compilation issue --- .../containers/tests/kubernetes_cluster_data_source_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_data_source_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_data_source_test.go index 6dfb2b04dba2..6e8e3258e03e 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_data_source_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_data_source_test.go @@ -834,7 +834,7 @@ data "azurerm_kubernetes_cluster" "test" { } func testAccDataSourceAzureRMKubernetesCluster_addOnProfileAzurePolicyConfig(data acceptance.TestData) string { - r := testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data, "v2") + r := testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data, true, "v2") return fmt.Sprintf(` %s From 56824c47e5c8841f183c9a43216df24787e6bd42 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 4 Jun 2020 12:06:04 +0200 Subject: [PATCH 38/55] r/kubernetes_cluster: only searching for system node pools --- azurerm/internal/services/containers/kubernetes_nodepool.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/azurerm/internal/services/containers/kubernetes_nodepool.go b/azurerm/internal/services/containers/kubernetes_nodepool.go index 4bc76daa7a3e..be25a750d42f 100644 --- a/azurerm/internal/services/containers/kubernetes_nodepool.go +++ b/azurerm/internal/services/containers/kubernetes_nodepool.go @@ -398,6 +398,9 @@ func findDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolProfil if v.Name == nil { continue } + if v.Mode != containerservice.System { + continue + } defaultNodePoolName = *v.Name agentPool = &v From bc6226f152a6f46fd27e809d8e8da77f2c4daf61 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Thu, 4 Jun 2020 12:06:15 +0200 Subject: [PATCH 39/55] r/kubernetes_cluster: fixing a test assertion --- .../containers/tests/kubernetes_cluster_addons_resource_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go index 53296d2f9e3e..05034e438c6e 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go @@ -128,7 +128,6 @@ func testAccAzureRMKubernetesCluster_addonProfileAzurePolicy(t *testing.T) { testCheckAzureRMKubernetesClusterExists(data.ResourceName), resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.#", "1"), resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.0.enabled", "false"), - resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.0.version", "v2"), ), }, data.ImportStep(), From cd1282f0f3d955fd0502504c4c847104f18fd473 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 5 Jun 2020 11:07:44 +0200 Subject: [PATCH 40/55] r/kubernetes_cluster: fixing the broken assertion --- .../containers/tests/kubernetes_cluster_addons_resource_test.go | 1 - 1 file 
changed, 1 deletion(-) diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go index 05034e438c6e..28fa3636260a 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go @@ -106,7 +106,6 @@ func testAccAzureRMKubernetesCluster_addonProfileAzurePolicy(t *testing.T) { testCheckAzureRMKubernetesClusterExists(data.ResourceName), resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.#", "1"), resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.0.enabled", "false"), - resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.0.version", "v1"), ), }, data.ImportStep(), From 3eabe45bb70fe77ab2510c18a0206a61b2492eca Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 5 Jun 2020 11:07:49 +0200 Subject: [PATCH 41/55] New Data Source: `azurerm_kubernetes_cluster_node_pool` Fixes #5134 --- ...ubernetes_cluster_node_pool_data_source.go | 262 ++++++++++++++++++ .../services/containers/registration.go | 7 +- ...etes_cluster_node_pool_data_source_test.go | 52 ++++ .../tests/kubernetes_cluster_resource_test.go | 17 +- website/azurerm.erb | 4 + ...kubernetes_cluster_node_pool.html.markdown | 85 ++++++ ...kubernetes_cluster_node_pool.html.markdown | 2 +- 7 files changed, 417 insertions(+), 12 deletions(-) create mode 100644 azurerm/internal/services/containers/kubernetes_cluster_node_pool_data_source.go create mode 100644 azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_data_source_test.go create mode 100644 website/docs/d/kubernetes_cluster_node_pool.html.markdown diff --git a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_data_source.go b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_data_source.go new file mode 100644 index 000000000000..26dd83b3b6c9 --- /dev/null +++ b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_data_source.go @@ -0,0 +1,262 @@ +package containers + +import ( + "fmt" + "time" + + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func dataSourceKubernetesClusterNodePool() *schema.Resource { + return &schema.Resource{ + Read: dataSourceKubernetesClusterNodePoolRead, + + Timeouts: &schema.ResourceTimeout{ + Read: schema.DefaultTimeout(5 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validate.KubernetesAgentPoolName, + }, + + "kubernetes_cluster_name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + + "resource_group_name": azure.SchemaResourceGroupNameForDataSource(), 
+ + // Computed + "availability_zones": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "enable_auto_scaling": { + Type: schema.TypeBool, + Computed: true, + }, + + "enable_node_public_ip": { + Type: schema.TypeBool, + Computed: true, + }, + + "eviction_policy": { + Type: schema.TypeString, + Computed: true, + }, + + "max_count": { + Type: schema.TypeInt, + Computed: true, + }, + + "max_pods": { + Type: schema.TypeInt, + Computed: true, + }, + + "mode": { + Type: schema.TypeString, + Computed: true, + }, + + "min_count": { + Type: schema.TypeInt, + Computed: true, + }, + + "node_count": { + Type: schema.TypeInt, + Computed: true, + }, + + "node_labels": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "node_taints": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "orchestrator_version": { + Type: schema.TypeString, + Computed: true, + }, + + "os_disk_size_gb": { + Type: schema.TypeInt, + Computed: true, + }, + + "os_type": { + Type: schema.TypeString, + Computed: true, + }, + + "priority": { + Type: schema.TypeString, + Computed: true, + }, + + "spot_max_price": { + Type: schema.TypeFloat, + Computed: true, + }, + + "tags": tags.SchemaDataSource(), + + "vm_size": { + Type: schema.TypeString, + Computed: true, + }, + + "vnet_subnet_id": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceKubernetesClusterNodePoolRead(d *schema.ResourceData, meta interface{}) error { + clustersClient := meta.(*clients.Client).Containers.KubernetesClustersClient + poolsClient := meta.(*clients.Client).Containers.AgentPoolsClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + nodePoolName := d.Get("name").(string) + clusterName := d.Get("kubernetes_cluster_name").(string) + resourceGroup := d.Get("resource_group_name").(string) + + // if the parent cluster doesn't exist then the node pool won't + cluster, err := clustersClient.Get(ctx, resourceGroup, clusterName) + if err != nil { + if utils.ResponseWasNotFound(cluster.Response) { + return fmt.Errorf("Kubernetes Cluster %q was not found in Resource Group %q", clusterName, resourceGroup) + } + + return fmt.Errorf("retrieving Managed Kubernetes Cluster %q (Resource Group %q): %+v", clusterName, resourceGroup, err) + } + + resp, err := poolsClient.Get(ctx, resourceGroup, clusterName, nodePoolName) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + return fmt.Errorf("Node Pool %q was not found in Managed Kubernetes Cluster %q / Resource Group %q", nodePoolName, clusterName, resourceGroup) + } + + return fmt.Errorf("retrieving Node Pool %q (Managed Kubernetes Cluster %q / Resource Group %q): %+v", nodePoolName, clusterName, resourceGroup, err) + } + + if resp.ID == nil || *resp.ID == "" { + return fmt.Errorf("retrieving Node Pool %q (Managed Kubernetes Cluster %q / Resource Group %q): `id` was nil", nodePoolName, clusterName, resourceGroup) + } + + d.SetId(*resp.ID) + d.Set("name", nodePoolName) + d.Set("kubernetes_cluster_name", clusterName) + d.Set("resource_group_name", resourceGroup) + + if props := resp.ManagedClusterAgentPoolProfileProperties; props != nil { + if err := d.Set("availability_zones", utils.FlattenStringSlice(props.AvailabilityZones)); err != nil { + return fmt.Errorf("setting `availability_zones`: %+v", err) + } + + d.Set("enable_auto_scaling", props.EnableAutoScaling) + 
d.Set("enable_node_public_ip", props.EnableNodePublicIP) + + evictionPolicy := "" + if props.ScaleSetEvictionPolicy != "" { + evictionPolicy = string(props.ScaleSetEvictionPolicy) + } + d.Set("eviction_policy", evictionPolicy) + + maxCount := 0 + if props.MaxCount != nil { + maxCount = int(*props.MaxCount) + } + d.Set("max_count", maxCount) + + maxPods := 0 + if props.MaxPods != nil { + maxPods = int(*props.MaxPods) + } + d.Set("max_pods", maxPods) + + minCount := 0 + if props.MinCount != nil { + minCount = int(*props.MinCount) + } + d.Set("min_count", minCount) + + mode := string(containerservice.User) + if props.Mode != "" { + mode = string(props.Mode) + } + d.Set("mode", mode) + + count := 0 + if props.Count != nil { + count = int(*props.Count) + } + d.Set("node_count", count) + + if err := d.Set("node_labels", props.NodeLabels); err != nil { + return fmt.Errorf("setting `node_labels`: %+v", err) + } + + if err := d.Set("node_taints", utils.FlattenStringSlice(props.NodeTaints)); err != nil { + return fmt.Errorf("setting `node_taints`: %+v", err) + } + + d.Set("orchestrator_version", props.OrchestratorVersion) + osDiskSizeGB := 0 + if props.OsDiskSizeGB != nil { + osDiskSizeGB = int(*props.OsDiskSizeGB) + } + d.Set("os_disk_size_gb", osDiskSizeGB) + d.Set("os_type", string(props.OsType)) + + // not returned from the API if not Spot + priority := string(containerservice.Regular) + if props.ScaleSetPriority != "" { + priority = string(props.ScaleSetPriority) + } + d.Set("priority", priority) + + spotMaxPrice := -1.0 + if props.SpotMaxPrice != nil { + spotMaxPrice = *props.SpotMaxPrice + } + d.Set("spot_max_price", spotMaxPrice) + + d.Set("vnet_subnet_id", props.VnetSubnetID) + d.Set("vm_size", string(props.VMSize)) + } + + return tags.FlattenAndSet(d, resp.Tags) +} diff --git a/azurerm/internal/services/containers/registration.go b/azurerm/internal/services/containers/registration.go index edb8f6fe732b..7bb1cba4efc4 100644 --- a/azurerm/internal/services/containers/registration.go +++ b/azurerm/internal/services/containers/registration.go @@ -21,9 +21,10 @@ func (r Registration) WebsiteCategories() []string { // SupportedDataSources returns the supported Data Sources supported by this Service func (r Registration) SupportedDataSources() map[string]*schema.Resource { return map[string]*schema.Resource{ - "azurerm_kubernetes_service_versions": dataSourceArmKubernetesServiceVersions(), - "azurerm_container_registry": dataSourceArmContainerRegistry(), - "azurerm_kubernetes_cluster": dataSourceArmKubernetesCluster(), + "azurerm_kubernetes_service_versions": dataSourceArmKubernetesServiceVersions(), + "azurerm_container_registry": dataSourceArmContainerRegistry(), + "azurerm_kubernetes_cluster": dataSourceArmKubernetesCluster(), + "azurerm_kubernetes_cluster_node_pool": dataSourceKubernetesClusterNodePool(), } } diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_data_source_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_data_source_test.go new file mode 100644 index 000000000000..46669edabbe7 --- /dev/null +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_data_source_test.go @@ -0,0 +1,52 @@ +package tests + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance" +) + +var kubernetesNodePoolDataSourceTests = map[string]func(t *testing.T){ + "basic": 
testAccAzureRMKubernetesClusterNodePoolDataSource_basic, +} + +func TestAccAzureRMKubernetesClusterNodePoolDataSource_basic(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccAzureRMKubernetesClusterNodePoolDataSource_basic(t) +} + +func testAccAzureRMKubernetesClusterNodePoolDataSource_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "data.azurerm_kubernetes_cluster_node_pool", "test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesClusterNodePoolDataSource_basicConfig(data), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesNodePoolExists(data.ResourceName), + resource.TestCheckResourceAttr(data.ResourceName, "node_count", "1"), + resource.TestCheckResourceAttr(data.ResourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(data.ResourceName, "tags.environment", "Staging"), + ), + }, + }, + }) +} + +func testAccAzureRMKubernetesClusterNodePoolDataSource_basicConfig(data acceptance.TestData) string { + template := testAccAzureRMKubernetesClusterNodePool_manualScaleConfig(data) + return fmt.Sprintf(` +%s + +data "azurerm_kubernetes_cluster_node_pool" "test" { + name = azurerm_kubernetes_cluster_node_pool.test.name + kubernetes_cluster_name = azurerm_kubernetes_cluster.test.name + resource_group_name = azurerm_kubernetes_cluster.test.resource_group_name +} +`, template) +} diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go index a03b6ee3b2c0..63b29bafd2ce 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go @@ -22,14 +22,15 @@ func TestAccAzureRMKubernetes_all(t *testing.T) { // NOTE: this is a combined test rather than separate split out tests to // ease the load on the kubernetes api testCases := map[string]map[string]func(t *testing.T){ - "auth": kubernetesAuthTests, - "clusterAddOn": kubernetesAddOnTests, - "datasource": kubernetesDataSourceTests, - "network": kubernetesNetworkAuthTests, - "nodePool": kubernetesNodePoolTests, - "other": kubernetesOtherTests, - "scaling": kubernetesScalingTests, - "upgrade": kubernetesUpgradeTests, + "auth": kubernetesAuthTests, + "clusterAddOn": kubernetesAddOnTests, + "datasource": kubernetesDataSourceTests, + "network": kubernetesNetworkAuthTests, + "nodePool": kubernetesNodePoolTests, + "nodePoolDataSource": kubernetesNodePoolDataSourceTests, + "other": kubernetesOtherTests, + "scaling": kubernetesScalingTests, + "upgrade": kubernetesUpgradeTests, } for group, m := range testCases { diff --git a/website/azurerm.erb b/website/azurerm.erb index f343f042cd93..f0d8a2f57642 100644 --- a/website/azurerm.erb +++ b/website/azurerm.erb @@ -297,6 +297,10 @@ azurerm_kubernetes_cluster +
  • + azurerm_kubernetes_cluster_node_pool +
  • +
  • azurerm_kubernetes_service_versions
  • diff --git a/website/docs/d/kubernetes_cluster_node_pool.html.markdown b/website/docs/d/kubernetes_cluster_node_pool.html.markdown new file mode 100644 index 000000000000..c487d4e976b0 --- /dev/null +++ b/website/docs/d/kubernetes_cluster_node_pool.html.markdown @@ -0,0 +1,85 @@ +--- +subcategory: "Container" +layout: "azurerm" +page_title: "Azure Resource Manager: Data Source: azurerm_kubernetes_cluster_node_pool" +description: |- + Gets information about an existing Kubernetes Cluster Node Pool. +--- + +# Data Source: azurerm_kubernetes_cluster_node_pool + +Use this data source to access information about an existing Kubernetes Cluster Node Pool. + +## Example Usage + +```hcl +data "azurerm_kubernetes_cluster_node_pool" "example" { + name = "existing" + kubernetes_cluster_name = "existing-cluster" + resource_group_name = "existing-resource-group" +} + +output "id" { + value = data.azurerm_kubernetes_cluster_node_pool.example.id +} +``` + +## Arguments Reference + +The following arguments are supported: + +* `kubernetes_cluster_name` - (Required) The Name of the Kubernetes Cluster where this Node Pool is located. + +* `name` - (Required) The name of this Kubernetes Cluster Node Pool. + +* `resource_group_name` - (Required) The name of the Resource Group where the Kubernetes Cluster exists. + +## Attributes Reference + +In addition to the Arguments listed above - the following Attributes are exported: + +* `id` - The ID of the Kubernetes Cluster Node Pool. + +* `availability_zones` - A list of Availability Zones in which the Nodes in this Node Pool exists. + +* `enable_auto_scaling` - Does this Node Pool have Auto-Scaling enabled? + +* `enable_node_public_ip` - Do nodes in this Node Pool have a Public IP Address? + +* `eviction_policy` - The eviction policy used for Virtual Machines in the Virtual Machine Scale Set, when `priority` is set to `Spot`. + +* `max_count` - The maximum number of Nodes allowed when auto-scaling is enabled. + +* `max_pods` - The maximum number of Pods allowed on each Node in this Node Pool. + +* `min_count` - The minimum number of Nodes allowed when auto-scaling is enabled. + +* `mode` - The Mode for this Node Pool, specifying how these Nodes should be used (for either System or User resources). + +* `node_count` - The current number of Nodes in the Node Pool. + +* `node_labels` - A map of Kubernetes Labels applied to each Node in this Node Pool. + +* `node_taints` - A map of Kubernetes Taints applied to each Node in this Node Pool. + +* `orchestrator_version` - The version of Kubernetes configured on each Node in this Node Pool. + +* `os_disk_size_gb` - The size of the OS Disk on each Node in this Node Pool. + +* `os_type` - The operating system used on each Node in this Node Pool. + +* `priority` - The priority of the Virtual Machines in the Virtual Machine Scale Set backing this Node Pool. + +* `spot_max_price` - The maximum price being paid for Virtual Machines in this Scale Set. `-1` means the current on-demand price for a Virtual Machine. + +* `tags` - A mapping of tags assigned to the Kubernetes Cluster Node Pool. + +* `vm_size` - The size of the Virtual Machines used in the Virtual Machine Scale Set backing this Node Pool. + +* `vnet_subnet_id` - The ID of the Subnet in which this Node Pool exists. + +## Timeouts + +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: + +* `read` - (Defaults to 5 minutes) Used when retrieving the Kubernetes Cluster Node Pool. 
\ No newline at end of file diff --git a/website/docs/r/kubernetes_cluster_node_pool.html.markdown b/website/docs/r/kubernetes_cluster_node_pool.html.markdown index 9fc2d98e2d66..556ab2832a98 100644 --- a/website/docs/r/kubernetes_cluster_node_pool.html.markdown +++ b/website/docs/r/kubernetes_cluster_node_pool.html.markdown @@ -103,7 +103,7 @@ The following arguments are supported: ~> **Note:** Spot Node Pools are in Preview and must be opted-into - [more information on how to opt into this Preview can be found in the AKS Documentation](https://docs.microsoft.com/en-us/azure/aks/spot-node-pool). -* `spot_max_price` - (Optional) The maximum price you're willing to pay in USD per Virtual Machine. Valid values are -1 (the current Virtual Machine price) or a positive value with up to five decimal places. Changing this forces a new resource to be created. +* `spot_max_price` - (Optional) The maximum price you're willing to pay in USD per Virtual Machine. Valid values are `-1` (the current on-demand price for a Virtual Machine) or a positive value with up to five decimal places. Changing this forces a new resource to be created. ~> **Note:** This field can only be configured when `priority` is set to `Spot`. From 1dadbca73586ae01d8bf2a95d5be058b7bdaf457 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 5 Jun 2020 15:53:02 +0200 Subject: [PATCH 42/55] cleanup --- .../resource_arm_container_service.go | 683 ------------------ .../docs/r/kubernetes_cluster.html.markdown | 3 +- 2 files changed, 1 insertion(+), 685 deletions(-) delete mode 100644 azurerm/internal/services/containers/resource_arm_container_service.go diff --git a/azurerm/internal/services/containers/resource_arm_container_service.go b/azurerm/internal/services/containers/resource_arm_container_service.go deleted file mode 100644 index 2ddbb4eea738..000000000000 --- a/azurerm/internal/services/containers/resource_arm_container_service.go +++ /dev/null @@ -1,683 +0,0 @@ -package containers - -import ( - "bytes" - "context" - "fmt" - "log" - "time" - - "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice" - "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" - "github.com/hashicorp/terraform-plugin-sdk/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/suppress" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" - "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" -) - -func resourceArmContainerService() *schema.Resource { - return &schema.Resource{ - Create: resourceArmContainerServiceCreateUpdate, - Read: resourceArmContainerServiceRead, - Update: resourceArmContainerServiceCreateUpdate, - Delete: resourceArmContainerServiceDelete, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(90 * time.Minute), - Read: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(90 * time.Minute), - Delete: schema.DefaultTimeout(90 * time.Minute), - }, - - DeprecationMessage: `Azure 
Container Service (ACS) has been deprecated in favour of Azure (Managed) Kubernetes Service (AKS). - -Azure will remove support for ACS Clusters on January 31, 2020. In preparation for this, the AzureRM Provider will remove support for the 'azurerm_container_service' resource in the next major version of the AzureRM Provider, which is targeted for Early 2019. - -If you're using ACS with Kubernetes, we'd recommend migrating to AKS / the 'azurerm_kubernetes_cluster' resource. - -More information can be found here: https://azure.microsoft.com/en-us/updates/azure-container-service-will-retire-on-january-31-2020/ -`, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "location": azure.SchemaLocation(), - - "resource_group_name": azure.SchemaResourceGroupName(), - - "orchestration_platform": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: ValidateArmContainerServiceOrchestrationPlatform, - }, - - //lintignore:S018 - "master_profile": { - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "count": { - Type: schema.TypeInt, - Optional: true, - Default: 1, - ValidateFunc: ValidateArmContainerServiceMasterProfileCount, - }, - - "dns_prefix": { - Type: schema.TypeString, - Required: true, - }, - - "fqdn": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - Set: resourceAzureRMContainerServiceMasterProfileHash, - }, - - //lintignore:S018 - "linux_profile": { - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "admin_username": { - Type: schema.TypeString, - Required: true, - }, - "ssh_key": { - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key_data": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - }, - }, - Set: resourceAzureRMContainerServiceLinuxProfilesHash, - }, - - //lintignore:S018 - "agent_pool_profile": { - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "count": { - Type: schema.TypeInt, - Optional: true, - Default: 1, - ValidateFunc: ValidateArmContainerServiceAgentPoolProfileCount, - }, - - "dns_prefix": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "fqdn": { - Type: schema.TypeString, - Computed: true, - }, - - "vm_size": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: suppress.CaseDifference, - }, - }, - }, - Set: resourceAzureRMContainerServiceAgentPoolProfilesHash, - }, - - //lintignore:S018 - "service_principal": { - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "client_id": { - Type: schema.TypeString, - Required: true, - }, - - "client_secret": { - Type: schema.TypeString, - Required: true, - Sensitive: true, - }, - }, - }, - Set: resourceAzureRMContainerServiceServicePrincipalProfileHash, - }, - - //lintignore:S018 - "diagnostics_profile": { - Type: schema.TypeSet, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Required: true, - }, - - "storage_uri": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - Set: resourceAzureRMContainerServiceDiagnosticProfilesHash, - }, - 
- "tags": tags.Schema(), - }, - } -} - -func resourceArmContainerServiceCreateUpdate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clients.Client) - ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) - defer cancel() - containerServiceClient := client.Containers.ServicesClient - - log.Printf("[INFO] preparing arguments for Azure ARM Container Service creation.") - - resGroup := d.Get("resource_group_name").(string) - name := d.Get("name").(string) - - if features.ShouldResourcesBeImported() && d.IsNewResource() { - existing, err := containerServiceClient.Get(ctx, resGroup, name) - if err != nil { - if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("Error checking for presence of existing Container Service %q (Resource Group %q): %s", name, resGroup, err) - } - } - - if existing.ID != nil && *existing.ID != "" { - return tf.ImportAsExistsError("azurerm_container_service", *existing.ID) - } - } - - location := azure.NormalizeLocation(d.Get("location").(string)) - - orchestrationPlatform := d.Get("orchestration_platform").(string) - - masterProfile := expandAzureRmContainerServiceMasterProfile(d) - linuxProfile := expandAzureRmContainerServiceLinuxProfile(d) - agentProfiles := expandAzureRmContainerServiceAgentProfiles(d) - diagnosticsProfile := expandAzureRmContainerServiceDiagnostics(d) - - t := d.Get("tags").(map[string]interface{}) - - parameters := containerservice.ContainerService{ - Name: &name, - Location: &location, - Properties: &containerservice.Properties{ - MasterProfile: &masterProfile, - LinuxProfile: &linuxProfile, - OrchestratorProfile: &containerservice.OrchestratorProfileType{ - OrchestratorType: containerservice.OrchestratorTypes(orchestrationPlatform), - }, - AgentPoolProfiles: &agentProfiles, - DiagnosticsProfile: &diagnosticsProfile, - }, - Tags: tags.Expand(t), - } - - servicePrincipalProfile := expandAzureRmContainerServiceServicePrincipal(d) - if servicePrincipalProfile != nil { - parameters.ServicePrincipalProfile = servicePrincipalProfile - } - - if _, err := containerServiceClient.CreateOrUpdate(ctx, resGroup, name, parameters); err != nil { - return err - } - - read, err := containerServiceClient.Get(ctx, resGroup, name) - if err != nil { - return err - } - - if read.ID == nil { - return fmt.Errorf("Cannot read Container Service %s (resource group %s) ID", name, resGroup) - } - - log.Printf("[DEBUG] Waiting for Container Service (%s) to become available", d.Get("name")) - stateConf := &resource.StateChangeConf{ - Pending: []string{"Updating", "Creating"}, - Target: []string{"Succeeded"}, - Refresh: containerServiceStateRefreshFunc(ctx, client, resGroup, name), - MinTimeout: 15 * time.Second, - } - - if features.SupportsCustomTimeouts() { - if d.IsNewResource() { - stateConf.Timeout = d.Timeout(schema.TimeoutCreate) - } else { - stateConf.Timeout = d.Timeout(schema.TimeoutUpdate) - } - } else { - stateConf.Timeout = 30 * time.Minute - } - - if _, err := stateConf.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for Container Service (%s) to become available: %s", d.Get("name"), err) - } - - d.SetId(*read.ID) - - return resourceArmContainerServiceRead(d, meta) -} - -func resourceArmContainerServiceRead(d *schema.ResourceData, meta interface{}) error { - containerServiceClient := meta.(*clients.Client).Containers.ServicesClient - ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) - defer cancel() - - id, err := azure.ParseAzureResourceID(d.Id()) - if err != nil 
{ - return err - } - resGroup := id.ResourceGroup - name := id.Path["containerServices"] - - resp, err := containerServiceClient.Get(ctx, resGroup, name) - if err != nil { - if utils.ResponseWasNotFound(resp.Response) { - d.SetId("") - return nil - } - - return fmt.Errorf("Error making Read request on Azure Container Service %s: %s", name, err) - } - - d.Set("name", resp.Name) - d.Set("resource_group_name", resGroup) - if location := resp.Location; location != nil { - d.Set("location", azure.NormalizeLocation(*location)) - } - - d.Set("orchestration_platform", string(resp.Properties.OrchestratorProfile.OrchestratorType)) - - masterProfiles := flattenAzureRmContainerServiceMasterProfile(*resp.Properties.MasterProfile) - d.Set("master_profile", &masterProfiles) - - linuxProfile := flattenAzureRmContainerServiceLinuxProfile(*resp.Properties.LinuxProfile) - d.Set("linux_profile", &linuxProfile) - - agentPoolProfiles := flattenAzureRmContainerServiceAgentPoolProfiles(resp.Properties.AgentPoolProfiles) - d.Set("agent_pool_profile", &agentPoolProfiles) - - servicePrincipal := flattenAzureRmContainerServiceServicePrincipalProfile(resp.Properties.ServicePrincipalProfile) - if servicePrincipal != nil { - d.Set("service_principal", servicePrincipal) - } - - diagnosticProfile := flattenAzureRmContainerServiceDiagnosticsProfile(resp.Properties.DiagnosticsProfile) - if diagnosticProfile != nil { - d.Set("diagnostics_profile", diagnosticProfile) - } - - return tags.FlattenAndSet(d, resp.Tags) -} - -func resourceArmContainerServiceDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*clients.Client) - ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) - defer cancel() - containerServiceClient := client.Containers.ServicesClient - - id, err := azure.ParseAzureResourceID(d.Id()) - if err != nil { - return err - } - resGroup := id.ResourceGroup - name := id.Path["containerServices"] - - future, err := containerServiceClient.Delete(ctx, resGroup, name) - - if err != nil { - return fmt.Errorf("Error issuing Azure ARM delete request of Container Service '%s': %s", name, err) - } - - return future.WaitForCompletionRef(ctx, containerServiceClient.Client) -} - -func flattenAzureRmContainerServiceMasterProfile(profile containerservice.MasterProfile) *schema.Set { - masterProfiles := &schema.Set{ - F: resourceAzureRMContainerServiceMasterProfileHash, - } - - masterProfile := make(map[string]interface{}, 3) - - masterProfile["count"] = int(*profile.Count) - masterProfile["dns_prefix"] = *profile.DNSPrefix - masterProfile["fqdn"] = *profile.Fqdn - - masterProfiles.Add(masterProfile) - - return masterProfiles -} - -func flattenAzureRmContainerServiceLinuxProfile(profile containerservice.LinuxProfile) *schema.Set { - profiles := &schema.Set{ - F: resourceAzureRMContainerServiceLinuxProfilesHash, - } - - values := map[string]interface{}{} - - sshKeys := &schema.Set{ - F: resourceAzureRMContainerServiceLinuxProfilesSSHKeysHash, - } - for _, ssh := range *profile.SSH.PublicKeys { - keys := map[string]interface{}{} - keys["key_data"] = *ssh.KeyData - sshKeys.Add(keys) - } - - values["admin_username"] = *profile.AdminUsername - values["ssh_key"] = sshKeys - profiles.Add(values) - - return profiles -} - -func flattenAzureRmContainerServiceAgentPoolProfiles(profiles *[]containerservice.AgentPoolProfile) *schema.Set { - agentPoolProfiles := &schema.Set{ - F: resourceAzureRMContainerServiceAgentPoolProfilesHash, - } - - for _, profile := range *profiles { - agentPoolProfile := 
map[string]interface{}{} - agentPoolProfile["count"] = int(*profile.Count) - agentPoolProfile["dns_prefix"] = *profile.DNSPrefix - agentPoolProfile["fqdn"] = *profile.Fqdn - agentPoolProfile["name"] = *profile.Name - agentPoolProfile["vm_size"] = string(profile.VMSize) - agentPoolProfiles.Add(agentPoolProfile) - } - - return agentPoolProfiles -} - -func flattenAzureRmContainerServiceServicePrincipalProfile(profile *containerservice.ServicePrincipalProfile) *schema.Set { - if profile == nil { - return nil - } - - servicePrincipalProfiles := &schema.Set{ - F: resourceAzureRMContainerServiceServicePrincipalProfileHash, - } - - values := map[string]interface{}{} - - values["client_id"] = *profile.ClientID - if profile.Secret != nil { - values["client_secret"] = *profile.Secret - } - - servicePrincipalProfiles.Add(values) - - return servicePrincipalProfiles -} - -func flattenAzureRmContainerServiceDiagnosticsProfile(profile *containerservice.DiagnosticsProfile) *schema.Set { - diagnosticProfiles := &schema.Set{ - F: resourceAzureRMContainerServiceDiagnosticProfilesHash, - } - - values := map[string]interface{}{} - - values["enabled"] = *profile.VMDiagnostics.Enabled - if profile.VMDiagnostics.StorageURI != nil { - values["storage_uri"] = *profile.VMDiagnostics.StorageURI - } - diagnosticProfiles.Add(values) - - return diagnosticProfiles -} - -func expandAzureRmContainerServiceDiagnostics(d *schema.ResourceData) containerservice.DiagnosticsProfile { - configs := d.Get("diagnostics_profile").(*schema.Set).List() - - data := configs[0].(map[string]interface{}) - - enabled := data["enabled"].(bool) - - return containerservice.DiagnosticsProfile{ - VMDiagnostics: &containerservice.VMDiagnostics{ - Enabled: &enabled, - }, - } -} - -func expandAzureRmContainerServiceLinuxProfile(d *schema.ResourceData) containerservice.LinuxProfile { - profiles := d.Get("linux_profile").(*schema.Set).List() - config := profiles[0].(map[string]interface{}) - - adminUsername := config["admin_username"].(string) - - linuxKeys := config["ssh_key"].(*schema.Set).List() - sshPublicKeys := make([]containerservice.SSHPublicKey, 0) - - key := linuxKeys[0].(map[string]interface{}) - keyData := key["key_data"].(string) - - sshPublicKey := containerservice.SSHPublicKey{ - KeyData: &keyData, - } - - sshPublicKeys = append(sshPublicKeys, sshPublicKey) - - profile := containerservice.LinuxProfile{ - AdminUsername: &adminUsername, - SSH: &containerservice.SSHConfiguration{ - PublicKeys: &sshPublicKeys, - }, - } - - return profile -} - -func expandAzureRmContainerServiceMasterProfile(d *schema.ResourceData) containerservice.MasterProfile { - configs := d.Get("master_profile").(*schema.Set).List() - config := configs[0].(map[string]interface{}) - - count := int32(config["count"].(int)) - dnsPrefix := config["dns_prefix"].(string) - - profile := containerservice.MasterProfile{ - Count: &count, - DNSPrefix: &dnsPrefix, - } - - return profile -} - -func expandAzureRmContainerServiceServicePrincipal(d *schema.ResourceData) *containerservice.ServicePrincipalProfile { - value, exists := d.GetOk("service_principal") - if !exists { - return nil - } - - configs := value.(*schema.Set).List() - - config := configs[0].(map[string]interface{}) - - clientId := config["client_id"].(string) - clientSecret := config["client_secret"].(string) - - principal := containerservice.ServicePrincipalProfile{ - ClientID: &clientId, - Secret: &clientSecret, - } - - return &principal -} - -func expandAzureRmContainerServiceAgentProfiles(d *schema.ResourceData) 
[]containerservice.AgentPoolProfile { - configs := d.Get("agent_pool_profile").(*schema.Set).List() - config := configs[0].(map[string]interface{}) - profiles := make([]containerservice.AgentPoolProfile, 0, len(configs)) - - name := config["name"].(string) - count := int32(config["count"].(int)) - dnsPrefix := config["dns_prefix"].(string) - vmSize := config["vm_size"].(string) - - profile := containerservice.AgentPoolProfile{ - Name: &name, - Count: &count, - VMSize: containerservice.VMSizeTypes(vmSize), - DNSPrefix: &dnsPrefix, - } - - profiles = append(profiles, profile) - - return profiles -} - -func containerServiceStateRefreshFunc(ctx context.Context, client *clients.Client, resourceGroupName string, containerServiceName string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - res, err := client.Containers.ServicesClient.Get(ctx, resourceGroupName, containerServiceName) - if err != nil { - return nil, "", fmt.Errorf("Error issuing read request in containerServiceStateRefreshFunc to Azure ARM for Container Service '%s' (RG: '%s'): %s", containerServiceName, resourceGroupName, err) - } - - return res, *res.Properties.ProvisioningState, nil - } -} - -func resourceAzureRMContainerServiceMasterProfileHash(v interface{}) int { - var buf bytes.Buffer - - if m, ok := v.(map[string]interface{}); ok { - buf.WriteString(fmt.Sprintf("%d-", m["count"].(int))) - buf.WriteString(fmt.Sprintf("%s-", m["dns_prefix"].(string))) - } - - return hashcode.String(buf.String()) -} - -func resourceAzureRMContainerServiceLinuxProfilesHash(v interface{}) int { - var buf bytes.Buffer - - if m, ok := v.(map[string]interface{}); ok { - buf.WriteString(fmt.Sprintf("%s-", m["admin_username"].(string))) - } - - return hashcode.String(buf.String()) -} - -func resourceAzureRMContainerServiceLinuxProfilesSSHKeysHash(v interface{}) int { - var buf bytes.Buffer - - if m, ok := v.(map[string]interface{}); ok { - buf.WriteString(fmt.Sprintf("%s-", m["key_data"].(string))) - } - - return hashcode.String(buf.String()) -} - -func resourceAzureRMContainerServiceAgentPoolProfilesHash(v interface{}) int { - var buf bytes.Buffer - - if m, ok := v.(map[string]interface{}); ok { - buf.WriteString(fmt.Sprintf("%d-", m["count"].(int))) - buf.WriteString(fmt.Sprintf("%s-", m["dns_prefix"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["vm_size"].(string))) - } - - return hashcode.String(buf.String()) -} - -func resourceAzureRMContainerServiceServicePrincipalProfileHash(v interface{}) int { - var buf bytes.Buffer - - if m, ok := v.(map[string]interface{}); ok { - buf.WriteString(fmt.Sprintf("%s-", m["client_id"].(string))) - } - - return hashcode.String(buf.String()) -} - -func resourceAzureRMContainerServiceDiagnosticProfilesHash(v interface{}) int { - var buf bytes.Buffer - - if m, ok := v.(map[string]interface{}); ok { - buf.WriteString(fmt.Sprintf("%t", m["enabled"].(bool))) - } - - return hashcode.String(buf.String()) -} - -func ValidateArmContainerServiceOrchestrationPlatform(v interface{}, _ string) (warnings []string, errors []error) { - value := v.(string) - capacities := map[string]bool{ - "DCOS": true, - "Kubernetes": true, - "Swarm": true, - } - - if !capacities[value] { - errors = append(errors, fmt.Errorf("Container Service: Orchestration Platgorm can only be DCOS / Kubernetes / Swarm")) - } - return warnings, errors -} - -func ValidateArmContainerServiceMasterProfileCount(v interface{}, _ string) (warnings []string, errors []error) { 
- value := v.(int) - capacities := map[int]bool{ - 1: true, - 3: true, - 5: true, - } - - if !capacities[value] { - errors = append(errors, fmt.Errorf("The number of master nodes must be 1, 3 or 5.")) - } - return warnings, errors -} - -func ValidateArmContainerServiceAgentPoolProfileCount(v interface{}, _ string) (warnings []string, errors []error) { - value := v.(int) - if value > 100 || 0 >= value { - errors = append(errors, fmt.Errorf("The Count for an Agent Pool Profile can only be between 1 and 100.")) - } - return warnings, errors -} diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index e9f44075ebe5..ea62d4116617 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -153,7 +153,7 @@ A `addon_profile` block supports the following: * `azure_policy` - (Optional) A `azure_policy` block as defined below. For more details please visit [Understand Azure Policy for Azure Kubernetes Service](https://docs.microsoft.com/en-ie/azure/governance/policy/concepts/rego-for-aks) --> **NOTE:** At this time Azure Policy is not supported in Azure US Government. +-> **NOTE:** At this time Azure Policy is not supported in Azure China or Azure US Government. ~> **Note:** Azure Policy is in Public Preview - more information and details on how to opt into the Preview [can be found in this article](https://docs.microsoft.com/en-gb/azure/governance/policy/concepts/policy-for-kubernetes). @@ -165,7 +165,6 @@ A `addon_profile` block supports the following: * `oms_agent` - (Optional) A `oms_agent` block as defined below. For more details, please visit [How to onboard Azure Monitor for containers](https://docs.microsoft.com/en-us/azure/monitoring/monitoring-container-insights-onboard). 
- --- A `auto_scaler_profile` block supports the following: From 6dee2b2f30b08708abca957969b8201550abf832 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 5 Jun 2020 17:40:45 +0200 Subject: [PATCH 43/55] terrafmt --- .../tests/kubernetes_cluster_node_pool_resource_test.go | 4 ++-- website/docs/d/kubernetes_cluster_node_pool.html.markdown | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go index fea508a5a2ef..5f18e085c228 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go @@ -1356,8 +1356,8 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" { priority = "Spot" eviction_policy = "Delete" spot_max_price = 0.5 # high, but this is a maximum (we pay less) so ensures this won't fail - node_labels = { - "kubernetes.azure.com/scalesetpriority" = "spot" + node_labels = { + "kubernetes.azure.com/scalesetpriority" = "spot" } node_taints = [ "kubernetes.azure.com/scalesetpriority=spot:NoSchedule" diff --git a/website/docs/d/kubernetes_cluster_node_pool.html.markdown b/website/docs/d/kubernetes_cluster_node_pool.html.markdown index c487d4e976b0..03872c539d8f 100644 --- a/website/docs/d/kubernetes_cluster_node_pool.html.markdown +++ b/website/docs/d/kubernetes_cluster_node_pool.html.markdown @@ -82,4 +82,4 @@ In addition to the Arguments listed above - the following Attributes are exporte The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions: -* `read` - (Defaults to 5 minutes) Used when retrieving the Kubernetes Cluster Node Pool. \ No newline at end of file +* `read` - (Defaults to 5 minutes) Used when retrieving the Kubernetes Cluster Node Pool. 
From 53bbabd2fbd2ee3a433423bebaa2c13bc0045d92 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 5 Jun 2020 17:51:03 +0200 Subject: [PATCH 44/55] r/kubernetes_cluster: keeping the linter happy --- .../kubernetes_cluster_node_pool_resource.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go index 484b7ec14b1e..aaa8df9f5289 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go @@ -287,10 +287,14 @@ func resourceArmKubernetesClusterNodePoolCreate(d *schema.ResourceData, meta int if priority == string(containerservice.Spot) { profile.ScaleSetEvictionPolicy = containerservice.ScaleSetEvictionPolicy(evictionPolicy) profile.SpotMaxPrice = utils.Float(spotMaxPrice) - } else if evictionPolicy != "" { - return fmt.Errorf("`eviction_policy` can only be set when `priority` is set to `Spot`") - } else if spotMaxPrice != -1.0 { - return fmt.Errorf("`spot_max_price` can only be set when `priority` is set to `Spot`") + } else { + if evictionPolicy != "" { + return fmt.Errorf("`eviction_policy` can only be set when `priority` is set to `Spot`") + } + + if spotMaxPrice != -1.0 { + return fmt.Errorf("`spot_max_price` can only be set when `priority` is set to `Spot`") + } } orchestratorVersion := d.Get("orchestrator_version").(string) From b9e698f4f1669da2b4f05b14af368a0d0167a5dc Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 5 Jun 2020 17:54:09 +0200 Subject: [PATCH 45/55] r/kubernetes_cluster: fixing pr comments --- .../services/containers/kubernetes_cluster_data_source.go | 2 +- .../containers/kubernetes_cluster_node_pool_resource.go | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_data_source.go b/azurerm/internal/services/containers/kubernetes_cluster_data_source.go index 0eff5231c2cd..c987fa8b9ebe 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_data_source.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_data_source.go @@ -910,7 +910,7 @@ func flattenKubernetesClusterDataSourceAgentPoolProfiles(input *[]containerservi agentPoolProfile["os_type"] = string(profile.OsType) } - if *profile.OrchestratorVersion != "" { + if profile.OrchestratorVersion != nil && *profile.OrchestratorVersion != "" { agentPoolProfile["orchestrator_version"] = *profile.OrchestratorVersion } diff --git a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go index aaa8df9f5289..65bdc2f0b198 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_resource.go @@ -119,8 +119,7 @@ func resourceArmKubernetesClusterNodePool() *schema.Resource { "mode": { Type: schema.TypeString, Optional: true, - //ForceNew: true, - Default: string(containerservice.User), + Default: string(containerservice.User), ValidateFunc: validation.StringInSlice([]string{ string(containerservice.System), string(containerservice.User), From 2d5198b76da756e31398ac02f73db132c427dd25 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Fri, 5 Jun 2020 18:14:22 +0200 Subject: [PATCH 46/55] r/kubernetes_cluster: fixing the key vault
test --- .../kubernetes_cluster_other_resource_test.go | 21 +++++++++++-------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_other_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_other_resource_test.go index c5e71afbab72..62a163ee9697 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_other_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_other_resource_test.go @@ -1043,17 +1043,18 @@ resource "azurerm_key_vault" "test" { enabled_for_disk_encryption = true soft_delete_enabled = true purge_protection_enabled = true +} - access_policy { - tenant_id = data.azurerm_client_config.current.tenant_id - object_id = data.azurerm_client_config.current.object_id +resource "azurerm_key_vault_access_policy" "acctest" { + key_vault_id = azurerm_key_vault.test.id + tenant_id = data.azurerm_client_config.current.tenant_id + object_id = data.azurerm_client_config.current.object_id - key_permissions = [ - "get", - "create", - "delete" - ] - } + key_permissions = [ + "get", + "create", + "delete" + ] } resource "azurerm_key_vault_key" "test" { @@ -1070,6 +1071,8 @@ resource "azurerm_key_vault_key" "test" { "verify", "wrapKey", ] + + depends_on = [azurerm_key_vault_access_policy.acctest] } resource "azurerm_disk_encryption_set" "test" { From 8c22d5823757beba194e02c5e084f96f08976085 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Mon, 8 Jun 2020 12:49:52 +0200 Subject: [PATCH 47/55] r/kubernetes_cluster: supporting delta-updates for the `load_balancer_profile` block within the `network_profile` block --- .../containers/kubernetes_cluster_resource.go | 79 +++++++++++++------ 1 file changed, 55 insertions(+), 24 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_resource.go index 19693bad83c4..64d5278c8225 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_resource.go @@ -690,7 +690,7 @@ func resourceArmKubernetesClusterCreate(d *schema.ResourceData, meta interface{} } networkProfileRaw := d.Get("network_profile").([]interface{}) - networkProfile, err := expandKubernetesClusterNetworkProfile(networkProfileRaw, false, false, false) + networkProfile, err := expandKubernetesClusterNetworkProfile(networkProfileRaw) if err != nil { return err } @@ -953,19 +953,52 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} if d.HasChange("network_profile") { updateCluster = true - networkProfileRaw := d.Get("network_profile").([]interface{}) - // Check for changes to make sure only the configured load_balacer_profile variable is set - changeManagedIps := d.HasChange("network_profile.0.load_balancer_profile.0.managed_outbound_ip_count") - changeIpPrefixes := d.HasChange("network_profile.0.load_balancer_profile.0.outbound_ip_prefix_ids") - changeOutboundIps := d.HasChange("network_profile.0.load_balancer_profile.0.outbound_ip_address_ids") + networkProfile := *existing.ManagedClusterProperties.NetworkProfile + if networkProfile.LoadBalancerProfile == nil { + // an existing LB Profile must be present, since it's Optional & Computed + return fmt.Errorf("`loadBalancerProfile` was nil in Azure") + } - networkProfile, err := expandKubernetesClusterNetworkProfile(networkProfileRaw, changeManagedIps, changeIpPrefixes, changeOutboundIps) - if 
err != nil { - return err + loadBalancerProfile := *networkProfile.LoadBalancerProfile + + if key := "network_profile.0.load_balancer_profile.0.effective_outbound_ips"; d.HasChange(key) { + effectiveOutboundIPs := idsToResourceReferences(d.Get(key)) + loadBalancerProfile.EffectiveOutboundIPs = effectiveOutboundIPs + } + + if key := "network_profile.0.load_balancer_profile.0.idle_timeout_in_minutes"; d.HasChange(key) { + idleTimeoutInMinutes := d.Get(key).(int) + loadBalancerProfile.IdleTimeoutInMinutes = utils.Int32(int32(idleTimeoutInMinutes)) } - existing.ManagedClusterProperties.NetworkProfile = networkProfile + if key := "network_profile.0.load_balancer_profile.0.managed_outbound_ip_count"; d.HasChange(key) { + managedOutboundIPCount := d.Get(key).(int) + loadBalancerProfile.ManagedOutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileManagedOutboundIPs{ + Count: utils.Int32(int32(managedOutboundIPCount)), + } + } + + if key := "network_profile.0.load_balancer_profile.0.outbound_ip_address_ids"; d.HasChange(key) { + publicIPAddressIDs := idsToResourceReferences(d.Get(key)) + loadBalancerProfile.OutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileOutboundIPs{ + PublicIPs: publicIPAddressIDs, + } + } + + if key := "network_profile.0.load_balancer_profile.0.outbound_ip_prefix_ids"; d.HasChange(key) { + outboundIPPrefixIDs := idsToResourceReferences(d.Get(key)) + loadBalancerProfile.OutboundIPPrefixes = &containerservice.ManagedClusterLoadBalancerProfileOutboundIPPrefixes{ + PublicIPPrefixes: outboundIPPrefixIDs, + } + } + + if key := "network_profile.0.load_balancer_profile.0.outbound_ports_allocated"; d.HasChange(key) { + allocatedOutboundPorts := d.Get(key).(int) + loadBalancerProfile.AllocatedOutboundPorts = utils.Int32(int32(allocatedOutboundPorts)) + } + + existing.ManagedClusterProperties.NetworkProfile.LoadBalancerProfile = &loadBalancerProfile } if d.HasChange("tags") { @@ -1382,7 +1415,7 @@ func flattenKubernetesClusterWindowsProfile(profile *containerservice.ManagedClu } } -func expandKubernetesClusterNetworkProfile(input []interface{}, changeManagedIps bool, changeIpPrefixes bool, changeOutboundIps bool) (*containerservice.NetworkProfileType, error) { +func expandKubernetesClusterNetworkProfile(input []interface{}) (*containerservice.NetworkProfileType, error) { if len(input) == 0 { return nil, nil } @@ -1391,10 +1424,11 @@ func expandKubernetesClusterNetworkProfile(input []interface{}, changeManagedIps networkPlugin := config["network_plugin"].(string) networkPolicy := config["network_policy"].(string) + loadBalancerProfileRaw := config["load_balancer_profile"].([]interface{}) loadBalancerSku := config["load_balancer_sku"].(string) outboundType := config["outbound_type"].(string) - loadBalancerProfile, err := expandLoadBalancerProfile(config["load_balancer_profile"].([]interface{}), loadBalancerSku, changeManagedIps, changeIpPrefixes, changeOutboundIps) + loadBalancerProfile, err := expandLoadBalancerProfile(loadBalancerProfileRaw, loadBalancerSku) if err != nil { return nil, err } @@ -1430,8 +1464,8 @@ func expandKubernetesClusterNetworkProfile(input []interface{}, changeManagedIps return &networkProfile, nil } -func expandLoadBalancerProfile(d []interface{}, loadBalancerType string, ipCountChanges bool, ipPrefixesChanges bool, outboundIpChanges bool) (*containerservice.ManagedClusterLoadBalancerProfile, error) { - if len(d) == 0 || d[0] == nil { +func expandLoadBalancerProfile(d []interface{}, loadBalancerType string) 
(*containerservice.ManagedClusterLoadBalancerProfile, error) { + if d == nil || len(d) == 0 { return nil, nil } @@ -1441,30 +1475,27 @@ func expandLoadBalancerProfile(d []interface{}, loadBalancerType string, ipCount config := d[0].(map[string]interface{}) - profile := &containerservice.ManagedClusterLoadBalancerProfile{ - IdleTimeoutInMinutes: utils.Int32(int32(config["idle_timeout_in_minutes"].(int))), + profile := &containerservice.ManagedClusterLoadBalancerProfile{} + + if mins, ok := config["idle_timeout_in_minutes"]; ok && mins.(int) != 0 { + profile.IdleTimeoutInMinutes = utils.Int32(int32(mins.(int))) } if port, ok := config["outbound_ports_allocated"].(int); ok { profile.AllocatedOutboundPorts = utils.Int32(int32(port)) } - noChangesForLoadBalancerIps := !ipCountChanges && !ipPrefixesChanges && !outboundIpChanges - allowToSetIpCount := ipCountChanges || noChangesForLoadBalancerIps - allowToSetIpPrefixes := ipPrefixesChanges || noChangesForLoadBalancerIps - allowToSetOutboundIp := outboundIpChanges || noChangesForLoadBalancerIps - - if ipCount := config["managed_outbound_ip_count"]; ipCount != nil && allowToSetIpCount { + if ipCount := config["managed_outbound_ip_count"]; ipCount != nil { if c := int32(ipCount.(int)); c > 0 { profile.ManagedOutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileManagedOutboundIPs{Count: &c} } } - if ipPrefixes := idsToResourceReferences(config["outbound_ip_prefix_ids"]); ipPrefixes != nil && allowToSetIpPrefixes { + if ipPrefixes := idsToResourceReferences(config["outbound_ip_prefix_ids"]); ipPrefixes != nil { profile.OutboundIPPrefixes = &containerservice.ManagedClusterLoadBalancerProfileOutboundIPPrefixes{PublicIPPrefixes: ipPrefixes} } - if outIps := idsToResourceReferences(config["outbound_ip_address_ids"]); outIps != nil && allowToSetOutboundIp { + if outIps := idsToResourceReferences(config["outbound_ip_address_ids"]); outIps != nil { profile.OutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileOutboundIPs{PublicIPs: outIps} } From fe08fcef59c1c0503f0b056042f476811c4766eb Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Mon, 8 Jun 2020 15:47:18 +0200 Subject: [PATCH 48/55] linting --- .../internal/services/containers/kubernetes_cluster_resource.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_resource.go index 64d5278c8225..60a4f8d6bb4a 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_resource.go @@ -1465,7 +1465,7 @@ func expandKubernetesClusterNetworkProfile(input []interface{}) (*containerservi } func expandLoadBalancerProfile(d []interface{}, loadBalancerType string) (*containerservice.ManagedClusterLoadBalancerProfile, error) { - if d == nil || len(d) == 0 { + if len(d) == 0 || d[0] != nil { return nil, nil } From ed3c9ce2effd1f2715ec740c16235eded70277df Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Mon, 8 Jun 2020 16:48:39 +0200 Subject: [PATCH 49/55] r/kubernetes_cluster: conditionally setting the `managed_outbound_ip_count`, `outbound_ip_address_ids` and `outbound_ip_prefix_ids` fields" --- .../services/containers/kubernetes_cluster_resource.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_resource.go index 
60a4f8d6bb4a..0dbae2e90e4c 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_resource.go @@ -977,6 +977,9 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} loadBalancerProfile.ManagedOutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileManagedOutboundIPs{ Count: utils.Int32(int32(managedOutboundIPCount)), } + } else { + // fixes: Load balancer profile must specify one of ManagedOutboundIPs, OutboundIPPrefixes and OutboundIPs.{ + loadBalancerProfile.ManagedOutboundIPs = nil } if key := "network_profile.0.load_balancer_profile.0.outbound_ip_address_ids"; d.HasChange(key) { @@ -984,6 +987,9 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} loadBalancerProfile.OutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileOutboundIPs{ PublicIPs: publicIPAddressIDs, } + } else { + // fixes: Load balancer profile must specify one of ManagedOutboundIPs, OutboundIPPrefixes and OutboundIPs.{ + loadBalancerProfile.OutboundIPs = nil } if key := "network_profile.0.load_balancer_profile.0.outbound_ip_prefix_ids"; d.HasChange(key) { @@ -991,6 +997,9 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} loadBalancerProfile.OutboundIPPrefixes = &containerservice.ManagedClusterLoadBalancerProfileOutboundIPPrefixes{ PublicIPPrefixes: outboundIPPrefixIDs, } + } else { + // fixes: Load balancer profile must specify one of ManagedOutboundIPs, OutboundIPPrefixes and OutboundIPs.{ + loadBalancerProfile.OutboundIPPrefixes = nil } if key := "network_profile.0.load_balancer_profile.0.outbound_ports_allocated"; d.HasChange(key) { From e3b88ce6189e30334158bfd178a0cbbcce0a4d0a Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Mon, 8 Jun 2020 16:52:56 +0200 Subject: [PATCH 50/55] r/kubernetes_cluster_node_pool: fixing the test `TestAccAzureRMKubernetesClusterNodePoolDataSource_basic` --- .../tests/kubernetes_cluster_node_pool_data_source_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_data_source_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_data_source_test.go index 46669edabbe7..e08a6cb0e5bb 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_data_source_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_data_source_test.go @@ -28,7 +28,6 @@ func testAccAzureRMKubernetesClusterNodePoolDataSource_basic(t *testing.T) { { Config: testAccAzureRMKubernetesClusterNodePoolDataSource_basicConfig(data), Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesNodePoolExists(data.ResourceName), resource.TestCheckResourceAttr(data.ResourceName, "node_count", "1"), resource.TestCheckResourceAttr(data.ResourceName, "tags.%", "1"), resource.TestCheckResourceAttr(data.ResourceName, "tags.environment", "Staging"), From 6cd668188cdabac40a9fb662d5a23951dbd5fcb9 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Mon, 8 Jun 2020 18:28:34 +0200 Subject: [PATCH 51/55] (d|r)/kubernetes_cluster: `version` is no longer applicable for `azure_policy` In a change from last week - the API now defaults to v2. Since v1 is deprecated, this commit removes support for it. 
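In practice the `azure_policy` add-on block is now just an on/off switch; anyone still setting `version = "v1"` or `version = "v2"` simply drops the argument and the provider sends v2 on their behalf. As a rough illustration (this config is not part of the patch series; the resource names, DNS prefix and VM size below are placeholder assumptions):

```hcl
# Illustrative sketch only: names and sizes are placeholders, not values from these patches.
resource "azurerm_kubernetes_cluster" "example" {
  name                = "example-aks"
  location            = azurerm_resource_group.example.location
  resource_group_name = azurerm_resource_group.example.name
  dns_prefix          = "exampleaks"

  default_node_pool {
    name       = "default"
    node_count = 1
    vm_size    = "Standard_DS2_v2"
  }

  identity {
    type = "SystemAssigned"
  }

  addon_profile {
    azure_policy {
      enabled = true
      # no `version` argument any more; the provider pins the add-on to v2 internally
    }
  }
}
```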
--- .../services/containers/kubernetes_addons.go | 22 +--------------- ...kubernetes_cluster_addons_resource_test.go | 25 +++++-------------- .../kubernetes_cluster_data_source_test.go | 2 +- .../docs/d/kubernetes_cluster.html.markdown | 2 -- .../docs/r/kubernetes_cluster.html.markdown | 4 --- 5 files changed, 8 insertions(+), 47 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_addons.go b/azurerm/internal/services/containers/kubernetes_addons.go index 22d3a6311d4f..31b7a5b76fd1 100644 --- a/azurerm/internal/services/containers/kubernetes_addons.go +++ b/azurerm/internal/services/containers/kubernetes_addons.go @@ -77,18 +77,6 @@ func schemaKubernetesAddOnProfiles() *schema.Schema { Type: schema.TypeBool, Required: true, }, - - "version": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - // NOTE: v1 will be removed "after Spring 2020" - https://github.com/terraform-providers/terraform-provider-azurerm/issues/6994 - // The current cluster uses policy add-on V1. Please migrate to V2 by disabling the add-on, and re-enabling it. - // Azure Policy will not support V1 after spring 2020. V2 is a breaking change, so please read carefully on the instruction and impact at: https://aka.ms/akspolicydoc - "v1", - "v2", - }, false), - }, }, }, }, @@ -246,12 +234,11 @@ func expandKubernetesAddOnProfiles(input []interface{}, env azure.Environment) ( if len(azurePolicy) > 0 && azurePolicy[0] != nil { value := azurePolicy[0].(map[string]interface{}) enabled := value["enabled"].(bool) - version := value["version"].(string) addonProfiles[azurePolicyKey] = &containerservice.ManagedClusterAddonProfile{ Enabled: utils.Bool(enabled), Config: map[string]*string{ - "version": utils.String(version), + "version": utils.String("v2"), }, } } @@ -326,15 +313,8 @@ func flattenKubernetesAddOnProfiles(profile map[string]*containerservice.Managed enabled = *enabledVal } - // not returned for v1 - version := "v1" - if versionVal, ok := azurePolicy.Config["version"]; ok && *versionVal != "" { - version = *versionVal - } - azurePolicies = append(azurePolicies, map[string]interface{}{ "enabled": enabled, - "version": version, }) } diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go index 28fa3636260a..5638a10a7e7f 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_addons_resource_test.go @@ -89,19 +89,18 @@ func testAccAzureRMKubernetesCluster_addonProfileAzurePolicy(t *testing.T) { CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, Steps: []resource.TestStep{ { - // Enable with V1 - Config: testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data, true, "v1"), + // Enable with V2 + Config: testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data, true), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(data.ResourceName), resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.#", "1"), resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.0.enabled", "true"), - resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.0.version", "v1"), ), }, data.ImportStep(), { // Disable it - Config: testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data, false, "v1"), + Config: 
testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data, false), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(data.ResourceName), resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.#", "1"), @@ -111,22 +110,11 @@ func testAccAzureRMKubernetesCluster_addonProfileAzurePolicy(t *testing.T) { data.ImportStep(), { // Enable with V2 - Config: testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data, true, "v2"), + Config: testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data, true), Check: resource.ComposeTestCheckFunc( testCheckAzureRMKubernetesClusterExists(data.ResourceName), resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.#", "1"), resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.0.enabled", "true"), - resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.0.version", "v2"), - ), - }, - data.ImportStep(), - { - // Disable with V2 - Config: testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data, false, "v2"), - Check: resource.ComposeTestCheckFunc( - testCheckAzureRMKubernetesClusterExists(data.ResourceName), - resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.#", "1"), - resource.TestCheckResourceAttr(data.ResourceName, "addon_profile.0.azure_policy.0.enabled", "false"), ), }, data.ImportStep(), @@ -415,7 +403,7 @@ resource "azurerm_kubernetes_cluster" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger) } -func testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data acceptance.TestData, enabled bool, version string) string { +func testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data acceptance.TestData, enabled bool) string { return fmt.Sprintf(` provider "azurerm" { features {} @@ -449,7 +437,6 @@ resource "azurerm_kubernetes_cluster" "test" { addon_profile { azure_policy { enabled = %t - version = "%s" } } @@ -457,7 +444,7 @@ resource "azurerm_kubernetes_cluster" "test" { type = "SystemAssigned" } } -`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, enabled, version) +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, enabled) } func testAccAzureRMKubernetesCluster_addonProfileKubeDashboardConfig(data acceptance.TestData) string { diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_data_source_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_data_source_test.go index 6e8e3258e03e..ad24f17ffcac 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_data_source_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_data_source_test.go @@ -834,7 +834,7 @@ data "azurerm_kubernetes_cluster" "test" { } func testAccDataSourceAzureRMKubernetesCluster_addOnProfileAzurePolicyConfig(data acceptance.TestData) string { - r := testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data, true, "v2") + r := testAccAzureRMKubernetesCluster_addonProfileAzurePolicyConfig(data, true) return fmt.Sprintf(` %s diff --git a/website/docs/d/kubernetes_cluster.html.markdown b/website/docs/d/kubernetes_cluster.html.markdown index b629c9a68757..9775e86aa05d 100644 --- a/website/docs/d/kubernetes_cluster.html.markdown +++ 
b/website/docs/d/kubernetes_cluster.html.markdown @@ -246,8 +246,6 @@ A `azure_policy` block supports the following: * `enabled` - Is Azure Policy for Kubernetes enabled? -* `version`- The version of Azure Policy being used. - --- A `role_based_access_control` block exports the following: diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index ea62d4116617..2caf9b7f2e23 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -215,10 +215,6 @@ A `azure_policy` block supports the following: * `enabled` - (Required) Is the Azure Policy for Kubernetes Add On enabled? -* `version` - (Required) The Version of Azure Policy which should be installed on this Kubernetes Cluster. Possible values are `v1` and `v2`. - -!> **Note:** Support for `v1` is in Private Preview will be removed by AKS "after Spring 2020". - --- A `default_node_pool` block supports the following: From d4e1e0a78b6796df80ca5102ab9abcebea2d91c5 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Tue, 9 Jun 2020 09:44:35 +0200 Subject: [PATCH 52/55] r/kubernetes_cluster: conditionally nil-ing the load balancer profile --- .../containers/kubernetes_cluster_resource.go | 54 ++++++++++--------- 1 file changed, 30 insertions(+), 24 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_resource.go index 0dbae2e90e4c..24cda4333997 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_resource.go @@ -977,9 +977,10 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} loadBalancerProfile.ManagedOutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileManagedOutboundIPs{ Count: utils.Int32(int32(managedOutboundIPCount)), } - } else { - // fixes: Load balancer profile must specify one of ManagedOutboundIPs, OutboundIPPrefixes and OutboundIPs.{ - loadBalancerProfile.ManagedOutboundIPs = nil + + // fixes: Load balancer profile must specify one of ManagedOutboundIPs, OutboundIPPrefixes and OutboundIPs. + loadBalancerProfile.OutboundIPs = nil + loadBalancerProfile.OutboundIPPrefixes = nil } if key := "network_profile.0.load_balancer_profile.0.outbound_ip_address_ids"; d.HasChange(key) { @@ -987,9 +988,10 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} loadBalancerProfile.OutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileOutboundIPs{ PublicIPs: publicIPAddressIDs, } - } else { - // fixes: Load balancer profile must specify one of ManagedOutboundIPs, OutboundIPPrefixes and OutboundIPs.{ - loadBalancerProfile.OutboundIPs = nil + + // fixes: Load balancer profile must specify one of ManagedOutboundIPs, OutboundIPPrefixes and OutboundIPs. 
+ loadBalancerProfile.ManagedOutboundIPs = nil + loadBalancerProfile.OutboundIPPrefixes = nil } if key := "network_profile.0.load_balancer_profile.0.outbound_ip_prefix_ids"; d.HasChange(key) { @@ -997,9 +999,10 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{} loadBalancerProfile.OutboundIPPrefixes = &containerservice.ManagedClusterLoadBalancerProfileOutboundIPPrefixes{ PublicIPPrefixes: outboundIPPrefixIDs, } - } else { - // fixes: Load balancer profile must specify one of ManagedOutboundIPs, OutboundIPPrefixes and OutboundIPs.{ - loadBalancerProfile.OutboundIPPrefixes = nil + + // fixes: Load balancer profile must specify one of ManagedOutboundIPs, OutboundIPPrefixes and OutboundIPs. + loadBalancerProfile.ManagedOutboundIPs = nil + loadBalancerProfile.OutboundIPs = nil } if key := "network_profile.0.load_balancer_profile.0.outbound_ports_allocated"; d.HasChange(key) { @@ -1437,17 +1440,24 @@ func expandKubernetesClusterNetworkProfile(input []interface{}) (*containerservi loadBalancerSku := config["load_balancer_sku"].(string) outboundType := config["outbound_type"].(string) - loadBalancerProfile, err := expandLoadBalancerProfile(loadBalancerProfileRaw, loadBalancerSku) - if err != nil { - return nil, err + networkProfile := containerservice.NetworkProfileType{ + NetworkPlugin: containerservice.NetworkPlugin(networkPlugin), + NetworkPolicy: containerservice.NetworkPolicy(networkPolicy), + LoadBalancerSku: containerservice.LoadBalancerSku(loadBalancerSku), + OutboundType: containerservice.OutboundType(outboundType), } - networkProfile := containerservice.NetworkProfileType{ - NetworkPlugin: containerservice.NetworkPlugin(networkPlugin), - NetworkPolicy: containerservice.NetworkPolicy(networkPolicy), - LoadBalancerSku: containerservice.LoadBalancerSku(loadBalancerSku), - LoadBalancerProfile: loadBalancerProfile, - OutboundType: containerservice.OutboundType(outboundType), + if len(loadBalancerProfileRaw) > 0 { + if !strings.EqualFold(loadBalancerSku, "standard") { + return nil, fmt.Errorf("only load balancer SKU 'Standard' supports load balancer profiles. Provided load balancer type: %s", loadBalancerSku) + } + + loadBalancerProfile, err := expandLoadBalancerProfile(loadBalancerProfileRaw) + if err != nil { + return nil, err + } + + networkProfile.LoadBalancerProfile = loadBalancerProfile } if v, ok := config["dns_service_ip"]; ok && v.(string) != "" { @@ -1473,15 +1483,11 @@ func expandKubernetesClusterNetworkProfile(input []interface{}) (*containerservi return &networkProfile, nil } -func expandLoadBalancerProfile(d []interface{}, loadBalancerType string) (*containerservice.ManagedClusterLoadBalancerProfile, error) { - if len(d) == 0 || d[0] != nil { +func expandLoadBalancerProfile(d []interface{}) (*containerservice.ManagedClusterLoadBalancerProfile, error) { + if d[0] == nil { return nil, nil } - if strings.ToLower(loadBalancerType) != "standard" { - return nil, fmt.Errorf("only load balancer SKU 'Standard' supports load balancer profiles. 
Provided load balancer type: %s", loadBalancerType) - } - config := d[0].(map[string]interface{}) profile := &containerservice.ManagedClusterLoadBalancerProfile{} From 05378e6700eb4ab75a23a0ca83120e1f794815d2 Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Tue, 9 Jun 2020 11:04:15 +0200 Subject: [PATCH 53/55] gosimple --- .../containers/kubernetes_cluster_resource.go | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_resource.go index 24cda4333997..654cee40799b 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_resource.go @@ -1452,12 +1452,7 @@ func expandKubernetesClusterNetworkProfile(input []interface{}) (*containerservi return nil, fmt.Errorf("only load balancer SKU 'Standard' supports load balancer profiles. Provided load balancer type: %s", loadBalancerSku) } - loadBalancerProfile, err := expandLoadBalancerProfile(loadBalancerProfileRaw) - if err != nil { - return nil, err - } - - networkProfile.LoadBalancerProfile = loadBalancerProfile + networkProfile.LoadBalancerProfile = expandLoadBalancerProfile(loadBalancerProfileRaw) } if v, ok := config["dns_service_ip"]; ok && v.(string) != "" { @@ -1483,9 +1478,9 @@ func expandKubernetesClusterNetworkProfile(input []interface{}) (*containerservi return &networkProfile, nil } -func expandLoadBalancerProfile(d []interface{}) (*containerservice.ManagedClusterLoadBalancerProfile, error) { +func expandLoadBalancerProfile(d []interface{}) *containerservice.ManagedClusterLoadBalancerProfile { if d[0] == nil { - return nil, nil + return nil } config := d[0].(map[string]interface{}) @@ -1514,7 +1509,7 @@ func expandLoadBalancerProfile(d []interface{}) (*containerservice.ManagedCluste profile.OutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileOutboundIPs{PublicIPs: outIps} } - return profile, nil + return profile } func idsToResourceReferences(set interface{}) *[]containerservice.ResourceReference { From c4cee4438a6124b227e9ea3a5a3b8f8e8265c4fa Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 10 Jun 2020 15:39:41 +0200 Subject: [PATCH 54/55] d/kubernetes-cluster: removing the `version` field from the `azure_policy` block --- .../containers/kubernetes_cluster_data_source.go | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/azurerm/internal/services/containers/kubernetes_cluster_data_source.go b/azurerm/internal/services/containers/kubernetes_cluster_data_source.go index c987fa8b9ebe..aceaa0a33857 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_data_source.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_data_source.go @@ -114,10 +114,6 @@ func dataSourceArmKubernetesCluster() *schema.Resource { Type: schema.TypeBool, Computed: true, }, - "version": { - Type: schema.TypeString, - Computed: true, - }, }, }, }, @@ -812,14 +808,8 @@ func flattenKubernetesClusterDataSourceAddonProfiles(profile map[string]*contain enabled = *enabledVal } - version := "v1" - if versionVal, ok := azurePolicy.Config["version"]; ok && *versionVal != "" { - version = *versionVal - } - output := map[string]interface{}{ "enabled": enabled, - "version": version, } azurePolicies = append(azurePolicies, output) } From 87d0519c7666847ca146b367e06f7a1447f5870e Mon Sep 17 00:00:00 2001 From: tombuildsstuff Date: Wed, 10 Jun 2020 16:19:37 +0200 Subject: [PATCH 55/55] 
r/kubernetes_cluster_node_pool: making the CheckDestroy test support both the Data Source & Resource --- .../kubernetes_cluster_node_pool_resource_test.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go index 5f18e085c228..cb1554c1859d 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_resource_test.go @@ -738,21 +738,19 @@ func testCheckAzureRMKubernetesClusterNodePoolDestroy(s *terraform.State) error continue } - name := rs.Primary.Attributes["name"] - kubernetesClusterId := rs.Primary.Attributes["kubernetes_cluster_id"] - parsedK8sId, err := parse.KubernetesClusterID(kubernetesClusterId) + parsedK8sId, err := parse.KubernetesNodePoolID(rs.Primary.ID) if err != nil { - return fmt.Errorf("Error parsing kubernetes cluster id: %+v", err) + return fmt.Errorf("Error parsing kubernetes node pool id: %+v", err) } - resp, err := client.Get(ctx, parsedK8sId.ResourceGroup, parsedK8sId.Name, name) + resp, err := client.Get(ctx, parsedK8sId.ResourceGroup, parsedK8sId.ClusterName, parsedK8sId.Name) if err != nil { return nil } if resp.StatusCode != http.StatusNotFound { - return fmt.Errorf("Managed Kubernetes Cluster still exists:\n%#v", resp) + return fmt.Errorf("Kubernetes Cluster Node Pool still exists:\n%#v", resp) } }
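Stepping back from the individual commits, the end state of the `load_balancer_profile` work in patches 47 through 53 is easiest to see from the user side. The sketch below is illustrative rather than a config carried by these patches (names and values are placeholders), and, per the Azure error message quoted in the diffs, only one of `managed_outbound_ip_count`, `outbound_ip_address_ids` or `outbound_ip_prefix_ids` should be supplied at a time:

```hcl
# Illustrative sketch only: names and values are placeholders, not values from these patches.
resource "azurerm_kubernetes_cluster" "example" {
  name                = "example-aks"
  location            = azurerm_resource_group.example.location
  resource_group_name = azurerm_resource_group.example.name
  dns_prefix          = "exampleaks"

  default_node_pool {
    name       = "default"
    node_count = 1
    vm_size    = "Standard_DS2_v2"
  }

  identity {
    type = "SystemAssigned"
  }

  network_profile {
    network_plugin    = "kubenet"
    load_balancer_sku = "standard" # load_balancer_profile is only accepted with the Standard SKU

    load_balancer_profile {
      outbound_ports_allocated  = 8000
      idle_timeout_in_minutes   = 30
      managed_outbound_ip_count = 2

      # Alternatively, bring your own addresses; set only one of the three outbound sources:
      # outbound_ip_address_ids = [azurerm_public_ip.example.id]
      # outbound_ip_prefix_ids  = [azurerm_public_ip_prefix.example.id]
    }
  }
}
```

Because the update path now reads the existing profile back from Azure and patches only the keys that `d.HasChange` reports, switching from `managed_outbound_ip_count` to one of the ID-based sources nils out the other two outbound options rather than sending a profile Azure would reject.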