diff --git a/azurerm/internal/services/containers/kubernetes_cluster_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_resource.go index 8d297911c291..b080d9f2e7a9 100644 --- a/azurerm/internal/services/containers/kubernetes_cluster_resource.go +++ b/azurerm/internal/services/containers/kubernetes_cluster_resource.go @@ -267,6 +267,18 @@ func resourceArmKubernetesCluster() *schema.Resource { Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "outbound_ports_allocated": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + ValidateFunc: validation.IntBetween(0, 64000), + }, + "idle_timeout_in_minutes": { + Type: schema.TypeInt, + Optional: true, + Default: 30, + ValidateFunc: validation.IntBetween(4, 120), + }, "managed_outbound_ip_count": { Type: schema.TypeInt, Optional: true, @@ -1277,9 +1289,15 @@ func expandLoadBalancerProfile(d []interface{}, loadBalancerType string, ipCount config := d[0].(map[string]interface{}) - var managedOutboundIps *containerservice.ManagedClusterLoadBalancerProfileManagedOutboundIPs - var outboundIpPrefixes *containerservice.ManagedClusterLoadBalancerProfileOutboundIPPrefixes - var outboundIps *containerservice.ManagedClusterLoadBalancerProfileOutboundIPs + profile := &containerservice.ManagedClusterLoadBalancerProfile{} + + if port, ok := config["outbound_ports_allocated"].(int); ok { + profile.AllocatedOutboundPorts = utils.Int32(int32(port)) + } + + if idleTimeout, ok := config["idle_timeout_in_minutes"].(int); ok { + profile.IdleTimeoutInMinutes = utils.Int32(int32(idleTimeout)) + } noChangesForLoadBalancerIps := !ipCountChanges && !ipPrefixesChanges && !outboundIpChanges allowToSetIpCount := ipCountChanges || noChangesForLoadBalancerIps @@ -1288,23 +1306,19 @@ func expandLoadBalancerProfile(d []interface{}, loadBalancerType string, ipCount if ipCount := config["managed_outbound_ip_count"]; ipCount != nil && allowToSetIpCount { if c := int32(ipCount.(int)); c > 0 { - managedOutboundIps = &containerservice.ManagedClusterLoadBalancerProfileManagedOutboundIPs{Count: &c} + profile.ManagedOutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileManagedOutboundIPs{Count: &c} } } if ipPrefixes := idsToResourceReferences(config["outbound_ip_prefix_ids"]); ipPrefixes != nil && allowToSetIpPrefixes { - outboundIpPrefixes = &containerservice.ManagedClusterLoadBalancerProfileOutboundIPPrefixes{PublicIPPrefixes: ipPrefixes} + profile.OutboundIPPrefixes = &containerservice.ManagedClusterLoadBalancerProfileOutboundIPPrefixes{PublicIPPrefixes: ipPrefixes} } if outIps := idsToResourceReferences(config["outbound_ip_address_ids"]); outIps != nil && allowToSetOutboundIp { - outboundIps = &containerservice.ManagedClusterLoadBalancerProfileOutboundIPs{PublicIPs: outIps} + profile.OutboundIPs = &containerservice.ManagedClusterLoadBalancerProfileOutboundIPs{PublicIPs: outIps} } - return &containerservice.ManagedClusterLoadBalancerProfile{ - ManagedOutboundIPs: managedOutboundIps, - OutboundIPPrefixes: outboundIpPrefixes, - OutboundIPs: outboundIps, - }, nil + return profile, nil } func idsToResourceReferences(set interface{}) *[]containerservice.ResourceReference { @@ -1376,6 +1390,14 @@ func flattenKubernetesClusterNetworkProfile(profile *containerservice.NetworkPro if lbp := profile.LoadBalancerProfile; lbp != nil { lb := make(map[string]interface{}) + if v := lbp.AllocatedOutboundPorts; v != nil { + lb["outbound_ports_allocated"] = v + } + + if v := lbp.IdleTimeoutInMinutes; v != nil { + lb["idle_timeout_in_minutes"] 
= v + } + if ips := lbp.ManagedOutboundIPs; ips != nil { if count := ips.Count; count != nil { lb["managed_outbound_ip_count"] = count diff --git a/azurerm/internal/services/containers/resource_arm_container_service.go b/azurerm/internal/services/containers/resource_arm_container_service.go new file mode 100644 index 000000000000..e1a896b20304 --- /dev/null +++ b/azurerm/internal/services/containers/resource_arm_container_service.go @@ -0,0 +1,683 @@ +package containers + +import ( + "bytes" + "context" + "fmt" + "log" + "time" + + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice" + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/suppress" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts" + "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils" +) + +func resourceArmContainerService() *schema.Resource { + return &schema.Resource{ + Create: resourceArmContainerServiceCreateUpdate, + Read: resourceArmContainerServiceRead, + Update: resourceArmContainerServiceCreateUpdate, + Delete: resourceArmContainerServiceDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(90 * time.Minute), + Read: schema.DefaultTimeout(5 * time.Minute), + Update: schema.DefaultTimeout(90 * time.Minute), + Delete: schema.DefaultTimeout(90 * time.Minute), + }, + + DeprecationMessage: `Azure Container Service (ACS) has been deprecated in favour of Azure (Managed) Kubernetes Service (AKS). + +Azure will remove support for ACS Clusters on January 31, 2020. In preparation for this, the AzureRM Provider will remove support for the 'azurerm_container_service' resource in the next major version of the AzureRM Provider, which is targeted for Early 2019. + +If you're using ACS with Kubernetes, we'd recommend migrating to AKS / the 'azurerm_kubernetes_cluster' resource. 
+ +More information can be found here: https://azure.microsoft.com/en-us/updates/azure-container-service-will-retire-on-january-31-2020/ +`, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "location": azure.SchemaLocation(), + + "resource_group_name": azure.SchemaResourceGroupName(), + + "orchestration_platform": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: ValidateArmContainerServiceOrchestrationPlatform, + }, + + //lintignore:S018 + "master_profile": { + Type: schema.TypeSet, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + ValidateFunc: ValidateArmContainerServiceMasterProfileCount, + }, + + "dns_prefix": { + Type: schema.TypeString, + Required: true, + }, + + "fqdn": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + Set: resourceAzureRMContainerServiceMasterProfileHash, + }, + + //lintignore:S018 + "linux_profile": { + Type: schema.TypeSet, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "admin_username": { + Type: schema.TypeString, + Required: true, + }, + "ssh_key": { + Type: schema.TypeSet, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key_data": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + }, + Set: resourceAzureRMContainerServiceLinuxProfilesHash, + }, + + //lintignore:S018 + "agent_pool_profile": { + Type: schema.TypeSet, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "count": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + ValidateFunc: ValidateArmContainerServiceAgentPoolProfileCount, + }, + + "dns_prefix": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "fqdn": { + Type: schema.TypeString, + Computed: true, + }, + + "vm_size": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: suppress.CaseDifference, + }, + }, + }, + Set: resourceAzureRMContainerServiceAgentPoolProfilesHash, + }, + + //lintignore:S018 + "service_principal": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_id": { + Type: schema.TypeString, + Required: true, + }, + + "client_secret": { + Type: schema.TypeString, + Required: true, + Sensitive: true, + }, + }, + }, + Set: resourceAzureRMContainerServiceServicePrincipalProfileHash, + }, + + //lintignore:S018 + "diagnostics_profile": { + Type: schema.TypeSet, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + + "storage_uri": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + Set: resourceAzureRMContainerServiceDiagnosticProfilesHash, + }, + + "tags": tags.Schema(), + }, + } +} + +func resourceArmContainerServiceCreateUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*clients.Client) + ctx, cancel := timeouts.ForCreateUpdate(meta.(*clients.Client).StopContext, d) + defer cancel() + containerServiceClient := client.Containers.ServicesClient + + log.Printf("[INFO] preparing arguments for Azure ARM Container Service creation.") + + resGroup := d.Get("resource_group_name").(string) + name 
:= d.Get("name").(string) + + if features.ShouldResourcesBeImported() && d.IsNewResource() { + existing, err := containerServiceClient.Get(ctx, resGroup, name) + if err != nil { + if !utils.ResponseWasNotFound(existing.Response) { + return fmt.Errorf("Error checking for presence of existing Container Service %q (Resource Group %q): %s", name, resGroup, err) + } + } + + if existing.ID != nil && *existing.ID != "" { + return tf.ImportAsExistsError("azurerm_container_service", *existing.ID) + } + } + + location := azure.NormalizeLocation(d.Get("location").(string)) + + orchestrationPlatform := d.Get("orchestration_platform").(string) + + masterProfile := expandAzureRmContainerServiceMasterProfile(d) + linuxProfile := expandAzureRmContainerServiceLinuxProfile(d) + agentProfiles := expandAzureRmContainerServiceAgentProfiles(d) + diagnosticsProfile := expandAzureRmContainerServiceDiagnostics(d) + + t := d.Get("tags").(map[string]interface{}) + + parameters := containerservice.ContainerService{ + Name: &name, + Location: &location, + Properties: &containerservice.Properties{ + MasterProfile: &masterProfile, + LinuxProfile: &linuxProfile, + OrchestratorProfile: &containerservice.OrchestratorProfileType{ + OrchestratorType: containerservice.OrchestratorTypes(orchestrationPlatform), + }, + AgentPoolProfiles: &agentProfiles, + DiagnosticsProfile: &diagnosticsProfile, + }, + Tags: tags.Expand(t), + } + + servicePrincipalProfile := expandAzureRmContainerServiceServicePrincipal(d) + if servicePrincipalProfile != nil { + parameters.ServicePrincipalProfile = servicePrincipalProfile + } + + if _, err := containerServiceClient.CreateOrUpdate(ctx, resGroup, name, parameters); err != nil { + return err + } + + read, err := containerServiceClient.Get(ctx, resGroup, name) + if err != nil { + return err + } + + if read.ID == nil { + return fmt.Errorf("Cannot read Container Service %s (resource group %s) ID", name, resGroup) + } + + log.Printf("[DEBUG] Waiting for Container Service (%s) to become available", d.Get("name")) + stateConf := &resource.StateChangeConf{ + Pending: []string{"Updating", "Creating"}, + Target: []string{"Succeeded"}, + Refresh: containerServiceStateRefreshFunc(ctx, client, resGroup, name), + MinTimeout: 15 * time.Second, + } + + if features.SupportsCustomTimeouts() { + if d.IsNewResource() { + stateConf.Timeout = d.Timeout(schema.TimeoutCreate) + } else { + stateConf.Timeout = d.Timeout(schema.TimeoutUpdate) + } + } else { + stateConf.Timeout = 30 * time.Minute + } + + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf("Error waiting for Container Service (%s) to become available: %s", d.Get("name"), err) + } + + d.SetId(*read.ID) + + return resourceArmContainerServiceRead(d, meta) +} + +func resourceArmContainerServiceRead(d *schema.ResourceData, meta interface{}) error { + containerServiceClient := meta.(*clients.Client).Containers.ServicesClient + ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d) + defer cancel() + + id, err := azure.ParseAzureResourceID(d.Id()) + if err != nil { + return err + } + resGroup := id.ResourceGroup + name := id.Path["containerServices"] + + resp, err := containerServiceClient.Get(ctx, resGroup, name) + if err != nil { + if utils.ResponseWasNotFound(resp.Response) { + d.SetId("") + return nil + } + + return fmt.Errorf("Error making Read request on Azure Container Service %s: %s", name, err) + } + + d.Set("name", resp.Name) + d.Set("resource_group_name", resGroup) + if location := resp.Location; location != nil { + 
d.Set("location", azure.NormalizeLocation(*location)) + } + + d.Set("orchestration_platform", string(resp.Properties.OrchestratorProfile.OrchestratorType)) + + masterProfiles := flattenAzureRmContainerServiceMasterProfile(*resp.Properties.MasterProfile) + d.Set("master_profile", &masterProfiles) + + linuxProfile := flattenAzureRmContainerServiceLinuxProfile(*resp.Properties.LinuxProfile) + d.Set("linux_profile", &linuxProfile) + + agentPoolProfiles := flattenAzureRmContainerServiceAgentPoolProfiles(resp.Properties.AgentPoolProfiles) + d.Set("agent_pool_profile", &agentPoolProfiles) + + servicePrincipal := flattenAzureRmContainerServiceServicePrincipalProfile(resp.Properties.ServicePrincipalProfile) + if servicePrincipal != nil { + d.Set("service_principal", servicePrincipal) + } + + diagnosticProfile := flattenAzureRmContainerServiceDiagnosticsProfile(resp.Properties.DiagnosticsProfile) + if diagnosticProfile != nil { + d.Set("diagnostics_profile", diagnosticProfile) + } + + return tags.FlattenAndSet(d, resp.Tags) +} + +func resourceArmContainerServiceDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*clients.Client) + ctx, cancel := timeouts.ForDelete(meta.(*clients.Client).StopContext, d) + defer cancel() + containerServiceClient := client.Containers.ServicesClient + + id, err := azure.ParseAzureResourceID(d.Id()) + if err != nil { + return err + } + resGroup := id.ResourceGroup + name := id.Path["containerServices"] + + future, err := containerServiceClient.Delete(ctx, resGroup, name) + + if err != nil { + return fmt.Errorf("Error issuing Azure ARM delete request of Container Service '%s': %s", name, err) + } + + return future.WaitForCompletionRef(ctx, containerServiceClient.Client) +} + +func flattenAzureRmContainerServiceMasterProfile(profile containerservice.MasterProfile) *schema.Set { + masterProfiles := &schema.Set{ + F: resourceAzureRMContainerServiceMasterProfileHash, + } + + masterProfile := make(map[string]interface{}, 3) + + masterProfile["count"] = int(*profile.Count) + masterProfile["dns_prefix"] = *profile.DNSPrefix + masterProfile["fqdn"] = *profile.Fqdn + + masterProfiles.Add(masterProfile) + + return masterProfiles +} + +func flattenAzureRmContainerServiceLinuxProfile(profile containerservice.LinuxProfile) *schema.Set { + profiles := &schema.Set{ + F: resourceAzureRMContainerServiceLinuxProfilesHash, + } + + values := map[string]interface{}{} + + sshKeys := &schema.Set{ + F: resourceAzureRMContainerServiceLinuxProfilesSSHKeysHash, + } + for _, ssh := range *profile.SSH.PublicKeys { + keys := map[string]interface{}{} + keys["key_data"] = *ssh.KeyData + sshKeys.Add(keys) + } + + values["admin_username"] = *profile.AdminUsername + values["ssh_key"] = sshKeys + profiles.Add(values) + + return profiles +} + +func flattenAzureRmContainerServiceAgentPoolProfiles(profiles *[]containerservice.AgentPoolProfile) *schema.Set { + agentPoolProfiles := &schema.Set{ + F: resourceAzureRMContainerServiceAgentPoolProfilesHash, + } + + for _, profile := range *profiles { + agentPoolProfile := map[string]interface{}{} + agentPoolProfile["count"] = int(*profile.Count) + agentPoolProfile["dns_prefix"] = *profile.DNSPrefix + agentPoolProfile["fqdn"] = *profile.Fqdn + agentPoolProfile["name"] = *profile.Name + agentPoolProfile["vm_size"] = string(profile.VMSize) + agentPoolProfiles.Add(agentPoolProfile) + } + + return agentPoolProfiles +} + +func flattenAzureRmContainerServiceServicePrincipalProfile(profile *containerservice.ServicePrincipalProfile) *schema.Set { + if 
profile == nil { + return nil + } + + servicePrincipalProfiles := &schema.Set{ + F: resourceAzureRMContainerServiceServicePrincipalProfileHash, + } + + values := map[string]interface{}{} + + values["client_id"] = *profile.ClientID + if profile.Secret != nil { + values["client_secret"] = *profile.Secret + } + + servicePrincipalProfiles.Add(values) + + return servicePrincipalProfiles +} + +func flattenAzureRmContainerServiceDiagnosticsProfile(profile *containerservice.DiagnosticsProfile) *schema.Set { + diagnosticProfiles := &schema.Set{ + F: resourceAzureRMContainerServiceDiagnosticProfilesHash, + } + + values := map[string]interface{}{} + + values["enabled"] = *profile.VMDiagnostics.Enabled + if profile.VMDiagnostics.StorageURI != nil { + values["storage_uri"] = *profile.VMDiagnostics.StorageURI + } + diagnosticProfiles.Add(values) + + return diagnosticProfiles +} + +func expandAzureRmContainerServiceDiagnostics(d *schema.ResourceData) containerservice.DiagnosticsProfile { + configs := d.Get("diagnostics_profile").(*schema.Set).List() + + data := configs[0].(map[string]interface{}) + + enabled := data["enabled"].(bool) + + return containerservice.DiagnosticsProfile{ + VMDiagnostics: &containerservice.VMDiagnostics{ + Enabled: &enabled, + }, + } +} + +func expandAzureRmContainerServiceLinuxProfile(d *schema.ResourceData) containerservice.LinuxProfile { + profiles := d.Get("linux_profile").(*schema.Set).List() + config := profiles[0].(map[string]interface{}) + + adminUsername := config["admin_username"].(string) + + linuxKeys := config["ssh_key"].(*schema.Set).List() + sshPublicKeys := make([]containerservice.SSHPublicKey, 0) + + key := linuxKeys[0].(map[string]interface{}) + keyData := key["key_data"].(string) + + sshPublicKey := containerservice.SSHPublicKey{ + KeyData: &keyData, + } + + sshPublicKeys = append(sshPublicKeys, sshPublicKey) + + profile := containerservice.LinuxProfile{ + AdminUsername: &adminUsername, + SSH: &containerservice.SSHConfiguration{ + PublicKeys: &sshPublicKeys, + }, + } + + return profile +} + +func expandAzureRmContainerServiceMasterProfile(d *schema.ResourceData) containerservice.MasterProfile { + configs := d.Get("master_profile").(*schema.Set).List() + config := configs[0].(map[string]interface{}) + + count := int32(config["count"].(int)) + dnsPrefix := config["dns_prefix"].(string) + + profile := containerservice.MasterProfile{ + Count: &count, + DNSPrefix: &dnsPrefix, + } + + return profile +} + +func expandAzureRmContainerServiceServicePrincipal(d *schema.ResourceData) *containerservice.ServicePrincipalProfile { + value, exists := d.GetOk("service_principal") + if !exists { + return nil + } + + configs := value.(*schema.Set).List() + + config := configs[0].(map[string]interface{}) + + clientId := config["client_id"].(string) + clientSecret := config["client_secret"].(string) + + principal := containerservice.ServicePrincipalProfile{ + ClientID: &clientId, + Secret: &clientSecret, + } + + return &principal +} + +func expandAzureRmContainerServiceAgentProfiles(d *schema.ResourceData) []containerservice.AgentPoolProfile { + configs := d.Get("agent_pool_profile").(*schema.Set).List() + config := configs[0].(map[string]interface{}) + profiles := make([]containerservice.AgentPoolProfile, 0, len(configs)) + + name := config["name"].(string) + count := int32(config["count"].(int)) + dnsPrefix := config["dns_prefix"].(string) + vmSize := config["vm_size"].(string) + + profile := containerservice.AgentPoolProfile{ + Name: &name, + Count: &count, + VMSize: 
containerservice.VMSizeTypes(vmSize),
+		DNSPrefix: &dnsPrefix,
+	}
+
+	profiles = append(profiles, profile)
+
+	return profiles
+}
+
+func containerServiceStateRefreshFunc(ctx context.Context, client *clients.Client, resourceGroupName string, containerServiceName string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		res, err := client.Containers.ServicesClient.Get(ctx, resourceGroupName, containerServiceName)
+		if err != nil {
+			return nil, "", fmt.Errorf("Error issuing read request in containerServiceStateRefreshFunc to Azure ARM for Container Service '%s' (RG: '%s'): %s", containerServiceName, resourceGroupName, err)
+		}
+
+		return res, *res.Properties.ProvisioningState, nil
+	}
+}
+
+func resourceAzureRMContainerServiceMasterProfileHash(v interface{}) int {
+	var buf bytes.Buffer
+
+	if m, ok := v.(map[string]interface{}); ok {
+		buf.WriteString(fmt.Sprintf("%d-", m["count"].(int)))
+		buf.WriteString(fmt.Sprintf("%s-", m["dns_prefix"].(string)))
+	}
+
+	return hashcode.String(buf.String())
+}
+
+func resourceAzureRMContainerServiceLinuxProfilesHash(v interface{}) int {
+	var buf bytes.Buffer
+
+	if m, ok := v.(map[string]interface{}); ok {
+		buf.WriteString(fmt.Sprintf("%s-", m["admin_username"].(string)))
+	}
+
+	return hashcode.String(buf.String())
+}
+
+func resourceAzureRMContainerServiceLinuxProfilesSSHKeysHash(v interface{}) int {
+	var buf bytes.Buffer
+
+	if m, ok := v.(map[string]interface{}); ok {
+		buf.WriteString(fmt.Sprintf("%s-", m["key_data"].(string)))
+	}
+
+	return hashcode.String(buf.String())
+}
+
+func resourceAzureRMContainerServiceAgentPoolProfilesHash(v interface{}) int {
+	var buf bytes.Buffer
+
+	if m, ok := v.(map[string]interface{}); ok {
+		buf.WriteString(fmt.Sprintf("%d-", m["count"].(int)))
+		buf.WriteString(fmt.Sprintf("%s-", m["dns_prefix"].(string)))
+		buf.WriteString(fmt.Sprintf("%s-", m["name"].(string)))
+		buf.WriteString(fmt.Sprintf("%s-", m["vm_size"].(string)))
+	}
+
+	return hashcode.String(buf.String())
+}
+
+func resourceAzureRMContainerServiceServicePrincipalProfileHash(v interface{}) int {
+	var buf bytes.Buffer
+
+	if m, ok := v.(map[string]interface{}); ok {
+		buf.WriteString(fmt.Sprintf("%s-", m["client_id"].(string)))
+	}
+
+	return hashcode.String(buf.String())
+}
+
+func resourceAzureRMContainerServiceDiagnosticProfilesHash(v interface{}) int {
+	var buf bytes.Buffer
+
+	if m, ok := v.(map[string]interface{}); ok {
+		buf.WriteString(fmt.Sprintf("%t", m["enabled"].(bool)))
+	}
+
+	return hashcode.String(buf.String())
+}
+
+func ValidateArmContainerServiceOrchestrationPlatform(v interface{}, _ string) (warnings []string, errors []error) {
+	value := v.(string)
+	capacities := map[string]bool{
+		"DCOS":       true,
+		"Kubernetes": true,
+		"Swarm":      true,
+	}
+
+	if !capacities[value] {
+		errors = append(errors, fmt.Errorf("Container Service: Orchestration Platform can only be DCOS / Kubernetes / Swarm"))
+	}
+	return warnings, errors
+}
+
+func ValidateArmContainerServiceMasterProfileCount(v interface{}, _ string) (warnings []string, errors []error) {
+	value := v.(int)
+	capacities := map[int]bool{
+		1: true,
+		3: true,
+		5: true,
+	}
+
+	if !capacities[value] {
+		errors = append(errors, fmt.Errorf("The number of master nodes must be 1, 3 or 5."))
+	}
+	return warnings, errors
+}
+
+func ValidateArmContainerServiceAgentPoolProfileCount(v interface{}, _ string) (warnings []string, errors []error) {
+	value := v.(int)
+	if value > 100 || 0 >= value {
+		errors = append(errors, fmt.Errorf("The Count for an Agent Pool
Profile can only be between 1 and 100.")) + } + return warnings, errors +} diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_network_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_network_resource_test.go index e158519e2060..010bd93b964d 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_network_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_network_resource_test.go @@ -2,6 +2,7 @@ package tests import ( "fmt" + "os" "regexp" "testing" @@ -451,6 +452,8 @@ func testAccAzureRMKubernetesCluster_standardLoadBalancerProfile(t *testing.T) { resource.TestCheckResourceAttr(data.ResourceName, "network_profile.0.load_balancer_sku", "Standard"), resource.TestCheckResourceAttr(data.ResourceName, "network_profile.0.load_balancer_profile.0.managed_outbound_ip_count", "3"), resource.TestCheckResourceAttr(data.ResourceName, "network_profile.0.load_balancer_profile.0.effective_outbound_ips.#", "3"), + resource.TestCheckResourceAttr(data.ResourceName, "network_profile.0.load_balancer_profile.0.idle_timeout_in_minutes", "30"), + resource.TestCheckResourceAttr(data.ResourceName, "network_profile.0.load_balancer_profile.0.outbound_ports_allocated", "0"), ), }, data.ImportStep(), @@ -485,6 +488,34 @@ func testAccAzureRMKubernetesCluster_standardLoadBalancerProfileComplete(t *test }) } +func TestAccAzureRMKubernetesCluster_standardLoadBalancerProfileWithPortAndTimeout(t *testing.T) { + checkIfShouldRunTestsIndividually(t) + testAccAzureRMKubernetesCluster_standardLoadBalancerProfileWithPortAndTimeout(t) +} + +func testAccAzureRMKubernetesCluster_standardLoadBalancerProfileWithPortAndTimeout(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") + clientId := os.Getenv("ARM_CLIENT_ID") + clientSecret := os.Getenv("ARM_CLIENT_SECRET") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acceptance.PreCheck(t) }, + Providers: acceptance.SupportedProviders, + CheckDestroy: testCheckAzureRMKubernetesClusterDestroy, + PreventPostDestroyRefresh: true, + Steps: []resource.TestStep{ + { + Config: testAccAzureRMKubernetesCluster_standardLoadBalancerProfileWithPortAndTimeoutConfig(data, clientId, clientSecret), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMKubernetesClusterExists(data.ResourceName), + resource.TestCheckResourceAttr(data.ResourceName, "network_profile.0.load_balancer_profile.0.outbound_ports_allocated", "8000"), + resource.TestCheckResourceAttr(data.ResourceName, "network_profile.0.load_balancer_profile.0.idle_timeout_in_minutes", "10"), + ), + }, + }, + }) +} + func TestAccAzureRMKubernetesCluster_basicLoadBalancerProfile(t *testing.T) { checkIfShouldRunTestsIndividually(t) testAccAzureRMKubernetesCluster_basicLoadBalancerProfile(t) @@ -1387,7 +1418,57 @@ resource "azurerm_kubernetes_cluster" "test" { `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, currentKubernetesVersion, data.RandomInteger) } -func testAccAzureRMKubernetesCluster_basicLoadBalancerProfileConfig(data acceptance.TestData) string { +func testAccAzureRMKubernetesCluster_standardLoadBalancerProfileWithPortAndTimeoutConfig(data acceptance.TestData, clientId string, clientSecret string) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-aks-%d" + location = "%s" +} + +resource 
"azurerm_kubernetes_cluster" "test" { + name = "acctestaks%d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%d" + kubernetes_version = "%s" + + linux_profile { + admin_username = "acctestuser%d" + + ssh_key { + key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld" + } + } + + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + } + + service_principal { + client_id = "%s" + client_secret = "%s" + } + + network_profile { + network_plugin = "kubenet" + load_balancer_sku = "Standard" + load_balancer_profile { + managed_outbound_ip_count = 2 + outbound_ports_allocated = 8000 + idle_timeout_in_minutes = 10 + } + } +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, currentKubernetesVersion, data.RandomInteger, clientId, clientSecret) +} + +func testAccAzureRMKubernetesCluster_basicLoadBalancerProfileConfig(data acceptance.TestData, clientId string, clientSecret string) string { return fmt.Sprintf(` provider "azurerm" { features {} diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go index 67968d3b51f9..8f7704aa7e51 100644 --- a/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go +++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go @@ -40,23 +40,24 @@ func TestAccAzureRMKubernetes_all(t *testing.T) { "servicePrincipal": testAccAzureRMKubernetesCluster_servicePrincipal, }, "network": { - "advancedNetworkingKubenet": testAccAzureRMKubernetesCluster_advancedNetworkingKubenet, - "advancedNetworkingKubenetComplete": testAccAzureRMKubernetesCluster_advancedNetworkingKubenetComplete, - "advancedNetworkingAzure": testAccAzureRMKubernetesCluster_advancedNetworkingAzure, - "advancedNetworkingAzureComplete": testAccAzureRMKubernetesCluster_advancedNetworkingAzureComplete, - "advancedNetworkingAzureCalicoPolicy": testAccAzureRMKubernetesCluster_advancedNetworkingAzureCalicoPolicy, - "advancedNetworkingAzureCalicoPolicyComplete": testAccAzureRMKubernetesCluster_advancedNetworkingAzureCalicoPolicyComplete, - "advancedNetworkingAzureNPMPolicy": testAccAzureRMKubernetesCluster_advancedNetworkingAzureNPMPolicy, - "advancedNetworkingAzureNPMPolicyComplete": testAccAzureRMKubernetesCluster_advancedNetworkingAzureNPMPolicyComplete, "enableNodePublicIP": testAccAzureRMKubernetesCluster_enableNodePublicIP, "internalNetwork": testAccAzureRMKubernetesCluster_internalNetwork, - "basicLoadBalancerProfile": testAccAzureRMKubernetesCluster_basicLoadBalancerProfile, "changingLoadBalancerProfile": testAccAzureRMKubernetesCluster_changingLoadBalancerProfile, "prefixedLoadBalancerProfile": testAccAzureRMKubernetesCluster_prefixedLoadBalancerProfile, "standardLoadBalancer": testAccAzureRMKubernetesCluster_standardLoadBalancer, "standardLoadBalancerComplete": testAccAzureRMKubernetesCluster_standardLoadBalancerComplete, "standardLoadBalancerProfile": testAccAzureRMKubernetesCluster_standardLoadBalancerProfile, 
"standardLoadBalancerProfileComplete": testAccAzureRMKubernetesCluster_standardLoadBalancerProfileComplete, + "advancedNetworkingKubenet": testAccAzureRMKubernetesCluster_advancedNetworkingKubenet, + "advancedNetworkingKubenetComplete": testAccAzureRMKubernetesCluster_advancedNetworkingKubenetComplete, + "advancedNetworkingAzure": testAccAzureRMKubernetesCluster_advancedNetworkingAzure, + "advancedNetworkingAzureComplete": testAccAzureRMKubernetesCluster_advancedNetworkingAzureComplete, + "advancedNetworkingAzureCalicoPolicy": testAccAzureRMKubernetesCluster_advancedNetworkingAzureCalicoPolicy, + "advancedNetworkingAzureCalicoPolicyComplete": testAccAzureRMKubernetesCluster_advancedNetworkingAzureCalicoPolicyComplete, + "advancedNetworkingAzureNPMPolicy": testAccAzureRMKubernetesCluster_advancedNetworkingAzureNPMPolicy, + "advancedNetworkingAzureNPMPolicyComplete": testAccAzureRMKubernetesCluster_advancedNetworkingAzureNPMPolicyComplete, + "basicLoadBalancerProfile": testAccAzureRMKubernetesCluster_basicLoadBalancerProfile, + "standardLoadBalancerProfileWithPortAndTimeout": testAccAzureRMKubernetesCluster_standardLoadBalancerProfileWithPortAndTimeout, }, "nodePool": { "autoScale": testAccAzureRMKubernetesClusterNodePool_autoScale, diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index c27b5ff5a4ac..9fbdd45512a0 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -293,7 +293,11 @@ A `load_balancer_profile` block supports the following: ~> **NOTE:** These options are mutually exclusive. Note that when specifying `outbound_ip_address_ids` ([azurerm_public_ip](/docs/providers/azurerm/r/public_ip.html)) the SKU must be `Standard`. -* `managed_outbound_ip_count` - (Optional) Count of desired managed outbound IPs for the cluster load balancer. Must be in the range of [1, 100]. +* `outbound_ports_allocated` - (Optional) Number of desired SNAT port for each VM in the clusters load balancer. Must be between `0` and `64000` inclusive. Defaults to `0`. + +* `idle_timeout_in_minutes` - (Optional) Desired outbound flow idle timeout in minutes for the cluster load balancer. Must be between `4` and `120` inclusive. Defaults to `30`. + +* `managed_outbound_ip_count` - (Optional) Count of desired managed outbound IPs for the cluster load balancer. Must be between `1` and `100` inclusive. -> **NOTE** User has to explicitly set `managed_outbound_ip_count` to empty slice (`[]`) to remove it.