From dc08befd1aaa7b7debfda5eb6a2cc4316ea4cc3b Mon Sep 17 00:00:00 2001
From: dstrebel
Date: Wed, 11 Nov 2020 23:57:14 -0600
Subject: [PATCH 1/3] added autoscale property

---
 .../containers/kubernetes_cluster_resource.go      | 14 ++++++++++++++
 .../kubernetes_cluster_scaling_resource_test.go    |  5 ++++-
 website/docs/r/kubernetes_cluster.html.markdown    |  2 ++
 3 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/azurerm/internal/services/containers/kubernetes_cluster_resource.go b/azurerm/internal/services/containers/kubernetes_cluster_resource.go
index 5b726696af0b..b37f459a73d7 100644
--- a/azurerm/internal/services/containers/kubernetes_cluster_resource.go
+++ b/azurerm/internal/services/containers/kubernetes_cluster_resource.go
@@ -109,6 +109,12 @@ func resourceArmKubernetesCluster() *schema.Resource {
 						Optional: true,
 						Computed: true,
 					},
+					"new_pod_scale_up_delay": {
+						Type:         schema.TypeString,
+						Optional:     true,
+						Computed:     true,
+						ValidateFunc: containerValidate.Duration,
+					},
 					"scan_interval": {
 						Type:     schema.TypeString,
 						Optional: true,
@@ -1896,6 +1902,11 @@ func flattenKubernetesClusterAutoScalerProfile(profile *containerservice.Managed
 		maxGracefulTerminationSec = *profile.MaxGracefulTerminationSec
 	}
+	newPodScaleUpDelay := ""
+	if profile.NewPodScaleUpDelay != nil {
+		newPodScaleUpDelay = *profile.NewPodScaleUpDelay
+	}
+
 	scaleDownDelayAfterAdd := ""
 	if profile.ScaleDownDelayAfterAdd != nil {
 		scaleDownDelayAfterAdd = *profile.ScaleDownDelayAfterAdd
 	}
@@ -1935,6 +1946,7 @@ func flattenKubernetesClusterAutoScalerProfile(profile *containerservice.Managed
 		map[string]interface{}{
 			"balance_similar_node_groups":      balanceSimilarNodeGroups,
 			"max_graceful_termination_sec":     maxGracefulTerminationSec,
+			"new_pod_scale_up_delay":           newPodScaleUpDelay,
 			"scale_down_delay_after_add":       scaleDownDelayAfterAdd,
 			"scale_down_delay_after_delete":    scaleDownDelayAfterDelete,
 			"scale_down_delay_after_failure":   scaleDownDelayAfterFailure,
@@ -1955,6 +1967,7 @@ func expandKubernetesClusterAutoScalerProfile(input []interface{}) *containerser
 
 	balanceSimilarNodeGroups := config["balance_similar_node_groups"].(bool)
 	maxGracefulTerminationSec := config["max_graceful_termination_sec"].(string)
+	newPodScaleUpDelay := config["new_pod_scale_up_delay"].(string)
 	scaleDownDelayAfterAdd := config["scale_down_delay_after_add"].(string)
 	scaleDownDelayAfterDelete := config["scale_down_delay_after_delete"].(string)
 	scaleDownDelayAfterFailure := config["scale_down_delay_after_failure"].(string)
@@ -1966,6 +1979,7 @@ func expandKubernetesClusterAutoScalerProfile(input []interface{}) *containerser
 	return &containerservice.ManagedClusterPropertiesAutoScalerProfile{
 		BalanceSimilarNodeGroups:      utils.String(strconv.FormatBool(balanceSimilarNodeGroups)),
 		MaxGracefulTerminationSec:     utils.String(maxGracefulTerminationSec),
+		NewPodScaleUpDelay:            utils.String(newPodScaleUpDelay),
 		ScaleDownDelayAfterAdd:        utils.String(scaleDownDelayAfterAdd),
 		ScaleDownDelayAfterDelete:     utils.String(scaleDownDelayAfterDelete),
 		ScaleDownDelayAfterFailure:    utils.String(scaleDownDelayAfterFailure),
diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go
index 08292ca31b10..a3a06a484dca 100644
--- a/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go
+++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go
@@ -133,6 +133,7 @@ func testAccAzureRMKubernetesCluster_autoScalingNodeCountUnset(t *testing.T) {
 				resource.TestCheckResourceAttr(data.ResourceName, "default_node_pool.0.max_count", "4"),
 				resource.TestCheckResourceAttr(data.ResourceName, "default_node_pool.0.enable_auto_scaling", "true"),
 				resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.max_graceful_termination_sec", "600"),
+				resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.new_pod_scale_up_delay", "10s"),
 				resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_delay_after_add", "10m"),
 				resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_delay_after_delete", "10s"),
 				resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_delay_after_failure", "3m"),
@@ -225,6 +226,7 @@ func testAccAzureRMKubernetesCluster_autoScalingProfile(t *testing.T) {
 				testCheckAzureRMKubernetesClusterExists(data.ResourceName),
 				resource.TestCheckResourceAttr(data.ResourceName, "default_node_pool.0.enable_auto_scaling", "true"),
 				resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.max_graceful_termination_sec", "15"),
+				resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.new_pod_scale_up_delay", "10s"),
 				resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_delay_after_add", "10m"),
 				resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_delay_after_delete", "10s"),
 				resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_delay_after_failure", "15m"),
@@ -481,7 +483,8 @@ resource "azurerm_kubernetes_cluster" "test" {
 
   auto_scaler_profile {
     balance_similar_node_groups   = true
-    max_graceful_termination_sec  = 15
+    max_graceful_termination_sec = 15
+    new_pod_scale_up_delay = "10s"
     scan_interval                 = "10s"
     scale_down_delay_after_add    = "10m"
     scale_down_delay_after_delete = "10s"
diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown
index 5bca37920a11..67f16a0a5b6b 100644
--- a/website/docs/r/kubernetes_cluster.html.markdown
+++ b/website/docs/r/kubernetes_cluster.html.markdown
@@ -169,6 +169,8 @@ A `auto_scaler_profile` block supports the following:
 
 * `max_graceful_termination_sec` - Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node. Defaults to `600`.
 
+* `new_pod_scale_up_delay` - For scenarios like burst/batch scale where you don't want the cluster autoscaler to act before the Kubernetes scheduler has had a chance to schedule all the pods, you can tell the cluster autoscaler to ignore unscheduled pods before they reach a certain age. Defaults to `10s`.
+
 * `scale_down_delay_after_add` - How long after the scale up of AKS nodes the scale down evaluation resumes. Defaults to `10m`.
 
 * `scale_down_delay_after_delete` - How long after node deletion that scale down evaluation resumes. Defaults to the value used for `scan_interval`.
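For context, here is a minimal configuration sketch showing how the new `new_pod_scale_up_delay` property would be used once patch 1 is applied. Everything apart from the property itself is an illustrative placeholder (resource names, location, node pool sizing, and the `30s` value are assumptions, not taken from the patches):

```hcl
provider "azurerm" {
  features {}
}

resource "azurerm_resource_group" "example" {
  name     = "example-aks-resources"
  location = "West Europe"
}

resource "azurerm_kubernetes_cluster" "example" {
  name                = "example-aks"
  location            = azurerm_resource_group.example.location
  resource_group_name = azurerm_resource_group.example.name
  dns_prefix          = "exampleaks"

  default_node_pool {
    name                = "default"
    vm_size             = "Standard_DS2_v2"
    enable_auto_scaling = true
    min_count           = 1
    max_count           = 4
  }

  identity {
    type = "SystemAssigned"
  }

  auto_scaler_profile {
    # Ignore newly unschedulable pods for 30 seconds before considering a
    # scale-up, so a burst of batch pods can first be packed onto existing
    # capacity by the scheduler.
    new_pod_scale_up_delay = "30s"
  }
}
```

Since the schema validates the field with `containerValidate.Duration`, the value is expected to be a duration string such as `10s` or `2m`.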
From bdb96662c948c94d9774d6c5ea431d6e9c071539 Mon Sep 17 00:00:00 2001
From: kt
Date: Sun, 27 Dec 2020 06:55:01 -0800
Subject: [PATCH 2/3] make terrafmt

---
 .../tests/kubernetes_cluster_scaling_resource_test.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go
index dce0c16b6aa5..05664a40a84b 100644
--- a/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go
+++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go
@@ -588,8 +588,8 @@ resource "azurerm_kubernetes_cluster" "test" {
 
   auto_scaler_profile {
     balance_similar_node_groups   = true
-    max_graceful_termination_sec = 15
-    new_pod_scale_up_delay = "10s"
+    max_graceful_termination_sec  = 15
+    new_pod_scale_up_delay        = "10s"
     scan_interval                 = "10s"
     scale_down_delay_after_add    = "10m"
     scale_down_delay_after_delete = "10s"

From 187ef46df5e0e1d2fa8d8f4024ea9a094aa8e4ce Mon Sep 17 00:00:00 2001
From: Tom Harvey
Date: Mon, 18 Jan 2021 10:29:51 +0100
Subject: [PATCH 3/3] Update
 azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go

---
 .../tests/kubernetes_cluster_scaling_resource_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go
index 05664a40a84b..4c5d084ceb93 100644
--- a/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go
+++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_scaling_resource_test.go
@@ -238,7 +238,7 @@ func testAccAzureRMKubernetesCluster_autoScalingNodeCountUnset(t *testing.T) {
 				resource.TestCheckResourceAttr(data.ResourceName, "default_node_pool.0.max_count", "4"),
 				resource.TestCheckResourceAttr(data.ResourceName, "default_node_pool.0.enable_auto_scaling", "true"),
 				resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.max_graceful_termination_sec", "600"),
-				resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.new_pod_scale_up_delay", "10s"),
+				resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.new_pod_scale_up_delay", "0s"),
 				resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_delay_after_add", "10m"),
 				resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_delay_after_delete", "10s"),
 				resource.TestCheckResourceAttr(data.ResourceName, "auto_scaler_profile.0.scale_down_delay_after_failure", "3m"),
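Patch 3 follows from the attribute being both Optional and Computed: the `autoScalingNodeCountUnset` test never configures `auto_scaler_profile`, so the provider stores whatever the service reports, and the adjusted expectation suggests that the service-side value for an unconfigured profile is `0s`. A hypothetical way to observe the computed value from a configuration (the resource name `example` is an assumed placeholder, as in the sketch above):

```hcl
output "new_pod_scale_up_delay" {
  # Surfaces the value stored in state; when the property is not set in
  # configuration, this is the service-reported value rather than anything
  # the configuration supplied.
  value = azurerm_kubernetes_cluster.example.auto_scaler_profile[0].new_pod_scale_up_delay
}
```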