"azurerm_kubernetes_cluster" - supports "max_node_provisioning_time",…
Browse files Browse the repository at this point in the history
…"max_unready_percentage" and "max_unready_nodes" (#11406)

This PR adds support for the max_node_provision_time, max_total_unready_percentage and ok_total_unready_count properties in the auto_scaler_profile block for azurerm_kubernetes_cluster.

As per docs.microsoft.com/en-us/azure/aks/cluster-autoscaler#using-the-autoscaler-profile

The original acceptance test TestAccKubernetesCluster_autoScalingProfile fails because version "1.19.6" is not available in the westeurope region.
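For illustration, a minimal sketch of how the new arguments can be configured (the resource name, values, and the omitted arguments below are placeholders, not taken from this change):

```hcl
resource "azurerm_kubernetes_cluster" "example" {
  # ... name, location, resource_group_name, default_node_pool, identity, etc. omitted ...

  auto_scaler_profile {
    max_node_provisioning_time = "10m" # how long to wait for a node to provision; defaults to "15m"
    max_unready_nodes          = 5     # maximum number of unready nodes tolerated; defaults to 3
    max_unready_percentage     = 50    # maximum percentage of unready nodes tolerated; defaults to 45
  }
}
```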
njuCZ authored Apr 27, 2021
1 parent 7cd9cad commit 2254334
Showing 3 changed files with 70 additions and 5 deletions.
@@ -131,6 +131,24 @@ func resourceKubernetesCluster() *schema.Resource {
Optional: true,
Computed: true,
},
"max_node_provisioning_time": {
Type: schema.TypeString,
Optional: true,
Default: "15m",
ValidateFunc: containerValidate.Duration,
},
"max_unready_nodes": {
Type: schema.TypeInt,
Optional: true,
Default: 3,
ValidateFunc: validation.IntAtLeast(0),
},
"max_unready_percentage": {
Type: schema.TypeFloat,
Optional: true,
Default: 45,
ValidateFunc: validation.FloatBetween(0, 100),
},
"new_pod_scale_up_delay": {
Type: schema.TypeString,
Optional: true,
@@ -1356,7 +1374,10 @@ func resourceKubernetesClusterRead(d *schema.ResourceData, meta interface{}) err
return fmt.Errorf("setting `addon_profile`: %+v", err)
}

autoScalerProfile := flattenKubernetesClusterAutoScalerProfile(props.AutoScalerProfile)
autoScalerProfile, err := flattenKubernetesClusterAutoScalerProfile(props.AutoScalerProfile)
if err != nil {
return err
}
if err := d.Set("auto_scaler_profile", autoScalerProfile); err != nil {
return fmt.Errorf("setting `auto_scaler_profile`: %+v", err)
}
@@ -2094,9 +2115,9 @@ func flattenKubernetesClusterManagedClusterIdentity(input *containerservice.Mana
return []interface{}{identity}, nil
}

func flattenKubernetesClusterAutoScalerProfile(profile *containerservice.ManagedClusterPropertiesAutoScalerProfile) []interface{} {
func flattenKubernetesClusterAutoScalerProfile(profile *containerservice.ManagedClusterPropertiesAutoScalerProfile) ([]interface{}, error) {
if profile == nil {
return []interface{}{}
return []interface{}{}, nil
}

balanceSimilarNodeGroups := false
@@ -2111,6 +2132,29 @@ func flattenKubernetesClusterAutoScalerProfile(profile *containerservice.Managed
maxGracefulTerminationSec = *profile.MaxGracefulTerminationSec
}

maxNodeProvisionTime := ""
if profile.MaxNodeProvisionTime != nil {
maxNodeProvisionTime = *profile.MaxNodeProvisionTime
}

maxUnreadyNodes := 0
if profile.OkTotalUnreadyCount != nil {
var err error
maxUnreadyNodes, err = strconv.Atoi(*profile.OkTotalUnreadyCount)
if err != nil {
return nil, err
}
}

maxUnreadyPercentage := 0.0
if profile.MaxTotalUnreadyPercentage != nil {
var err error
maxUnreadyPercentage, err = strconv.ParseFloat(*profile.MaxTotalUnreadyPercentage, 64)
if err != nil {
return nil, err
}
}

newPodScaleUpDelay := ""
if profile.NewPodScaleUpDelay != nil {
newPodScaleUpDelay = *profile.NewPodScaleUpDelay
@@ -2171,6 +2215,9 @@ func flattenKubernetesClusterAutoScalerProfile(profile *containerservice.Managed
"balance_similar_node_groups": balanceSimilarNodeGroups,
"expander": string(profile.Expander),
"max_graceful_termination_sec": maxGracefulTerminationSec,
"max_node_provisioning_time": maxNodeProvisionTime,
"max_unready_nodes": maxUnreadyNodes,
"max_unready_percentage": maxUnreadyPercentage,
"new_pod_scale_up_delay": newPodScaleUpDelay,
"scale_down_delay_after_add": scaleDownDelayAfterAdd,
"scale_down_delay_after_delete": scaleDownDelayAfterDelete,
@@ -2183,7 +2230,7 @@ func flattenKubernetesClusterAutoScalerProfile(profile *containerservice.Managed
"skip_nodes_with_local_storage": skipNodesWithLocalStorage,
"skip_nodes_with_system_pods": skipNodesWithSystemPods,
},
}
}, nil
}

func expandKubernetesClusterAutoScalerProfile(input []interface{}) *containerservice.ManagedClusterPropertiesAutoScalerProfile {
@@ -2196,6 +2243,9 @@ func expandKubernetesClusterAutoScalerProfile(input []interface{}) *containerser
balanceSimilarNodeGroups := config["balance_similar_node_groups"].(bool)
expander := config["expander"].(string)
maxGracefulTerminationSec := config["max_graceful_termination_sec"].(string)
maxNodeProvisionTime := config["max_node_provisioning_time"].(string)
maxUnreadyNodes := fmt.Sprint(config["max_unready_nodes"].(int))
maxUnreadyPercentage := fmt.Sprint(config["max_unready_percentage"].(float64))
newPodScaleUpDelay := config["new_pod_scale_up_delay"].(string)
scaleDownDelayAfterAdd := config["scale_down_delay_after_add"].(string)
scaleDownDelayAfterDelete := config["scale_down_delay_after_delete"].(string)
@@ -2212,7 +2262,10 @@ func expandKubernetesClusterAutoScalerProfile(input []interface{}) *containerser
BalanceSimilarNodeGroups: utils.String(strconv.FormatBool(balanceSimilarNodeGroups)),
Expander: containerservice.Expander(expander),
MaxGracefulTerminationSec: utils.String(maxGracefulTerminationSec),
MaxNodeProvisionTime: utils.String(maxNodeProvisionTime),
MaxTotalUnreadyPercentage: utils.String(maxUnreadyPercentage),
NewPodScaleUpDelay: utils.String(newPodScaleUpDelay),
OkTotalUnreadyCount: utils.String(maxUnreadyNodes),
ScaleDownDelayAfterAdd: utils.String(scaleDownDelayAfterAdd),
ScaleDownDelayAfterDelete: utils.String(scaleDownDelayAfterDelete),
ScaleDownDelayAfterFailure: utils.String(scaleDownDelayAfterFailure),
@@ -290,7 +290,10 @@ func testAccKubernetesCluster_autoScalingProfile(t *testing.T) {
check.That(data.ResourceName).Key("default_node_pool.0.enable_auto_scaling").HasValue("true"),
check.That(data.ResourceName).Key("auto_scaler_profile.0.expander").HasValue("least-waste"),
check.That(data.ResourceName).Key("auto_scaler_profile.0.max_graceful_termination_sec").HasValue("15"),
check.That(data.ResourceName).Key("auto_scaler_profile.0.max_node_provisioning_time").HasValue("10m"),
check.That(data.ResourceName).Key("auto_scaler_profile.0.max_unready_percentage").HasValue("50"),
check.That(data.ResourceName).Key("auto_scaler_profile.0.new_pod_scale_up_delay").HasValue("10s"),
check.That(data.ResourceName).Key("auto_scaler_profile.0.max_unready_nodes").HasValue("5"),
check.That(data.ResourceName).Key("auto_scaler_profile.0.scale_down_delay_after_add").HasValue("10m"),
check.That(data.ResourceName).Key("auto_scaler_profile.0.scale_down_delay_after_delete").HasValue("10s"),
check.That(data.ResourceName).Key("auto_scaler_profile.0.scale_down_delay_after_failure").HasValue("15m"),
@@ -551,6 +554,9 @@ resource "azurerm_kubernetes_cluster" "test" {
balance_similar_node_groups = true
expander = "least-waste"
max_graceful_termination_sec = 15
max_node_provisioning_time = "10m"
max_unready_nodes = 5
max_unready_percentage = 50
new_pod_scale_up_delay = "10s"
scan_interval = "10s"
scale_down_delay_after_add = "10m"
8 changes: 7 additions & 1 deletion website/docs/r/kubernetes_cluster.html.markdown
@@ -219,14 +219,20 @@ A `addon_profile` block supports the following:

---

A `auto_scaler_profile` block supports the following:
An `auto_scaler_profile` block supports the following:

* `balance_similar_node_groups` - Detect similar node groups and balance the number of nodes between them. Defaults to `false`.

* `expander` - Expander to use. Possible values are `least-waste`, `priority`, `most-pods` and `random`. Defaults to `random`.

* `max_graceful_termination_sec` - Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node. Defaults to `600`.

* `max_node_provisioning_time` - Maximum time the autoscaler waits for a node to be provisioned. Defaults to `15m`.

* `max_unready_nodes` - Maximum number of allowed unready nodes. Defaults to `3`.

* `max_unready_percentage` - Maximum percentage of unready nodes; if this percentage is exceeded, the cluster autoscaler halts its operations. Defaults to `45`.

* `new_pod_scale_up_delay` - For scenarios like burst/batch scale where you don't want CA to act before the kubernetes scheduler could schedule all the pods, you can tell CA to ignore unscheduled pods before they're a certain age. Defaults to `10s`.

* `scale_down_delay_after_add` - How long after the scale up of AKS nodes the scale down evaluation resumes. Defaults to `10m`.
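As a usage note (not part of this commit), the configured values can also be read back from state once applied; a sketch, assuming a cluster resource named `azurerm_kubernetes_cluster.example` exists in the configuration:

```hcl
# Sketch only — azurerm_kubernetes_cluster.example is an assumed resource name.
output "autoscaler_unready_limits" {
  value = {
    max_node_provisioning_time = azurerm_kubernetes_cluster.example.auto_scaler_profile[0].max_node_provisioning_time
    max_unready_nodes          = azurerm_kubernetes_cluster.example.auto_scaler_profile[0].max_unready_nodes
    max_unready_percentage     = azurerm_kubernetes_cluster.example.auto_scaler_profile[0].max_unready_percentage
  }
}
```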
