diff --git a/azurerm/internal/services/containers/kubernetes_cluster_other_resource_test.go b/azurerm/internal/services/containers/kubernetes_cluster_other_resource_test.go
index e5cf87dafa5e..6d7db2e0685f 100644
--- a/azurerm/internal/services/containers/kubernetes_cluster_other_resource_test.go
+++ b/azurerm/internal/services/containers/kubernetes_cluster_other_resource_test.go
@@ -14,6 +14,7 @@ var kubernetesOtherTests = map[string]func(t *testing.T){
 	"basicAvailabilitySet": testAccKubernetesCluster_basicAvailabilitySet,
 	"basicVMSS":            testAccKubernetesCluster_basicVMSS,
 	"requiresImport":       testAccKubernetesCluster_requiresImport,
+	"criticalAddonsTaint":  testAccKubernetesCluster_criticalAddonsTaint,
 	"linuxProfile":         testAccKubernetesCluster_linuxProfile,
 	"nodeLabels":           testAccKubernetesCluster_nodeLabels,
 	"nodeResourceGroup":    testAccKubernetesCluster_nodeResourceGroup,
@@ -145,6 +146,27 @@ func testAccKubernetesCluster_requiresImport(t *testing.T) {
 	})
 }
 
+func TestAccKubernetesCluster_criticalAddonsTaint(t *testing.T) {
+	checkIfShouldRunTestsIndividually(t)
+	testAccKubernetesCluster_criticalAddonsTaint(t)
+}
+
+func testAccKubernetesCluster_criticalAddonsTaint(t *testing.T) {
+	data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test")
+	r := KubernetesClusterResource{}
+
+	data.ResourceTest(t, r, []resource.TestStep{
+		{
+			Config: r.criticalAddonsTaintConfig(data),
+			Check: resource.ComposeTestCheckFunc(
+				check.That(data.ResourceName).ExistsInAzure(r),
+				check.That(data.ResourceName).Key("default_node_pool.0.only_critical_addons_enabled").HasValue("true"),
+			),
+		},
+		data.ImportStep(),
+	})
+}
+
 func TestAccKubernetesCluster_linuxProfile(t *testing.T) {
 	checkIfShouldRunTestsIndividually(t)
 	testAccKubernetesCluster_linuxProfile(t)
@@ -666,6 +688,38 @@ resource "azurerm_kubernetes_cluster" "import" {
 `, r.basicVMSSConfig(data))
 }
 
+func (KubernetesClusterResource) criticalAddonsTaintConfig(data acceptance.TestData) string {
+	return fmt.Sprintf(`
+provider "azurerm" {
+  features {}
+}
+
+resource "azurerm_resource_group" "test" {
+  name     = "acctestRG-aks-%d"
+  location = "%s"
+}
+
+resource "azurerm_kubernetes_cluster" "test" {
+  name                = "acctestaks%d"
+  location            = azurerm_resource_group.test.location
+  resource_group_name = azurerm_resource_group.test.name
+  dns_prefix          = "acctestaks%d"
+
+  default_node_pool {
+    name                         = "default"
+    node_count                   = 1
+    type                         = "AvailabilitySet"
+    vm_size                      = "Standard_DS2_v2"
+    only_critical_addons_enabled = true
+  }
+
+  identity {
+    type = "SystemAssigned"
+  }
+}
+`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger)
+}
+
 func (KubernetesClusterResource) linuxProfileConfig(data acceptance.TestData) string {
 	return fmt.Sprintf(`
 provider "azurerm" {
diff --git a/azurerm/internal/services/containers/kubernetes_nodepool.go b/azurerm/internal/services/containers/kubernetes_nodepool.go
index 04d9fad43f26..7c511b73cee6 100644
--- a/azurerm/internal/services/containers/kubernetes_nodepool.go
+++ b/azurerm/internal/services/containers/kubernetes_nodepool.go
@@ -2,6 +2,7 @@ package containers
 
 import (
 	"fmt"
+	"strings"
 
 	computeValidate "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/compute/validate"
 	"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/services/containers/validate"
@@ -159,6 +160,11 @@ func SchemaDefaultNodePool() *schema.Schema {
 				ForceNew:     true,
 				ValidateFunc: computeValidate.ProximityPlacementGroupID,
 			},
+			"only_critical_addons_enabled": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				ForceNew: true,
+			},
 		},
 	},
 }
@@ -206,7 +212,12 @@ func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedClusterAgentPoolProfile, error) {
 	nodeTaintsRaw := raw["node_taints"].([]interface{})
 	nodeTaints := utils.ExpandStringSlice(nodeTaintsRaw)
 	if len(*nodeTaints) != 0 {
-		return nil, fmt.Errorf("The AKS API has removed support for tainting all nodes in the default node pool and it is no longer possible to configure this. To taint a node pool, create a separate one")
+		return nil, fmt.Errorf("The AKS API has removed support for tainting all nodes in the default node pool and it is no longer possible to configure this. To taint a node pool, create a separate one.")
+	}
+
+	criticalAddonsEnabled := raw["only_critical_addons_enabled"].(bool)
+	if criticalAddonsEnabled {
+		*nodeTaints = append(*nodeTaints, "CriticalAddonsOnly=true:NoSchedule")
 	}
 
 	t := raw["tags"].(map[string]interface{})
@@ -217,6 +228,7 @@ func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedClusterAgentPoolProfile, error) {
 		EnableEncryptionAtHost: utils.Bool(raw["enable_host_encryption"].(bool)),
 		Name:                   utils.String(raw["name"].(string)),
 		NodeLabels:             nodeLabels,
+		NodeTaints:             nodeTaints,
 		Tags:                   tags.Expand(t),
 		Type:                   containerservice.AgentPoolType(raw["type"].(string)),
 		VMSize:                 containerservice.VMSizeTypes(raw["vm_size"].(string)),
@@ -384,6 +396,15 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolProfile, d *schema.ResourceData) (*[]interface{}, error) {
 		}
 	}
 
+	criticalAddonsEnabled := false
+	if agentPool.NodeTaints != nil {
+		for _, taint := range *agentPool.NodeTaints {
+			if strings.EqualFold(taint, "CriticalAddonsOnly=true:NoSchedule") {
+				criticalAddonsEnabled = true
+			}
+		}
+	}
+
 	osDiskSizeGB := 0
 	if agentPool.OsDiskSizeGB != nil {
 		osDiskSizeGB = int(*agentPool.OsDiskSizeGB)
@@ -430,6 +451,7 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolProfile, d *schema.ResourceData) (*[]interface{}, error) {
 			"orchestrator_version":         orchestratorVersion,
 			"proximity_placement_group_id": proximityPlacementGroupId,
 			"vnet_subnet_id":               vnetSubnetId,
+			"only_critical_addons_enabled": criticalAddonsEnabled,
 		},
 	}, nil
 }
diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown
index 4e47bc809a3b..917a1568817c 100644
--- a/website/docs/r/kubernetes_cluster.html.markdown
+++ b/website/docs/r/kubernetes_cluster.html.markdown
@@ -237,6 +237,8 @@ A `default_node_pool` block supports the following:
 
 * `node_labels` - (Optional) A map of Kubernetes labels which should be applied to nodes in the Default Node Pool. Changing this forces a new resource to be created.
 
+* `only_critical_addons_enabled` - (Optional) Enabling this option will taint default node pool with `CriticalAddonsOnly=true:NoSchedule` taint. Changing this forces a new resource to be created.
+
 * `orchestrator_version` - (Optional) Version of Kubernetes used for the Agents. If not specified, the latest recommended version will be used at provisioning time (but won't auto-upgrade)
 
 -> **Note:** This version must be supported by the Kubernetes Cluster - as such the version of Kubernetes used on the Cluster/Control Plane may need to be upgraded first