Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add os_disk_type to kubernetes_cluster_node_pool #9166

Merged
merged 10 commits into from
Nov 18, 2020
Original file line number Diff line number Diff line change
Expand Up @@ -113,6 +113,11 @@ func dataSourceKubernetesClusterNodePool() *schema.Resource {
Computed: true,
},

"os_disk_type": {
Type: schema.TypeString,
Computed: true,
},

"os_type": {
Type: schema.TypeString,
Computed: true,
Expand Down Expand Up @@ -244,6 +249,12 @@ func dataSourceKubernetesClusterNodePoolRead(d *schema.ResourceData, meta interf
osDiskSizeGB = int(*props.OsDiskSizeGB)
}
d.Set("os_disk_size_gb", osDiskSizeGB)

osDiskType := containerservice.Managed
if props.OsDiskType != "" {
osDiskType = props.OsDiskType
}
d.Set("os_disk_type", string(osDiskType))
d.Set("os_type", string(props.OsType))

// not returned from the API if not Spot
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -165,6 +165,17 @@ func resourceArmKubernetesClusterNodePool() *schema.Resource {
ValidateFunc: validation.IntAtLeast(1),
},

"os_disk_type": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Default: containerservice.Managed,
ValidateFunc: validation.StringInSlice([]string{
string(containerservice.Ephemeral),
string(containerservice.Managed),
}, false),
},

"os_type": {
Type: schema.TypeString,
Optional: true,
Expand Down Expand Up @@ -339,6 +350,10 @@ func resourceArmKubernetesClusterNodePoolCreate(d *schema.ResourceData, meta int
profile.ProximityPlacementGroupID = &proximityPlacementGroupId
}

if osDiskType := d.Get("os_disk_type").(string); osDiskType != "" {
profile.OsDiskType = containerservice.OSDiskType(osDiskType)
}

if vnetSubnetID := d.Get("vnet_subnet_id").(string); vnetSubnetID != "" {
profile.VnetSubnetID = utils.String(vnetSubnetID)
}
Expand Down Expand Up @@ -629,6 +644,12 @@ func resourceArmKubernetesClusterNodePoolRead(d *schema.ResourceData, meta inter
osDiskSizeGB = int(*props.OsDiskSizeGB)
}
d.Set("os_disk_size_gb", osDiskSizeGB)

osDiskType := containerservice.Managed
if props.OsDiskType != "" {
osDiskType = props.OsDiskType
}
d.Set("os_disk_type", osDiskType)
d.Set("os_type", string(props.OsType))

// not returned from the API if not Spot
Expand Down
23 changes: 23 additions & 0 deletions azurerm/internal/services/containers/kubernetes_nodepool.go
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,17 @@ func SchemaDefaultNodePool() *schema.Schema {
ValidateFunc: validation.IntAtLeast(1),
},

"os_disk_type": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Default: containerservice.Managed,
ValidateFunc: validation.StringInSlice([]string{
string(containerservice.Ephemeral),
string(containerservice.Managed),
}, false),
},

"vnet_subnet_id": {
Type: schema.TypeString,
Optional: true,
Expand Down Expand Up @@ -155,6 +166,7 @@ func ConvertDefaultNodePoolToAgentPool(input *[]containerservice.ManagedClusterA
Count: defaultCluster.Count,
VMSize: defaultCluster.VMSize,
OsDiskSizeGB: defaultCluster.OsDiskSizeGB,
OsDiskType: defaultCluster.OsDiskType,
VnetSubnetID: defaultCluster.VnetSubnetID,
MaxPods: defaultCluster.MaxPods,
OsType: defaultCluster.OsType,
Expand Down Expand Up @@ -233,6 +245,11 @@ func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedC
profile.OsDiskSizeGB = utils.Int32(osDiskSizeGB)
}

profile.OsDiskType = containerservice.Managed
if osDiskType := raw["os_disk_type"].(string); osDiskType != "" {
profile.OsDiskType = containerservice.OSDiskType(raw["os_disk_type"].(string))
}

if vnetSubnetID := raw["vnet_subnet_id"].(string); vnetSubnetID != "" {
profile.VnetSubnetID = utils.String(vnetSubnetID)
}
Expand Down Expand Up @@ -360,6 +377,11 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro
osDiskSizeGB = int(*agentPool.OsDiskSizeGB)
}

osDiskType := containerservice.Managed
if agentPool.OsDiskType != "" {
osDiskType = agentPool.OsDiskType
}

vnetSubnetId := ""
if agentPool.VnetSubnetID != nil {
vnetSubnetId = *agentPool.VnetSubnetID
Expand Down Expand Up @@ -388,6 +410,7 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro
"node_labels": nodeLabels,
"node_taints": []string{},
"os_disk_size_gb": osDiskSizeGB,
"os_disk_type": string(osDiskType),
"tags": tags.Flatten(agentPool.Tags),
"type": string(agentPool.Type),
"vm_size": string(agentPool.VMSize),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ var kubernetesNodePoolTests = map[string]func(t *testing.T){
"spot": testAccAzureRMKubernetesClusterNodePool_spot,
"osDiskSizeGB": testAccAzureRMKubernetesClusterNodePool_osDiskSizeGB,
"proximityPlacementGroupId": testAccAzureRMKubernetesClusterNodePool_proximityPlacementGroupId,
"osDiskType": testAccAzureRMKubernetesClusterNodePool_osDiskType,
"modeSystem": testAccAzureRMKubernetesClusterNodePool_modeSystem,
"modeUpdate": testAccAzureRMKubernetesClusterNodePool_modeUpdate,
"virtualNetworkAutomatic": testAccAzureRMKubernetesClusterNodePool_virtualNetworkAutomatic,
Expand Down Expand Up @@ -597,6 +598,30 @@ func testAccAzureRMKubernetesClusterNodePool_proximityPlacementGroupId(t *testin
})
}

// TestAccAzureRMKubernetesClusterNodePool_osDiskType is the standalone entry
// point for the os_disk_type acceptance test. The same lower-case
// implementation is also registered in the kubernetesNodePoolTests map, so
// checkIfShouldRunTestsIndividually gates this wrapper to avoid running the
// test twice when the batched suite is selected.
func TestAccAzureRMKubernetesClusterNodePool_osDiskType(t *testing.T) {
checkIfShouldRunTestsIndividually(t)
testAccAzureRMKubernetesClusterNodePool_osDiskType(t)
}

// testAccAzureRMKubernetesClusterNodePool_osDiskType provisions a node pool
// from the os_disk_type fixture config, checks the pool exists in Azure, and
// then verifies the resource round-trips cleanly through import.
func testAccAzureRMKubernetesClusterNodePool_osDiskType(t *testing.T) {
	testData := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test")

	// Apply the fixture, assert existence, then confirm import produces no diff.
	steps := []resource.TestStep{
		{
			Config: testAccAzureRMKubernetesClusterNodePool_osDiskTypeConfig(testData),
			Check: resource.ComposeTestCheckFunc(
				testCheckAzureRMKubernetesNodePoolExists(testData.ResourceName),
			),
		},
		testData.ImportStep(),
	}

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:     func() { acceptance.PreCheck(t) },
		Providers:    acceptance.SupportedProviders,
		CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy,
		Steps:        steps,
	})
}

func TestAccAzureRMKubernetesClusterNodePool_requiresImport(t *testing.T) {
checkIfShouldRunTestsIndividually(t)
testAccAzureRMKubernetesClusterNodePool_requiresImport(t)
Expand Down Expand Up @@ -1416,50 +1441,60 @@ func testAccAzureRMKubernetesClusterNodePool_proximityPlacementGroupIdConfig(dat
provider "azurerm" {
features {}
}

resource "azurerm_resource_group" "test" {
name = "acctestRG-aks-%d"
location = "%s"
}

resource "azurerm_kubernetes_cluster" "test" {
name = "acctestaks%d"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
dns_prefix = "acctestaks%d"

default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_DS2_v2"
}

identity {
type = "SystemAssigned"
}
}

resource "azurerm_proximity_placement_group" "test" {
name = "acctestPPG-aks-%d"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name

tags = {
environment = "Production"
}
}

resource "azurerm_kubernetes_cluster_node_pool" "test" {
name = "internal"
kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id
vm_size = "Standard_DS2_v2"
node_count = 1
proximity_placement_group_id = azurerm_proximity_placement_group.test.id
}

`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger)
}

// testAccAzureRMKubernetesClusterNodePool_osDiskTypeConfig renders the HCL for
// the os_disk_type acceptance test: the shared cluster template plus a node
// pool requesting an "Ephemeral" OS disk. os_disk_size_gb is set explicitly
// because the fixture pins a disk size alongside the Ephemeral disk type.
func testAccAzureRMKubernetesClusterNodePool_osDiskTypeConfig(data acceptance.TestData) string {
	base := testAccAzureRMKubernetesClusterNodePool_templateConfig(data)
	config := fmt.Sprintf(`
provider "azurerm" {
  features {}
}
%s
resource "azurerm_kubernetes_cluster_node_pool" "test" {
  name                  = "internal"
  kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id
  vm_size               = "Standard_DS3_v2"
  node_count            = 1
  os_disk_size_gb       = 100
  os_disk_type          = "Ephemeral"
}
`, base)
	return config
}

func testAccAzureRMKubernetesClusterNodePool_spotConfig(data acceptance.TestData) string {
template := testAccAzureRMKubernetesClusterNodePool_templateConfig(data)
return fmt.Sprintf(`
Expand Down
2 changes: 2 additions & 0 deletions website/docs/d/kubernetes_cluster_node_pool.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,8 @@ In addition to the Arguments listed above - the following Attributes are exporte

* `os_disk_size_gb` - The size of the OS Disk on each Node in this Node Pool.

* `os_disk_type` - The type of the OS Disk on each Node in this Node Pool.

* `os_type` - The operating system used on each Node in this Node Pool.

* `priority` - The priority of the Virtual Machines in the Virtual Machine Scale Set backing this Node Pool.
Expand Down
2 changes: 2 additions & 0 deletions website/docs/r/kubernetes_cluster_node_pool.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,8 @@ The following arguments are supported:

* `os_disk_size_gb` - (Optional) The Agent Operating System disk size in GB. Changing this forces a new resource to be created.

* `os_disk_type` - (Optional) The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created.

* `os_type` - (Optional) The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are `Linux` and `Windows`. Defaults to `Linux`.

* `priority` - (Optional) The Priority for Virtual Machines within the Virtual Machine Scale Set that powers this Node Pool. Possible values are `Regular` and `Spot`. Defaults to `Regular`. Changing this forces a new resource to be created.
Expand Down