From 3eabe45bb70fe77ab2510c18a0206a61b2492eca Mon Sep 17 00:00:00 2001
From: tombuildsstuff
Date: Fri, 5 Jun 2020 11:07:49 +0200
Subject: [PATCH] New Data Source: `azurerm_kubernetes_cluster_node_pool`

Fixes #5134
---
 ...ubernetes_cluster_node_pool_data_source.go | 262 ++++++++++++++++++
 .../services/containers/registration.go       |   7 +-
 ...etes_cluster_node_pool_data_source_test.go |  52 ++++
 .../tests/kubernetes_cluster_resource_test.go |  17 +-
 website/azurerm.erb                           |   4 +
 ...kubernetes_cluster_node_pool.html.markdown |  85 ++++++
 ...kubernetes_cluster_node_pool.html.markdown |   2 +-
 7 files changed, 417 insertions(+), 12 deletions(-)
 create mode 100644 azurerm/internal/services/containers/kubernetes_cluster_node_pool_data_source.go
 create mode 100644 azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_data_source_test.go
 create mode 100644 website/docs/d/kubernetes_cluster_node_pool.html.markdown

diff --git a/azurerm/internal/services/containers/kubernetes_cluster_node_pool_data_source.go b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_data_source.go
new file mode 100644
index 000000000000..26dd83b3b6c9
--- /dev/null
+++ b/azurerm/internal/services/containers/kubernetes_cluster_node_pool_data_source.go
@@ -0,0 +1,262 @@
+package containers
+
+import (
+    "fmt"
+    "time"
+
+    "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-03-01/containerservice"
+    "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+    "github.com/hashicorp/terraform-plugin-sdk/helper/validation"
+    "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure"
+    "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate"
+    "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients"
+    "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags"
+    "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts"
+    "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils"
+)
+
+func dataSourceKubernetesClusterNodePool() *schema.Resource {
+    return &schema.Resource{
+        Read: dataSourceKubernetesClusterNodePoolRead,
+
+        Timeouts: &schema.ResourceTimeout{
+            Read: schema.DefaultTimeout(5 * time.Minute),
+        },
+
+        Schema: map[string]*schema.Schema{
+            "name": {
+                Type:         schema.TypeString,
+                Required:     true,
+                ValidateFunc: validate.KubernetesAgentPoolName,
+            },
+
+            "kubernetes_cluster_name": {
+                Type:         schema.TypeString,
+                Required:     true,
+                ValidateFunc: validation.StringIsNotEmpty,
+            },
+
+            "resource_group_name": azure.SchemaResourceGroupNameForDataSource(),
+
+            // Computed
+            "availability_zones": {
+                Type:     schema.TypeList,
+                Computed: true,
+                Elem: &schema.Schema{
+                    Type: schema.TypeString,
+                },
+            },
+
+            "enable_auto_scaling": {
+                Type:     schema.TypeBool,
+                Computed: true,
+            },
+
+            "enable_node_public_ip": {
+                Type:     schema.TypeBool,
+                Computed: true,
+            },
+
+            "eviction_policy": {
+                Type:     schema.TypeString,
+                Computed: true,
+            },
+
+            "max_count": {
+                Type:     schema.TypeInt,
+                Computed: true,
+            },
+
+            "max_pods": {
+                Type:     schema.TypeInt,
+                Computed: true,
+            },
+
+            "mode": {
+                Type:     schema.TypeString,
+                Computed: true,
+            },
+
+            "min_count": {
+                Type:     schema.TypeInt,
+                Computed: true,
+            },
+
+            "node_count": {
+                Type:     schema.TypeInt,
+                Computed: true,
+            },
+
+            "node_labels": {
+                Type:     schema.TypeMap,
+                Computed: true,
+                Elem: &schema.Schema{
+                    Type: schema.TypeString,
+                },
+            },
+
+            "node_taints": {
+                Type:     schema.TypeList,
+                Computed: true,
+                Elem: &schema.Schema{
+                    Type: schema.TypeString,
+                },
+            },
+
+            "orchestrator_version": {
+                Type:     schema.TypeString,
+                Computed: true,
+            },
+
+            "os_disk_size_gb": {
+                Type:     schema.TypeInt,
+                Computed: true,
+            },
+
+            "os_type": {
+                Type:     schema.TypeString,
+                Computed: true,
+            },
+
+            "priority": {
+                Type:     schema.TypeString,
+                Computed: true,
+            },
+
+            "spot_max_price": {
+                Type:     schema.TypeFloat,
+                Computed: true,
+            },
+
+            "tags": tags.SchemaDataSource(),
+
+            "vm_size": {
+                Type:     schema.TypeString,
+                Computed: true,
+            },
+
+            "vnet_subnet_id": {
+                Type:     schema.TypeString,
+                Computed: true,
+            },
+        },
+    }
+}
+
+func dataSourceKubernetesClusterNodePoolRead(d *schema.ResourceData, meta interface{}) error {
+    clustersClient := meta.(*clients.Client).Containers.KubernetesClustersClient
+    poolsClient := meta.(*clients.Client).Containers.AgentPoolsClient
+    ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d)
+    defer cancel()
+
+    nodePoolName := d.Get("name").(string)
+    clusterName := d.Get("kubernetes_cluster_name").(string)
+    resourceGroup := d.Get("resource_group_name").(string)
+
+    // if the parent cluster doesn't exist then the node pool won't
+    cluster, err := clustersClient.Get(ctx, resourceGroup, clusterName)
+    if err != nil {
+        if utils.ResponseWasNotFound(cluster.Response) {
+            return fmt.Errorf("Kubernetes Cluster %q was not found in Resource Group %q", clusterName, resourceGroup)
+        }
+
+        return fmt.Errorf("retrieving Managed Kubernetes Cluster %q (Resource Group %q): %+v", clusterName, resourceGroup, err)
+    }
+
+    resp, err := poolsClient.Get(ctx, resourceGroup, clusterName, nodePoolName)
+    if err != nil {
+        if utils.ResponseWasNotFound(resp.Response) {
+            return fmt.Errorf("Node Pool %q was not found in Managed Kubernetes Cluster %q / Resource Group %q", nodePoolName, clusterName, resourceGroup)
+        }
+
+        return fmt.Errorf("retrieving Node Pool %q (Managed Kubernetes Cluster %q / Resource Group %q): %+v", nodePoolName, clusterName, resourceGroup, err)
+    }
+
+    if resp.ID == nil || *resp.ID == "" {
+        return fmt.Errorf("retrieving Node Pool %q (Managed Kubernetes Cluster %q / Resource Group %q): `id` was nil", nodePoolName, clusterName, resourceGroup)
+    }
+
+    d.SetId(*resp.ID)
+    d.Set("name", nodePoolName)
+    d.Set("kubernetes_cluster_name", clusterName)
+    d.Set("resource_group_name", resourceGroup)
+
+    if props := resp.ManagedClusterAgentPoolProfileProperties; props != nil {
+        if err := d.Set("availability_zones", utils.FlattenStringSlice(props.AvailabilityZones)); err != nil {
+            return fmt.Errorf("setting `availability_zones`: %+v", err)
+        }
+
+        d.Set("enable_auto_scaling", props.EnableAutoScaling)
+        d.Set("enable_node_public_ip", props.EnableNodePublicIP)
+
+        evictionPolicy := ""
+        if props.ScaleSetEvictionPolicy != "" {
+            evictionPolicy = string(props.ScaleSetEvictionPolicy)
+        }
+        d.Set("eviction_policy", evictionPolicy)
+
+        maxCount := 0
+        if props.MaxCount != nil {
+            maxCount = int(*props.MaxCount)
+        }
+        d.Set("max_count", maxCount)
+
+        maxPods := 0
+        if props.MaxPods != nil {
+            maxPods = int(*props.MaxPods)
+        }
+        d.Set("max_pods", maxPods)
+
+        minCount := 0
+        if props.MinCount != nil {
+            minCount = int(*props.MinCount)
+        }
+        d.Set("min_count", minCount)
+
+        mode := string(containerservice.User)
+        if props.Mode != "" {
+            mode = string(props.Mode)
+        }
+        d.Set("mode", mode)
+
+        count := 0
+        if props.Count != nil {
+            count = int(*props.Count)
+        }
+        d.Set("node_count", count)
+
+        if err := d.Set("node_labels", props.NodeLabels); err != nil {
+            return fmt.Errorf("setting `node_labels`: %+v", err)
+        }
+
+        if err := d.Set("node_taints", utils.FlattenStringSlice(props.NodeTaints)); err != nil {
+            return fmt.Errorf("setting `node_taints`: %+v", err)
+        }
+
+        d.Set("orchestrator_version", props.OrchestratorVersion)
+        osDiskSizeGB := 0
+        if props.OsDiskSizeGB != nil {
+            osDiskSizeGB = int(*props.OsDiskSizeGB)
+        }
+        d.Set("os_disk_size_gb", osDiskSizeGB)
+        d.Set("os_type", string(props.OsType))
+
+        // not returned from the API if not Spot
+        priority := string(containerservice.Regular)
+        if props.ScaleSetPriority != "" {
+            priority = string(props.ScaleSetPriority)
+        }
+        d.Set("priority", priority)
+
+        spotMaxPrice := -1.0
+        if props.SpotMaxPrice != nil {
+            spotMaxPrice = *props.SpotMaxPrice
+        }
+        d.Set("spot_max_price", spotMaxPrice)
+
+        d.Set("vnet_subnet_id", props.VnetSubnetID)
+        d.Set("vm_size", string(props.VMSize))
+    }
+
+    return tags.FlattenAndSet(d, resp.Tags)
+}
diff --git a/azurerm/internal/services/containers/registration.go b/azurerm/internal/services/containers/registration.go
index edb8f6fe732b..7bb1cba4efc4 100644
--- a/azurerm/internal/services/containers/registration.go
+++ b/azurerm/internal/services/containers/registration.go
@@ -21,9 +21,10 @@ func (r Registration) WebsiteCategories() []string {
 
 // SupportedDataSources returns the supported Data Sources supported by this Service
 func (r Registration) SupportedDataSources() map[string]*schema.Resource {
     return map[string]*schema.Resource{
-        "azurerm_kubernetes_service_versions": dataSourceArmKubernetesServiceVersions(),
-        "azurerm_container_registry":          dataSourceArmContainerRegistry(),
-        "azurerm_kubernetes_cluster":          dataSourceArmKubernetesCluster(),
+        "azurerm_kubernetes_service_versions":  dataSourceArmKubernetesServiceVersions(),
+        "azurerm_container_registry":           dataSourceArmContainerRegistry(),
+        "azurerm_kubernetes_cluster":           dataSourceArmKubernetesCluster(),
+        "azurerm_kubernetes_cluster_node_pool": dataSourceKubernetesClusterNodePool(),
     }
 }
diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_data_source_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_data_source_test.go
new file mode 100644
index 000000000000..46669edabbe7
--- /dev/null
+++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_node_pool_data_source_test.go
@@ -0,0 +1,52 @@
+package tests
+
+import (
+    "fmt"
+    "testing"
+
+    "github.com/hashicorp/terraform-plugin-sdk/helper/resource"
+    "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance"
+)
+
+var kubernetesNodePoolDataSourceTests = map[string]func(t *testing.T){
+    "basic": testAccAzureRMKubernetesClusterNodePoolDataSource_basic,
+}
+
+func TestAccAzureRMKubernetesClusterNodePoolDataSource_basic(t *testing.T) {
+    checkIfShouldRunTestsIndividually(t)
+    testAccAzureRMKubernetesClusterNodePoolDataSource_basic(t)
+}
+
+func testAccAzureRMKubernetesClusterNodePoolDataSource_basic(t *testing.T) {
+    data := acceptance.BuildTestData(t, "data.azurerm_kubernetes_cluster_node_pool", "test")
+
+    resource.ParallelTest(t, resource.TestCase{
+        PreCheck:     func() { acceptance.PreCheck(t) },
+        Providers:    acceptance.SupportedProviders,
+        CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy,
+        Steps: []resource.TestStep{
+            {
+                Config: testAccAzureRMKubernetesClusterNodePoolDataSource_basicConfig(data),
+                Check: resource.ComposeTestCheckFunc(
+                    testCheckAzureRMKubernetesNodePoolExists(data.ResourceName),
+                    resource.TestCheckResourceAttr(data.ResourceName, "node_count", "1"),
+                    resource.TestCheckResourceAttr(data.ResourceName, "tags.%", "1"),
+                    resource.TestCheckResourceAttr(data.ResourceName, "tags.environment", "Staging"),
+                ),
+            },
+        },
+    })
+}
+
+func testAccAzureRMKubernetesClusterNodePoolDataSource_basicConfig(data acceptance.TestData) string {
+    template := testAccAzureRMKubernetesClusterNodePool_manualScaleConfig(data)
+    return fmt.Sprintf(`
+%s
+
+data "azurerm_kubernetes_cluster_node_pool" "test" {
+  name                    = azurerm_kubernetes_cluster_node_pool.test.name
+  kubernetes_cluster_name = azurerm_kubernetes_cluster.test.name
+  resource_group_name     = azurerm_kubernetes_cluster.test.resource_group_name
+}
+`, template)
+}
diff --git a/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go b/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go
index a03b6ee3b2c0..63b29bafd2ce 100644
--- a/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go
+++ b/azurerm/internal/services/containers/tests/kubernetes_cluster_resource_test.go
@@ -22,14 +22,15 @@ func TestAccAzureRMKubernetes_all(t *testing.T) {
     // NOTE: this is a combined test rather than separate split out tests to
     // ease the load on the kubernetes api
     testCases := map[string]map[string]func(t *testing.T){
-        "auth":         kubernetesAuthTests,
-        "clusterAddOn": kubernetesAddOnTests,
-        "datasource":   kubernetesDataSourceTests,
-        "network":      kubernetesNetworkAuthTests,
-        "nodePool":     kubernetesNodePoolTests,
-        "other":        kubernetesOtherTests,
-        "scaling":      kubernetesScalingTests,
-        "upgrade":      kubernetesUpgradeTests,
+        "auth":               kubernetesAuthTests,
+        "clusterAddOn":       kubernetesAddOnTests,
+        "datasource":         kubernetesDataSourceTests,
+        "network":            kubernetesNetworkAuthTests,
+        "nodePool":           kubernetesNodePoolTests,
+        "nodePoolDataSource": kubernetesNodePoolDataSourceTests,
+        "other":              kubernetesOtherTests,
+        "scaling":            kubernetesScalingTests,
+        "upgrade":            kubernetesUpgradeTests,
     }
 
     for group, m := range testCases {
diff --git a/website/azurerm.erb b/website/azurerm.erb
index f343f042cd93..f0d8a2f57642 100644
--- a/website/azurerm.erb
+++ b/website/azurerm.erb
@@ -297,6 +297,10 @@
                 <li>
                     <a href="/docs/providers/azurerm/d/kubernetes_cluster.html">azurerm_kubernetes_cluster</a>
                 </li>
+
+                <li>
+                    <a href="/docs/providers/azurerm/d/kubernetes_cluster_node_pool.html">azurerm_kubernetes_cluster_node_pool</a>
+                </li>
 
                 <li>
                     <a href="/docs/providers/azurerm/d/kubernetes_service_versions.html">azurerm_kubernetes_service_versions</a>
diff --git a/website/docs/d/kubernetes_cluster_node_pool.html.markdown b/website/docs/d/kubernetes_cluster_node_pool.html.markdown
new file mode 100644
index 000000000000..c487d4e976b0
--- /dev/null
+++ b/website/docs/d/kubernetes_cluster_node_pool.html.markdown
@@ -0,0 +1,85 @@
+---
+subcategory: "Container"
+layout: "azurerm"
+page_title: "Azure Resource Manager: Data Source: azurerm_kubernetes_cluster_node_pool"
+description: |-
+  Gets information about an existing Kubernetes Cluster Node Pool.
+---
+
+# Data Source: azurerm_kubernetes_cluster_node_pool
+
+Use this data source to access information about an existing Kubernetes Cluster Node Pool.
+
+## Example Usage
+
+```hcl
+data "azurerm_kubernetes_cluster_node_pool" "example" {
+  name                    = "existing"
+  kubernetes_cluster_name = "existing-cluster"
+  resource_group_name     = "existing-resource-group"
+}
+
+output "id" {
+  value = data.azurerm_kubernetes_cluster_node_pool.example.id
+}
+```
+
+## Arguments Reference
+
+The following arguments are supported:
+
+* `kubernetes_cluster_name` - (Required) The name of the Kubernetes Cluster where this Node Pool is located.
+
+* `name` - (Required) The name of this Kubernetes Cluster Node Pool.
+
+* `resource_group_name` - (Required) The name of the Resource Group where the Kubernetes Cluster exists.
+
+## Attributes Reference
+
+In addition to the Arguments listed above - the following Attributes are exported:
+
+* `id` - The ID of the Kubernetes Cluster Node Pool.
+
+* `availability_zones` - A list of the Availability Zones in which the Nodes in this Node Pool exist.
+
+* `enable_auto_scaling` - Does this Node Pool have Auto-Scaling enabled?
+
+* `enable_node_public_ip` - Do nodes in this Node Pool have a Public IP Address?
+
+* `eviction_policy` - The eviction policy used for Virtual Machines in the Virtual Machine Scale Set, when `priority` is set to `Spot`.
+
+* `max_count` - The maximum number of Nodes allowed when auto-scaling is enabled.
+
+* `max_pods` - The maximum number of Pods allowed on each Node in this Node Pool.
+
+* `min_count` - The minimum number of Nodes allowed when auto-scaling is enabled.
+
+* `mode` - The Mode for this Node Pool, specifying how these Nodes should be used (for either System or User resources).
+
+* `node_count` - The current number of Nodes in the Node Pool.
+
+* `node_labels` - A map of Kubernetes Labels applied to each Node in this Node Pool.
+
+* `node_taints` - A list of the Kubernetes Taints applied to each Node in this Node Pool.
+
+* `orchestrator_version` - The version of Kubernetes configured on each Node in this Node Pool.
+
+* `os_disk_size_gb` - The size of the OS Disk on each Node in this Node Pool.
+
+* `os_type` - The operating system used on each Node in this Node Pool.
+
+* `priority` - The priority of the Virtual Machines in the Virtual Machine Scale Set backing this Node Pool.
+
+* `spot_max_price` - The maximum price being paid for Virtual Machines in this Scale Set. `-1` means the current on-demand price for a Virtual Machine.
+
+* `tags` - A mapping of tags assigned to the Kubernetes Cluster Node Pool.
+
+* `vm_size` - The size of the Virtual Machines used in the Virtual Machine Scale Set backing this Node Pool.
+
+* `vnet_subnet_id` - The ID of the Subnet in which this Node Pool exists.
+
+## Timeouts
+
+The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions:
+
+* `read` - (Defaults to 5 minutes) Used when retrieving the Kubernetes Cluster Node Pool.
\ No newline at end of file
diff --git a/website/docs/r/kubernetes_cluster_node_pool.html.markdown b/website/docs/r/kubernetes_cluster_node_pool.html.markdown
index 9fc2d98e2d66..556ab2832a98 100644
--- a/website/docs/r/kubernetes_cluster_node_pool.html.markdown
+++ b/website/docs/r/kubernetes_cluster_node_pool.html.markdown
@@ -103,7 +103,7 @@ The following arguments are supported:
 
 ~> **Note:** Spot Node Pools are in Preview and must be opted-into - [more information on how to opt into this Preview can be found in the AKS Documentation](https://docs.microsoft.com/en-us/azure/aks/spot-node-pool).
 
-* `spot_max_price` - (Optional) The maximum price you're willing to pay in USD per Virtual Machine. Valid values are -1 (the current Virtual Machine price) or a positive value with up to five decimal places. Changing this forces a new resource to be created.
+* `spot_max_price` - (Optional) The maximum price you're willing to pay in USD per Virtual Machine. Valid values are `-1` (the current on-demand price for a Virtual Machine) or a positive value with up to five decimal places. Changing this forces a new resource to be created.
 
 ~> **Note:** This field can only be configured when `priority` is set to `Spot`.
 
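For anyone trying the new data source locally, a minimal usage sketch (not part of the patch; the cluster, resource group and pool names are placeholders). All referenced arguments and attributes come from the schema above; per the read function, a non-Spot pool reports `priority = "Regular"` and `spot_max_price = -1`.

```hcl
# Hypothetical configuration - names below are placeholders, not values from the patch.
data "azurerm_kubernetes_cluster_node_pool" "example" {
  name                    = "default"
  kubernetes_cluster_name = "example-aks"
  resource_group_name     = "example-resources"
}

# Surface a few of the attributes documented above.
output "node_pool_vm_size" {
  value = data.azurerm_kubernetes_cluster_node_pool.example.vm_size
}

output "node_pool_spot_settings" {
  value = {
    priority        = data.azurerm_kubernetes_cluster_node_pool.example.priority
    eviction_policy = data.azurerm_kubernetes_cluster_node_pool.example.eviction_policy
    spot_max_price  = data.azurerm_kubernetes_cluster_node_pool.example.spot_max_price
  }
}
```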