Add tests for kubernetes cluster node labels
dintel committed Feb 13, 2020
1 parent 63e8ec9 commit 099f40a
Showing 4 changed files with 224 additions and 0 deletions.
@@ -596,6 +596,33 @@ func testAccDataSourceAzureRMKubernetesCluster_autoscalingWithAvailabilityZones(
})
}

func TestAccDataSourceAzureRMKubernetesCluster_nodeLabels(t *testing.T) {
checkIfShouldRunTestsIndividually(t)
testAccDataSourceAzureRMKubernetesCluster_nodeLabels(t)
}

func testAccDataSourceAzureRMKubernetesCluster_nodeLabels(t *testing.T) {
data := acceptance.BuildTestData(t, "data.azurerm_kubernetes_cluster", "test")
clientId := os.Getenv("ARM_CLIENT_ID")
clientSecret := os.Getenv("ARM_CLIENT_SECRET")
labels := map[string]string{"key": "value"}

resource.Test(t, resource.TestCase{
PreCheck: func() { acceptance.PreCheck(t) },
Providers: acceptance.SupportedProviders,
CheckDestroy: testCheckAzureRMKubernetesClusterDestroy,
Steps: []resource.TestStep{
{
Config: testAccDataSourceAzureRMKubernetesCluster_nodeLabelsConfig(data, clientId, clientSecret, labels),
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMKubernetesClusterExists(data.ResourceName),
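// Map entries surface in flattened state as <attribute>.<key>, hence the
// agent_pool_profile.0.node_labels.key address below.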
resource.TestCheckResourceAttr(data.ResourceName, "agent_pool_profile.1.node_labels.key", "value"),
),
},
},
})
}

func TestAccDataSourceAzureRMKubernetesCluster_nodeTaints(t *testing.T) {
checkIfShouldRunTestsIndividually(t)
testAccDataSourceAzureRMKubernetesCluster_nodeTaints(t)
@@ -864,6 +891,18 @@ data "azurerm_kubernetes_cluster" "test" {
`, r)
}

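// testAccDataSourceAzureRMKubernetesCluster_nodeLabelsConfig layers a data
// source lookup on top of the resource-level node labels cluster config.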
func testAccDataSourceAzureRMKubernetesCluster_nodeLabelsConfig(data acceptance.TestData, clientId string, clientSecret string, labels map[string]string) string {
r := testAccAzureRMKubernetesCluster_nodeLabelsConfig(data, clientId, clientSecret, labels)
return fmt.Sprintf(`
%s
data "azurerm_kubernetes_cluster" "test" {
name = "${azurerm_kubernetes_cluster.test.name}"
resource_group_name = "${azurerm_kubernetes_cluster.test.resource_group_name}"
}
`, r)
}

func testAccDataSourceAzureRMKubernetesCluster_nodeTaintsConfig(data acceptance.TestData, clientId string, clientSecret string) string {
r := testAccAzureRMKubernetesCluster_nodeTaintsConfig(data, clientId, clientSecret)
return fmt.Sprintf(`
@@ -4,7 +4,9 @@ import (
"fmt"
"net/http"
"os"
"reflect"
"regexp"
"strings"
"testing"

"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
@@ -372,6 +374,46 @@ func testAccAzureRMKubernetesClusterNodePool_manualScaleVMSku(t *testing.T) {
})
}

func TestAccAzureRMKubernetesClusterNodePool_nodeLabels(t *testing.T) {
checkIfShouldRunTestsIndividually(t)
testAccAzureRMKubernetesClusterNodePool_nodeLabels(t)
}

func testAccAzureRMKubernetesClusterNodePool_nodeLabels(t *testing.T) {
data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test")
clientId := os.Getenv("ARM_CLIENT_ID")
clientSecret := os.Getenv("ARM_CLIENT_SECRET")
labels1 := map[string]string{"key": "value"}
labels2 := map[string]string{"key2": "value2"}
labels3 := map[string]string{}

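// Three apply steps: set an initial label, replace it with a different one,
// then clear all labels, verifying the node pool in the API after each step.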
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { acceptance.PreCheck(t) },
Providers: acceptance.SupportedProviders,
CheckDestroy: testCheckAzureRMKubernetesClusterNodePoolDestroy,
Steps: []resource.TestStep{
{
Config: testAccAzureRMKubernetesClusterNodePool_nodeLabelsConfig(data, clientId, clientSecret, labels1),
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMKubernetesNodePoolNodeLabels(data.ResourceName, labels1),
),
},
{
Config: testAccAzureRMKubernetesClusterNodePool_nodeLabelsConfig(data, clientId, clientSecret, labels2),
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMKubernetesNodePoolNodeLabels(data.ResourceName, labels2),
),
},
{
Config: testAccAzureRMKubernetesClusterNodePool_nodeLabelsConfig(data, clientId, clientSecret, labels3),
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMKubernetesNodePoolNodeLabels(data.ResourceName, labels3),
),
},
},
})
}

func TestAccAzureRMKubernetesClusterNodePool_nodePublicIP(t *testing.T) {
checkIfShouldRunTestsIndividually(t)
testAccAzureRMKubernetesClusterNodePool_nodePublicIP(t)
@@ -659,6 +701,45 @@ func testCheckAzureRMKubernetesNodePoolExists(resourceName string) resource.TestCheckFunc {
}
}

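// testCheckAzureRMKubernetesNodePoolNodeLabels reads the node pool back from
// the Agent Pools API and verifies that its node labels exactly match
// expectedLabels.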
func testCheckAzureRMKubernetesNodePoolNodeLabels(resourceName string, expectedLabels map[string]string) resource.TestCheckFunc {
return func(s *terraform.State) error {
client := acceptance.AzureProvider.Meta().(*clients.Client).Containers.AgentPoolsClient
ctx := acceptance.AzureProvider.Meta().(*clients.Client).StopContext

// Ensure we have enough information in state to look up in API
rs, ok := s.RootModule().Resources[resourceName]
if !ok {
return fmt.Errorf("Not found: %s", resourceName)
}

name := rs.Primary.Attributes["name"]
kubernetesClusterId := rs.Primary.Attributes["kubernetes_cluster_id"]
parsedK8sId, err := containers.ParseKubernetesClusterID(kubernetesClusterId)
if err != nil {
return fmt.Errorf("Error parsing kubernetes cluster id: %+v", err)
}

agentPool, err := client.Get(ctx, parsedK8sId.ResourceGroup, parsedK8sId.Name, name)
if err != nil {
return fmt.Errorf("Bad: Get on kubernetesClustersClient: %+v", err)
}

if agentPool.StatusCode == http.StatusNotFound {
return fmt.Errorf("Bad: Node Pool %q (Kubernetes Cluster %q / Resource Group: %q) does not exist", name, parsedK8sId.Name, parsedK8sId.ResourceGroup)
}

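// The API returns node labels as map[string]*string; flatten to
// map[string]string so the maps can be compared with reflect.DeepEqual.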
labels := make(map[string]string)
for k, v := range agentPool.NodeLabels {
labels[k] = *v
}
if !reflect.DeepEqual(labels, expectedLabels) {
return fmt.Errorf("Bad: Node Pool %q (Kubernetes Cluster %q / Resource Group: %q) nodeLabels %v do not match expected %v", name, parsedK8sId.Name, parsedK8sId.ResourceGroup, labels, expectedLabels)
}

return nil
}
}

func testAccAzureRMKubernetesClusterNodePool_autoScaleConfig(data acceptance.TestData, clientId, clientSecret string) string {
template := testAccAzureRMKubernetesClusterNodePool_templateConfig(data, clientId, clientSecret)
return fmt.Sprintf(`
@@ -890,6 +971,28 @@ resource "azurerm_kubernetes_cluster_node_pool" "manual" {
`, template, numberOfAgents)
}

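// testAccAzureRMKubernetesClusterNodePool_nodeLabelsConfig renders the labels
// map as quoted HCL key/value lines inside the pool's node_labels map.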
func testAccAzureRMKubernetesClusterNodePool_nodeLabelsConfig(data acceptance.TestData, clientId, clientSecret string, labels map[string]string) string {
template := testAccAzureRMKubernetesClusterNodePool_templateConfig(data, clientId, clientSecret)
labelsSlice := make([]string, 0, len(labels))
for k, v := range labels {
labelsSlice = append(labelsSlice, fmt.Sprintf(" \"%s\" = \"%s\"", k, v))
}
labelsStr := strings.Join(labelsSlice, "\n")
return fmt.Sprintf(`
%s
resource "azurerm_kubernetes_cluster_node_pool" "test" {
name = "internal"
kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id
vm_size = "Standard_DS2_v2"
node_count = 1
node_labels = {
%s
}
}
`, template, labelsStr)
}

func testAccAzureRMKubernetesClusterNodePool_nodePublicIPConfig(data acceptance.TestData, clientId, clientSecret string) string {
template := testAccAzureRMKubernetesClusterNodePool_templateConfig(data, clientId, clientSecret)
return fmt.Sprintf(`
@@ -3,6 +3,7 @@ package tests
import (
"fmt"
"os"
"strings"
"testing"

"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
@@ -153,6 +154,49 @@ func testAccAzureRMKubernetesCluster_linuxProfile(t *testing.T) {
})
}

func TestAccAzureRMKubernetesCluster_nodeLabels(t *testing.T) {
checkIfShouldRunTestsIndividually(t)
testAccAzureRMKubernetesCluster_nodeLabels(t)
}

func testAccAzureRMKubernetesCluster_nodeLabels(t *testing.T) {
data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test")
clientId := os.Getenv("ARM_CLIENT_ID")
clientSecret := os.Getenv("ARM_CLIENT_SECRET")
labels1 := map[string]string{"key": "value"}
labels2 := map[string]string{"key2": "value2"}
labels3 := map[string]string{}

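// As in the node pool test, the steps set, replace, and then remove the
// labels on the cluster's default node pool.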
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { acceptance.PreCheck(t) },
Providers: acceptance.SupportedProviders,
CheckDestroy: testCheckAzureRMKubernetesClusterDestroy,
Steps: []resource.TestStep{
{
Config: testAccAzureRMKubernetesCluster_nodeLabelsConfig(data, clientId, clientSecret, labels1),
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMKubernetesClusterExists(data.ResourceName),
resource.TestCheckResourceAttr(data.ResourceName, "default_node_pool.0.node_labels.key", "value"),
),
},
{
Config: testAccAzureRMKubernetesCluster_nodeLabelsConfig(data, clientId, clientSecret, labels2),
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMKubernetesClusterExists(data.ResourceName),
resource.TestCheckResourceAttr(data.ResourceName, "default_node_pool.0.node_labels.key2", "value2"),
),
},
{
Config: testAccAzureRMKubernetesCluster_nodeLabelsConfig(data, clientId, clientSecret, labels3),
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMKubernetesClusterExists(data.ResourceName),
resource.TestCheckResourceAttr(data.ResourceName, "default_node_pool.0.node_labels.%", "0"),
),
},
},
})
}

func TestAccAzureRMKubernetesCluster_nodeTaints(t *testing.T) {
checkIfShouldRunTestsIndividually(t)
testAccAzureRMKubernetesCluster_nodeTaints(t)
@@ -425,6 +469,41 @@ resource "azurerm_kubernetes_cluster" "test" {
`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, clientId, clientSecret)
}

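// testAccAzureRMKubernetesCluster_nodeLabelsConfig builds a cluster config
// whose default node pool carries the supplied node labels.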
func testAccAzureRMKubernetesCluster_nodeLabelsConfig(data acceptance.TestData, clientId string, clientSecret string, labels map[string]string) string {
labelsSlice := make([]string, 0, len(labels))
for k, v := range labels {
labelsSlice = append(labelsSlice, fmt.Sprintf(" \"%s\" = \"%s\"", k, v))
}
labelsStr := strings.Join(labelsSlice, "\n")
return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
name = "acctestRG-%d"
location = "%s"
}
resource "azurerm_kubernetes_cluster" "test" {
name = "acctestaks%d"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
dns_prefix = "acctestaks%d"
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_DS2_v2"
node_labels = {
%s
}
}
service_principal {
client_id = "%s"
client_secret = "%s"
}
}
`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, labelsStr, clientId, clientSecret)
}

func testAccAzureRMKubernetesCluster_nodeTaintsConfig(data acceptance.TestData, clientId string, clientSecret string) string {
return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
@@ -71,6 +71,7 @@ func TestAccAzureRMKubernetes_all(t *testing.T) {
"manualScaleMultiplePoolsUpdate": testAccAzureRMKubernetesClusterNodePool_manualScaleMultiplePoolsUpdate,
"manualScaleUpdate": testAccAzureRMKubernetesClusterNodePool_manualScaleUpdate,
"manualScaleVMSku": testAccAzureRMKubernetesClusterNodePool_manualScaleVMSku,
"nodeLabels": TestAccAzureRMKubernetesClusterNodePool_nodeLabels,
"nodePublicIP": testAccAzureRMKubernetesClusterNodePool_nodePublicIP,
"nodeTaints": testAccAzureRMKubernetesClusterNodePool_nodeTaints,
"requiresImport": testAccAzureRMKubernetesClusterNodePool_requiresImport,
@@ -85,6 +86,7 @@ func TestAccAzureRMKubernetes_all(t *testing.T) {
"basicVMSS": testAccAzureRMKubernetesCluster_basicVMSS,
"requiresImport": testAccAzureRMKubernetesCluster_requiresImport,
"linuxProfile": testAccAzureRMKubernetesCluster_linuxProfile,
"nodeLabels": testAccAzureRMKubernetesCluster_nodeLabels,
"nodeTaints": testAccAzureRMKubernetesCluster_nodeTaints,
"nodeResourceGroup": testAccAzureRMKubernetesCluster_nodeResourceGroup,
"upgradeConfig": testAccAzureRMKubernetesCluster_upgrade,
@@ -119,6 +121,7 @@ func TestAccAzureRMKubernetes_all(t *testing.T) {
"addOnProfileRouting": testAccDataSourceAzureRMKubernetesCluster_addOnProfileRouting,
"autoscalingNoAvailabilityZones": testAccDataSourceAzureRMKubernetesCluster_autoscalingNoAvailabilityZones,
"autoscalingWithAvailabilityZones": testAccDataSourceAzureRMKubernetesCluster_autoscalingWithAvailabilityZones,
"nodeLabels": TestAccDataSourceAzureRMKubernetesCluster_nodeLabels,
"nodeTaints": testAccDataSourceAzureRMKubernetesCluster_nodeTaints,
"enableNodePublicIP": testAccDataSourceAzureRMKubernetesCluster_enableNodePublicIP,
"privateLink": testAccDataSourceAzureRMKubernetesCluster_privateLink,