From 0f9e1ef5a7e2670f756f0aaeb606dde16c6c188a Mon Sep 17 00:00:00 2001
From: The Magician
Date: Mon, 9 Jan 2023 12:38:26 -0800
Subject: [PATCH] Promote node system config to GA (#6975) (#13423)

Signed-off-by: Modular Magician
Signed-off-by: Modular Magician
---
 .changelog/6975.txt                         |   3 +
 google/node_config.go                       | 122 +++++++++++++
 google/resource_container_node_pool.go      |  67 +++++++
 google/resource_container_node_pool_test.go | 171 ++++++++++++++++++
 .../docs/r/container_cluster.html.markdown  |   4 +-
 5 files changed, 365 insertions(+), 2 deletions(-)
 create mode 100644 .changelog/6975.txt

diff --git a/.changelog/6975.txt b/.changelog/6975.txt
new file mode 100644
index 00000000000..443122bb786
--- /dev/null
+++ b/.changelog/6975.txt
@@ -0,0 +1,3 @@
+```release-note:enhancement
+container: promoted node system config in `google_container_node_pool` to GA
+```
diff --git a/google/node_config.go b/google/node_config.go
index 0d880ee4720..2732f829406 100644
--- a/google/node_config.go
+++ b/google/node_config.go
@@ -376,6 +376,51 @@ func schemaNodeConfig() *schema.Schema {
 				ForceNew:    true,
 				Description: `The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.`,
 			},
+			// Note that AtLeastOneOf can't be set because this schema is reused by
+			// two different resources.
+			"kubelet_config": {
+				Type:        schema.TypeList,
+				Optional:    true,
+				MaxItems:    1,
+				Description: `Node kubelet configs.`,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"cpu_manager_policy": {
+							Type:         schema.TypeString,
+							Required:     true,
+							ValidateFunc: validation.StringInSlice([]string{"static", "none", ""}, false),
+							Description:  `Control the CPU management policy on the node.`,
+						},
+						"cpu_cfs_quota": {
+							Type:        schema.TypeBool,
+							Optional:    true,
+							Description: `Enable CPU CFS quota enforcement for containers that specify CPU limits.`,
+						},
+						"cpu_cfs_quota_period": {
+							Type:        schema.TypeString,
+							Optional:    true,
+							Description: `Set the CPU CFS quota period value 'cpu.cfs_period_us'.`,
+						},
+					},
+				},
+			},
+
+			"linux_node_config": {
+				Type:        schema.TypeList,
+				Optional:    true,
+				MaxItems:    1,
+				Description: `Parameters that can be configured on Linux nodes.`,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"sysctls": {
+							Type:        schema.TypeMap,
+							Required:    true,
+							Elem:        &schema.Schema{Type: schema.TypeString},
+							Description: `The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.`,
+						},
+					},
+				},
+			},
 			"node_group": {
 				Type:        schema.TypeString,
 				Optional:    true,
@@ -591,6 +636,14 @@ func expandNodeConfig(v interface{}) *container.NodeConfig {
 		nc.BootDiskKmsKey = v.(string)
 	}
 
+	if v, ok := nodeConfig["kubelet_config"]; ok {
+		nc.KubeletConfig = expandKubeletConfig(v)
+	}
+
+	if v, ok := nodeConfig["linux_node_config"]; ok {
+		nc.LinuxNodeConfig = expandLinuxNodeConfig(v)
+	}
+
 	if v, ok := nodeConfig["node_group"]; ok {
 		nc.NodeGroup = v.(string)
 	}
@@ -617,6 +670,51 @@ func expandWorkloadMetadataConfig(v interface{}) *container.WorkloadMetadataConf
 	return wmc
 }
 
+func expandKubeletConfig(v interface{}) *container.NodeKubeletConfig {
+	if v == nil {
+		return nil
+	}
+	ls := v.([]interface{})
+	if len(ls) == 0 {
+		return nil
+	}
+	cfg := ls[0].(map[string]interface{})
+	kConfig := &container.NodeKubeletConfig{}
+	if cpuManagerPolicy, ok := cfg["cpu_manager_policy"]; ok {
+		kConfig.CpuManagerPolicy = cpuManagerPolicy.(string)
+	}
+	if cpuCfsQuota, ok := cfg["cpu_cfs_quota"]; ok {
+		kConfig.CpuCfsQuota = cpuCfsQuota.(bool)
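+		// CpuCfsQuota is a plain bool, so a false value would normally be
+		// dropped as a zero value when the request is serialized; listing it
+		// in ForceSendFields makes the client library send it explicitly.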
+		kConfig.ForceSendFields = append(kConfig.ForceSendFields, "CpuCfsQuota")
+	}
+	if cpuCfsQuotaPeriod, ok := cfg["cpu_cfs_quota_period"]; ok {
+		kConfig.CpuCfsQuotaPeriod = cpuCfsQuotaPeriod.(string)
+	}
+	return kConfig
+}
+
+func expandLinuxNodeConfig(v interface{}) *container.LinuxNodeConfig {
+	if v == nil {
+		return nil
+	}
+	ls := v.([]interface{})
+	if len(ls) == 0 {
+		return nil
+	}
+	cfg := ls[0].(map[string]interface{})
+	sysCfgRaw, ok := cfg["sysctls"]
+	if !ok {
+		return nil
+	}
+	m := make(map[string]string)
+	for k, v := range sysCfgRaw.(map[string]interface{}) {
+		m[k] = v.(string)
+	}
+	return &container.LinuxNodeConfig{
+		Sysctls: m,
+	}
+}
+
 func flattenNodeConfigDefaults(c *container.NodeConfigDefaults) []map[string]interface{} {
 	result := make([]map[string]interface{}, 0, 1)
 
@@ -661,6 +759,8 @@ func flattenNodeConfig(c *container.NodeConfig) []map[string]interface{} {
 		"taint":                    flattenTaints(c.Taints),
 		"workload_metadata_config": flattenWorkloadMetadataConfig(c.WorkloadMetadataConfig),
 		"boot_disk_kms_key":        c.BootDiskKmsKey,
+		"kubelet_config":           flattenKubeletConfig(c.KubeletConfig),
+		"linux_node_config":        flattenLinuxNodeConfig(c.LinuxNodeConfig),
 		"node_group":               c.NodeGroup,
 	})
 
@@ -764,3 +864,25 @@ func flattenWorkloadMetadataConfig(c *container.WorkloadMetadataConfig) []map[st
 	}
 	return result
 }
+
+func flattenKubeletConfig(c *container.NodeKubeletConfig) []map[string]interface{} {
+	result := []map[string]interface{}{}
+	if c != nil {
+		result = append(result, map[string]interface{}{
+			"cpu_cfs_quota":        c.CpuCfsQuota,
+			"cpu_cfs_quota_period": c.CpuCfsQuotaPeriod,
+			"cpu_manager_policy":   c.CpuManagerPolicy,
+		})
+	}
+	return result
+}
+
+func flattenLinuxNodeConfig(c *container.LinuxNodeConfig) []map[string]interface{} {
+	result := []map[string]interface{}{}
+	if c != nil {
+		result = append(result, map[string]interface{}{
+			"sysctls": c.Sysctls,
+		})
+	}
+	return result
+}
diff --git a/google/resource_container_node_pool.go b/google/resource_container_node_pool.go
index 64aa3d7c6db..883d1201e93 100644
--- a/google/resource_container_node_pool.go
+++ b/google/resource_container_node_pool.go
@@ -1404,6 +1404,73 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node
 		log.Printf("[INFO] Updated workload_metadata_config for node pool %s", name)
 	}
 
+	if d.HasChange(prefix + "node_config.0.kubelet_config") {
+		req := &container.UpdateNodePoolRequest{
+			NodePoolId: name,
+			KubeletConfig: expandKubeletConfig(
+				d.Get(prefix + "node_config.0.kubelet_config")),
+		}
+		if req.KubeletConfig == nil {
+			req.ForceSendFields = []string{"KubeletConfig"}
+		}
+		updateF := func() error {
+			clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req)
+			if config.UserProjectOverride {
+				clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project)
+			}
+			op, err := clusterNodePoolsUpdateCall.Do()
+			if err != nil {
+				return err
+			}
+
+			// Wait until it's updated
+			return containerOperationWait(config, op,
+				nodePoolInfo.project,
+				nodePoolInfo.location,
+				"updating GKE node pool kubelet_config", userAgent,
+				timeout)
+		}
+
+		if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil {
+			return err
+		}
+
+		log.Printf("[INFO] Updated kubelet_config for node pool %s", name)
+	}
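+	// linux_node_config follows the same pattern as kubelet_config above: a
+	// nil expansion (the block was removed from the config) is force-sent so
+	// the API still receives the field.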
+	if d.HasChange(prefix + "node_config.0.linux_node_config") {
+		req := &container.UpdateNodePoolRequest{
+			NodePoolId: name,
+			LinuxNodeConfig: expandLinuxNodeConfig(
+				d.Get(prefix + "node_config.0.linux_node_config")),
+		}
+		if req.LinuxNodeConfig == nil {
+			req.ForceSendFields = []string{"LinuxNodeConfig"}
+		}
+		updateF := func() error {
+			clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req)
+			if config.UserProjectOverride {
+				clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project)
+			}
+			op, err := clusterNodePoolsUpdateCall.Do()
+			if err != nil {
+				return err
+			}
+
+			// Wait until it's updated
+			return containerOperationWait(config, op,
+				nodePoolInfo.project,
+				nodePoolInfo.location,
+				"updating GKE node pool linux_node_config", userAgent,
+				timeout)
+		}
+
+		if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil {
+			return err
+		}
+
+		log.Printf("[INFO] Updated linux_node_config for node pool %s", name)
+	}
+
 	}
 
 	if d.HasChange(prefix + "node_count") {
diff --git a/google/resource_container_node_pool_test.go b/google/resource_container_node_pool_test.go
index 0444a075268..89410617a77 100644
--- a/google/resource_container_node_pool_test.go
+++ b/google/resource_container_node_pool_test.go
@@ -2,6 +2,7 @@ package google
 
 import (
 	"fmt"
+	"regexp"
 	"testing"
 
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
@@ -333,6 +334,98 @@ func TestAccContainerNodePool_withWorkloadIdentityConfig(t *testing.T) {
 	})
 }
 
+func TestAccContainerNodePool_withKubeletConfig(t *testing.T) {
+	t.Parallel()
+
+	cluster := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))
+	np := fmt.Sprintf("tf-test-np-%s", randString(t, 10))
+
+	vcrTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccContainerNodePool_withKubeletConfig(cluster, np, "static", "100us", true),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr("google_container_node_pool.with_kubelet_config",
+						"node_config.0.kubelet_config.0.cpu_cfs_quota", "true"),
+				),
+			},
+			{
+				ResourceName:      "google_container_node_pool.with_kubelet_config",
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+			{
+				Config: testAccContainerNodePool_withKubeletConfig(cluster, np, "", "", false),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr("google_container_node_pool.with_kubelet_config",
+						"node_config.0.kubelet_config.0.cpu_cfs_quota", "false"),
+				),
+			},
+			{
+				ResourceName:      "google_container_node_pool.with_kubelet_config",
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+		},
+	})
+}
+
+func TestAccContainerNodePool_withInvalidKubeletCpuManagerPolicy(t *testing.T) {
+	t.Parallel()
+	// Unit test, no interactions
+	skipIfVcr(t)
+
+	cluster := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))
+	np := fmt.Sprintf("tf-test-np-%s", randString(t, 10))
+
+	vcrTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccContainerNodePool_withKubeletConfig(cluster, np, "dontexist", "100us", true),
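+				// The trailing space inside `[static none ]` reflects the empty
+				// string that the schema's StringInSlice validator also accepts.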
+				ExpectError: regexp.MustCompile(`.*to be one of \[static none \].*`),
+			},
+		},
+	})
+}
+
+func TestAccContainerNodePool_withLinuxNodeConfig(t *testing.T) {
+	t.Parallel()
+
+	cluster := fmt.Sprintf("tf-test-cluster-%s", randString(t, 10))
+	np := fmt.Sprintf("tf-test-np-%s", randString(t, 10))
+
+	vcrTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckContainerClusterDestroyProducer(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccContainerNodePool_withLinuxNodeConfig(cluster, np, 10000, 12800, "1000 20000 100000", 1),
+			},
+			{
+				ResourceName:      "google_container_node_pool.with_linux_node_config",
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+			// Perform an update.
+			{
+				Config: testAccContainerNodePool_withLinuxNodeConfig(cluster, np, 10000, 12800, "1000 20000 200000", 1),
+			},
+			{
+				ResourceName:      "google_container_node_pool.with_linux_node_config",
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+		},
+	})
+}
+
 func TestAccContainerNodePool_withNetworkConfig(t *testing.T) {
 	t.Parallel()
 
@@ -1894,6 +1987,84 @@ resource "google_container_node_pool" "with_workload_metadata_config" {
 `, projectID, cluster, np)
 }
 
+func testAccContainerNodePool_withKubeletConfig(cluster, np, policy, period string, quota bool) string {
+	return fmt.Sprintf(`
+data "google_container_engine_versions" "central1a" {
+  location = "us-central1-a"
+}
+
+resource "google_container_cluster" "cluster" {
+  name               = "%s"
+  location           = "us-central1-a"
+  initial_node_count = 1
+  min_master_version = data.google_container_engine_versions.central1a.latest_master_version
+}
+
+# cpu_manager_policy & cpu_cfs_quota_period cannot be blank if cpu_cfs_quota is set to true
+# cpu_manager_policy & cpu_cfs_quota_period must not be set if cpu_cfs_quota is set to false
+resource "google_container_node_pool" "with_kubelet_config" {
+  name               = "%s"
+  location           = "us-central1-a"
+  cluster            = google_container_cluster.cluster.name
+  initial_node_count = 1
+  node_config {
+    image_type = "COS_CONTAINERD"
+    kubelet_config {
+      cpu_manager_policy   = %q
+      cpu_cfs_quota        = %v
+      cpu_cfs_quota_period = %q
+    }
+    oauth_scopes = [
+      "https://www.googleapis.com/auth/logging.write",
+      "https://www.googleapis.com/auth/monitoring",
+    ]
+  }
+}
+`, cluster, np, policy, quota, period)
+}
+
+func testAccContainerNodePool_withLinuxNodeConfig(cluster, np string, maxBacklog, soMaxConn int, tcpMem string, twReuse int) string {
+	return fmt.Sprintf(`
+data "google_container_engine_versions" "central1a" {
+  location = "us-central1-a"
+}
+
+resource "google_container_cluster" "cluster" {
+  name               = "%s"
+  location           = "us-central1-a"
+  initial_node_count = 1
+  min_master_version = data.google_container_engine_versions.central1a.latest_master_version
+}
+
+resource "google_container_node_pool" "with_linux_node_config" {
+  name               = "%s"
+  location           = "us-central1-a"
+  cluster            = google_container_cluster.cluster.name
+  initial_node_count = 1
+  node_config {
+    image_type = "COS_CONTAINERD"
+    linux_node_config {
+      sysctls = {
+        "net.core.netdev_max_backlog" = "%d"
+        "net.core.rmem_max"           = 10000
+        "net.core.wmem_default"       = 10000
+        "net.core.wmem_max"           = 20000
+        "net.core.optmem_max"         = 10000
+        "net.core.somaxconn"          = %d
+        "net.ipv4.tcp_rmem"           = "%s"
+        "net.ipv4.tcp_wmem"           = "%s"
+        "net.ipv4.tcp_tw_reuse"       = %d
+      }
+    }
+    oauth_scopes = [
+      "https://www.googleapis.com/auth/logging.write",
+      "https://www.googleapis.com/auth/monitoring",
+    ]
+  }
+}
+`, cluster, np, maxBacklog, soMaxConn, tcpMem, tcpMem, twReuse)
+}
+
 func testAccContainerNodePool_withNetworkConfig(cluster, np, network string) string {
 	return fmt.Sprintf(`
 resource "google_compute_network" "container_network" {
diff --git a/website/docs/r/container_cluster.html.markdown b/website/docs/r/container_cluster.html.markdown
index 7134663a935..18656411aa7 100644
--- a/website/docs/r/container_cluster.html.markdown
+++ b/website/docs/r/container_cluster.html.markdown
@@ -843,7 +843,7 @@ recommended. Structure is [documented below](#nested_taint).
 * `workload_metadata_config` - (Optional) Metadata configuration to expose to workloads on the node pool.
 Structure is [documented below](#nested_workload_metadata_config).
 
-* `kubelet_config` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html))
+* `kubelet_config` - (Optional)
 Kubelet configuration, currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
 Structure is [documented below](#nested_kubelet_config).
 
@@ -855,7 +855,7 @@ kubelet_config {
   cpu_manager_policy   = "static"
   cpu_cfs_quota        = true
   cpu_cfs_quota_period = "100us"
 }
 ```
 
-* `linux_node_config` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html))
+* `linux_node_config` - (Optional)
 Linux node configuration, currently supported attributes can be found [here](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create#--system-config-from-file).
 Note that validations happen all server side. All attributes are optional.
 Structure is [documented below](#nested_linux_node_config).
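As a sketch, a minimal `linux_node_config` block mirrors the `kubelet_config` example above; the sysctl keys and values here are illustrative, borrowed from the acceptance test config in this patch, and are validated server side:

```
linux_node_config {
  sysctls = {
    "net.core.netdev_max_backlog" = "10000"
    "net.core.somaxconn"          = "12800"
  }
}
```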