Promote node system config to GA (#6975) (#13423)
Signed-off-by: Modular Magician <[email protected]>
modular-magician authored Jan 9, 2023 · 1 parent 5b51259 · commit 0f9e1ef
Showing 5 changed files with 365 additions and 2 deletions.
3 changes: 3 additions & 0 deletions .changelog/6975.txt
@@ -0,0 +1,3 @@
```release-note:enhancement
container: promoted node system config in `google_container_node_pool` to GA
```
122 changes: 122 additions & 0 deletions google/node_config.go
@@ -376,6 +376,51 @@ func schemaNodeConfig() *schema.Schema {
ForceNew: true,
Description: `The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.`,
},
// Note that AtLeastOneOf can't be set because this schema is reused by
// two different resources.
"kubelet_config": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Description: `Node kubelet configs.`,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cpu_manager_policy": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringInSlice([]string{"static", "none", ""}, false),
Description: `Control the CPU management policy on the node.`,
},
"cpu_cfs_quota": {
Type: schema.TypeBool,
Optional: true,
Description: `Enable CPU CFS quota enforcement for containers that specify CPU limits.`,
},
"cpu_cfs_quota_period": {
Type: schema.TypeString,
Optional: true,
Description: `Set the CPU CFS quota period value 'cpu.cfs_period_us'.`,
},
},
},
},

"linux_node_config": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Description: `Parameters that can be configured on Linux nodes.`,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"sysctls": {
Type: schema.TypeMap,
Required: true,
Elem: &schema.Schema{Type: schema.TypeString},
Description: `The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.`,
},
},
},
},
"node_group": {
Type: schema.TypeString,
Optional: true,
@@ -591,6 +636,14 @@ func expandNodeConfig(v interface{}) *container.NodeConfig {
nc.BootDiskKmsKey = v.(string)
}

if v, ok := nodeConfig["kubelet_config"]; ok {
nc.KubeletConfig = expandKubeletConfig(v)
}

if v, ok := nodeConfig["linux_node_config"]; ok {
nc.LinuxNodeConfig = expandLinuxNodeConfig(v)
}

if v, ok := nodeConfig["node_group"]; ok {
nc.NodeGroup = v.(string)
}
@@ -617,6 +670,51 @@ func expandWorkloadMetadataConfig(v interface{}) *container.WorkloadMetadataConf
return wmc
}

func expandKubeletConfig(v interface{}) *container.NodeKubeletConfig {
if v == nil {
return nil
}
ls := v.([]interface{})
if len(ls) == 0 {
return nil
}
cfg := ls[0].(map[string]interface{})
kConfig := &container.NodeKubeletConfig{}
if cpuManagerPolicy, ok := cfg["cpu_manager_policy"]; ok {
kConfig.CpuManagerPolicy = cpuManagerPolicy.(string)
}
if cpuCfsQuota, ok := cfg["cpu_cfs_quota"]; ok {
kConfig.CpuCfsQuota = cpuCfsQuota.(bool)
kConfig.ForceSendFields = append(kConfig.ForceSendFields, "CpuCfsQuota")
}
if cpuCfsQuotaPeriod, ok := cfg["cpu_cfs_quota_period"]; ok {
kConfig.CpuCfsQuotaPeriod = cpuCfsQuotaPeriod.(string)
}
return kConfig
}
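A minimal test-style sketch of what this expansion produces, assuming the same `google` package and the one-element list shape the schema stores (hypothetical, not part of this commit):

```go
package google

import "testing"

// Hypothetical sketch: expandKubeletConfig on the one-element list produced
// by the kubelet_config schema block above.
func TestExpandKubeletConfigSketch(t *testing.T) {
	raw := []interface{}{
		map[string]interface{}{
			"cpu_manager_policy":   "static",
			"cpu_cfs_quota":        false,
			"cpu_cfs_quota_period": "100ms",
		},
	}
	kc := expandKubeletConfig(raw)
	if kc.CpuManagerPolicy != "static" || kc.CpuCfsQuotaPeriod != "100ms" {
		t.Errorf("unexpected expansion: %+v", kc)
	}
	// cpu_cfs_quota is false here; the value only reaches the API because the
	// expander appends "CpuCfsQuota" to ForceSendFields.
	if len(kc.ForceSendFields) != 1 || kc.ForceSendFields[0] != "CpuCfsQuota" {
		t.Errorf("expected CpuCfsQuota in ForceSendFields, got %v", kc.ForceSendFields)
	}
}
```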

func expandLinuxNodeConfig(v interface{}) *container.LinuxNodeConfig {
if v == nil {
return nil
}
ls := v.([]interface{})
if len(ls) == 0 {
return nil
}
cfg := ls[0].(map[string]interface{})
sysCfgRaw, ok := cfg["sysctls"]
if !ok {
return nil
}
m := make(map[string]string)
for k, v := range sysCfgRaw.(map[string]interface{}) {
m[k] = v.(string)
}
return &container.LinuxNodeConfig{
Sysctls: m,
}
}
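A companion sketch for the Linux node config path, under the same assumptions (hypothetical, not part of this commit):

```go
package google

import "testing"

// Hypothetical sketch: sysctls arrive as a map of strings inside the
// single-item linux_node_config list and are copied verbatim onto the
// API object.
func TestExpandLinuxNodeConfigSketch(t *testing.T) {
	raw := []interface{}{
		map[string]interface{}{
			"sysctls": map[string]interface{}{
				"net.core.somaxconn": "4096",
			},
		},
	}
	lc := expandLinuxNodeConfig(raw)
	if lc == nil || lc.Sysctls["net.core.somaxconn"] != "4096" {
		t.Errorf("unexpected expansion: %+v", lc)
	}
}
```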

func flattenNodeConfigDefaults(c *container.NodeConfigDefaults) []map[string]interface{} {
result := make([]map[string]interface{}, 0, 1)

@@ -661,6 +759,8 @@ func flattenNodeConfig(c *container.NodeConfig) []map[string]interface{} {
"taint": flattenTaints(c.Taints),
"workload_metadata_config": flattenWorkloadMetadataConfig(c.WorkloadMetadataConfig),
"boot_disk_kms_key": c.BootDiskKmsKey,
"kubelet_config": flattenKubeletConfig(c.KubeletConfig),
"linux_node_config": flattenLinuxNodeConfig(c.LinuxNodeConfig),
"node_group": c.NodeGroup,
})

@@ -764,3 +864,25 @@ func flattenWorkloadMetadataConfig(c *container.WorkloadMetadataConfig) []map[st
}
return result
}

func flattenKubeletConfig(c *container.NodeKubeletConfig) []map[string]interface{} {
result := []map[string]interface{}{}
if c != nil {
result = append(result, map[string]interface{}{
"cpu_cfs_quota": c.CpuCfsQuota,
"cpu_cfs_quota_period": c.CpuCfsQuotaPeriod,
"cpu_manager_policy": c.CpuManagerPolicy,
})
}
return result
}

func flattenLinuxNodeConfig(c *container.LinuxNodeConfig) []map[string]interface{} {
result := []map[string]interface{}{}
if c != nil {
result = append(result, map[string]interface{}{
"sysctls": c.Sysctls,
})
}
return result
}
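The flatteners run the other way, wrapping a non-nil API object into the one-element list stored in state and returning an empty list for nil. A sketch under the same assumptions, using the v1 container client this file already works with (hypothetical, not part of this commit):

```go
package google

import (
	"testing"

	container "google.golang.org/api/container/v1"
)

// Hypothetical sketch: flattenKubeletConfig mirrors expandKubeletConfig.
func TestFlattenKubeletConfigSketch(t *testing.T) {
	flattened := flattenKubeletConfig(&container.NodeKubeletConfig{
		CpuManagerPolicy:  "static",
		CpuCfsQuota:       true,
		CpuCfsQuotaPeriod: "100ms",
	})
	if len(flattened) != 1 || flattened[0]["cpu_manager_policy"] != "static" {
		t.Errorf("unexpected flatten result: %v", flattened)
	}
	// A nil API object flattens to an empty list rather than a list with a
	// nil entry.
	if got := flattenKubeletConfig(nil); len(got) != 0 {
		t.Errorf("expected empty list for nil input, got %v", got)
	}
}
```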
67 changes: 67 additions & 0 deletions google/resource_container_node_pool.go
@@ -1404,6 +1404,73 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node
log.Printf("[INFO] Updated workload_metadata_config for node pool %s", name)
}

if d.HasChange(prefix + "node_config.0.kubelet_config") {
req := &container.UpdateNodePoolRequest{
NodePoolId: name,
KubeletConfig: expandKubeletConfig(
d.Get(prefix + "node_config.0.kubelet_config")),
}
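// If kubelet_config was removed from the configuration, the expander returns
// nil; force-send the field so the update request carries an explicit empty
// value instead of omitting it.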
if req.KubeletConfig == nil {
req.ForceSendFields = []string{"KubeletConfig"}
}
updateF := func() error {
clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req)
if config.UserProjectOverride {
clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project)
}
op, err := clusterNodePoolsUpdateCall.Do()
if err != nil {
return err
}

// Wait until it's updated
return containerOperationWait(config, op,
nodePoolInfo.project,
nodePoolInfo.location,
"updating GKE node pool kubelet_config", userAgent,
timeout)
}

if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil {
return err
}

log.Printf("[INFO] Updated kubelet_config for node pool %s", name)
}
if d.HasChange(prefix + "node_config.0.linux_node_config") {
req := &container.UpdateNodePoolRequest{
NodePoolId: name,
LinuxNodeConfig: expandLinuxNodeConfig(
d.Get(prefix + "node_config.0.linux_node_config")),
}
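// Same pattern as kubelet_config above: a removed linux_node_config block
// expands to nil, so force-send the field to include an explicit empty value.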
if req.LinuxNodeConfig == nil {
req.ForceSendFields = []string{"LinuxNodeConfig"}
}
updateF := func() error {
clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req)
if config.UserProjectOverride {
clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project)
}
op, err := clusterNodePoolsUpdateCall.Do()
if err != nil {
return err
}

// Wait until it's updated
return containerOperationWait(config, op,
nodePoolInfo.project,
nodePoolInfo.location,
"updating GKE node pool linux_node_config", userAgent,
timeout)
}

if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil {
return err
}

log.Printf("[INFO] Updated linux_node_config for node pool %s", name)
}

}

if d.HasChange(prefix + "node_count") {