diff --git a/pkg/controllers/controllers.go b/pkg/controllers/controllers.go
index 4b97ef03d3..02d6ed26d2 100644
--- a/pkg/controllers/controllers.go
+++ b/pkg/controllers/controllers.go
@@ -85,9 +85,6 @@ func NewControllers(
         informer.NewNodePoolController(kubeClient, cloudProvider, cluster),
         informer.NewNodeClaimController(kubeClient, cloudProvider, cluster),
         termination.NewController(clock, kubeClient, cloudProvider, terminator.NewTerminator(clock, kubeClient, evictionQueue, recorder), recorder),
-        metricspod.NewController(kubeClient, cluster),
-        metricsnodepool.NewController(kubeClient, cloudProvider),
-        metricsnode.NewController(cluster),
         nodepoolreadiness.NewController(kubeClient, cloudProvider),
         nodepoolregistrationhealth.NewController(kubeClient, cloudProvider),
         nodepoolcounter.NewController(kubeClient, cloudProvider, cluster),
@@ -99,25 +96,32 @@
         nodeclaimdisruption.NewController(clock, kubeClient, cloudProvider),
         nodeclaimhydration.NewController(kubeClient, cloudProvider),
         nodehydration.NewController(kubeClient, cloudProvider),
-        status.NewController[*v1.NodeClaim](
-            kubeClient,
-            mgr.GetEventRecorderFor("karpenter"),
-            status.EmitDeprecatedMetrics,
-            status.WithHistogramBuckets(prometheus.ExponentialBuckets(0.5, 2, 15)), // 0.5, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192
-            status.WithLabels(append(lo.Map(cloudProvider.GetSupportedNodeClasses(), func(obj status.Object, _ int) string { return v1.NodeClassLabelKey(object.GVK(obj).GroupKind()) }), v1.NodePoolLabelKey)...),
-        ),
-        status.NewController[*v1.NodePool](
-            kubeClient,
-            mgr.GetEventRecorderFor("karpenter"),
-            status.EmitDeprecatedMetrics,
-            status.WithHistogramBuckets(prometheus.ExponentialBuckets(0.5, 2, 15)), // 0.5, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192
-        ),
-        status.NewGenericObjectController[*corev1.Node](
-            kubeClient,
-            mgr.GetEventRecorderFor("karpenter"),
-            status.WithHistogramBuckets(prometheus.ExponentialBuckets(0.5, 2, 15)), // 0.5, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192
-            status.WithLabels(append(lo.Map(cloudProvider.GetSupportedNodeClasses(), func(obj status.Object, _ int) string { return v1.NodeClassLabelKey(object.GVK(obj).GroupKind()) }), v1.NodePoolLabelKey, v1.NodeInitializedLabelKey)...),
-        ),
+    }
+
+    if !options.FromContext(ctx).DisableClusterStateObservability {
+        controllers = append(controllers,
+            metricspod.NewController(kubeClient, cluster),
+            metricsnodepool.NewController(kubeClient, cloudProvider),
+            metricsnode.NewController(cluster),
+            status.NewController[*v1.NodeClaim](
+                kubeClient,
+                mgr.GetEventRecorderFor("karpenter"),
+                status.EmitDeprecatedMetrics,
+                status.WithHistogramBuckets(prometheus.ExponentialBuckets(0.5, 2, 15)), // 0.5, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192
+                status.WithLabels(append(lo.Map(cloudProvider.GetSupportedNodeClasses(), func(obj status.Object, _ int) string { return v1.NodeClassLabelKey(object.GVK(obj).GroupKind()) }), v1.NodePoolLabelKey)...),
+            ),
+            status.NewController[*v1.NodePool](
+                kubeClient,
+                mgr.GetEventRecorderFor("karpenter"),
+                status.EmitDeprecatedMetrics,
+                status.WithHistogramBuckets(prometheus.ExponentialBuckets(0.5, 2, 15)), // 0.5, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192
+            ),
+            status.NewGenericObjectController[*corev1.Node](
+                kubeClient,
+                mgr.GetEventRecorderFor("karpenter"),
+                status.WithHistogramBuckets(prometheus.ExponentialBuckets(0.5, 2, 15)), // 0.5, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192
+                status.WithLabels(append(lo.Map(cloudProvider.GetSupportedNodeClasses(), func(obj status.Object, _ int) string { return v1.NodeClassLabelKey(object.GVK(obj).GroupKind()) }), v1.NodePoolLabelKey, v1.NodeInitializedLabelKey)...)),
+        )
     }
 
     // The cloud provider must define status conditions for the node repair controller to use to detect unhealthy nodes
diff --git a/pkg/operator/options/options.go b/pkg/operator/options/options.go
index 24a92bb747..ee974df76f 100644
--- a/pkg/operator/options/options.go
+++ b/pkg/operator/options/options.go
@@ -64,27 +64,28 @@ type FeatureGates struct {
 
 // Options contains all CLI flags / env vars for karpenter-core. It adheres to the options.Injectable interface.
 type Options struct {
-    ServiceName             string
-    MetricsPort             int
-    HealthProbePort         int
-    KubeClientQPS           int
-    KubeClientBurst         int
-    EnableProfiling         bool
-    DisableLeaderElection   bool
-    LeaderElectionName      string
-    LeaderElectionNamespace string
-    MemoryLimit             int64
-    CPURequests             int64
-    LogLevel                string
-    LogOutputPaths          string
-    LogErrorOutputPaths     string
-    BatchMaxDuration        time.Duration
-    BatchIdleDuration       time.Duration
-    preferencePolicyRaw     string
-    PreferencePolicy        PreferencePolicy
-    minValuesPolicyRaw      string
-    MinValuesPolicy         MinValuesPolicy
-    FeatureGates            FeatureGates
+    ServiceName                      string
+    MetricsPort                      int
+    HealthProbePort                  int
+    KubeClientQPS                    int
+    KubeClientBurst                  int
+    EnableProfiling                  bool
+    DisableLeaderElection            bool
+    DisableClusterStateObservability bool
+    LeaderElectionName               string
+    LeaderElectionNamespace          string
+    MemoryLimit                      int64
+    CPURequests                      int64
+    LogLevel                         string
+    LogOutputPaths                   string
+    LogErrorOutputPaths              string
+    BatchMaxDuration                 time.Duration
+    BatchIdleDuration                time.Duration
+    preferencePolicyRaw              string
+    PreferencePolicy                 PreferencePolicy
+    minValuesPolicyRaw               string
+    MinValuesPolicy                  MinValuesPolicy
+    FeatureGates                     FeatureGates
 }
 
 type FlagSet struct {
@@ -112,6 +113,7 @@ func (o *Options) AddFlags(fs *FlagSet) {
     fs.IntVar(&o.KubeClientBurst, "kube-client-burst", env.WithDefaultInt("KUBE_CLIENT_BURST", 300), "The maximum allowed burst of queries to the kube-apiserver")
     fs.BoolVarWithEnv(&o.EnableProfiling, "enable-profiling", "ENABLE_PROFILING", false, "Enable the profiling on the metric endpoint")
     fs.BoolVarWithEnv(&o.DisableLeaderElection, "disable-leader-election", "DISABLE_LEADER_ELECTION", false, "Disable the leader election client before executing the main loop. Disable when running replicated components for high availability is not desired.")
+    fs.BoolVarWithEnv(&o.DisableClusterStateObservability, "disable-cluster-state-observability", "DISABLE_CLUSTER_STATE_OBSERVABILITY", false, "Disable cluster state metrics and events")
     fs.StringVar(&o.LeaderElectionName, "leader-election-name", env.WithDefaultString("LEADER_ELECTION_NAME", "karpenter-leader-election"), "Leader election name to create and monitor the lease if running outside the cluster")
     fs.StringVar(&o.LeaderElectionNamespace, "leader-election-namespace", env.WithDefaultString("LEADER_ELECTION_NAMESPACE", ""), "Leader election namespace to create and monitor the lease if running outside the cluster")
     fs.Int64Var(&o.MemoryLimit, "memory-limit", env.WithDefaultInt64("MEMORY_LIMIT", -1), "Memory limit on the container running the controller. The GC soft memory limit is set to 90% of this value.")
diff --git a/pkg/operator/options/suite_test.go b/pkg/operator/options/suite_test.go
index a77a6e7063..fb5233cf2c 100644
--- a/pkg/operator/options/suite_test.go
+++ b/pkg/operator/options/suite_test.go
@@ -53,6 +53,7 @@ var _ = Describe("Options", func() {
         "KUBE_CLIENT_BURST",
         "ENABLE_PROFILING",
         "DISABLE_LEADER_ELECTION",
+        "DISABLE_CLUSTER_STATE_OBSERVABILITY",
         "LEADER_ELECTION_NAMESPACE",
         "MEMORY_LIMIT",
         "LOG_LEVEL",
@@ -99,23 +100,24 @@ var _ = Describe("Options", func() {
             err := opts.Parse(fs)
             Expect(err).To(BeNil())
             expectOptionsMatch(opts, test.Options(test.OptionsFields{
-                ServiceName:             lo.ToPtr(""),
-                MetricsPort:             lo.ToPtr(8080),
-                HealthProbePort:         lo.ToPtr(8081),
-                KubeClientQPS:           lo.ToPtr(200),
-                KubeClientBurst:         lo.ToPtr(300),
-                EnableProfiling:         lo.ToPtr(false),
-                DisableLeaderElection:   lo.ToPtr(false),
-                LeaderElectionName:      lo.ToPtr("karpenter-leader-election"),
-                LeaderElectionNamespace: lo.ToPtr(""),
-                MemoryLimit:             lo.ToPtr[int64](-1),
-                LogLevel:                lo.ToPtr("info"),
-                LogOutputPaths:          lo.ToPtr("stdout"),
-                LogErrorOutputPaths:     lo.ToPtr("stderr"),
-                BatchMaxDuration:        lo.ToPtr(10 * time.Second),
-                BatchIdleDuration:       lo.ToPtr(time.Second),
-                PreferencePolicy:        lo.ToPtr(options.PreferencePolicyRespect),
-                MinValuesPolicy:         lo.ToPtr(options.MinValuesPolicyStrict),
+                ServiceName:                      lo.ToPtr(""),
+                MetricsPort:                      lo.ToPtr(8080),
+                HealthProbePort:                  lo.ToPtr(8081),
+                KubeClientQPS:                    lo.ToPtr(200),
+                KubeClientBurst:                  lo.ToPtr(300),
+                EnableProfiling:                  lo.ToPtr(false),
+                DisableLeaderElection:            lo.ToPtr(false),
+                DisableClusterStateObservability: lo.ToPtr(false),
+                LeaderElectionName:               lo.ToPtr("karpenter-leader-election"),
+                LeaderElectionNamespace:          lo.ToPtr(""),
+                MemoryLimit:                      lo.ToPtr[int64](-1),
+                LogLevel:                         lo.ToPtr("info"),
+                LogOutputPaths:                   lo.ToPtr("stdout"),
+                LogErrorOutputPaths:              lo.ToPtr("stderr"),
+                BatchMaxDuration:                 lo.ToPtr(10 * time.Second),
+                BatchIdleDuration:                lo.ToPtr(time.Second),
+                PreferencePolicy:                 lo.ToPtr(options.PreferencePolicyRespect),
+                MinValuesPolicy:                  lo.ToPtr(options.MinValuesPolicyStrict),
                 FeatureGates: test.FeatureGates{
                     ReservedCapacity: lo.ToPtr(true),
                     NodeRepair:       lo.ToPtr(false),
@@ -137,6 +139,7 @@
                 "--kube-client-burst", "0",
                 "--enable-profiling",
                 "--disable-leader-election=true",
+                "--disable-cluster-state-observability=true",
                 "--leader-election-name=karpenter-controller",
                 "--leader-election-namespace=karpenter",
                 "--memory-limit", "0",
@@ -151,23 +154,24 @@
             )
             Expect(err).To(BeNil())
             expectOptionsMatch(opts, test.Options(test.OptionsFields{
-                ServiceName:             lo.ToPtr("cli"),
-                MetricsPort:             lo.ToPtr(0),
-                HealthProbePort:         lo.ToPtr(0),
-                KubeClientQPS:           lo.ToPtr(0),
-                KubeClientBurst:         lo.ToPtr(0),
-                EnableProfiling:         lo.ToPtr(true),
-                DisableLeaderElection:   lo.ToPtr(true),
-                LeaderElectionName:      lo.ToPtr("karpenter-controller"),
-                LeaderElectionNamespace: lo.ToPtr("karpenter"),
-                MemoryLimit:             lo.ToPtr[int64](0),
-                LogLevel:                lo.ToPtr("debug"),
-                LogOutputPaths:          lo.ToPtr("/etc/k8s/test"),
-                LogErrorOutputPaths:     lo.ToPtr("/etc/k8s/testerror"),
-                BatchMaxDuration:        lo.ToPtr(5 * time.Second),
-                BatchIdleDuration:       lo.ToPtr(5 * time.Second),
-                PreferencePolicy:        lo.ToPtr(options.PreferencePolicyIgnore),
-                MinValuesPolicy:         lo.ToPtr(options.MinValuesPolicyBestEffort),
+                ServiceName:                      lo.ToPtr("cli"),
+                MetricsPort:                      lo.ToPtr(0),
+                HealthProbePort:                  lo.ToPtr(0),
+                KubeClientQPS:                    lo.ToPtr(0),
+                KubeClientBurst:                  lo.ToPtr(0),
+                EnableProfiling:                  lo.ToPtr(true),
+                DisableLeaderElection:            lo.ToPtr(true),
+                DisableClusterStateObservability: lo.ToPtr(true),
+                LeaderElectionName:               lo.ToPtr("karpenter-controller"),
+                LeaderElectionNamespace:          lo.ToPtr("karpenter"),
+                MemoryLimit:                      lo.ToPtr[int64](0),
+                LogLevel:                         lo.ToPtr("debug"),
+                LogOutputPaths:                   lo.ToPtr("/etc/k8s/test"),
+                LogErrorOutputPaths:              lo.ToPtr("/etc/k8s/testerror"),
+                BatchMaxDuration:                 lo.ToPtr(5 * time.Second),
+                BatchIdleDuration:                lo.ToPtr(5 * time.Second),
+                PreferencePolicy:                 lo.ToPtr(options.PreferencePolicyIgnore),
+                MinValuesPolicy:                  lo.ToPtr(options.MinValuesPolicyBestEffort),
                 FeatureGates: test.FeatureGates{
                     ReservedCapacity: lo.ToPtr(false),
                     NodeRepair:       lo.ToPtr(true),
@@ -185,6 +189,7 @@
             os.Setenv("KUBE_CLIENT_BURST", "0")
             os.Setenv("ENABLE_PROFILING", "true")
             os.Setenv("DISABLE_LEADER_ELECTION", "true")
+            os.Setenv("DISABLE_CLUSTER_STATE_OBSERVABILITY", "true")
             os.Setenv("LEADER_ELECTION_NAME", "karpenter-controller")
             os.Setenv("LEADER_ELECTION_NAMESPACE", "karpenter")
             os.Setenv("MEMORY_LIMIT", "0")
@@ -203,23 +208,24 @@
             err := opts.Parse(fs)
             Expect(err).To(BeNil())
             expectOptionsMatch(opts, test.Options(test.OptionsFields{
-                ServiceName:             lo.ToPtr("env"),
-                MetricsPort:             lo.ToPtr(0),
-                HealthProbePort:         lo.ToPtr(0),
-                KubeClientQPS:           lo.ToPtr(0),
-                KubeClientBurst:         lo.ToPtr(0),
-                EnableProfiling:         lo.ToPtr(true),
-                DisableLeaderElection:   lo.ToPtr(true),
-                LeaderElectionName:      lo.ToPtr("karpenter-controller"),
-                LeaderElectionNamespace: lo.ToPtr("karpenter"),
-                MemoryLimit:             lo.ToPtr[int64](0),
-                LogLevel:                lo.ToPtr("debug"),
-                LogOutputPaths:          lo.ToPtr("/etc/k8s/test"),
-                LogErrorOutputPaths:     lo.ToPtr("/etc/k8s/testerror"),
-                BatchMaxDuration:        lo.ToPtr(5 * time.Second),
-                BatchIdleDuration:       lo.ToPtr(5 * time.Second),
-                PreferencePolicy:        lo.ToPtr(options.PreferencePolicyIgnore),
-                MinValuesPolicy:         lo.ToPtr(options.MinValuesPolicyBestEffort),
+                ServiceName:                      lo.ToPtr("env"),
+                MetricsPort:                      lo.ToPtr(0),
+                HealthProbePort:                  lo.ToPtr(0),
+                KubeClientQPS:                    lo.ToPtr(0),
+                KubeClientBurst:                  lo.ToPtr(0),
+                EnableProfiling:                  lo.ToPtr(true),
+                DisableLeaderElection:            lo.ToPtr(true),
+                DisableClusterStateObservability: lo.ToPtr(true),
+                LeaderElectionName:               lo.ToPtr("karpenter-controller"),
+                LeaderElectionNamespace:          lo.ToPtr("karpenter"),
+                MemoryLimit:                      lo.ToPtr[int64](0),
+                LogLevel:                         lo.ToPtr("debug"),
+                LogOutputPaths:                   lo.ToPtr("/etc/k8s/test"),
+                LogErrorOutputPaths:              lo.ToPtr("/etc/k8s/testerror"),
+                BatchMaxDuration:                 lo.ToPtr(5 * time.Second),
+                BatchIdleDuration:                lo.ToPtr(5 * time.Second),
+                PreferencePolicy:                 lo.ToPtr(options.PreferencePolicyIgnore),
+                MinValuesPolicy:                  lo.ToPtr(options.MinValuesPolicyBestEffort),
                 FeatureGates: test.FeatureGates{
                     ReservedCapacity: lo.ToPtr(false),
                     NodeRepair:       lo.ToPtr(true),
@@ -236,6 +242,7 @@
             os.Setenv("KUBE_CLIENT_BURST", "0")
             os.Setenv("ENABLE_PROFILING", "true")
             os.Setenv("DISABLE_LEADER_ELECTION", "true")
+            os.Setenv("DISABLE_CLUSTER_STATE_OBSERVABILITY", "true")
            os.Setenv("MEMORY_LIMIT", "0")
             os.Setenv("LOG_LEVEL", "debug")
             os.Setenv("BATCH_MAX_DURATION", "5s")
@@ -257,23 +264,24 @@
             )
             Expect(err).To(BeNil())
             expectOptionsMatch(opts, test.Options(test.OptionsFields{
-                ServiceName:             lo.ToPtr("cli"),
-                MetricsPort:             lo.ToPtr(0),
-                HealthProbePort:         lo.ToPtr(0),
-                KubeClientQPS:           lo.ToPtr(0),
-                KubeClientBurst:         lo.ToPtr(0),
-                EnableProfiling:         lo.ToPtr(true),
-                DisableLeaderElection:   lo.ToPtr(true),
-                LeaderElectionName:      lo.ToPtr("karpenter-leader-election"),
-                LeaderElectionNamespace: lo.ToPtr(""),
-                MemoryLimit:             lo.ToPtr[int64](0),
-                LogLevel:                lo.ToPtr("debug"),
-                LogOutputPaths:          lo.ToPtr("/etc/k8s/test"),
-                LogErrorOutputPaths:     lo.ToPtr("/etc/k8s/testerror"),
-                BatchMaxDuration:        lo.ToPtr(5 * time.Second),
-                BatchIdleDuration:       lo.ToPtr(5 * time.Second),
-                PreferencePolicy:        lo.ToPtr(options.PreferencePolicyRespect),
-                MinValuesPolicy:         lo.ToPtr(options.MinValuesPolicyStrict),
+                ServiceName:                      lo.ToPtr("cli"),
+                MetricsPort:                      lo.ToPtr(0),
+                HealthProbePort:                  lo.ToPtr(0),
+                KubeClientQPS:                    lo.ToPtr(0),
+                KubeClientBurst:                  lo.ToPtr(0),
+                EnableProfiling:                  lo.ToPtr(true),
+                DisableLeaderElection:            lo.ToPtr(true),
+                DisableClusterStateObservability: lo.ToPtr(true),
+                LeaderElectionName:               lo.ToPtr("karpenter-leader-election"),
+                LeaderElectionNamespace:          lo.ToPtr(""),
+                MemoryLimit:                      lo.ToPtr[int64](0),
+                LogLevel:                         lo.ToPtr("debug"),
+                LogOutputPaths:                   lo.ToPtr("/etc/k8s/test"),
+                LogErrorOutputPaths:              lo.ToPtr("/etc/k8s/testerror"),
+                BatchMaxDuration:                 lo.ToPtr(5 * time.Second),
+                BatchIdleDuration:                lo.ToPtr(5 * time.Second),
+                PreferencePolicy:                 lo.ToPtr(options.PreferencePolicyRespect),
+                MinValuesPolicy:                  lo.ToPtr(options.MinValuesPolicyStrict),
                 FeatureGates: test.FeatureGates{
                     ReservedCapacity: lo.ToPtr(false),
                     NodeRepair:       lo.ToPtr(true),
@@ -355,6 +363,7 @@ func expectOptionsMatch(optsA, optsB *options.Options) {
     Expect(optsA.KubeClientBurst).To(Equal(optsB.KubeClientBurst))
     Expect(optsA.EnableProfiling).To(Equal(optsB.EnableProfiling))
     Expect(optsA.DisableLeaderElection).To(Equal(optsB.DisableLeaderElection))
+    Expect(optsA.DisableClusterStateObservability).To(Equal(optsB.DisableClusterStateObservability))
     Expect(optsA.MemoryLimit).To(Equal(optsB.MemoryLimit))
     Expect(optsA.LogLevel).To(Equal(optsB.LogLevel))
     Expect(optsA.LogOutputPaths).To(Equal(optsB.LogOutputPaths))
diff --git a/pkg/test/options.go b/pkg/test/options.go
index f4f5d846ac..9228a533e0 100644
--- a/pkg/test/options.go
+++ b/pkg/test/options.go
@@ -28,25 +28,26 @@ import (
 
 type OptionsFields struct {
     // Vendor Neutral
-    ServiceName             *string
-    MetricsPort             *int
-    HealthProbePort         *int
-    KubeClientQPS           *int
-    KubeClientBurst         *int
-    EnableProfiling         *bool
-    DisableLeaderElection   *bool
-    LeaderElectionName      *string
-    LeaderElectionNamespace *string
-    MemoryLimit             *int64
-    CPURequests             *int64
-    LogLevel                *string
-    LogOutputPaths          *string
-    LogErrorOutputPaths     *string
-    PreferencePolicy        *options.PreferencePolicy
-    MinValuesPolicy         *options.MinValuesPolicy
-    BatchMaxDuration        *time.Duration
-    BatchIdleDuration       *time.Duration
-    FeatureGates            FeatureGates
+    ServiceName                      *string
+    MetricsPort                      *int
+    HealthProbePort                  *int
+    KubeClientQPS                    *int
+    KubeClientBurst                  *int
+    EnableProfiling                  *bool
+    DisableLeaderElection            *bool
+    DisableClusterStateObservability *bool
+    LeaderElectionName               *string
+    LeaderElectionNamespace          *string
+    MemoryLimit                      *int64
+    CPURequests                      *int64
+    LogLevel                         *string
+    LogOutputPaths                   *string
+    LogErrorOutputPaths              *string
+    PreferencePolicy                 *options.PreferencePolicy
+    MinValuesPolicy                  *options.MinValuesPolicy
+    BatchMaxDuration                 *time.Duration
+    BatchIdleDuration                *time.Duration
+    FeatureGates                     FeatureGates
 }
 
 type FeatureGates struct {
@@ -65,22 +66,23 @@ func Options(overrides ...OptionsFields) *options.Options {
     }
 
     return &options.Options{
-        ServiceName:           lo.FromPtrOr(opts.ServiceName, ""),
-        MetricsPort:           lo.FromPtrOr(opts.MetricsPort, 8080),
-        HealthProbePort:       lo.FromPtrOr(opts.HealthProbePort, 8081),
-        KubeClientQPS:         lo.FromPtrOr(opts.KubeClientQPS, 200),
-        KubeClientBurst:       lo.FromPtrOr(opts.KubeClientBurst, 300),
-        EnableProfiling:       lo.FromPtrOr(opts.EnableProfiling, false),
-        DisableLeaderElection: lo.FromPtrOr(opts.DisableLeaderElection, false),
-        MemoryLimit:           lo.FromPtrOr(opts.MemoryLimit, -1),
-        CPURequests:           lo.FromPtrOr(opts.CPURequests, 5000), // use 5 threads to enforce parallelism
-        LogLevel:              lo.FromPtrOr(opts.LogLevel, ""),
-        LogOutputPaths:        lo.FromPtrOr(opts.LogOutputPaths, "stdout"),
-        LogErrorOutputPaths:   lo.FromPtrOr(opts.LogErrorOutputPaths, "stderr"),
-        BatchMaxDuration:      lo.FromPtrOr(opts.BatchMaxDuration, 10*time.Second),
-        BatchIdleDuration:     lo.FromPtrOr(opts.BatchIdleDuration, time.Second),
-        PreferencePolicy:      lo.FromPtrOr(opts.PreferencePolicy, options.PreferencePolicyRespect),
-        MinValuesPolicy:       lo.FromPtrOr(opts.MinValuesPolicy, options.MinValuesPolicyStrict),
+        ServiceName:                      lo.FromPtrOr(opts.ServiceName, ""),
+        MetricsPort:                      lo.FromPtrOr(opts.MetricsPort, 8080),
+        HealthProbePort:                  lo.FromPtrOr(opts.HealthProbePort, 8081),
+        KubeClientQPS:                    lo.FromPtrOr(opts.KubeClientQPS, 200),
+        KubeClientBurst:                  lo.FromPtrOr(opts.KubeClientBurst, 300),
+        EnableProfiling:                  lo.FromPtrOr(opts.EnableProfiling, false),
+        DisableLeaderElection:            lo.FromPtrOr(opts.DisableLeaderElection, false),
+        DisableClusterStateObservability: lo.FromPtrOr(opts.DisableClusterStateObservability, false),
+        MemoryLimit:                      lo.FromPtrOr(opts.MemoryLimit, -1),
+        CPURequests:                      lo.FromPtrOr(opts.CPURequests, 5000), // use 5 threads to enforce parallelism
+        LogLevel:                         lo.FromPtrOr(opts.LogLevel, ""),
+        LogOutputPaths:                   lo.FromPtrOr(opts.LogOutputPaths, "stdout"),
+        LogErrorOutputPaths:              lo.FromPtrOr(opts.LogErrorOutputPaths, "stderr"),
+        BatchMaxDuration:                 lo.FromPtrOr(opts.BatchMaxDuration, 10*time.Second),
+        BatchIdleDuration:                lo.FromPtrOr(opts.BatchIdleDuration, time.Second),
+        PreferencePolicy:                 lo.FromPtrOr(opts.PreferencePolicy, options.PreferencePolicyRespect),
+        MinValuesPolicy:                  lo.FromPtrOr(opts.MinValuesPolicy, options.MinValuesPolicyStrict),
         FeatureGates: options.FeatureGates{
             NodeRepair:       lo.FromPtrOr(opts.FeatureGates.NodeRepair, false),
            ReservedCapacity: lo.FromPtrOr(opts.FeatureGates.ReservedCapacity, true),
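For reviewers, a standalone sketch of the gating pattern this patch introduces in controllers.go. It is not part of the patch: only the DisableClusterStateObservability field name and the conditional-append shape come from the diff; Controller, stub, and buildControllers are hypothetical stand-ins so the snippet compiles on its own.

package main

import "fmt"

// Options mirrors just the one field added to pkg/operator/options in this patch.
type Options struct {
    DisableClusterStateObservability bool // defaults to false, like the new flag
}

// Controller is a stand-in for karpenter's controller interface.
type Controller interface{ Name() string }

type stub string

func (s stub) Name() string { return string(s) }

// buildControllers always registers core controllers, and appends the
// observability-only controllers unless the new option disables them —
// the same shape as the NewControllers change above.
func buildControllers(opts Options) []Controller {
    controllers := []Controller{stub("termination")}
    if !opts.DisableClusterStateObservability {
        controllers = append(controllers,
            stub("metrics.pod"), stub("metrics.nodepool"), stub("metrics.node"))
    }
    return controllers
}

func main() {
    fmt.Println(len(buildControllers(Options{})))                                       // 4
    fmt.Println(len(buildControllers(Options{DisableClusterStateObservability: true}))) // 1
}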