diff --git a/changelog/20251007_feature_reintroduce_securitycontext_operator_setting_in.md b/changelog/20251007_feature_reintroduce_securitycontext_operator_setting_in.md
new file mode 100644
index 000000000..c0a3885b5
--- /dev/null
+++ b/changelog/20251007_feature_reintroduce_securitycontext_operator_setting_in.md
@@ -0,0 +1,6 @@
+---
+kind: feature
+date: 2025-10-07
+---
+
+* **Helm Chart**: Introduced two new Helm fields, `operator.podSecurityContext` and `operator.securityContext`, that can be used to configure the `securityContext` of the Operator deployment through the Helm chart.
diff --git a/helm_chart/templates/operator.yaml b/helm_chart/templates/operator.yaml
index d6b9fee91..679be2219 100644
--- a/helm_chart/templates/operator.yaml
+++ b/helm_chart/templates/operator.yaml
@@ -32,11 +32,10 @@ spec:
 {{- end }}
     spec:
       serviceAccountName: {{ .Values.operator.name }}
-{{- if not .Values.managedSecurityContext }}
+      {{- if and (not .Values.managedSecurityContext) .Values.operator.podSecurityContext }}
       securityContext:
-        runAsNonRoot: true
-        runAsUser: 2000
-{{- end }}
+        {{- toYaml .Values.operator.podSecurityContext | nindent 8 }}
+      {{- end }}
 {{- if .Values.registry.imagePullSecrets}}
       imagePullSecrets:
       - name: {{ .Values.registry.imagePullSecrets }}
@@ -74,6 +73,10 @@ spec:
             requests:
              cpu: {{ .Values.operator.resources.requests.cpu }}
              memory: {{ .Values.operator.resources.requests.memory }}
+          {{- if and (not .Values.managedSecurityContext) .Values.operator.securityContext }}
+          securityContext:
+            {{- toYaml .Values.operator.securityContext | nindent 12 }}
+          {{- end }}
           env:
             - name: OPERATOR_ENV
               value: {{ .Values.operator.env }}
diff --git a/helm_chart/tests/operator_security_context_test.yaml b/helm_chart/tests/operator_security_context_test.yaml
new file mode 100644
index 000000000..591d97278
--- /dev/null
+++ b/helm_chart/tests/operator_security_context_test.yaml
@@ -0,0 +1,142 @@
+suite: test operator security context settings for values.yaml
+templates:
+  - operator.yaml
+tests:
+  - it: default values are properly set
+    asserts:
+      - exists:
+          path: spec.template.spec.securityContext
+      - equal:
+          path: spec.template.spec.securityContext.runAsNonRoot
+          value: true
+      - equal:
+          path: spec.template.spec.securityContext.runAsUser
+          # noinspection YAMLIncompatibleTypes
+          value: 2000
+      - notExists:
+          path: spec.template.spec.containers[?(@.name=="mongodb-kubernetes-operator")].securityContext
+  - it: drop podSecurityContext and securityContext completely
+    set:
+      operator.podSecurityContext:
+      operator.securityContext:
+    asserts:
+      - notExists:
+          path: spec.template.spec.securityContext
+      - notExists:
+          path: spec.template.spec.containers[?(@.name=="mongodb-kubernetes-operator")].securityContext
+  - it: drop podSecurityContext and securityContext completely when managedSecurityContext is true
+    set:
+      managedSecurityContext: true
+    asserts:
+      - notExists:
+          path: spec.template.spec.securityContext
+      - notExists:
+          path: spec.template.spec.containers[?(@.name=="mongodb-kubernetes-operator")].securityContext
+  - it: custom values are properly set
+    set:
+      operator.podSecurityContext.runAsNonRoot: false
+      operator.podSecurityContext.seccompProfile.type: RuntimeDefault
+      operator.securityContext.allowPrivilegeEscalation: false
+      operator.securityContext.capabilities.drop: [ALL]
+    asserts:
+      - exists:
+          path: spec.template.spec.securityContext
+      - equal:
+          path: spec.template.spec.securityContext.runAsNonRoot
+          value: false
+      - equal:
+          path: spec.template.spec.securityContext.seccompProfile.type
+          # noinspection YAMLIncompatibleTypes
+          value: RuntimeDefault
+      - equal:
+          path: spec.template.spec.securityContext.runAsUser
+          # noinspection YAMLIncompatibleTypes
+          value: 2000
+      - exists:
+          path: spec.template.spec.containers[?(@.name=="mongodb-kubernetes-operator")].securityContext
+      - equal:
+          path: spec.template.spec.containers[?(@.name=="mongodb-kubernetes-operator")].securityContext.allowPrivilegeEscalation
+          value: false
+      - equal:
+          path: spec.template.spec.containers[?(@.name=="mongodb-kubernetes-operator")].securityContext.capabilities.drop
+          value: [ALL]
+  - it: default values are properly set when managedSecurityContext is true for openShift
+    values:
+      - ../values-openshift.yaml
+    asserts:
+      - notExists:
+          path: spec.template.spec.securityContext
+      - notExists:
+          path: spec.template.spec.containers[?(@.name=="mongodb-kubernetes-operator")].securityContext
+      - exists:
+          path: spec.template.spec.containers[?(@.name=="mongodb-kubernetes-operator")].env[?(@.name=="MANAGED_SECURITY_CONTEXT")]
+      - equal:
+          path: spec.template.spec.containers[?(@.name=="mongodb-kubernetes-operator")].env[?(@.name=="MANAGED_SECURITY_CONTEXT")].value
+          # noinspection YAMLIncompatibleTypes
+          value: "true"
+  - it: default values are properly set for multi-cluster deployment
+    values:
+      - ../values-multi-cluster.yaml
+    asserts:
+      - exists:
+          path: spec.template.spec.securityContext
+      - equal:
+          path: spec.template.spec.securityContext.runAsNonRoot
+          value: true
+      - equal:
+          path: spec.template.spec.securityContext.runAsUser
+          # noinspection YAMLIncompatibleTypes
+          value: 2000
+      - notExists:
+          path: spec.template.spec.containers[?(@.name=="mongodb-kubernetes-operator-multi-cluster")].securityContext
+  - it: drop podSecurityContext and securityContext completely for multi-cluster deployment
+    values:
+      - ../values-multi-cluster.yaml
+    set:
+      operator.podSecurityContext:
+      operator.securityContext:
+    asserts:
+      - notExists:
+          path: spec.template.spec.securityContext
+      - notExists:
+          path: spec.template.spec.containers[?(@.name=="mongodb-kubernetes-operator-multi-cluster")].securityContext
+  - it: drop podSecurityContext and securityContext completely when managedSecurityContext is true for multi-cluster deployment
+    values:
+      - ../values-multi-cluster.yaml
+    set:
+      managedSecurityContext: true
+    asserts:
+      - notExists:
+          path: spec.template.spec.securityContext
+      - notExists:
+          path: spec.template.spec.containers[?(@.name=="mongodb-kubernetes-operator-multi-cluster")].securityContext
+  - it: custom values are properly set for multi-cluster deployment
+    values:
+      - ../values-multi-cluster.yaml
+    set:
+      operator.podSecurityContext.runAsNonRoot: false
+      operator.podSecurityContext.seccompProfile.type: RuntimeDefault
+      operator.securityContext.allowPrivilegeEscalation: false
+      operator.securityContext.capabilities.drop: [ ALL ]
+    asserts:
+      - exists:
+          path: spec.template.spec.securityContext
+      - equal:
+          path: spec.template.spec.securityContext.runAsNonRoot
+          value: false
+      - equal:
+          path: spec.template.spec.securityContext.seccompProfile.type
+          # noinspection YAMLIncompatibleTypes
+          value: RuntimeDefault
+      - equal:
+          path: spec.template.spec.securityContext.runAsUser
+          # noinspection YAMLIncompatibleTypes
+          value: 2000
+      - exists:
+          path: spec.template.spec.containers[?(@.name=="mongodb-kubernetes-operator-multi-cluster")].securityContext
+      - equal:
+          path: spec.template.spec.containers[?(@.name=="mongodb-kubernetes-operator-multi-cluster")].securityContext.allowPrivilegeEscalation
+          value: false
+      - equal:
+          path: spec.template.spec.containers[?(@.name=="mongodb-kubernetes-operator-multi-cluster")].securityContext.capabilities.drop
+          value: [ ALL ]
diff --git a/helm_chart/values.yaml b/helm_chart/values.yaml
index 61e109c24..470890f1a 100644
--- a/helm_chart/values.yaml
+++ b/helm_chart/values.yaml
@@ -47,6 +47,12 @@ operator:
       cpu: 1100m
       memory: 1Gi
 
+  podSecurityContext:
+    runAsNonRoot: true
+    runAsUser: 2000
+
+  securityContext: {}
+
   # Control how many reconciles can be performed in parallel.
   # It sets MaxConcurrentReconciles https://pkg.go.dev/github.com/kubernetes-sigs/controller-runtime/pkg/controller#Options).
   # Increasing the number of concurrent reconciles will decrease the time needed to reconcile all watched resources.
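
Usage sketch (not part of the patch above; the override file name, release name, and field values are illustrative): with these changes a chart consumer can set both contexts from their own values file and render the chart to inspect the resulting Deployment. Leaving `operator.podSecurityContext` / `operator.securityContext` empty, or setting `managedSecurityContext: true`, omits the corresponding blocks entirely, as the tests above verify.

    # my-operator-values.yaml -- hypothetical override file
    operator:
      podSecurityContext:
        runAsNonRoot: true
        runAsUser: 2000
        seccompProfile:
          type: RuntimeDefault
      securityContext:
        allowPrivilegeEscalation: false
        capabilities:
          drop: ["ALL"]

    # Render locally to inspect the generated Deployment, e.g.:
    #   helm template my-release ./helm_chart -f my-operator-values.yaml

The new assertions can be run with the helm-unittest plugin, assuming it is installed (e.g. `helm unittest helm_chart` from the repository root).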