From 60f0f98de72123ce712f3c3086d76076c5dc8d9e Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Tue, 7 Mar 2023 20:22:05 +0100 Subject: [PATCH 1/9] set pooler pod security context --- pkg/cluster/connection_pooler.go | 120 +++++++++++------------ pkg/cluster/connection_pooler_test.go | 135 ++++++++++++++++++++++++++ pkg/cluster/k8sres.go | 118 +++++++++++++--------- 3 files changed, 265 insertions(+), 108 deletions(-) diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go index 1c7e7bcb8..7f817d1f8 100644 --- a/pkg/cluster/connection_pooler.go +++ b/pkg/cluster/connection_pooler.go @@ -3,7 +3,6 @@ package cluster import ( "context" "fmt" - "path/filepath" "strings" "time" @@ -261,6 +260,10 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) ( makeDefaultConnectionPoolerResources(&c.OpConfig), connectionPoolerContainer) + if err != nil { + return nil, fmt.Errorf("could not generate resource requirements: %v", err) + } + effectiveDockerImage := util.Coalesce( connectionPoolerSpec.DockerImage, c.OpConfig.ConnectionPooler.Image) @@ -269,10 +272,6 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) ( connectionPoolerSpec.Schema, c.OpConfig.ConnectionPooler.Schema) - if err != nil { - return nil, fmt.Errorf("could not generate resource requirements: %v", err) - } - secretSelector := func(key string) *v1.SecretKeySelector { effectiveUser := util.Coalesce( connectionPoolerSpec.User, @@ -344,62 +343,68 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) ( // 2. Reference the secret in a volume // 3. Mount the volume to the container at /tls var poolerVolumes []v1.Volume + var volumeMounts []v1.VolumeMount if spec.TLS != nil && spec.TLS.SecretName != "" { - // Env vars - crtFile := spec.TLS.CertificateFile - keyFile := spec.TLS.PrivateKeyFile - caFile := spec.TLS.CAFile - mountPath := "/tls" - mountPathCA := mountPath - - if crtFile == "" { - crtFile = "tls.crt" - } - if keyFile == "" { - keyFile = "tls.key" - } - if caFile == "" { - caFile = "ca.crt" - } - if spec.TLS.CASecretName != "" { - mountPathCA = mountPath + "ca" - } - - envVars = append( - envVars, - v1.EnvVar{ - Name: "CONNECTION_POOLER_CLIENT_TLS_CRT", Value: filepath.Join(mountPath, crtFile), - }, - v1.EnvVar{ - Name: "CONNECTION_POOLER_CLIENT_TLS_KEY", Value: filepath.Join(mountPath, keyFile), - }, - v1.EnvVar{ - Name: "CONNECTION_POOLER_CLIENT_CA_FILE", Value: filepath.Join(mountPathCA, caFile), - }, - ) + if spec.TLS != nil && spec.TLS.SecretName != "" { + getPoolerTLSEnv := func(k string) string { + keyName := "" + switch k { + case "tls.crt": + keyName = "CONNECTION_POOLER_CLIENT_TLS_CRT" + case "tls.key": + keyName = "CONNECTION_POOLER_CLIENT_TLS_KEY" + case "tls.ca": + keyName = "CONNECTION_POOLER_CLIENT_CA_FILE" + default: + panic(fmt.Sprintf("TLS env key for pooler unknown %s", k)) + } - // Volume - mode := int32(0640) - volume := v1.Volume{ - Name: "tls", - VolumeSource: v1.VolumeSource{ - Secret: &v1.SecretVolumeSource{ - SecretName: spec.TLS.SecretName, - DefaultMode: &mode, - }, - }, + return keyName + } + tlsEnv, tlsVolumes := generateTLSmounts(spec, getPoolerTLSEnv) + envVars = append(envVars, tlsEnv...) 
+ for _, vol := range tlsVolumes { + poolerVolumes = append(poolerVolumes, v1.Volume{ + Name: vol.Name, + VolumeSource: vol.VolumeSource, + }) + volumeMounts = append(volumeMounts, v1.VolumeMount{ + Name: vol.Name, + MountPath: vol.MountPath, + }) + } } - poolerVolumes = append(poolerVolumes, volume) - - // Mount - poolerContainer.VolumeMounts = []v1.VolumeMount{{ - Name: "tls", - MountPath: "/tls", - }} } poolerContainer.Env = envVars + poolerContainer.VolumeMounts = volumeMounts tolerationsSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration) + securityContext := v1.PodSecurityContext{} + + // determine the User, Group and FSGroup for the pooler pod + effectiveRunAsUser := c.OpConfig.Resources.SpiloRunAsUser + if spec.SpiloRunAsUser != nil { + effectiveRunAsUser = spec.SpiloRunAsUser + } + if effectiveRunAsUser != nil { + securityContext.RunAsUser = effectiveRunAsUser + } + + effectiveRunAsGroup := c.OpConfig.Resources.SpiloRunAsGroup + if spec.SpiloRunAsGroup != nil { + effectiveRunAsGroup = spec.SpiloRunAsGroup + } + if effectiveRunAsGroup != nil { + securityContext.RunAsGroup = effectiveRunAsGroup + } + + effectiveFSGroup := c.OpConfig.Resources.SpiloFSGroup + if spec.SpiloFSGroup != nil { + effectiveFSGroup = spec.SpiloFSGroup + } + if effectiveFSGroup != nil { + securityContext.FSGroup = effectiveFSGroup + } podTemplate := &v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ @@ -412,15 +417,10 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) ( Containers: []v1.Container{poolerContainer}, Tolerations: tolerationsSpec, Volumes: poolerVolumes, + SecurityContext: &securityContext, }, } - if spec.TLS != nil && spec.TLS.SecretName != "" && spec.SpiloFSGroup != nil { - podTemplate.Spec.SecurityContext = &v1.PodSecurityContext{ - FSGroup: spec.SpiloFSGroup, - } - } - nodeAffinity := c.nodeAffinity(c.OpConfig.NodeReadinessLabel, spec.NodeAffinity) if c.OpConfig.EnablePodAntiAffinity { labelsSet := labels.Set(c.connectionPoolerLabels(role, false).MatchLabels) diff --git a/pkg/cluster/connection_pooler_test.go b/pkg/cluster/connection_pooler_test.go index 13718ca06..48423c533 100644 --- a/pkg/cluster/connection_pooler_test.go +++ b/pkg/cluster/connection_pooler_test.go @@ -1,6 +1,7 @@ package cluster import ( + "context" "errors" "fmt" "strings" @@ -11,6 +12,7 @@ import ( fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake" "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/config" + "github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/k8sutil" appsv1 "k8s.io/api/apps/v1" @@ -19,6 +21,19 @@ import ( "k8s.io/client-go/kubernetes/fake" ) +func newFakeK8sPoolerTestClient() (k8sutil.KubernetesClient, *fake.Clientset) { + acidClientSet := fakeacidv1.NewSimpleClientset() + clientSet := fake.NewSimpleClientset() + + return k8sutil.KubernetesClient{ + PodsGetter: clientSet.CoreV1(), + PostgresqlsGetter: acidClientSet.AcidV1(), + StatefulSetsGetter: clientSet.AppsV1(), + DeploymentsGetter: clientSet.AppsV1(), + ServicesGetter: clientSet.CoreV1(), + }, clientSet +} + func mockInstallLookupFunction(schema string, user string) error { return nil } @@ -919,6 +934,126 @@ func testServiceSelector(cluster *Cluster, service *v1.Service, role PostgresRol return nil } +func TestPoolerTLS(t *testing.T) { + client, _ := newFakeK8sPoolerTestClient() + clusterName := "acid-test-cluster" + namespace := "default" + tlsSecretName := "my-secret" + 
spiloRunAsUser := int64(101) + spiloRunAsGroup := int64(103) + spiloFSGroup := int64(103) + defaultMode := int32(0640) + mountPath := "/tls" + + pg := acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: namespace, + }, + Spec: acidv1.PostgresSpec{ + TeamID: "myapp", NumberOfInstances: 1, + EnableConnectionPooler: util.True(), + Resources: &acidv1.Resources{ + ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + }, + Volume: acidv1.Volume{ + Size: "1G", + }, + TLS: &acidv1.TLSDescription{ + SecretName: tlsSecretName, CAFile: "ca.crt"}, + AdditionalVolumes: []acidv1.AdditionalVolume{ + acidv1.AdditionalVolume{ + Name: tlsSecretName, + MountPath: mountPath, + VolumeSource: v1.VolumeSource{ + Secret: &v1.SecretVolumeSource{ + SecretName: tlsSecretName, + DefaultMode: &defaultMode, + }, + }, + }, + }, + }, + } + + var cluster = New( + Config{ + OpConfig: config.Config{ + PodManagementPolicy: "ordered_ready", + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + Resources: config.Resources{ + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: "cluster-name", + DefaultCPURequest: "300m", + DefaultCPULimit: "300m", + DefaultMemoryRequest: "300Mi", + DefaultMemoryLimit: "300Mi", + PodRoleLabel: "spilo-role", + SpiloRunAsUser: &spiloRunAsUser, + SpiloRunAsGroup: &spiloRunAsGroup, + SpiloFSGroup: &spiloFSGroup, + }, + ConnectionPooler: config.ConnectionPooler{ + ConnectionPoolerDefaultCPURequest: "100m", + ConnectionPoolerDefaultCPULimit: "100m", + ConnectionPoolerDefaultMemoryRequest: "100Mi", + ConnectionPoolerDefaultMemoryLimit: "100Mi", + }, + }, + }, client, pg, logger, eventRecorder) + + // create a statefulset + _, err := cluster.createStatefulSet() + assert.NoError(t, err) + + // create pooler resources + cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{} + cluster.ConnectionPooler[Master] = &ConnectionPoolerObjects{ + Deployment: nil, + Service: nil, + Name: cluster.connectionPoolerName(Master), + ClusterName: clusterName, + Namespace: namespace, + LookupFunction: false, + Role: Master, + } + + _, err = cluster.syncConnectionPoolerWorker(nil, &pg, Master) + assert.NoError(t, err) + + deploy, err := client.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(Master), metav1.GetOptions{}) + assert.NoError(t, err) + + fsGroup := int64(103) + assert.Equal(t, &fsGroup, deploy.Spec.Template.Spec.SecurityContext.FSGroup, "has a default FSGroup assigned") + + volume := v1.Volume{ + Name: "my-secret", + VolumeSource: v1.VolumeSource{ + Secret: &v1.SecretVolumeSource{ + SecretName: "my-secret", + DefaultMode: &defaultMode, + }, + }, + } + assert.Contains(t, deploy.Spec.Template.Spec.Volumes, volume, "the pod gets a secret volume") + + poolerContainer := deploy.Spec.Template.Spec.Containers[constants.ConnectionPoolerContainer] + assert.Contains(t, poolerContainer.VolumeMounts, v1.VolumeMount{ + MountPath: "/tls", + Name: "my-secret", + }, "the volume gets mounted in /tls") + + assert.Contains(t, poolerContainer.Env, v1.EnvVar{Name: "CONNECTION_POOLER_CLIENT_TLS_CRT", Value: "/tls/tls.crt"}) + assert.Contains(t, poolerContainer.Env, v1.EnvVar{Name: "CONNECTION_POOLER_CLIENT_TLS_KEY", Value: "/tls/tls.key"}) + assert.Contains(t, poolerContainer.Env, v1.EnvVar{Name: "CONNECTION_POOLER_CLIENT_CA_FILE", Value: "/tls/ca.crt"}) +} + func 
TestConnectionPoolerServiceSpec(t *testing.T) { testName := "Test connection pooler service spec generation" var cluster = New( diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index 3de5e430f..f38560460 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -1288,57 +1288,26 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef // configure TLS with a custom secret volume if spec.TLS != nil && spec.TLS.SecretName != "" { - // this is combined with the FSGroup in the section above - // to give read access to the postgres user - defaultMode := int32(0640) - mountPath := "/tls" - additionalVolumes = append(additionalVolumes, acidv1.AdditionalVolume{ - Name: spec.TLS.SecretName, - MountPath: mountPath, - VolumeSource: v1.VolumeSource{ - Secret: &v1.SecretVolumeSource{ - SecretName: spec.TLS.SecretName, - DefaultMode: &defaultMode, - }, - }, - }) - - // use the same filenames as Secret resources by default - certFile := ensurePath(spec.TLS.CertificateFile, mountPath, "tls.crt") - privateKeyFile := ensurePath(spec.TLS.PrivateKeyFile, mountPath, "tls.key") - spiloEnvVars = appendEnvVars( - spiloEnvVars, - v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: certFile}, - v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: privateKeyFile}, - ) - - if spec.TLS.CAFile != "" { - // support scenario when the ca.crt resides in a different secret, diff path - mountPathCA := mountPath - if spec.TLS.CASecretName != "" { - mountPathCA = mountPath + "ca" + getSpiloTLSEnv := func(k string) string { + keyName := "" + switch k { + case "tls.crt": + keyName = "SSL_CERTIFICATE_FILE" + case "tls.key": + keyName = "SSL_PRIVATE_KEY_FILE" + case "tls.ca": + keyName = "SSL_CA_FILE" + default: + panic(fmt.Sprintf("TLS env key unknown %s", k)) } - caFile := ensurePath(spec.TLS.CAFile, mountPathCA, "") - spiloEnvVars = appendEnvVars( - spiloEnvVars, - v1.EnvVar{Name: "SSL_CA_FILE", Value: caFile}, - ) - - // the ca file from CASecretName secret takes priority - if spec.TLS.CASecretName != "" { - additionalVolumes = append(additionalVolumes, acidv1.AdditionalVolume{ - Name: spec.TLS.CASecretName, - MountPath: mountPathCA, - VolumeSource: v1.VolumeSource{ - Secret: &v1.SecretVolumeSource{ - SecretName: spec.TLS.CASecretName, - DefaultMode: &defaultMode, - }, - }, - }) - } + return keyName + } + tlsEnv, tlsVolumes := generateTLSmounts(spec, getSpiloTLSEnv) + for _, env := range tlsEnv { + spiloEnvVars = appendEnvVars(spiloEnvVars, env) } + additionalVolumes = append(additionalVolumes, tlsVolumes...) 
} // generate the spilo container @@ -1492,6 +1461,59 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef return statefulSet, nil } +func generateTLSmounts(spec *acidv1.PostgresSpec, tlsEnv func(key string) string) ([]v1.EnvVar, []acidv1.AdditionalVolume) { + // this is combined with the FSGroup in the section above + // to give read access to the postgres user + defaultMode := int32(0640) + mountPath := "/tls" + env := make([]v1.EnvVar, 0) + volumes := make([]acidv1.AdditionalVolume, 0) + + volumes = append(volumes, acidv1.AdditionalVolume{ + Name: spec.TLS.SecretName, + MountPath: mountPath, + VolumeSource: v1.VolumeSource{ + Secret: &v1.SecretVolumeSource{ + SecretName: spec.TLS.SecretName, + DefaultMode: &defaultMode, + }, + }, + }) + + // use the same filenames as Secret resources by default + certFile := ensurePath(spec.TLS.CertificateFile, mountPath, "tls.crt") + privateKeyFile := ensurePath(spec.TLS.PrivateKeyFile, mountPath, "tls.key") + env = append(env, v1.EnvVar{Name: tlsEnv("tls.crt"), Value: certFile}) + env = append(env, v1.EnvVar{Name: tlsEnv("tls.key"), Value: privateKeyFile}) + + if spec.TLS.CAFile != "" { + // support scenario when the ca.crt resides in a different secret, diff path + mountPathCA := mountPath + if spec.TLS.CASecretName != "" { + mountPathCA = mountPath + "ca" + } + + caFile := ensurePath(spec.TLS.CAFile, mountPathCA, "") + env = append(env, v1.EnvVar{Name: tlsEnv("tls.ca"), Value: caFile}) + + // the ca file from CASecretName secret takes priority + if spec.TLS.CASecretName != "" { + volumes = append(volumes, acidv1.AdditionalVolume{ + Name: spec.TLS.CASecretName, + MountPath: mountPathCA, + VolumeSource: v1.VolumeSource{ + Secret: &v1.SecretVolumeSource{ + SecretName: spec.TLS.CASecretName, + DefaultMode: &defaultMode, + }, + }, + }) + } + } + + return env, volumes +} + func (c *Cluster) generatePodAnnotations(spec *acidv1.PostgresSpec) map[string]string { annotations := make(map[string]string) for k, v := range c.OpConfig.CustomPodAnnotations { From ef77ff9c13f7b1b2083ae1a0a8b372a33f7ef78b Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Tue, 7 Mar 2023 20:46:38 +0100 Subject: [PATCH 2/9] bump pooler image --- charts/postgres-operator/crds/operatorconfigurations.yaml | 2 +- charts/postgres-operator/values.yaml | 2 +- manifests/configmap.yaml | 2 +- manifests/minimal-fake-pooler-deployment.yaml | 2 +- manifests/operatorconfiguration.crd.yaml | 2 +- manifests/postgresql-operator-default-configuration.yaml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/charts/postgres-operator/crds/operatorconfigurations.yaml b/charts/postgres-operator/crds/operatorconfigurations.yaml index e01a5f997..20fcee21b 100644 --- a/charts/postgres-operator/crds/operatorconfigurations.yaml +++ b/charts/postgres-operator/crds/operatorconfigurations.yaml @@ -637,7 +637,7 @@ spec: default: "pooler" connection_pooler_image: type: string - default: "registry.opensource.zalan.do/acid/pgbouncer:master-26" + default: "registry.opensource.zalan.do/acid/pgbouncer:master-27" connection_pooler_max_db_connections: type: integer default: 60 diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml index 50414e00a..aa17bcc68 100644 --- a/charts/postgres-operator/values.yaml +++ b/charts/postgres-operator/values.yaml @@ -416,7 +416,7 @@ configConnectionPooler: # db user for pooler to use connection_pooler_user: "pooler" # docker image - connection_pooler_image: 
"registry.opensource.zalan.do/acid/pgbouncer:master-26" + connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-27" # max db connections the pooler should hold connection_pooler_max_db_connections: 60 # default pooling mode diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml index e2fb21504..af9dd8acf 100644 --- a/manifests/configmap.yaml +++ b/manifests/configmap.yaml @@ -17,7 +17,7 @@ data: # connection_pooler_default_cpu_request: "500m" # connection_pooler_default_memory_limit: 100Mi # connection_pooler_default_memory_request: 100Mi - connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-26" + connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-27" # connection_pooler_max_db_connections: 60 # connection_pooler_mode: "transaction" # connection_pooler_number_of_instances: 2 diff --git a/manifests/minimal-fake-pooler-deployment.yaml b/manifests/minimal-fake-pooler-deployment.yaml index b05f4f4ca..53332bad2 100644 --- a/manifests/minimal-fake-pooler-deployment.yaml +++ b/manifests/minimal-fake-pooler-deployment.yaml @@ -23,7 +23,7 @@ spec: serviceAccountName: postgres-operator containers: - name: postgres-operator - image: registry.opensource.zalan.do/acid/pgbouncer:master-26 + image: registry.opensource.zalan.do/acid/pgbouncer:master-27 imagePullPolicy: IfNotPresent resources: requests: diff --git a/manifests/operatorconfiguration.crd.yaml b/manifests/operatorconfiguration.crd.yaml index 8582c866a..6b917a31c 100644 --- a/manifests/operatorconfiguration.crd.yaml +++ b/manifests/operatorconfiguration.crd.yaml @@ -635,7 +635,7 @@ spec: default: "pooler" connection_pooler_image: type: string - default: "registry.opensource.zalan.do/acid/pgbouncer:master-26" + default: "registry.opensource.zalan.do/acid/pgbouncer:master-27" connection_pooler_max_db_connections: type: integer default: 60 diff --git a/manifests/postgresql-operator-default-configuration.yaml b/manifests/postgresql-operator-default-configuration.yaml index 2e475910c..bff5a634e 100644 --- a/manifests/postgresql-operator-default-configuration.yaml +++ b/manifests/postgresql-operator-default-configuration.yaml @@ -203,7 +203,7 @@ configuration: connection_pooler_default_cpu_request: "500m" connection_pooler_default_memory_limit: 100Mi connection_pooler_default_memory_request: 100Mi - connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-26" + connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-27" # connection_pooler_max_db_connections: 60 connection_pooler_mode: "transaction" connection_pooler_number_of_instances: 2 From c753fe61f0120235fd2b555abaa0b1f8671c8300 Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Tue, 7 Mar 2023 21:00:16 +0100 Subject: [PATCH 3/9] use hard coded RunAsUser and RunAsGroup for pooler pod --- pkg/cluster/connection_pooler.go | 20 +++++--------------- pkg/cluster/connection_pooler_test.go | 4 ---- 2 files changed, 5 insertions(+), 19 deletions(-) diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go index 7f817d1f8..827252e2c 100644 --- a/pkg/cluster/connection_pooler.go +++ b/pkg/cluster/connection_pooler.go @@ -24,6 +24,9 @@ import ( "github.com/zalando/postgres-operator/pkg/util/retryutil" ) +var poolerRunAsUser = int64(100) +var poolerRunAsGroup = int64(101) + // ConnectionPoolerObjects K8s objects that are belong to connection pooler type ConnectionPoolerObjects struct { Deployment *appsv1.Deployment @@ -382,21 +385,8 @@ func (c *Cluster) 
generateConnectionPoolerPodTemplate(role PostgresRole) (
 	securityContext := v1.PodSecurityContext{}
 
 	// determine the User, Group and FSGroup for the pooler pod
-	effectiveRunAsUser := c.OpConfig.Resources.SpiloRunAsUser
-	if spec.SpiloRunAsUser != nil {
-		effectiveRunAsUser = spec.SpiloRunAsUser
-	}
-	if effectiveRunAsUser != nil {
-		securityContext.RunAsUser = effectiveRunAsUser
-	}
-
-	effectiveRunAsGroup := c.OpConfig.Resources.SpiloRunAsGroup
-	if spec.SpiloRunAsGroup != nil {
-		effectiveRunAsGroup = spec.SpiloRunAsGroup
-	}
-	if effectiveRunAsGroup != nil {
-		securityContext.RunAsGroup = effectiveRunAsGroup
-	}
+	securityContext.RunAsUser = &poolerRunAsUser
+	securityContext.RunAsGroup = &poolerRunAsGroup
 
 	effectiveFSGroup := c.OpConfig.Resources.SpiloFSGroup
 	if spec.SpiloFSGroup != nil {
diff --git a/pkg/cluster/connection_pooler_test.go b/pkg/cluster/connection_pooler_test.go
index 48423c533..25cc40ef1 100644
--- a/pkg/cluster/connection_pooler_test.go
+++ b/pkg/cluster/connection_pooler_test.go
@@ -939,8 +939,6 @@ func TestPoolerTLS(t *testing.T) {
 	clusterName := "acid-test-cluster"
 	namespace := "default"
 	tlsSecretName := "my-secret"
-	spiloRunAsUser := int64(101)
-	spiloRunAsGroup := int64(103)
 	spiloFSGroup := int64(103)
 	defaultMode := int32(0640)
 	mountPath := "/tls"
@@ -994,8 +992,6 @@ func TestPoolerTLS(t *testing.T) {
 				DefaultMemoryRequest: "300Mi",
 				DefaultMemoryLimit:   "300Mi",
 				PodRoleLabel:         "spilo-role",
-				SpiloRunAsUser:       &spiloRunAsUser,
-				SpiloRunAsGroup:      &spiloRunAsGroup,
 				SpiloFSGroup:         &spiloFSGroup,
 			},
 			ConnectionPooler: config.ConnectionPooler{

From 9b98f78a0ba729ce39bb56510ecca39ab2beb16a Mon Sep 17 00:00:00 2001
From: Felix Kunde
Date: Tue, 4 Apr 2023 10:23:59 +0200
Subject: [PATCH 4/9] remove redundant if and rename generateTlsMounts

---
 pkg/cluster/connection_pooler.go | 52 +++++++++++++++-----------------
 pkg/cluster/k8sres.go            |  4 +--
 2 files changed, 27 insertions(+), 29 deletions(-)

diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go
index 827252e2c..d66a39b26 100644
--- a/pkg/cluster/connection_pooler.go
+++ b/pkg/cluster/connection_pooler.go
@@ -348,34 +348,32 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
 	var poolerVolumes []v1.Volume
 	var volumeMounts []v1.VolumeMount
 	if spec.TLS != nil && spec.TLS.SecretName != "" {
-		if spec.TLS != nil && spec.TLS.SecretName != "" {
-			getPoolerTLSEnv := func(k string) string {
-				keyName := ""
-				switch k {
-				case "tls.crt":
-					keyName = "CONNECTION_POOLER_CLIENT_TLS_CRT"
-				case "tls.key":
-					keyName = "CONNECTION_POOLER_CLIENT_TLS_KEY"
-				case "tls.ca":
-					keyName = "CONNECTION_POOLER_CLIENT_CA_FILE"
-				default:
-					panic(fmt.Sprintf("TLS env key for pooler unknown %s", k))
-				}
-
-				return keyName
-			}
-			tlsEnv, tlsVolumes := generateTLSmounts(spec, getPoolerTLSEnv)
-			envVars = append(envVars, tlsEnv...)
-			for _, vol := range tlsVolumes {
-				poolerVolumes = append(poolerVolumes, v1.Volume{
-					Name:         vol.Name,
-					VolumeSource: vol.VolumeSource,
-				})
-				volumeMounts = append(volumeMounts, v1.VolumeMount{
-					Name:      vol.Name,
-					MountPath: vol.MountPath,
-				})
-			}
+		getPoolerTLSEnv := func(k string) string {
+			keyName := ""
+			switch k {
+			case "tls.crt":
+				keyName = "CONNECTION_POOLER_CLIENT_TLS_CRT"
+			case "tls.key":
+				keyName = "CONNECTION_POOLER_CLIENT_TLS_KEY"
+			case "tls.ca":
+				keyName = "CONNECTION_POOLER_CLIENT_CA_FILE"
+			default:
+				panic(fmt.Sprintf("TLS env key for pooler unknown %s", k))
+			}
+
+			return keyName
+		}
+		tlsEnv, tlsVolumes := generateTlsMounts(spec, getPoolerTLSEnv)
+		envVars = append(envVars, tlsEnv...)
+		for _, vol := range tlsVolumes {
+			poolerVolumes = append(poolerVolumes, v1.Volume{
+				Name:         vol.Name,
+				VolumeSource: vol.VolumeSource,
+			})
+			volumeMounts = append(volumeMounts, v1.VolumeMount{
+				Name:      vol.Name,
+				MountPath: vol.MountPath,
+			})
+		}
 	}
diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go
index f38560460..7d11a9e37 100644
--- a/pkg/cluster/k8sres.go
+++ b/pkg/cluster/k8sres.go
@@ -1303,7 +1303,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 		return keyName
 	}
-	tlsEnv, tlsVolumes := generateTLSmounts(spec, getSpiloTLSEnv)
+	tlsEnv, tlsVolumes := generateTlsMounts(spec, getSpiloTLSEnv)
 	for _, env := range tlsEnv {
 		spiloEnvVars = appendEnvVars(spiloEnvVars, env)
 	}
@@ -1461,7 +1461,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 	return statefulSet, nil
 }
-func generateTLSmounts(spec *acidv1.PostgresSpec, tlsEnv func(key string) string) ([]v1.EnvVar, []acidv1.AdditionalVolume) {
+func generateTlsMounts(spec *acidv1.PostgresSpec, tlsEnv func(key string) string) ([]v1.EnvVar, []acidv1.AdditionalVolume) {
 	// this is combined with the FSGroup in the section above
 	// to give read access to the postgres user
 	defaultMode := int32(0640)

From a2102df393a33fb253b2c9085d5eaa7279be86cc Mon Sep 17 00:00:00 2001
From: Felix Kunde
Date: Tue, 4 Apr 2023 17:37:18 +0200
Subject: [PATCH 5/9] extend documentation on tls support

---
 docs/reference/cluster_manifest.md |  4 +++-
 docs/user.md                       | 17 +++++++++++++++--
 2 files changed, 18 insertions(+), 3 deletions(-)

diff --git a/docs/reference/cluster_manifest.md b/docs/reference/cluster_manifest.md
index 938469dd0..60343dd4f 100644
--- a/docs/reference/cluster_manifest.md
+++ b/docs/reference/cluster_manifest.md
@@ -543,7 +543,9 @@ for both master and replica pooler services (if `enableReplicaConnectionPooler`
 
 ## Custom TLS certificates
 
-Those parameters are grouped under the `tls` top-level key.
+Those parameters are grouped under the `tls` top-level key. Note, you have to
+define `spiloFSGroup` in the Postgres cluster manifest or `spilo_fsgroup` in
+the global configuration before adding the `tls` section.
 
 * **secretName**
   By setting the `secretName` value, the cluster will switch to load the given
diff --git a/docs/user.md b/docs/user.md
index fa82e3344..a6636602b 100644
--- a/docs/user.md
+++ b/docs/user.md
@@ -1197,8 +1197,8 @@ don't know the value, use `103` which is the GID from the default Spilo image
 
 OpenShift allocates the users and groups dynamically (based on scc), and their
 range is different in every namespace. Due to this dynamic behaviour, it's not
 trivial to know at deploy time the uid/gid of the user in the cluster.
-Therefore, instead of using a global `spilo_fsgroup` setting, use the
-`spiloFSGroup` field per Postgres cluster.
+Therefore, instead of using a global `spilo_fsgroup` setting in the operator
+configuration, use the `spiloFSGroup` field in the Postgres cluster manifest.
 
 Upload the cert as a kubernetes secret:
 ```sh
@@ -1255,3 +1255,16 @@ Alternatively, it is also possible to use
 
 Certificate rotation is handled in the Spilo image which checks every 5
 minutes if the certificates have changed and reloads postgres accordingly.
+
+### TLS certificates for connection pooler
+
+By default, the pgBouncer image generates its own TLS certificate like Spilo.
+When the `tls` section is specified in the manifest it will be used for the
+connection pooler pod(s) as well. The security context options are hard coded
+to `runAsUser: 100` and `runAsGroup: 101`. The `fsGroup` will be the same
+as for Spilo.
+
+As of now, the operator does not sync the pooler deployment automatically
+which means that changes in the pod template are not caught. You need to
+toggle `enableConnectionPooler` to set environment variables, volumes, secret
+mounts and securityContext required for TLS support in the pooler pod.

From 1d53b6cc0bd4075ef4baacddab1aaca4bab77f85 Mon Sep 17 00:00:00 2001
From: Felix Kunde
Date: Fri, 7 Apr 2023 00:27:25 +0200
Subject: [PATCH 6/9] add e2e test for tls support

---
 e2e/Dockerfile        |  2 ++
 e2e/tests/k8s_api.py  | 20 ++++++++++++++++++++
 e2e/tests/test_e2e.py | 42 ++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 64 insertions(+)

diff --git a/e2e/Dockerfile b/e2e/Dockerfile
index b97f52dcb..46409ee8e 100644
--- a/e2e/Dockerfile
+++ b/e2e/Dockerfile
@@ -15,7 +15,9 @@ RUN apt-get update \
        python3-pip \
        curl \
        vim \
+       openssl \
     && pip3 install --no-cache-dir -r requirements.txt \
+    && openssl req -x509 -nodes -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=acid.zalan.do" \
     && curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.24.3/bin/linux/amd64/kubectl \
     && chmod +x ./kubectl \
     && mv ./kubectl /usr/local/bin/kubectl \
diff --git a/e2e/tests/k8s_api.py b/e2e/tests/k8s_api.py
index 82fed4c0b..75127b7c3 100644
--- a/e2e/tests/k8s_api.py
+++ b/e2e/tests/k8s_api.py
@@ -156,6 +156,14 @@ def get_services():
         while not get_services():
             time.sleep(self.RETRY_TIMEOUT_SEC)
 
+    def count_pods_with_volume_mount(self, mount_name, labels, namespace='default'):
+        pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
+        return len(list(filter(lambda x: mount_name in x.spec.containers[0].volume_mounts, pods)))
+
+    def count_pods_with_env_variable(self, env_variable_key, labels, namespace='default'):
+        pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
+        return len(list(filter(lambda x: env_variable_key in x.spec.containers[0].env, pods)))
+
     def count_pods_with_rolling_update_flag(self, labels, namespace='default'):
         pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
         return len(list(filter(lambda x: "zalando-postgres-operator-rolling-update-required" in x.metadata.annotations, pods)))
@@ -519,6 +527,18 @@ def update_config(self, config_map_patch, step="Updating operator deployment"):
         self.api.core_v1.patch_namespaced_config_map("postgres-operator", "default", config_map_patch)
         self.delete_operator_pod(step=step)
 
+    def create_tls_secret_with_kubectl(self, secret_name):
+        return subprocess.run(
+            ["kubectl", "create", "secret", "tls", secret_name, "--key", "tls.key", "--cert" "tls.crt"],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
+
+    def create_generic_secret_with_kubectl(self, secret_name, file):
+
return subprocess.run( + ["kubectl", "create", "secret", "generic", secret_name, "--from-file", file], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + def create_with_kubectl(self, path): return subprocess.run( ["kubectl", "apply", "-f", path], diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index d28cd6241..36cf62c3a 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -622,6 +622,48 @@ def test_cross_namespace_secrets(self): self.eventuallyEqual(lambda: k8s.count_secrets_with_label("cluster-name=acid-minimal-cluster,application=spilo", self.test_namespace), 1, "Secret not created for user in namespace") + @timeout_decorator.timeout(TEST_TIMEOUT_SEC) + def test_custom_ssl_certificate(self): + ''' + Test if spilo uses a custom SSL certificate + ''' + + k8s = self.k8s + cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster' + tls_secret = "pg-tls" + + # get nodes of master and replica(s) (expected target of new master) + _, replica_nodes = k8s.get_pg_nodes(cluster_label) + self.assertNotEqual(replica_nodes, []) + + # create secrets containing ssl certificate and clientauth + self.k8s.create_tls_secret_with_kubectl(tls_secret) + + try: + # enable load balancer services + pg_patch_tls = { + "spec": { + "spiloFSGroup": 103, + "tls": { + "secretName": tls_secret + } + } + } + k8s.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_tls) + + # wait for switched over + k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label) + k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) + + self.eventuallyEqual(lambda: k8s.count_pods_with_env_variable("SSL_CERTIFICATE_FILE", cluster_label), 2, "TLS env variable SSL_CERTIFICATE_FILE missing in Spilo pods") + self.eventuallyEqual(lambda: k8s.count_pods_with_env_variable("SSL_PRIVATE_KEY_FILE", cluster_label), 2, "TLS env variable SSL_PRIVATE_KEY_FILE missing in Spilo pods") + self.eventuallyEqual(lambda: k8s.count_pods_with_volume_mount(tls_secret), cluster_label), 2, "TLS volume mount missing in Spilo pods") + + except timeout_decorator.TimeoutError: + print('Operator log: {}'.format(k8s.get_operator_log())) + raise + @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def test_enable_disable_connection_pooler(self): ''' From 6e6ac2225d003cf41b0c19d014c7200922f49e98 Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Fri, 7 Apr 2023 00:36:13 +0200 Subject: [PATCH 7/9] fix brackets in tls e2e test --- e2e/tests/test_e2e.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index 36cf62c3a..b2c6849d1 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -658,7 +658,7 @@ def test_custom_ssl_certificate(self): self.eventuallyEqual(lambda: k8s.count_pods_with_env_variable("SSL_CERTIFICATE_FILE", cluster_label), 2, "TLS env variable SSL_CERTIFICATE_FILE missing in Spilo pods") self.eventuallyEqual(lambda: k8s.count_pods_with_env_variable("SSL_PRIVATE_KEY_FILE", cluster_label), 2, "TLS env variable SSL_PRIVATE_KEY_FILE missing in Spilo pods") - self.eventuallyEqual(lambda: k8s.count_pods_with_volume_mount(tls_secret), cluster_label), 2, "TLS volume mount missing in Spilo pods") + self.eventuallyEqual(lambda: k8s.count_pods_with_volume_mount(tls_secret, cluster_label), 2, "TLS volume mount missing in Spilo pods") except timeout_decorator.TimeoutError: print('Operator log: {}'.format(k8s.get_operator_log())) From 
c596adfe7e69a8f630a510ddbfd2483f21a0cb35 Mon Sep 17 00:00:00 2001
From: Felix Kunde
Date: Fri, 7 Apr 2023 00:53:58 +0200
Subject: [PATCH 8/9] copy new k8s api functions

---
 e2e/tests/k8s_api.py | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/e2e/tests/k8s_api.py b/e2e/tests/k8s_api.py
index 75127b7c3..1c432b849 100644
--- a/e2e/tests/k8s_api.py
+++ b/e2e/tests/k8s_api.py
@@ -249,6 +249,18 @@ def update_config(self, config_map_patch, step="Updating operator deployment"):
     def patch_pod(self, data, pod_name, namespace="default"):
         self.api.core_v1.patch_namespaced_pod(pod_name, namespace, data)
 
+    def create_tls_secret_with_kubectl(self, secret_name):
+        return subprocess.run(
+            ["kubectl", "create", "secret", "tls", secret_name, "--key", "tls.key", "--cert" "tls.crt"],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
+
+    def create_generic_secret_with_kubectl(self, secret_name, file):
+        return subprocess.run(
+            ["kubectl", "create", "secret", "generic", secret_name, "--from-file", file],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
+
     def create_with_kubectl(self, path):
         return subprocess.run(
             ["kubectl", "apply", "-f", path],
@@ -456,6 +468,14 @@ def get_services():
         while not get_services():
             time.sleep(self.RETRY_TIMEOUT_SEC)
 
+    def count_pods_with_volume_mount(self, mount_name, labels, namespace='default'):
+        pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
+        return len(list(filter(lambda x: mount_name in x.spec.containers[0].volume_mounts, pods)))
+
+    def count_pods_with_env_variable(self, env_variable_key, labels, namespace='default'):
+        pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
+        return len(list(filter(lambda x: env_variable_key in x.spec.containers[0].env, pods)))
+
     def count_pods_with_rolling_update_flag(self, labels, namespace='default'):
         pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
         return len(list(filter(lambda x: "zalando-postgres-operator-rolling-update-required" in x.metadata.annotations, pods)))

From 4b5110ed06956e0f9dde0766e6f0faac9a52b0c8 Mon Sep 17 00:00:00 2001
From: Felix Kunde
Date: Thu, 13 Apr 2023 14:43:25 +0200
Subject: [PATCH 9/9] test TLS in e2e suite

---
 .gitignore            |  1 +
 docs/user.md          | 12 ++++++++----
 e2e/Dockerfile        |  2 --
 e2e/Makefile          |  2 ++
 e2e/run.sh            |  6 ++++++
 e2e/tests/k8s_api.py  | 42 +++++++++++++++++-------------------------
 e2e/tests/test_e2e.py | 12 +++++++++---
 7 files changed, 43 insertions(+), 34 deletions(-)

diff --git a/.gitignore b/.gitignore
index 1f2395f35..081eb5fba 100644
--- a/.gitignore
+++ b/.gitignore
@@ -95,6 +95,7 @@ coverage.xml
 
 # e2e tests
 e2e/manifests
+e2e/tls
 
 # Translations
 *.mo
diff --git a/docs/user.md b/docs/user.md
index a6636602b..8506b0acd 100644
--- a/docs/user.md
+++ b/docs/user.md
@@ -1200,11 +1200,16 @@ trivial to know at deploy time the uid/gid of the user in the cluster.
 Therefore, instead of using a global `spilo_fsgroup` setting in the operator
 configuration, use the `spiloFSGroup` field in the Postgres cluster manifest.
+For testing purposes, you can generate a self-signed certificate with openssl: +```sh +openssl req -x509 -nodes -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=acid.zalan.do" +``` + Upload the cert as a kubernetes secret: ```sh kubectl create secret tls pg-tls \ - --key pg-tls.key \ - --cert pg-tls.crt + --key tls.key \ + --cert tls.crt ``` When doing client auth, CA can come optionally from the same secret: @@ -1231,8 +1236,7 @@ spec: Optionally, the CA can be provided by a different secret: ```sh -kubectl create secret generic pg-tls-ca \ - --from-file=ca.crt=ca.crt +kubectl create secret generic pg-tls-ca --from-file=ca.crt=ca.crt ``` Then configure the postgres resource with the TLS secret: diff --git a/e2e/Dockerfile b/e2e/Dockerfile index 46409ee8e..b97f52dcb 100644 --- a/e2e/Dockerfile +++ b/e2e/Dockerfile @@ -15,9 +15,7 @@ RUN apt-get update \ python3-pip \ curl \ vim \ - openssl \ && pip3 install --no-cache-dir -r requirements.txt \ - && openssl req -x509 -nodes -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=acid.zalan.do" \ && curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.24.3/bin/linux/amd64/kubectl \ && chmod +x ./kubectl \ && mv ./kubectl /usr/local/bin/kubectl \ diff --git a/e2e/Makefile b/e2e/Makefile index 9b1b5ea11..017f5d345 100644 --- a/e2e/Makefile +++ b/e2e/Makefile @@ -29,10 +29,12 @@ default: tools clean: rm -rf manifests + rm -rf tls copy: clean mkdir manifests cp -r ../manifests . + mkdir tls docker: scm-source.json docker build -t "$(IMAGE):$(TAG)" . diff --git a/e2e/run.sh b/e2e/run.sh index 12581a26a..ecef56d08 100755 --- a/e2e/run.sh +++ b/e2e/run.sh @@ -55,6 +55,10 @@ function set_kind_api_server_ip(){ sed -i "s/server.*$/server: https:\/\/$kind_api_server/g" "${kubeconfig_path}" } +function generate_certificate(){ + openssl req -x509 -nodes -newkey rsa:2048 -keyout tls/tls.key -out tls/tls.crt -subj "/CN=acid.zalan.do" +} + function run_tests(){ echo "Running tests... image: ${e2e_test_runner_image}" # tests modify files in ./manifests, so we mount a copy of this directory done by the e2e Makefile @@ -62,6 +66,7 @@ function run_tests(){ docker run --rm --network=host -e "TERM=xterm-256color" \ --mount type=bind,source="$(readlink -f ${kubeconfig_path})",target=/root/.kube/config \ --mount type=bind,source="$(readlink -f manifests)",target=/manifests \ + --mount type=bind,source="$(readlink -f tls)",target=/tls \ --mount type=bind,source="$(readlink -f tests)",target=/tests \ --mount type=bind,source="$(readlink -f exec.sh)",target=/exec.sh \ --mount type=bind,source="$(readlink -f scripts)",target=/scripts \ @@ -82,6 +87,7 @@ function main(){ [[ ! 
-f ${kubeconfig_path} ]] && start_kind load_operator_image set_kind_api_server_ip + generate_certificate shift run_tests $@ diff --git a/e2e/tests/k8s_api.py b/e2e/tests/k8s_api.py index 1c432b849..3d687f49a 100644 --- a/e2e/tests/k8s_api.py +++ b/e2e/tests/k8s_api.py @@ -157,12 +157,24 @@ def get_services(): time.sleep(self.RETRY_TIMEOUT_SEC) def count_pods_with_volume_mount(self, mount_name, labels, namespace='default'): + pod_count = 0 pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items - return len(list(filter(lambda x: mount_name in x.spec.containers[0].volume_mounts, pods))) + for pod in pods: + for mount in pod.spec.containers[0].volume_mounts: + if mount.name == mount_name: + pod_count += 1 + + return pod_count def count_pods_with_env_variable(self, env_variable_key, labels, namespace='default'): + pod_count = 0 pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items - return len(list(filter(lambda x: env_variable_key in x.spec.containers[0].env, pods))) + for pod in pods: + for env in pod.spec.containers[0].env: + if env.name == env_variable_key: + pod_count += 1 + + return pod_count def count_pods_with_rolling_update_flag(self, labels, namespace='default'): pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items @@ -251,13 +263,13 @@ def patch_pod(self, data, pod_name, namespace="default"): def create_tls_secret_with_kubectl(self, secret_name): return subprocess.run( - ["kubectl", "create", "secret", "tls", secret_name, "--key", "tls.key", "--cert" "tls.crt"], + ["kubectl", "create", "secret", "tls", secret_name, "--key=tls/tls.key", "--cert=tls/tls.crt"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - def create_generic_secret_with_kubectl(self, secret_name, file): + def create_tls_ca_secret_with_kubectl(self, secret_name): return subprocess.run( - ["kubectl", "create", "secret", "generic", secret_name, "--from-file", file], + ["kubectl", "create", "secret", "generic", secret_name, "--from-file=ca.crt=tls/ca.crt"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -468,14 +480,6 @@ def get_services(): while not get_services(): time.sleep(self.RETRY_TIMEOUT_SEC) - def count_pods_with_volume_mount(self, mount_name, labels, namespace='default'): - pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items - return len(list(filter(lambda x: mount_name in x.spec.containers[0].volume_mounts, pods))) - - def count_pods_with_env_variable(self, env_variable_key, labels, namespace='default'): - pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items - return len(list(filter(lambda x: env_variable_key in x.spec.containers[0].env, pods))) - def count_pods_with_rolling_update_flag(self, labels, namespace='default'): pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items return len(list(filter(lambda x: "zalando-postgres-operator-rolling-update-required" in x.metadata.annotations, pods))) @@ -547,18 +551,6 @@ def update_config(self, config_map_patch, step="Updating operator deployment"): self.api.core_v1.patch_namespaced_config_map("postgres-operator", "default", config_map_patch) self.delete_operator_pod(step=step) - def create_tls_secret_with_kubectl(self, secret_name): - return subprocess.run( - ["kubectl", "create", "secret", "tls", secret_name, "--key", "tls.key", "--cert" "tls.crt"], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - - def create_generic_secret_with_kubectl(self, secret_name, file): - return 
subprocess.run( - ["kubectl", "create", "secret", "generic", secret_name, "--from-file", file], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - def create_with_kubectl(self, path): return subprocess.run( ["kubectl", "apply", "-f", path], diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index b2c6849d1..ed04fab61 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -636,10 +636,11 @@ def test_custom_ssl_certificate(self): _, replica_nodes = k8s.get_pg_nodes(cluster_label) self.assertNotEqual(replica_nodes, []) - # create secrets containing ssl certificate and clientauth - self.k8s.create_tls_secret_with_kubectl(tls_secret) - try: + # create secret containing ssl certificate + result = self.k8s.create_tls_secret_with_kubectl(tls_secret) + print("stdout: {}, stderr: {}".format(result.stdout, result.stderr)) + # enable load balancer services pg_patch_tls = { "spec": { @@ -695,6 +696,11 @@ def test_enable_disable_connection_pooler(self): self.eventuallyEqual(lambda: k8s.count_services_with_label(pooler_label), 2, "No pooler service found") self.eventuallyEqual(lambda: k8s.count_secrets_with_label(pooler_label), 1, "Pooler secret not created") + # TLS still enabled so check existing env variables and volume mounts + self.eventuallyEqual(lambda: k8s.count_pods_with_env_variable("CONNECTION_POOLER_CLIENT_TLS_CRT", pooler_label), 4, "TLS env variable CONNECTION_POOLER_CLIENT_TLS_CRT missing in pooler pods") + self.eventuallyEqual(lambda: k8s.count_pods_with_env_variable("CONNECTION_POOLER_CLIENT_TLS_KEY", pooler_label), 4, "TLS env variable CONNECTION_POOLER_CLIENT_TLS_KEY missing in pooler pods") + self.eventuallyEqual(lambda: k8s.count_pods_with_volume_mount("pg-tls", pooler_label), 4, "TLS volume mount missing in pooler pods") + k8s.api.custom_objects_api.patch_namespaced_custom_object( 'acid.zalan.do', 'v1', 'default', 'postgresqls', 'acid-minimal-cluster',