From 6bb3dc7f6c0d8753a30f87ce69b7de8f0d9e0f35 Mon Sep 17 00:00:00 2001 From: Peter Brachwitz Date: Tue, 2 Mar 2021 19:00:14 +0100 Subject: [PATCH] Add support for VolumeClaimDeletePolicies for Elasticsearch clusters (#4050) Approach inspired by kubernetes/enhancements#1915 Adds a new volumeClaimDeletePolicy to the Elasticsearch Spec at the cluster level (not per NodeSet): apiVersion: elasticsearch.k8s.elastic.co/v1 kind: Elasticsearch metadata: name: es spec: version: 7.10.1 volumeClaimDeletePolicy: DeleteOnScaledownAndClusterDeletion nodeSets: - name: default count: 2 Possible values are DeleteOnScaledownAndClusterDeletion (default) and DeleteOnScaledownOnly. DeleteOnScaledownAndClusterDeletion relies on an owner reference pointing to the Elasticsearch resource to garbage collect PVCs once the Elasticsearch cluster has been deleted (existing behaviour). It also runs additional garbage collection on each reconciliation to remove PVCs that are no longer in use, either because the whole node set has been removed or because individual nodes have been scaled down (existing behaviour). DeleteOnScaledownOnly means the PVCs are kept after the cluster has been deleted. This is implemented by removing the owner reference. Removal of PVCs on scale down happens as before. Switching from one strategy to the other is allowed and is implemented by bypassing the StatefulSet templating mechanism. This is mainly because the PVC templates in StatefulSets are considered immutable, and changing the PVC ownership through them would require recreating the StatefulSets. Instead, the operator edits the PVCs after they have been created by the StatefulSet controller. --- Makefile | 1 + config/crds/all-crds.yaml | 8 + ...search.k8s.elastic.co_elasticsearches.yaml | 9 + .../eck-operator-crds/templates/all-crds.yaml | 8 + docs/reference/api-docs.asciidoc | 13 + .../elasticsearch/v1/elasticsearch_types.go | 25 ++ pkg/controller/common/reconciler/secret.go | 40 +-- .../common/reconciler/secret_test.go | 182 ----------- pkg/controller/elasticsearch/driver/nodes.go | 4 + .../elasticsearch/driver/pvc_gc_test.go | 60 +++- .../elasticsearch/driver/pvc_owner.go | 47 +++ .../elasticsearch/driver/pvc_owner_test.go | 116 +++++++ .../elasticsearch/nodespec/statefulset.go | 49 +-- .../nodespec/statefulset_test.go | 63 +--- .../elasticsearch/validation/webhook_test.go | 6 + pkg/utils/k8s/k8sutils.go | 13 - pkg/utils/k8s/k8sutils_test.go | 75 ----- pkg/utils/k8s/owner_refs.go | 55 ++++ pkg/utils/k8s/owner_refs_test.go | 285 ++++++++++++++++++ test/e2e/es/volume_test.go | 37 +++ test/e2e/test/elasticsearch/builder.go | 5 + test/e2e/test/elasticsearch/steps_deletion.go | 3 + 22 files changed, 688 insertions(+), 416 deletions(-) create mode 100644 pkg/controller/elasticsearch/driver/pvc_owner.go create mode 100644 pkg/controller/elasticsearch/driver/pvc_owner_test.go create mode 100644 pkg/utils/k8s/owner_refs.go create mode 100644 pkg/utils/k8s/owner_refs_test.go diff --git a/Makefile b/Makefile index a90cb5da8d..8a6f99138f 100644 --- a/Makefile +++ b/Makefile @@ -460,6 +460,7 @@ e2e-local: LOCAL_E2E_CTX := /tmp/e2e-local.json e2e-local: @go run test/e2e/cmd/main.go run \ --test-run-name=e2e \ + --operator-image=$(OPERATOR_IMAGE) \ --test-context-out=$(LOCAL_E2E_CTX) \ --test-license=$(TEST_LICENSE) \ --test-license-pkey-path=$(TEST_LICENSE_PKEY_PATH) \ diff --git a/config/crds/all-crds.yaml b/config/crds/all-crds.yaml index a1babaf65a..a0f2aaad29 100644 --- a/config/crds/all-crds.yaml +++ b/config/crds/all-crds.yaml @@ -2387,6 +2387,14 @@ 
spec: version: description: Version of Elasticsearch. type: string + volumeClaimDeletePolicy: + description: VolumeClaimDeletePolicy sets the policy for handling deletion + of PersistentVolumeClaims for all NodeSets. Possible values are DeleteOnScaledownOnly + and DeleteOnScaledownAndClusterDeletion. Defaults to DeleteOnScaledownAndClusterDeletion. + enum: + - DeleteOnScaledownOnly + - DeleteOnScaledownAndClusterDeletion + type: string required: - nodeSets - version diff --git a/config/crds/bases/elasticsearch.k8s.elastic.co_elasticsearches.yaml b/config/crds/bases/elasticsearch.k8s.elastic.co_elasticsearches.yaml index 217e70ae1f..42baf03ac9 100644 --- a/config/crds/bases/elasticsearch.k8s.elastic.co_elasticsearches.yaml +++ b/config/crds/bases/elasticsearch.k8s.elastic.co_elasticsearches.yaml @@ -4418,6 +4418,15 @@ spec: version: description: Version of Elasticsearch. type: string + volumeClaimDeletePolicy: + description: VolumeClaimDeletePolicy sets the policy for handling + deletion of PersistentVolumeClaims for all NodeSets. Possible values + are DeleteOnScaledownOnly and DeleteOnScaledownAndClusterDeletion. + Defaults to DeleteOnScaledownAndClusterDeletion. + enum: + - DeleteOnScaledownOnly + - DeleteOnScaledownAndClusterDeletion + type: string required: - nodeSets - version diff --git a/deploy/eck-operator/charts/eck-operator-crds/templates/all-crds.yaml b/deploy/eck-operator/charts/eck-operator-crds/templates/all-crds.yaml index 241b235cb4..6e14696b22 100644 --- a/deploy/eck-operator/charts/eck-operator-crds/templates/all-crds.yaml +++ b/deploy/eck-operator/charts/eck-operator-crds/templates/all-crds.yaml @@ -2411,6 +2411,14 @@ spec: version: description: Version of Elasticsearch. type: string + volumeClaimDeletePolicy: + description: VolumeClaimDeletePolicy sets the policy for handling deletion + of PersistentVolumeClaims for all NodeSets. Possible values are DeleteOnScaledownOnly + and DeleteOnScaledownAndClusterDeletion. Defaults to DeleteOnScaledownAndClusterDeletion. + enum: + - DeleteOnScaledownOnly + - DeleteOnScaledownAndClusterDeletion + type: string required: - nodeSets - version diff --git a/docs/reference/api-docs.asciidoc b/docs/reference/api-docs.asciidoc index 1646090267..6977bb313f 100644 --- a/docs/reference/api-docs.asciidoc +++ b/docs/reference/api-docs.asciidoc @@ -895,6 +895,7 @@ ElasticsearchSpec holds the specification of an Elasticsearch cluster. | *`secureSettings`* __xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-pkg-apis-common-v1-secretsource[$$SecretSource$$]__ | SecureSettings is a list of references to Kubernetes secrets containing sensitive configuration options for Elasticsearch. | *`serviceAccountName`* __string__ | ServiceAccountName is used to check access from the current resource to a resource (eg. a remote Elasticsearch cluster) in a different namespace. Can only be used if ECK is enforcing RBAC on references. | *`remoteClusters`* __xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-pkg-apis-elasticsearch-v1-remotecluster[$$RemoteCluster$$] array__ | RemoteClusters enables you to establish uni-directional connections to a remote Elasticsearch cluster. +| *`volumeClaimDeletePolicy`* __xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-pkg-apis-elasticsearch-v1-volumeclaimdeletepolicy[$$VolumeClaimDeletePolicy$$]__ | VolumeClaimDeletePolicy sets the policy for handling deletion of PersistentVolumeClaims for all NodeSets. Possible values are DeleteOnScaledownOnly and DeleteOnScaledownAndClusterDeletion. 
Defaults to DeleteOnScaledownAndClusterDeletion. |=== @@ -1030,6 +1031,18 @@ UpdateStrategy specifies how updates to the cluster should be performed. |=== +[id="{anchor_prefix}-github-com-elastic-cloud-on-k8s-pkg-apis-elasticsearch-v1-volumeclaimdeletepolicy"] +=== VolumeClaimDeletePolicy (string) + +VolumeClaimDeletePolicy describes the delete policy for handling PersistentVolumeClaims that hold Elasticsearch data. Inspired by https://github.com/kubernetes/enhancements/pull/2440 + +.Appears In: +**** +- xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-pkg-apis-elasticsearch-v1-elasticsearchspec[$$ElasticsearchSpec$$] +**** + + + diff --git a/pkg/apis/elasticsearch/v1/elasticsearch_types.go b/pkg/apis/elasticsearch/v1/elasticsearch_types.go index 3bf3e8efca..30c55c554a 100644 --- a/pkg/apis/elasticsearch/v1/elasticsearch_types.go +++ b/pkg/apis/elasticsearch/v1/elasticsearch_types.go @@ -66,8 +66,26 @@ type ElasticsearchSpec struct { // RemoteClusters enables you to establish uni-directional connections to a remote Elasticsearch cluster. // +optional RemoteClusters []RemoteCluster `json:"remoteClusters,omitempty"` + + // VolumeClaimDeletePolicy sets the policy for handling deletion of PersistentVolumeClaims for all NodeSets. + // Possible values are DeleteOnScaledownOnly and DeleteOnScaledownAndClusterDeletion. Defaults to DeleteOnScaledownAndClusterDeletion. + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Enum=DeleteOnScaledownOnly;DeleteOnScaledownAndClusterDeletion + VolumeClaimDeletePolicy VolumeClaimDeletePolicy `json:"volumeClaimDeletePolicy,omitempty"` } +// VolumeClaimDeletePolicy describes the delete policy for handling PersistentVolumeClaims that hold Elasticsearch data. +// Inspired by https://github.com/kubernetes/enhancements/pull/2440 +type VolumeClaimDeletePolicy string + +const ( + // DeleteOnScaledownAndClusterDeletionPolicy removes PersistentVolumeClaims when the corresponding Elasticsearch node is removed or when the Elasticsearch cluster is deleted. + DeleteOnScaledownAndClusterDeletionPolicy VolumeClaimDeletePolicy = "DeleteOnScaledownAndClusterDeletion" + // DeleteOnScaledownOnlyPolicy removes PersistentVolumeClaims on scale down of Elasticsearch nodes but retains all + // current PersistentVolumeClaims when the Elasticsearch cluster has been deleted. + DeleteOnScaledownOnlyPolicy VolumeClaimDeletePolicy = "DeleteOnScaledownOnly" +) + // TransportConfig holds the transport layer settings for Elasticsearch. type TransportConfig struct { // Service defines the template for the associated Kubernetes Service object. @@ -118,6 +136,13 @@ func (es ElasticsearchSpec) NodeCount() int32 { return count } +func (es ElasticsearchSpec) VolumeClaimDeletePolicyOrDefault() VolumeClaimDeletePolicy { + if es.VolumeClaimDeletePolicy == "" { + return DeleteOnScaledownAndClusterDeletionPolicy + } + return es.VolumeClaimDeletePolicy +} + // Auth contains user authentication and authorization security settings for Elasticsearch. type Auth struct { // Roles to propagate to the Elasticsearch cluster.
diff --git a/pkg/controller/common/reconciler/secret.go b/pkg/controller/common/reconciler/secret.go index 5afe3a9495..d13ba40f3c 100644 --- a/pkg/controller/common/reconciler/secret.go +++ b/pkg/controller/common/reconciler/secret.go @@ -13,7 +13,6 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -118,7 +117,7 @@ func ReconcileSecretNoOwnerRef(c k8s.Client, expected corev1.Secret, softOwner r // or if secret data is not strictly equal !reflect.DeepEqual(expected.Data, reconciled.Data) || // or if an existing owner should be removed - hasOwner(&reconciled, ownerMeta) + k8s.HasOwner(&reconciled, ownerMeta) }, UpdateReconciled: func() { // set expected annotations and labels, but don't remove existing ones @@ -127,7 +126,7 @@ func ReconcileSecretNoOwnerRef(c k8s.Client, expected corev1.Secret, softOwner r reconciled.Annotations = maps.Merge(reconciled.Annotations, expected.Annotations) reconciled.Data = expected.Data // remove existing owner - removeOwner(&reconciled, ownerMeta) + k8s.RemoveOwner(&reconciled, ownerMeta) }, }); err != nil { return corev1.Secret{}, err @@ -221,38 +220,3 @@ func GarbageCollectAllSoftOwnedOrphanSecrets(c k8s.Client, ownerKinds map[string } return nil } - -func hasOwner(resource metav1.Object, owner metav1.Object) bool { - if owner == nil || resource == nil { - return false - } - found, _ := findOwner(resource, owner) - return found -} - -func removeOwner(resource metav1.Object, owner metav1.Object) { - if resource == nil || owner == nil { - return - } - found, index := findOwner(resource, owner) - if !found { - return - } - owners := resource.GetOwnerReferences() - // remove the owner at index i from the slice - newOwners := append(owners[:index], owners[index+1:]...) 
- resource.SetOwnerReferences(newOwners) -} - -func findOwner(resource metav1.Object, owner metav1.Object) (found bool, index int) { - if owner == nil || resource == nil { - return false, 0 - } - ownerRefs := resource.GetOwnerReferences() - for i := range ownerRefs { - if ownerRefs[i].Name == owner.GetName() && ownerRefs[i].UID == owner.GetUID() { - return true, i - } - } - return false, 0 -} diff --git a/pkg/controller/common/reconciler/secret_test.go b/pkg/controller/common/reconciler/secret_test.go index ad3c7fbbc8..0f20b12200 100644 --- a/pkg/controller/common/reconciler/secret_test.go +++ b/pkg/controller/common/reconciler/secret_test.go @@ -245,188 +245,6 @@ func addOwner(secret *corev1.Secret, name string, uid types.UID) *corev1.Secret return secret } -func Test_hasOwner(t *testing.T) { - owner := sampleOwner() - type args struct { - resource metav1.Object - owner metav1.Object - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "owner is referenced (same name and uid)", - args: args{ - resource: addOwner(&corev1.Secret{}, owner.Name, owner.UID), - owner: owner, - }, - want: true, - }, - { - name: "owner referenced among other owner references", - args: args{ - resource: addOwner(addOwner(&corev1.Secret{}, "another-name", types.UID("another-id")), owner.Name, owner.UID), - owner: owner, - }, - want: true, - }, - { - name: "owner not referenced", - args: args{ - resource: addOwner(addOwner(&corev1.Secret{}, "another-name", types.UID("another-id")), "yet-another-name", "yet-another-uid"), - owner: owner, - }, - want: false, - }, - { - name: "no owner ref", - args: args{ - resource: &corev1.Secret{}, - owner: owner, - }, - want: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := hasOwner(tt.args.resource, tt.args.owner); got != tt.want { - t.Errorf("hasOwner() = %v, want %v", got, tt.want) - } - }) - } -} - -func Test_removeOwner(t *testing.T) { - type args struct { - resource metav1.Object - owner metav1.Object - } - tests := []struct { - name string - args args - wantResource *corev1.Secret - }{ - { - name: "no owner: no-op", - args: args{ - resource: &corev1.Secret{}, - owner: sampleOwner(), - }, - wantResource: &corev1.Secret{}, - }, - { - name: "different owner: no-op", - args: args{ - resource: addOwner(&corev1.Secret{}, "another-owner-name", "another-owner-id"), - owner: sampleOwner(), - }, - wantResource: addOwner(&corev1.Secret{}, "another-owner-name", "another-owner-id"), - }, - { - name: "remove the single owner", - args: args{ - resource: addOwner(&corev1.Secret{}, sampleOwner().Name, sampleOwner().UID), - owner: sampleOwner(), - }, - wantResource: &corev1.Secret{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{}}}, - }, - { - name: "remove the owner from a list of owners", - args: args{ - resource: addOwner(addOwner(&corev1.Secret{}, sampleOwner().Name, sampleOwner().UID), "another-owner", "another-uid"), - owner: sampleOwner(), - }, - wantResource: addOwner(&corev1.Secret{}, "another-owner", "another-uid"), - }, - { - name: "owner listed twice in the list (shouldn't happen): remove the first occurrence", - args: args{ - resource: addOwner(addOwner(&corev1.Secret{}, sampleOwner().Name, sampleOwner().UID), sampleOwner().Name, sampleOwner().UID), - owner: sampleOwner(), - }, - wantResource: addOwner(&corev1.Secret{}, sampleOwner().Name, sampleOwner().UID), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - removeOwner(tt.args.resource, tt.args.owner) - 
require.Equal(t, tt.wantResource, tt.args.resource) - }) - } -} - -func Test_findOwner(t *testing.T) { - type args struct { - resource metav1.Object - owner metav1.Object - } - tests := []struct { - name string - args args - wantFound bool - wantIndex int - }{ - { - name: "no owner: not found", - args: args{ - resource: &corev1.Secret{}, - owner: sampleOwner(), - }, - wantFound: false, - wantIndex: 0, - }, - { - name: "different owner: not found", - args: args{ - resource: addOwner(&corev1.Secret{}, "another-owner-name", "another-owner-id"), - owner: sampleOwner(), - }, - wantFound: false, - wantIndex: 0, - }, - { - name: "owner at index 0", - args: args{ - resource: addOwner(&corev1.Secret{}, sampleOwner().Name, sampleOwner().UID), - owner: sampleOwner(), - }, - wantFound: true, - wantIndex: 0, - }, - { - name: "owner at index 1", - args: args{ - resource: addOwner(addOwner(&corev1.Secret{}, "another-owner", "another-uid"), sampleOwner().Name, sampleOwner().UID), - owner: sampleOwner(), - }, - wantFound: true, - wantIndex: 1, - }, - { - name: "owner listed twice in the list (shouldn't happen): return the first occurrence (index 0)", - args: args{ - resource: addOwner(addOwner(&corev1.Secret{}, sampleOwner().Name, sampleOwner().UID), sampleOwner().Name, sampleOwner().UID), - owner: sampleOwner(), - }, - wantFound: true, - wantIndex: 0, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotFound, gotIndex := findOwner(tt.args.resource, tt.args.owner) - if gotFound != tt.wantFound { - t.Errorf("findOwner() gotFound = %v, want %v", gotFound, tt.wantFound) - } - if gotIndex != tt.wantIndex { - t.Errorf("findOwner() gotIndex = %v, want %v", gotIndex, tt.wantIndex) - } - }) - } -} - func ownedSecret(namespace, name, ownerNs, ownerName, ownerKind string) *corev1.Secret { return &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name, Labels: map[string]string{ diff --git a/pkg/controller/elasticsearch/driver/nodes.go b/pkg/controller/elasticsearch/driver/nodes.go index f3ecf8a6fa..0feb4e852d 100644 --- a/pkg/controller/elasticsearch/driver/nodes.go +++ b/pkg/controller/elasticsearch/driver/nodes.go @@ -120,6 +120,10 @@ func (d *defaultDriver) reconcileNodeSpecs( return results.WithError(err) } + if err := reconcilePVCOwnerRefs(d.K8sClient(), d.ES); err != nil { + return results.WithError(err) + } + if err := GarbageCollectPVCs(d.K8sClient(), d.ES, actualStatefulSets, expectedResources.StatefulSets()); err != nil { return results.WithError(err) } diff --git a/pkg/controller/elasticsearch/driver/pvc_gc_test.go b/pkg/controller/elasticsearch/driver/pvc_gc_test.go index be98055f97..c611115541 100644 --- a/pkg/controller/elasticsearch/driver/pvc_gc_test.go +++ b/pkg/controller/elasticsearch/driver/pvc_gc_test.go @@ -131,20 +131,58 @@ func Test_pvcsToRemove(t *testing.T) { } func TestGarbageCollectPVCs(t *testing.T) { - // Test_pvcsToRemove covers most of the testing logic, - // let's just check everything is correctly plugged to the k8s api here. - es := esv1.Elasticsearch{ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "es"}} existingPVCS := []runtime.Object{ buildPVCPtr("claim1-sset1-0"), // should not be removed buildPVCPtr("claim1-oldsset-0"), // should be removed } - actualSsets := sset.StatefulSetList{buildSsetWithClaims("sset1", 1, "claim1")} - expectedSsets := sset.StatefulSetList{buildSsetWithClaims("sset2", 1, "claim1")} - k8sClient := k8s.NewFakeClient(existingPVCS...) 
- err := GarbageCollectPVCs(k8sClient, es, actualSsets, expectedSsets) - require.NoError(t, err) + type args struct { + k8sClient k8s.Client + es esv1.Elasticsearch + actualStatefulSets sset.StatefulSetList + expectedStatefulSets sset.StatefulSetList + } + tests := []struct { + name string + args args + wantErr bool + wantPVCs int + }{ + { + name: "Remove on default scale down policy", + args: args{ + k8sClient: k8s.NewFakeClient(existingPVCS...), + es: esv1.Elasticsearch{ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "es"}}, + actualStatefulSets: sset.StatefulSetList{buildSsetWithClaims("sset1", 1, "claim1")}, + expectedStatefulSets: sset.StatefulSetList{buildSsetWithClaims("sset2", 1, "claim1")}, + }, + wantErr: false, + wantPVCs: 1, + }, + { + name: "Remove on any other explicitly set policy", + args: args{ + k8sClient: k8s.NewFakeClient(existingPVCS...), + es: esv1.Elasticsearch{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "es"}, + Spec: esv1.ElasticsearchSpec{ + VolumeClaimDeletePolicy: esv1.DeleteOnScaledownOnlyPolicy, + }}, + actualStatefulSets: sset.StatefulSetList{buildSsetWithClaims("sset1", 1, "claim1")}, + expectedStatefulSets: sset.StatefulSetList{buildSsetWithClaims("sset2", 1, "claim1")}, + }, + wantErr: false, + wantPVCs: 1, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := GarbageCollectPVCs(tt.args.k8sClient, tt.args.es, tt.args.actualStatefulSets, tt.args.expectedStatefulSets); (err != nil) != tt.wantErr { + t.Errorf("GarbageCollectPVCs() error = %v, wantErr %v", err, tt.wantErr) + } + var retrievedPVCs corev1.PersistentVolumeClaimList + require.NoError(t, tt.args.k8sClient.List(context.Background(), &retrievedPVCs)) + require.Equal(t, tt.wantPVCs, len(retrievedPVCs.Items)) - var retrievedPVCs corev1.PersistentVolumeClaimList - require.NoError(t, k8sClient.List(context.Background(), &retrievedPVCs)) - require.Equal(t, 1, len(retrievedPVCs.Items)) + }) + } } diff --git a/pkg/controller/elasticsearch/driver/pvc_owner.go b/pkg/controller/elasticsearch/driver/pvc_owner.go new file mode 100644 index 0000000000..a129df62fa --- /dev/null +++ b/pkg/controller/elasticsearch/driver/pvc_owner.go @@ -0,0 +1,47 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package driver + +import ( + "context" + "fmt" + + esv1 "github.com/elastic/cloud-on-k8s/pkg/apis/elasticsearch/v1" + "github.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/label" + "github.com/elastic/cloud-on-k8s/pkg/utils/k8s" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +// reconcilePVCOwnerRefs sets or removes an owner reference into each PVC for the given Elasticsearch cluster depending +// on the VolumeClaimDeletePolicy. +// The intent behind this approach is to allow users to specify per cluster whether they want to retain or remove +// the related PVCs. 
We rely on Kubernetes garbage collection for the cleanup once a cluster has been deleted and +// the operator separately deletes PVCs on scale down if so desired (see GarbageCollectPVCs) +func reconcilePVCOwnerRefs(c k8s.Client, es esv1.Elasticsearch) error { + var pvcs corev1.PersistentVolumeClaimList + ns := client.InNamespace(es.Namespace) + labelSelector := label.NewLabelSelectorForElasticsearch(es) + if err := c.List(context.Background(), &pvcs, ns, labelSelector); err != nil { + return fmt.Errorf("while listing pvcs to reconcile owner refs: %w", err) + } + + for _, pvc := range pvcs.Items { + switch es.Spec.VolumeClaimDeletePolicyOrDefault() { + case esv1.DeleteOnScaledownOnlyPolicy: + k8s.RemoveOwner(&pvc, &es) + case esv1.DeleteOnScaledownAndClusterDeletionPolicy: + if err := controllerutil.SetOwnerReference(&es, &pvc, scheme.Scheme); err != nil { + return fmt.Errorf("while setting owner during owner ref reconciliation: %w", err) + } + } + if err := c.Update(context.Background(), &pvc); err != nil { + return fmt.Errorf("while updating pvc during owner ref reconciliation: %w", err) + } + } + return nil +} diff --git a/pkg/controller/elasticsearch/driver/pvc_owner_test.go b/pkg/controller/elasticsearch/driver/pvc_owner_test.go new file mode 100644 index 0000000000..977b5499fd --- /dev/null +++ b/pkg/controller/elasticsearch/driver/pvc_owner_test.go @@ -0,0 +1,116 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package driver + +import ( + "context" + "testing" + + esv1 "github.com/elastic/cloud-on-k8s/pkg/apis/elasticsearch/v1" + "github.com/elastic/cloud-on-k8s/pkg/controller/common/comparison" + "github.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/label" + "github.com/elastic/cloud-on-k8s/pkg/utils/k8s" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func Test_reconcilePVCOwnerRefs(t *testing.T) { + type args struct { + c k8s.Client + es esv1.Elasticsearch + } + + esFixture := func(policy esv1.VolumeClaimDeletePolicy) esv1.Elasticsearch { + return esv1.Elasticsearch{ + ObjectMeta: metav1.ObjectMeta{Name: "es", Namespace: "ns"}, + Spec: esv1.ElasticsearchSpec{VolumeClaimDeletePolicy: policy}, + } + } + + pvcFixture := func(name string, ownerRefs ...string) corev1.PersistentVolumeClaim { + pvc := corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: name, + Labels: map[string]string{ + label.ClusterNameLabelName: "es", + }, + }, + } + for _, ref := range ownerRefs { + pvc.OwnerReferences = append(pvc.OwnerReferences, metav1.OwnerReference{ + Name: ref, + Kind: "Elasticsearch", + APIVersion: "elasticsearch.k8s.elastic.co/v1", + }) + } + return pvc + } + + pvcFixturePtr := func(name string, ownerRefs ...string) *corev1.PersistentVolumeClaim { + pvc := pvcFixture(name, ownerRefs...) 
+ return &pvc + } + + tests := []struct { + name string + args args + want []corev1.PersistentVolumeClaim + wantErr bool + }{ + { + name: "remove references on DeleteOnScaledownOnlyPolicy", + args: args{ + c: k8s.NewFakeClient(pvcFixturePtr("es-data-0", "es")), + es: esFixture(esv1.DeleteOnScaledownOnlyPolicy), + }, + want: []corev1.PersistentVolumeClaim{pvcFixture("es-data-0")}, + wantErr: false, + }, + { + name: "add references for DeleteOnScaledownAndClusterDeletionPolicy", + args: args{ + c: k8s.NewFakeClient(pvcFixturePtr("es-data-0")), + es: esFixture(esv1.DeleteOnScaledownAndClusterDeletionPolicy), + }, + want: []corev1.PersistentVolumeClaim{pvcFixture("es-data-0", "es")}, + wantErr: false, + }, + { + name: "keep references set by other controllers when removing owner ref", + args: args{ + c: k8s.NewFakeClient(pvcFixturePtr("es-data-0", "es", "some-other-ref")), + es: esFixture(esv1.DeleteOnScaledownOnlyPolicy), + }, + want: []corev1.PersistentVolumeClaim{pvcFixture("es-data-0", "some-other-ref")}, + wantErr: false, + }, + { + name: "keep references set by other controllers when setting owner ref", + args: args{ + c: k8s.NewFakeClient(pvcFixturePtr("es-data-0", "some-other-ref")), + es: esFixture(esv1.DeleteOnScaledownAndClusterDeletionPolicy), + }, + want: []corev1.PersistentVolumeClaim{pvcFixture("es-data-0", "some-other-ref", "es")}, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := reconcilePVCOwnerRefs(tt.args.c, tt.args.es); (err != nil) != tt.wantErr { + t.Errorf("reconcilePVCOwnerRefs() error = %v, wantErr %v", err, tt.wantErr) + } + var pvcs corev1.PersistentVolumeClaimList + if err := tt.args.c.List(context.Background(), &pvcs); err != nil { + t.Errorf("reconcilePVCOwnerRefs(), failed to list pvcs: %v", err) + } + require.Equal(t, len(tt.want), len(pvcs.Items), "unexpected number of pvcs") + for i := 0; i < len(tt.want); i++ { + comparison.AssertEqual(t, &pvcs.Items[i], &tt.want[i]) + } + }) + } +} diff --git a/pkg/controller/elasticsearch/nodespec/statefulset.go b/pkg/controller/elasticsearch/nodespec/statefulset.go index c3511b330e..f92c7f711c 100644 --- a/pkg/controller/elasticsearch/nodespec/statefulset.go +++ b/pkg/controller/elasticsearch/nodespec/statefulset.go @@ -5,12 +5,6 @@ package nodespec import ( - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - esv1 "github.com/elastic/cloud-on-k8s/pkg/apis/elasticsearch/v1" "github.com/elastic/cloud-on-k8s/pkg/controller/common/defaults" "github.com/elastic/cloud-on-k8s/pkg/controller/common/hash" @@ -21,10 +15,9 @@ import ( "github.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/sset" esvolume "github.com/elastic/cloud-on-k8s/pkg/controller/elasticsearch/volume" "github.com/elastic/cloud-on-k8s/pkg/utils/k8s" -) - -var ( - f = false + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // HeadlessServiceName returns the name of the headless service for the given StatefulSet. 
@@ -98,10 +91,7 @@ func BuildStatefulSet( if existingSset, exists := existingStatefulSets.GetByName(statefulSetName); exists { existingClaims = existingSset.Spec.VolumeClaimTemplates } - claims, err := setVolumeClaimsControllerReference(nodeSet.VolumeClaimTemplates, existingClaims, es) - if err != nil { - return appsv1.StatefulSet{}, err - } + claims := preserveExistingVolumeClaimsOwnerRefs(nodeSet.VolumeClaimTemplates, existingClaims) sset := appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ @@ -136,13 +126,12 @@ func BuildStatefulSet( return sset, nil } -func setVolumeClaimsControllerReference( +func preserveExistingVolumeClaimsOwnerRefs( persistentVolumeClaims []corev1.PersistentVolumeClaim, existingClaims []corev1.PersistentVolumeClaim, - es esv1.Elasticsearch, -) ([]corev1.PersistentVolumeClaim, error) { - // set the owner reference of all volume claims to the ES resource, - // so PVC get deleted automatically upon Elasticsearch resource deletion +) []corev1.PersistentVolumeClaim { + // before https://github.com/elastic/cloud-on-k8s/pull/4050, we used to set an ownerRef into all claims + // now keep existing ownerReferences for backwards compatibility but don't add new ones claims := make([]corev1.PersistentVolumeClaim, 0, len(persistentVolumeClaims)) for _, claim := range persistentVolumeClaims { if existingClaim := sset.GetClaim(existingClaims, claim.Name); existingClaim != nil { @@ -156,30 +145,10 @@ func setVolumeClaimsControllerReference( // Having ownerReferences with a "deprecated" apiVersion is fine, and does not prevent resources // from being garbage collected as expected. claim.OwnerReferences = existingClaim.OwnerReferences - - claims = append(claims, claim) - continue - } - - // Temporarily set the claim namespace to match the ES namespace, then set it back to empty. - // `SetControllerReference` does a safety check on object vs. owner namespace mismatch to cover common errors, - // but in this particular case we don't need to set a namespace in the claim template. - claim.Namespace = es.Namespace - if err := controllerutil.SetControllerReference(&es, &claim, scheme.Scheme); err != nil { - return nil, err - } - claim.Namespace = "" - - // Set block owner deletion to false as the statefulset controller might not be able to do that if it cannot - // set finalizers on the resource. 
- // See https://github.com/elastic/cloud-on-k8s/issues/1884 - refs := claim.OwnerReferences - for i := range refs { - refs[i].BlockOwnerDeletion = &f } claims = append(claims, claim) } - return claims, nil + return claims } // UpdateReplicas updates the given StatefulSet with the given replicas, diff --git a/pkg/controller/elasticsearch/nodespec/statefulset_test.go b/pkg/controller/elasticsearch/nodespec/statefulset_test.go index 0a9e47ece9..d79e853963 100644 --- a/pkg/controller/elasticsearch/nodespec/statefulset_test.go +++ b/pkg/controller/elasticsearch/nodespec/statefulset_test.go @@ -7,12 +7,11 @@ package nodespec import ( "testing" + esv1 "github.com/elastic/cloud-on-k8s/pkg/apis/elasticsearch/v1" + controllerscheme "github.com/elastic/cloud-on-k8s/pkg/controller/common/scheme" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - esv1 "github.com/elastic/cloud-on-k8s/pkg/apis/elasticsearch/v1" - controllerscheme "github.com/elastic/cloud-on-k8s/pkg/controller/common/scheme" ) func Test_setVolumeClaimsControllerReference(t *testing.T) { @@ -32,12 +31,13 @@ func Test_setVolumeClaimsControllerReference(t *testing.T) { } tests := []struct { name string + es esv1.Elasticsearch persistentVolumeClaims []corev1.PersistentVolumeClaim existingClaims []corev1.PersistentVolumeClaim wantClaims []corev1.PersistentVolumeClaim }{ { - name: "should set the ownerRef when building a new StatefulSet", + name: "should not set the ownerRef when building a new StatefulSet", persistentVolumeClaims: []corev1.PersistentVolumeClaim{ {ObjectMeta: metav1.ObjectMeta{Name: "elasticsearch-data"}}, }, @@ -46,62 +46,12 @@ func Test_setVolumeClaimsControllerReference(t *testing.T) { { ObjectMeta: metav1.ObjectMeta{ Name: "elasticsearch-data", - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: es.APIVersion, - Kind: es.Kind, - Name: es.Name, - UID: es.UID, - Controller: &varTrue, - BlockOwnerDeletion: &varFalse, - }, - }, - }, - }, - }, - }, - { - name: "should set the ownerRef on user-provided claims when building a new StatefulSet", - persistentVolumeClaims: []corev1.PersistentVolumeClaim{ - {ObjectMeta: metav1.ObjectMeta{Name: "elasticsearch-data"}}, - {ObjectMeta: metav1.ObjectMeta{Name: "user-provided"}}, - }, - existingClaims: nil, - wantClaims: []corev1.PersistentVolumeClaim{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "elasticsearch-data", - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: es.APIVersion, - Kind: es.Kind, - Name: es.Name, - UID: es.UID, - Controller: &varTrue, - BlockOwnerDeletion: &varFalse, - }, - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "user-provided", - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: es.APIVersion, - Kind: es.Kind, - Name: es.Name, - UID: es.UID, - Controller: &varTrue, - BlockOwnerDeletion: &varFalse, - }, - }, }, }, }, }, { - name: "should inherit existing claim ownerRefs that may have a different apiVersion", + name: "should inherit existing claim ownerRefs for backwards compatibility (that may also have a different apiVersion)", persistentVolumeClaims: []corev1.PersistentVolumeClaim{ {ObjectMeta: metav1.ObjectMeta{Name: "elasticsearch-data"}}, {ObjectMeta: metav1.ObjectMeta{Name: "user-provided"}}, @@ -177,8 +127,7 @@ func Test_setVolumeClaimsControllerReference(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := setVolumeClaimsControllerReference(tt.persistentVolumeClaims, tt.existingClaims, es) - 
require.NoError(t, err) + got := preserveExistingVolumeClaimsOwnerRefs(tt.persistentVolumeClaims, tt.existingClaims) require.Equal(t, tt.wantClaims, got) }) } diff --git a/pkg/controller/elasticsearch/validation/webhook_test.go b/pkg/controller/elasticsearch/validation/webhook_test.go index 2c31c41271..f837f0e019 100644 --- a/pkg/controller/elasticsearch/validation/webhook_test.go +++ b/pkg/controller/elasticsearch/validation/webhook_test.go @@ -43,6 +43,9 @@ func Test_validatingWebhook_Handle(t *testing.T) { }{ { name: "accept valid creation", + fields: fields{ + client: k8s.NewFakeClient(), + }, args: args{ req: admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{ Operation: admissionv1.Create, @@ -58,6 +61,9 @@ func Test_validatingWebhook_Handle(t *testing.T) { }, { name: "reject invalid creation (no version provided)", + fields: fields{ + client: k8s.NewFakeClient(), + }, args: args{ req: admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{ Operation: admissionv1.Create, diff --git a/pkg/utils/k8s/k8sutils.go b/pkg/utils/k8s/k8sutils.go index 6dbe34e832..f8798a48ea 100644 --- a/pkg/utils/k8s/k8sutils.go +++ b/pkg/utils/k8s/k8sutils.go @@ -162,19 +162,6 @@ func PodsMatchingLabels(c Client, namespace string, labels map[string]string) ([ return pods.Items, nil } -// OverrideControllerReference overrides the controller owner reference with the given owner reference. -func OverrideControllerReference(obj metav1.Object, newOwner metav1.OwnerReference) { - owners := obj.GetOwnerReferences() - - ref := indexOfCtrlRef(owners) - if ref == -1 { - obj.SetOwnerReferences([]metav1.OwnerReference{newOwner}) - return - } - owners[ref] = newOwner - obj.SetOwnerReferences(owners) -} - func indexOfCtrlRef(owners []metav1.OwnerReference) int { for index, r := range owners { if r.Controller != nil && *r.Controller { diff --git a/pkg/utils/k8s/k8sutils_test.go b/pkg/utils/k8s/k8sutils_test.go index b9ae8eea87..fd5c5bec22 100644 --- a/pkg/utils/k8s/k8sutils_test.go +++ b/pkg/utils/k8s/k8sutils_test.go @@ -147,81 +147,6 @@ func TestGetServiceIPAddresses(t *testing.T) { } } -func TestOverrideControllerReference(t *testing.T) { - - ownerRefFixture := func(name string, controller bool) metav1.OwnerReference { - return metav1.OwnerReference{ - APIVersion: "v1", - Kind: "some", - Name: name, - UID: "uid", - Controller: &controller, - } - } - type args struct { - obj metav1.Object - newOwner metav1.OwnerReference - } - tests := []struct { - name string - args args - assertion func(object metav1.Object) - }{ - { - name: "no existing controller", - args: args{ - obj: &corev1.Secret{}, - newOwner: ownerRefFixture("obj1", true), - }, - assertion: func(object metav1.Object) { - require.Equal(t, object.GetOwnerReferences(), []metav1.OwnerReference{ownerRefFixture("obj1", true)}) - }, - }, - { - name: "replace existing controller", - args: args{ - obj: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - OwnerReferences: []metav1.OwnerReference{ - ownerRefFixture("obj1", true), - }, - }, - }, - newOwner: ownerRefFixture("obj2", true), - }, - assertion: func(object metav1.Object) { - require.Equal(t, object.GetOwnerReferences(), []metav1.OwnerReference{ - ownerRefFixture("obj2", true)}) - }, - }, - { - name: "replace existing controller preserving existing references", - args: args{ - obj: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - OwnerReferences: []metav1.OwnerReference{ - ownerRefFixture("other", false), - ownerRefFixture("obj1", true), - }, - }, - }, - newOwner: ownerRefFixture("obj2", 
true), - }, - assertion: func(object metav1.Object) { - require.Equal(t, object.GetOwnerReferences(), []metav1.OwnerReference{ - ownerRefFixture("other", false), - ownerRefFixture("obj2", true)}) - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - OverrideControllerReference(tt.args.obj, tt.args.newOwner) - tt.assertion(tt.args.obj) - }) - } -} - func TestCompareStorageRequests(t *testing.T) { type args struct { initial corev1.ResourceRequirements diff --git a/pkg/utils/k8s/owner_refs.go b/pkg/utils/k8s/owner_refs.go new file mode 100644 index 0000000000..5ebc806083 --- /dev/null +++ b/pkg/utils/k8s/owner_refs.go @@ -0,0 +1,55 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package k8s + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// OverrideControllerReference overrides the controller owner reference with the given owner reference. +func OverrideControllerReference(obj metav1.Object, newOwner metav1.OwnerReference) { + owners := obj.GetOwnerReferences() + + ref := indexOfCtrlRef(owners) + if ref == -1 { + obj.SetOwnerReferences([]metav1.OwnerReference{newOwner}) + return + } + owners[ref] = newOwner + obj.SetOwnerReferences(owners) +} + +func HasOwner(resource, owner metav1.Object) bool { + if owner == nil || resource == nil { + return false + } + found, _ := FindOwner(resource, owner) + return found +} + +func RemoveOwner(resource, owner metav1.Object) { + if resource == nil || owner == nil { + return + } + found, index := FindOwner(resource, owner) + if !found { + return + } + owners := resource.GetOwnerReferences() + // remove the owner at index i from the slice + newOwners := append(owners[:index], owners[index+1:]...) + resource.SetOwnerReferences(newOwners) +} + +func FindOwner(resource, owner metav1.Object) (found bool, index int) { + if owner == nil || resource == nil { + return false, 0 + } + ownerRefs := resource.GetOwnerReferences() + for i := range ownerRefs { + if ownerRefs[i].Name == owner.GetName() && ownerRefs[i].UID == owner.GetUID() { + return true, i + } + } + return false, 0 +} diff --git a/pkg/utils/k8s/owner_refs_test.go b/pkg/utils/k8s/owner_refs_test.go new file mode 100644 index 0000000000..44a1d5dbc9 --- /dev/null +++ b/pkg/utils/k8s/owner_refs_test.go @@ -0,0 +1,285 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package k8s + +import ( + "testing" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +func TestOverrideControllerReference(t *testing.T) { + + ownerRefFixture := func(name string, controller bool) metav1.OwnerReference { + return metav1.OwnerReference{ + APIVersion: "v1", + Kind: "some", + Name: name, + UID: "uid", + Controller: &controller, + } + } + type args struct { + obj metav1.Object + newOwner metav1.OwnerReference + } + tests := []struct { + name string + args args + assertion func(object metav1.Object) + }{ + { + name: "no existing controller", + args: args{ + obj: &corev1.Secret{}, + newOwner: ownerRefFixture("obj1", true), + }, + assertion: func(object metav1.Object) { + require.Equal(t, object.GetOwnerReferences(), []metav1.OwnerReference{ownerRefFixture("obj1", true)}) + }, + }, + { + name: "replace existing controller", + args: args{ + obj: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + ownerRefFixture("obj1", true), + }, + }, + }, + newOwner: ownerRefFixture("obj2", true), + }, + assertion: func(object metav1.Object) { + require.Equal(t, object.GetOwnerReferences(), []metav1.OwnerReference{ + ownerRefFixture("obj2", true)}) + }, + }, + { + name: "replace existing controller preserving existing references", + args: args{ + obj: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + ownerRefFixture("other", false), + ownerRefFixture("obj1", true), + }, + }, + }, + newOwner: ownerRefFixture("obj2", true), + }, + assertion: func(object metav1.Object) { + require.Equal(t, object.GetOwnerReferences(), []metav1.OwnerReference{ + ownerRefFixture("other", false), + ownerRefFixture("obj2", true)}) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + OverrideControllerReference(tt.args.obj, tt.args.newOwner) + tt.assertion(tt.args.obj) + }) + } +} + +func sampleOwner() *corev1.Secret { + // we use a secret here but it could be any Elasticsearch | Kibana | ApmServer | etc. 
+ return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "owner-name", UID: "owner-id"}, + TypeMeta: metav1.TypeMeta{Kind: "Secret"}, + } +} + +func addOwner(secret *corev1.Secret, name string, uid types.UID) *corev1.Secret { + secret = secret.DeepCopy() + secret.OwnerReferences = append(secret.OwnerReferences, metav1.OwnerReference{Name: name, UID: uid}) + return secret +} + +func Test_hasOwner(t *testing.T) { + owner := sampleOwner() + type args struct { + resource metav1.Object + owner metav1.Object + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "owner is referenced (same name and uid)", + args: args{ + resource: addOwner(&corev1.Secret{}, owner.Name, owner.UID), + owner: owner, + }, + want: true, + }, + { + name: "owner referenced among other owner references", + args: args{ + resource: addOwner(addOwner(&corev1.Secret{}, "another-name", types.UID("another-id")), owner.Name, owner.UID), + owner: owner, + }, + want: true, + }, + { + name: "owner not referenced", + args: args{ + resource: addOwner(addOwner(&corev1.Secret{}, "another-name", types.UID("another-id")), "yet-another-name", "yet-another-uid"), + owner: owner, + }, + want: false, + }, + { + name: "no owner ref", + args: args{ + resource: &corev1.Secret{}, + owner: owner, + }, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := HasOwner(tt.args.resource, tt.args.owner); got != tt.want { + t.Errorf("hasOwner() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_removeOwner(t *testing.T) { + type args struct { + resource metav1.Object + owner metav1.Object + } + tests := []struct { + name string + args args + wantResource *corev1.Secret + }{ + { + name: "no owner: no-op", + args: args{ + resource: &corev1.Secret{}, + owner: sampleOwner(), + }, + wantResource: &corev1.Secret{}, + }, + { + name: "different owner: no-op", + args: args{ + resource: addOwner(&corev1.Secret{}, "another-owner-name", "another-owner-id"), + owner: sampleOwner(), + }, + wantResource: addOwner(&corev1.Secret{}, "another-owner-name", "another-owner-id"), + }, + { + name: "remove the single owner", + args: args{ + resource: addOwner(&corev1.Secret{}, sampleOwner().Name, sampleOwner().UID), + owner: sampleOwner(), + }, + wantResource: &corev1.Secret{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{}}}, + }, + { + name: "remove the owner from a list of owners", + args: args{ + resource: addOwner(addOwner(&corev1.Secret{}, sampleOwner().Name, sampleOwner().UID), "another-owner", "another-uid"), + owner: sampleOwner(), + }, + wantResource: addOwner(&corev1.Secret{}, "another-owner", "another-uid"), + }, + { + name: "owner listed twice in the list (shouldn't happen): remove the first occurrence", + args: args{ + resource: addOwner(addOwner(&corev1.Secret{}, sampleOwner().Name, sampleOwner().UID), sampleOwner().Name, sampleOwner().UID), + owner: sampleOwner(), + }, + wantResource: addOwner(&corev1.Secret{}, sampleOwner().Name, sampleOwner().UID), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + RemoveOwner(tt.args.resource, tt.args.owner) + require.Equal(t, tt.wantResource, tt.args.resource) + }) + } +} + +func Test_findOwner(t *testing.T) { + type args struct { + resource metav1.Object + owner metav1.Object + } + tests := []struct { + name string + args args + wantFound bool + wantIndex int + }{ + { + name: "no owner: not found", + args: args{ + resource: &corev1.Secret{}, + owner: sampleOwner(), + }, 
+ wantFound: false, + wantIndex: 0, + }, + { + name: "different owner: not found", + args: args{ + resource: addOwner(&corev1.Secret{}, "another-owner-name", "another-owner-id"), + owner: sampleOwner(), + }, + wantFound: false, + wantIndex: 0, + }, + { + name: "owner at index 0", + args: args{ + resource: addOwner(&corev1.Secret{}, sampleOwner().Name, sampleOwner().UID), + owner: sampleOwner(), + }, + wantFound: true, + wantIndex: 0, + }, + { + name: "owner at index 1", + args: args{ + resource: addOwner(addOwner(&corev1.Secret{}, "another-owner", "another-uid"), sampleOwner().Name, sampleOwner().UID), + owner: sampleOwner(), + }, + wantFound: true, + wantIndex: 1, + }, + { + name: "owner listed twice in the list (shouldn't happen): return the first occurrence (index 0)", + args: args{ + resource: addOwner(addOwner(&corev1.Secret{}, sampleOwner().Name, sampleOwner().UID), sampleOwner().Name, sampleOwner().UID), + owner: sampleOwner(), + }, + wantFound: true, + wantIndex: 0, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotFound, gotIndex := FindOwner(tt.args.resource, tt.args.owner) + if gotFound != tt.wantFound { + t.Errorf("findOwner() gotFound = %v, want %v", gotFound, tt.wantFound) + } + if gotIndex != tt.wantIndex { + t.Errorf("findOwner() gotIndex = %v, want %v", gotIndex, tt.wantIndex) + } + }) + } +} diff --git a/test/e2e/es/volume_test.go b/test/e2e/es/volume_test.go index c1b608c4e6..781c4c35e9 100644 --- a/test/e2e/es/volume_test.go +++ b/test/e2e/es/volume_test.go @@ -37,6 +37,43 @@ func TestVolumeEmptyDir(t *testing.T) { RunSequential(t) } +func TestVolumeRetention(t *testing.T) { + var dataCheck *elasticsearch.DataIntegrityCheck + b := elasticsearch.NewBuilder("test-volume-retain-policy"). + WithESMasterDataNodes(3, elasticsearch.DefaultResources). + WithVolumeClaimDeletePolicy(esv1.DeleteOnScaledownOnlyPolicy) + + // Create a cluster configured to retain its PVCs and ingest data + test.Sequence(nil, func(k *test.K8sClient) test.StepList { + return test.StepList{ + { + Name: "Ingest verification sample data", + Test: func(t *testing.T) { + dataCheck = elasticsearch.NewDataIntegrityCheck(k, b) + require.NoError(t, dataCheck.Init()) + }, + }, + } + }, b).RunSequential(t) + + // The cluster has now been deleted as part of our usual test step sequence, but PVCs have been retained. + // Recreate it just this time without retaining the PVCs. + + b2 := b.WithVolumeClaimDeletePolicy(esv1.DeleteOnScaledownAndClusterDeletionPolicy) + test.Sequence(nil, func(k *test.K8sClient) test.StepList { + return test.StepList{ + { + Name: "Verify data has been retained after cluster recreation", + Test: func(t *testing.T) { + require.NoError(t, dataCheck.Verify()) + }, + }, + } + }, b2).RunSequential(t) + + // The cluster has now been deleted including its PVCs as evidenced by our usual deletion test step sequence. +} + func TestVolumeMultiDataPath(t *testing.T) { b := elasticsearch.NewBuilder("test-es-multi-data-path"). 
WithNodeSet(esv1.NodeSet{ diff --git a/test/e2e/test/elasticsearch/builder.go b/test/e2e/test/elasticsearch/builder.go index 45dafccd83..6f89e25d9a 100644 --- a/test/e2e/test/elasticsearch/builder.go +++ b/test/e2e/test/elasticsearch/builder.go @@ -266,6 +266,11 @@ func (b Builder) WithESSecureSettings(secretNames ...string) Builder { return b } +func (b Builder) WithVolumeClaimDeletePolicy(policy esv1.VolumeClaimDeletePolicy) Builder { + b.Elasticsearch.Spec.VolumeClaimDeletePolicy = policy + return b +} + func (b Builder) WithEmptyDirVolumes() Builder { for i := range b.Elasticsearch.Spec.NodeSets { // remove any default claim diff --git a/test/e2e/test/elasticsearch/steps_deletion.go b/test/e2e/test/elasticsearch/steps_deletion.go index 98a4093789..1f99ba9435 100644 --- a/test/e2e/test/elasticsearch/steps_deletion.go +++ b/test/e2e/test/elasticsearch/steps_deletion.go @@ -74,6 +74,9 @@ func (b Builder) DeletionTestSteps(k *test.K8sClient) test.StepList { } return nil }), + Skip: func() bool { + return b.Elasticsearch.Spec.VolumeClaimDeletePolicy == esv1.DeleteOnScaledownOnlyPolicy + }, }, { Name: "Soft-owned secrets should eventually be removed",