From af6a16fcea7d2097662e1fe953abb93b3da6b011 Mon Sep 17 00:00:00 2001
From: Sunny Song
Date: Tue, 4 Apr 2023 20:52:12 +0000
Subject: [PATCH] Update sig-storage-lib-external-provisioner to v8.0.1

---
 go.mod                                          |  2 +-
 go.sum                                          |  4 +-
 vendor/modules.txt                              |  2 +-
 .../v8/controller/controller.go                 | 65 +++++++++++--------
 4 files changed, 41 insertions(+), 32 deletions(-)

diff --git a/go.mod b/go.mod
index f601323a4d..e0148bcb1b 100644
--- a/go.mod
+++ b/go.mod
@@ -26,7 +26,7 @@ require (
 	k8s.io/csi-translation-lib v0.25.2
 	k8s.io/klog/v2 v2.80.1
 	sigs.k8s.io/controller-runtime v0.13.0
-	sigs.k8s.io/sig-storage-lib-external-provisioner/v8 v8.0.0
+	sigs.k8s.io/sig-storage-lib-external-provisioner/v8 v8.0.1
 )
 
 require (
diff --git a/go.sum b/go.sum
index d0aa08cc2a..8fbe7dd190 100644
--- a/go.sum
+++ b/go.sum
@@ -946,8 +946,8 @@ sigs.k8s.io/controller-runtime v0.13.0 h1:iqa5RNciy7ADWnIc8QxCbOX5FEKVR3uxVxKHRM
 sigs.k8s.io/controller-runtime v0.13.0/go.mod h1:Zbz+el8Yg31jubvAEyglRZGdLAjplZl+PgtYNI6WNTI=
 sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
 sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
-sigs.k8s.io/sig-storage-lib-external-provisioner/v8 v8.0.0 h1:vQUoaDxbberC3UwvE+zauyOMkpWlleaVgc75LoDOyy4=
-sigs.k8s.io/sig-storage-lib-external-provisioner/v8 v8.0.0/go.mod h1:ejoxC3K6lnUtjUanKStWadRVnwIuyRPNJGQ4dkExDao=
+sigs.k8s.io/sig-storage-lib-external-provisioner/v8 v8.0.1 h1:Y7QU+Cb0j8n7r+5aOu5+I/mILMtkeCDVzipqFiDw8+E=
+sigs.k8s.io/sig-storage-lib-external-provisioner/v8 v8.0.1/go.mod h1:ejoxC3K6lnUtjUanKStWadRVnwIuyRPNJGQ4dkExDao=
 sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
 sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
 sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/vendor/modules.txt b/vendor/modules.txt
index b4f5852874..dfa72d916c 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -771,7 +771,7 @@ sigs.k8s.io/controller-runtime/pkg/log
 ## explicit; go 1.18
 sigs.k8s.io/json
 sigs.k8s.io/json/internal/golang/encoding/json
-# sigs.k8s.io/sig-storage-lib-external-provisioner/v8 v8.0.0
+# sigs.k8s.io/sig-storage-lib-external-provisioner/v8 v8.0.1
 ## explicit; go 1.16
 sigs.k8s.io/sig-storage-lib-external-provisioner/v8/controller
 sigs.k8s.io/sig-storage-lib-external-provisioner/v8/controller/metrics
diff --git a/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/v8/controller/controller.go b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/v8/controller/controller.go
index 83f19b3677..7d13adac80 100644
--- a/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/v8/controller/controller.go
+++ b/vendor/sigs.k8s.io/sig-storage-lib-external-provisioner/v8/controller/controller.go
@@ -1386,6 +1386,10 @@ func (ctrl *ProvisionController) provisionClaimOperation(ctx context.Context, cl
 			selectedNode, err = ctrl.client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) // TODO (verult) cache Nodes
 		}
 		if err != nil {
+			// if node does not exist, reschedule and remove volume.kubernetes.io/selected-node annotation
+			if apierrs.IsNotFound(err) {
+				return ctrl.provisionVolumeErrorHandling(ctx, ProvisioningReschedule, err, claim, operation)
+			}
 			err = fmt.Errorf("failed to get target node: %v", err)
 			ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", err.Error())
 			return ProvisioningNoChange, err
@@ -1408,35 +1412,9 @@ func (ctrl *ProvisionController) provisionClaimOperation(ctx context.Context, cl
 			klog.Info(logOperation(operation, "volume provision ignored: %v", ierr))
 			return ProvisioningFinished, errStopProvision
 		}
-		err = fmt.Errorf("failed to provision volume with StorageClass %q: %v", claimClass, err)
-		ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", err.Error())
-		if _, ok := claim.Annotations[annSelectedNode]; ok && result == ProvisioningReschedule {
-			// For dynamic PV provisioning with delayed binding, the provisioner may fail
-			// because the node is wrong (permanent error) or currently unusable (not enough
-			// capacity). If the provisioner wants to give up scheduling with the currently
-			// selected node, then it can ask for that by returning ProvisioningReschedule
-			// as state.
-			//
-			// `selectedNode` must be removed to notify scheduler to schedule again.
-			if errLabel := ctrl.rescheduleProvisioning(ctx, claim); errLabel != nil {
-				klog.Info(logOperation(operation, "volume rescheduling failed: %v", errLabel))
-				// If unsetting that label fails in ctrl.rescheduleProvisioning, we
-				// keep the volume in the work queue as if the provisioner had
-				// returned ProvisioningFinished and simply try again later.
-				return ProvisioningFinished, err
-			}
-			// Label was removed, stop working on the volume.
-			klog.Info(logOperation(operation, "volume rescheduled because: %v", err))
-			return ProvisioningFinished, errStopProvision
-		}
-		// ProvisioningReschedule shouldn't have been returned for volumes without selected node,
-		// but if we get it anyway, then treat it like ProvisioningFinished because we cannot
-		// reschedule.
-		if result == ProvisioningReschedule {
-			result = ProvisioningFinished
-		}
-		return result, err
+		err = fmt.Errorf("failed to provision volume with StorageClass %q: %v", claimClass, err)
+		return ctrl.provisionVolumeErrorHandling(ctx, result, err, claim, operation)
 	}
 
 	klog.Info(logOperation(operation, "volume %q provisioned", volume.Name))
 
@@ -1463,6 +1441,37 @@ func (ctrl *ProvisionController) provisionClaimOperation(ctx context.Context, cl
 	return ProvisioningFinished, nil
 }
 
+func (ctrl *ProvisionController) provisionVolumeErrorHandling(ctx context.Context, result ProvisioningState, err error, claim *v1.PersistentVolumeClaim, operation string) (ProvisioningState, error) {
+	ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", err.Error())
+	if _, ok := claim.Annotations[annSelectedNode]; ok && result == ProvisioningReschedule {
+		// For dynamic PV provisioning with delayed binding, the provisioner may fail
+		// because the node is wrong (permanent error) or currently unusable (not enough
+		// capacity). If the provisioner wants to give up scheduling with the currently
+		// selected node, then it can ask for that by returning ProvisioningReschedule
+		// as state.
+		//
+		// `selectedNode` must be removed to notify scheduler to schedule again.
+		if errLabel := ctrl.rescheduleProvisioning(ctx, claim); errLabel != nil {
+			klog.Info(logOperation(operation, "volume rescheduling failed: %v", errLabel))
+			// If unsetting that label fails in ctrl.rescheduleProvisioning, we
+			// keep the volume in the work queue as if the provisioner had
+			// returned ProvisioningFinished and simply try again later.
+			return ProvisioningFinished, err
+		}
+		// Label was removed, stop working on the volume.
+		klog.Info(logOperation(operation, "volume rescheduled because: %v", err))
+		return ProvisioningFinished, errStopProvision
+	}
+
+	// ProvisioningReschedule shouldn't have been returned for volumes without selected node,
+	// but if we get it anyway, then treat it like ProvisioningFinished because we cannot
+	// reschedule.
+	if result == ProvisioningReschedule {
+		result = ProvisioningFinished
+	}
+	return result, err
+}
+
 // deleteVolumeOperation attempts to delete the volume backing the given
 // volume. Returns error, which indicates whether deletion should be retried
 // (requeue the volume) or not