diff --git a/allocator/minmax.go b/allocator/minmax.go
index 0db518d..e3d7865 100644
--- a/allocator/minmax.go
+++ b/allocator/minmax.go
@@ -44,7 +44,7 @@ var (
     ErrInternal = errors.New("internal error")
 )
 
-//MinMaxAllocator defines allocator struct.
+// MinMaxAllocator defines allocator struct.
 type MinMaxAllocator struct {
     lock sync.Mutex
     min  int
@@ -79,7 +79,7 @@ func NewMinMaxAllocator(min, max int) (*MinMaxAllocator, error) {
     }, nil
 }
 
-//SetRange defines the range/pool with provided min and max values.
+// SetRange defines the range/pool with the provided min and max values.
 func (a *MinMaxAllocator) SetRange(min, max int) error {
     if min > max {
         return ErrInvalidRange
@@ -108,7 +108,7 @@ func (a *MinMaxAllocator) SetRange(min, max int) error {
     return nil
 }
 
-//Allocate allocates provided value in the allocator and mark it as used.
+// Allocate allocates the provided value in the allocator and marks it as used.
 func (a *MinMaxAllocator) Allocate(i int) (bool, error) {
     a.lock.Lock()
     defer a.lock.Unlock()
@@ -127,7 +127,7 @@ func (a *MinMaxAllocator) Allocate(i int) (bool, error) {
     return true, nil
 }
 
-//AllocateNext allocates next value from the allocator.
+// AllocateNext allocates the next free value from the allocator.
 func (a *MinMaxAllocator) AllocateNext() (int, bool, error) {
     a.lock.Lock()
     defer a.lock.Unlock()
@@ -150,7 +150,7 @@ func (a *MinMaxAllocator) AllocateNext() (int, bool, error) {
     return 0, false, ErrInternal
 }
 
-//Release free/delete provided value from the allocator.
+// Release frees/deletes the provided value from the allocator.
 func (a *MinMaxAllocator) Release(i int) error {
     a.lock.Lock()
     defer a.lock.Unlock()
@@ -173,7 +173,7 @@ func (a *MinMaxAllocator) has(i int) bool {
     return ok
 }
 
-//Has check whether the provided value is used in the allocator
+// Has checks whether the provided value is used in the allocator.
 func (a *MinMaxAllocator) Has(i int) bool {
     a.lock.Lock()
     defer a.lock.Unlock()
@@ -181,7 +181,7 @@ func (a *MinMaxAllocator) Has(i int) bool {
     return a.has(i)
 }
 
-//Free returns the number of free values in the allocator.
+// Free returns the number of free values in the allocator.
 func (a *MinMaxAllocator) Free() int {
     a.lock.Lock()
     defer a.lock.Unlock()
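
For orientation before the next file: MinMaxAllocator hands out integers from a closed [min, max] pool (gidallocator, further down, uses it to manage GID ranges). Below is a minimal usage sketch based only on the signatures visible in this diff; the import path and the concrete values are illustrative assumptions, not taken from the repository:

    package main

    import (
        "fmt"

        // assumed import path; adjust to wherever the allocator package is vendored
        "sigs.k8s.io/sig-storage-lib-external-provisioner/allocator"
    )

    func main() {
        // a pool of GIDs (or any ints) covering the closed range [2000, 2005]
        a, err := allocator.NewMinMaxAllocator(2000, 2005)
        if err != nil {
            panic(err) // ErrInvalidRange when min > max
        }
        reserved, _ := a.Allocate(2003) // mark a specific value as used
        next, ok, _ := a.AllocateNext() // grab whichever value is free next
        fmt.Println(reserved, ok, next)
        fmt.Println(a.Has(2003)) // true while 2003 is held
        fmt.Println(a.Free())    // how many values remain unused
        _ = a.Release(2003)      // hand 2003 back to the pool
    }
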
diff --git a/controller/controller.go b/controller/controller.go
index 83f19b3..7d13ada 100644
--- a/controller/controller.go
+++ b/controller/controller.go
@@ -1386,6 +1386,10 @@ func (ctrl *ProvisionController) provisionClaimOperation(ctx context.Context, cl
         selectedNode, err = ctrl.client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) // TODO (verult) cache Nodes
     }
     if err != nil {
+        // if node does not exist, reschedule and remove volume.kubernetes.io/selected-node annotation
+        if apierrs.IsNotFound(err) {
+            return ctrl.provisionVolumeErrorHandling(ctx, ProvisioningReschedule, err, claim, operation)
+        }
         err = fmt.Errorf("failed to get target node: %v", err)
         ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", err.Error())
         return ProvisioningNoChange, err
@@ -1408,35 +1412,9 @@ func (ctrl *ProvisionController) provisionClaimOperation(ctx context.Context, cl
             klog.Info(logOperation(operation, "volume provision ignored: %v", ierr))
             return ProvisioningFinished, errStopProvision
         }
-        err = fmt.Errorf("failed to provision volume with StorageClass %q: %v", claimClass, err)
-        ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", err.Error())
-        if _, ok := claim.Annotations[annSelectedNode]; ok && result == ProvisioningReschedule {
-            // For dynamic PV provisioning with delayed binding, the provisioner may fail
-            // because the node is wrong (permanent error) or currently unusable (not enough
-            // capacity). If the provisioner wants to give up scheduling with the currently
-            // selected node, then it can ask for that by returning ProvisioningReschedule
-            // as state.
-            //
-            // `selectedNode` must be removed to notify scheduler to schedule again.
-            if errLabel := ctrl.rescheduleProvisioning(ctx, claim); errLabel != nil {
-                klog.Info(logOperation(operation, "volume rescheduling failed: %v", errLabel))
-                // If unsetting that label fails in ctrl.rescheduleProvisioning, we
-                // keep the volume in the work queue as if the provisioner had
-                // returned ProvisioningFinished and simply try again later.
-                return ProvisioningFinished, err
-            }
-            // Label was removed, stop working on the volume.
-            klog.Info(logOperation(operation, "volume rescheduled because: %v", err))
-            return ProvisioningFinished, errStopProvision
-        }
-        // ProvisioningReschedule shouldn't have been returned for volumes without selected node,
-        // but if we get it anyway, then treat it like ProvisioningFinished because we cannot
-        // reschedule.
-        if result == ProvisioningReschedule {
-            result = ProvisioningFinished
-        }
-        return result, err
+        err = fmt.Errorf("failed to provision volume with StorageClass %q: %v", claimClass, err)
+        return ctrl.provisionVolumeErrorHandling(ctx, result, err, claim, operation)
     }
 
     klog.Info(logOperation(operation, "volume %q provisioned", volume.Name))
 
@@ -1463,6 +1441,37 @@ func (ctrl *ProvisionController) provisionClaimOperation(ctx context.Context, cl
     return ProvisioningFinished, nil
 }
 
+func (ctrl *ProvisionController) provisionVolumeErrorHandling(ctx context.Context, result ProvisioningState, err error, claim *v1.PersistentVolumeClaim, operation string) (ProvisioningState, error) {
+    ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", err.Error())
+    if _, ok := claim.Annotations[annSelectedNode]; ok && result == ProvisioningReschedule {
+        // For dynamic PV provisioning with delayed binding, the provisioner may fail
+        // because the node is wrong (permanent error) or currently unusable (not enough
+        // capacity). If the provisioner wants to give up scheduling with the currently
+        // selected node, then it can ask for that by returning ProvisioningReschedule
+        // as state.
+        //
+        // `selectedNode` must be removed to notify scheduler to schedule again.
+        if errLabel := ctrl.rescheduleProvisioning(ctx, claim); errLabel != nil {
+            klog.Info(logOperation(operation, "volume rescheduling failed: %v", errLabel))
+            // If unsetting that label fails in ctrl.rescheduleProvisioning, we
+            // keep the volume in the work queue as if the provisioner had
+            // returned ProvisioningFinished and simply try again later.
+            return ProvisioningFinished, err
+        }
+        // Label was removed, stop working on the volume.
+        klog.Info(logOperation(operation, "volume rescheduled because: %v", err))
+        return ProvisioningFinished, errStopProvision
+    }
+
+    // ProvisioningReschedule shouldn't have been returned for volumes without selected node,
+    // but if we get it anyway, then treat it like ProvisioningFinished because we cannot
+    // reschedule.
+    if result == ProvisioningReschedule {
+        result = ProvisioningFinished
+    }
+    return result, err
+}
+
 // deleteVolumeOperation attempts to delete the volume backing the given
 // volume. Returns error, which indicates whether deletion should be retried
 // (requeue the volume) or not
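
A note on the helper this code path relies on: ctrl.rescheduleProvisioning is called above but its body is not part of this diff. Conceptually it strips the volume.kubernetes.io/selected-node annotation from the claim so the scheduler runs volume binding again. A rough, hypothetical sketch of that idea with plain client-go follows; it is not the library's actual implementation (which may patch rather than update), and the helper name is made up:

    package example

    import (
        "context"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // clearSelectedNode is a hypothetical stand-in for rescheduleProvisioning:
    // dropping the selected-node annotation tells the scheduler to pick a
    // (possibly different) node for the claim on its next binding pass.
    func clearSelectedNode(ctx context.Context, client kubernetes.Interface, claim *v1.PersistentVolumeClaim) error {
        updated := claim.DeepCopy()
        delete(updated.Annotations, "volume.kubernetes.io/selected-node")
        _, err := client.CoreV1().PersistentVolumeClaims(updated.Namespace).Update(ctx, updated, metav1.UpdateOptions{})
        return err
    }
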
diff --git a/controller/controller_test.go b/controller/controller_test.go
index 5fb1e02..18fc24a 100644
--- a/controller/controller_test.go
+++ b/controller/controller_test.go
@@ -542,6 +542,25 @@ func TestController(t *testing.T) {
         },
         {
             name: "do not remove selectedNode after final error, only the claim",
+            objs: []runtime.Object{
+                newStorageClassWithVolumeBindingMode("class-1", "foo.bar/baz", &modeWait),
+                newClaim("claim-1", "uid-1-1", "class-1", "foo.bar/baz", "", map[string]string{annBetaStorageProvisioner: "foo.bar/baz", annSelectedNode: "node-1"}),
+                newNode("node-1"),
+            },
+            provisionerName: "foo.bar/baz",
+            provisioner:     newBadTestProvisioner(),
+            expectedClaims: []v1.PersistentVolumeClaim{
+                *newClaim("claim-1", "uid-1-1", "class-1", "foo.bar/baz", "", map[string]string{annBetaStorageProvisioner: "foo.bar/baz", annSelectedNode: "node-1"}),
+            },
+            expectedClaimsInProgress: nil, // not in progress anymore
+            expectedMetrics: testMetrics{
+                provisioned: counts{
+                    "class-1": count{failed: 1},
+                },
+            },
+        },
+        {
+            name: "remove selectedNode if no node exists",
             objs: []runtime.Object{
                 newStorageClassWithVolumeBindingMode("class-1", "foo.bar/baz", &modeWait),
                 newClaim("claim-1", "uid-1-1", "class-1", "foo.bar/baz", "", map[string]string{annBetaStorageProvisioner: "foo.bar/baz", annSelectedNode: "node-wrong"}),
@@ -549,7 +568,7 @@ func TestController(t *testing.T) {
             provisionerName: "foo.bar/baz",
             provisioner:     newBadTestProvisioner(),
             expectedClaims: []v1.PersistentVolumeClaim{
-                *newClaim("claim-1", "uid-1-1", "class-1", "foo.bar/baz", "", map[string]string{annBetaStorageProvisioner: "foo.bar/baz", annSelectedNode: "node-wrong"}),
+                *newClaim("claim-1", "uid-1-1", "class-1", "foo.bar/baz", "", map[string]string{annBetaStorageProvisioner: "foo.bar/baz"}),
             },
             expectedClaimsInProgress: nil, // not in progress anymore
             expectedMetrics: testMetrics{
@@ -560,6 +579,24 @@ func TestController(t *testing.T) {
         },
         {
             name: "do not remove selectedNode if nothing changes",
+            objs: []runtime.Object{
+                newStorageClassWithVolumeBindingMode("class-1", "foo.bar/baz", &modeWait),
+                newClaim("claim-1", "uid-1-1", "class-1", "foo.bar/baz", "", map[string]string{annBetaStorageProvisioner: "foo.bar/baz", annSelectedNode: "node-1"}),
+                newNode("node-1"),
+            },
+            provisionerName: "foo.bar/baz",
+            provisioner:     newNoChangeTestProvisioner(),
+            expectedClaims: []v1.PersistentVolumeClaim{
+                *newClaim("claim-1", "uid-1-1", "class-1", "foo.bar/baz", "", map[string]string{annBetaStorageProvisioner: "foo.bar/baz", annSelectedNode: "node-1"}),
+            },
+            expectedMetrics: testMetrics{
+                provisioned: counts{
+                    "class-1": count{failed: 1},
+                },
+            },
+        },
+        {
+            name: "remove selectedNode if nothing changes and no node exists",
             objs: []runtime.Object{
                 newStorageClassWithVolumeBindingMode("class-1", "foo.bar/baz", &modeWait),
                 newClaim("claim-1", "uid-1-1", "class-1", "foo.bar/baz", "", map[string]string{annBetaStorageProvisioner: "foo.bar/baz", annSelectedNode: "node-wrong"}),
@@ -567,7 +604,7 @@ func TestController(t *testing.T) {
             provisionerName: "foo.bar/baz",
             provisioner:     newNoChangeTestProvisioner(),
             expectedClaims: []v1.PersistentVolumeClaim{
-                *newClaim("claim-1", "uid-1-1", "class-1", "foo.bar/baz", "", map[string]string{annBetaStorageProvisioner: "foo.bar/baz", annSelectedNode: "node-wrong"}),
+                *newClaim("claim-1", "uid-1-1", "class-1", "foo.bar/baz", "", map[string]string{annBetaStorageProvisioner: "foo.bar/baz"}),
             },
             expectedMetrics: testMetrics{
                 provisioned: counts{
@@ -876,7 +913,7 @@ func TestTopologyParams(t *testing.T) {
                 newStorageClass("class-1", "foo.bar/baz"),
                 newClaim("claim-1", "uid-1-1", "class-1", "foo.bar/baz", "", map[string]string{annSelectedNode: "node-1"}),
             },
-            expectedParams: nil,
+            expectedParams: &provisionParams{},
         },
     }
     for _, test := range tests {
diff --git a/gidallocator/gidallocator.go b/gidallocator/gidallocator.go
index 1e575f5..76c53a0 100644
--- a/gidallocator/gidallocator.go
+++ b/gidallocator/gidallocator.go
@@ -113,12 +113,10 @@ func (a *Allocator) Release(volume *v1.PersistentVolume) error {
     return nil
 }
 
-//
 // Return the gid table for a storage class.
-// - If this is the first time, fill it with all the gids
-//   used in PVs of this storage class by traversing the PVs.
-// - Adapt the range of the table to the current range of the SC.
-//
+//   - If this is the first time, fill it with all the gids
+//     used in PVs of this storage class by traversing the PVs.
+//   - Adapt the range of the table to the current range of the SC.
 func (a *Allocator) getGidTable(className string, min int, max int) (*allocator.MinMaxAllocator, error) {
     var err error
     a.gidTableLock.Lock()
@@ -174,7 +172,6 @@ func (a *Allocator) getGidTable(className string, min int, max int) (*allocator.
 
 // Traverse the PVs, fetching all the GIDs from those
 // in a given storage class, and mark them in the table.
-//
 func (a *Allocator) collectGids(className string, gidTable *allocator.MinMaxAllocator) error {
     pvList, err := a.client.CoreV1().PersistentVolumes().List(context.Background(), metav1.ListOptions{})
     if err != nil {
diff --git a/mount/mountinfo_linux.go b/mount/mountinfo_linux.go
index c55d794..c4f1cf8 100644
--- a/mount/mountinfo_linux.go
+++ b/mount/mountinfo_linux.go
@@ -1,3 +1,4 @@
+//go:build linux
 // +build linux
 
 /*
diff --git a/mount/mountinfo_unsupported.go b/mount/mountinfo_unsupported.go
index 7fd3241..0fa0954 100644
--- a/mount/mountinfo_unsupported.go
+++ b/mount/mountinfo_unsupported.go
@@ -1,3 +1,4 @@
+//go:build (!windows && !linux && !freebsd && !solaris) || (freebsd && !cgo) || (solaris && !cgo)
 // +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
 
 /*
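
Both mount/* files above gain the Go 1.17 //go:build form while keeping the legacy // +build line; toolchains older than Go 1.17 still read the old line, so the two must stay in sync (gofmt maintains the pair automatically). That is also why the boilerplate checker below learns to strip both spellings. For illustration, the resulting header of a constrained file looks like this in the Linux-only case:

    //go:build linux
    // +build linux

    package mount
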
diff --git a/repo-infra/verify/boilerplate/boilerplate.py b/repo-infra/verify/boilerplate/boilerplate.py
index 3507c21..ad6e6f0 100755
--- a/repo-infra/verify/boilerplate/boilerplate.py
+++ b/repo-infra/verify/boilerplate/boilerplate.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 
 # Copyright 2015 The Kubernetes Authors.
 #
@@ -24,7 +24,7 @@
 import os
 import re
 import sys
 
-from datetime import date
+import datetime
 
 parser = argparse.ArgumentParser()
@@ -32,8 +32,7 @@
     help="list of files to check, all files if unspecified",
     nargs='*')
 
-# Rootdir defaults to the directory **above** the repo-infra dir.
-rootdir = os.path.dirname(__file__) + "/../../../"
+rootdir = os.path.dirname(__file__) + "/../../"
 rootdir = os.path.abspath(rootdir)
 parser.add_argument(
     "--rootdir", default=rootdir, help="root directory to examine")
@@ -51,6 +50,7 @@
 
 verbose_out = sys.stderr if args.verbose else open("/dev/null", "w")
 
+
 def get_refs():
     refs = {}
 
@@ -64,6 +64,12 @@ def get_refs():
 
     return refs
 
+
+def is_generated_file(filename, data, regexs):
+    p = regexs["generated"]
+    return p.search(data)
+
+
 def file_passes(filename, refs, regexs):
     try:
         f = open(filename, 'r')
@@ -74,20 +80,25 @@
     data = f.read()
     f.close()
 
+    # determine if the file is automatically generated
+    generated = is_generated_file(filename, data, regexs)
+
     basename = os.path.basename(filename)
     extension = file_extension(filename)
+    if generated:
+        if extension == "go":
+            extension = "generatego"
+
     if extension != "":
         ref = refs[extension]
     else:
         ref = refs[basename]
 
-    # remove build tags from the top of Go files
-    if extension == "go":
+    # remove extra content from the top of files
+    if extension == "go" or extension == "generatego":
         p = regexs["go_build_constraints"]
         (data, found) = p.subn("", data, 1)
-
-    # remove shebang from the top of shell files
-    if extension == "sh" or extension == "py":
+    elif extension in ["sh", "py"]:
         p = regexs["shebang"]
         (data, found) = p.subn("", data, 1)
 
@@ -106,19 +117,26 @@
     p = regexs["year"]
     for d in data:
         if p.search(d):
-            print('File %s is missing the year' % filename, file=verbose_out)
+            if generated:
+                print('File %s has the YEAR field, but it should not be in generated file' %
+                      filename, file=verbose_out)
+            else:
+                print('File %s has the YEAR field, but missing the year of date' %
+                      filename, file=verbose_out)
             return False
 
-    # Replace all occurrences of the regex "CURRENT_YEAR|...|2016|2015|2014" with "YEAR"
-    p = regexs["date"]
-    for i, d in enumerate(data):
-        (data[i], found) = p.subn('YEAR', d)
-        if found != 0:
-            break
+    if not generated:
+        # Replace all occurrences of the regex "2014|2015|2016|2017|2018" with "YEAR"
+        p = regexs["date"]
+        for i, d in enumerate(data):
+            (data[i], found) = p.subn('YEAR', d)
+            if found != 0:
+                break
 
     # if we don't match the reference at this point, fail
     if ref != data:
-        print("Header in %s does not match reference, diff:" % filename, file=verbose_out)
+        print("Header in %s does not match reference, diff:" %
+              filename, file=verbose_out)
         if args.verbose:
             print(file=verbose_out)
             for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''):
@@ -128,17 +146,20 @@
 
     return True
 
+
 def file_extension(filename):
     return os.path.splitext(filename)[1].split(".")[-1].lower()
 
-skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git',
-                'cluster/env.sh', 'vendor', 'test/e2e/generated/bindata.go',
-                'repo-infra/verify/boilerplate/test', '.glide']
+
+skipped_names = ['third_party', '_gopath', '_output', '.git', 'cluster/env.sh',
+                 "vendor", "test/e2e/generated/bindata.go", "hack/boilerplate/test",
+                 "staging/src/k8s.io/kubectl/pkg/generated/bindata.go"]
+
 
 def normalize_files(files):
     newfiles = []
     for pathname in files:
-        if any(x in pathname for x in skipped_dirs):
+        if any(x in pathname for x in skipped_names):
             continue
         newfiles.append(pathname)
     for i, pathname in enumerate(newfiles):
@@ -146,6 +167,7 @@
             newfiles[i] = os.path.join(args.rootdir, pathname)
     return newfiles
 
+
 def get_files(extensions):
     files = []
     if len(args.filenames) > 0:
@@ -156,16 +178,19 @@
             # as we would prune these later in normalize_files(). But doing it
             # cuts down the amount of filesystem walking we do and cuts down
             # the size of the file list
-            for d in skipped_dirs:
+            for d in skipped_names:
                 if d in dirs:
                     dirs.remove(d)
+            for d in dirs:
+                # dirs that start with __ are ignored
+                if re.match("^__", d):
+                    dirs.remove(d)
 
             for name in walkfiles:
                 pathname = os.path.join(root, name)
                 files.append(pathname)
 
     files = normalize_files(files)
-
     outfiles = []
     for pathname in files:
         basename = os.path.basename(pathname)
@@ -174,23 +199,35 @@
             outfiles.append(pathname)
     return outfiles
 
+
+def get_dates():
+    years = datetime.datetime.now().year
+    return '(%s)' % '|'.join((str(year) for year in range(2014, years+1)))
+
+
 def get_regexs():
     regexs = {}
     # Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
-    regexs["year"] = re.compile( 'YEAR' )
-    # dates can be 2014, 2015, 2016, ..., CURRENT_YEAR, company holder names can be anything
-    years = range(2014, date.today().year + 1)
-    regexs["date"] = re.compile( '(%s)' % "|".join(map(lambda l: str(l), years)) )
-    # strip // +build \n\n build constraints
-    regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE)
-    # strip #!.* from shell scripts
+    regexs["year"] = re.compile('YEAR')
+    # get_dates return 2014, 2015, 2016, 2017, or 2018 until the current year as a regex like: "(2014|2015|2016|2017|2018)";
+    # company holder names can be anything
+    regexs["date"] = re.compile(get_dates())
+    # strip the following build constraints/tags:
+    # //go:build
+    # // +build \n\n
+    regexs["go_build_constraints"] = re.compile(
+        r"^(//(go:build| \+build).*\n)+\n", re.MULTILINE)
+    # strip #!.* from scripts
     regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
+    # Search for generated files
+    regexs["generated"] = re.compile(r"^[/*#]+ +.* DO NOT EDIT\.$", re.MULTILINE)
     return regexs
 
+
 def main():
     regexs = get_regexs()
     refs = get_refs()
-    filenames = get_files(refs.keys())
+    filenames = get_files(list(refs.keys()))
 
     for filename in filenames:
         if not file_passes(filename, refs, regexs):
@@ -198,5 +235,6 @@
 
     return 0
 
+
 if __name__ == "__main__":
-    sys.exit(main())
+    sys.exit(main())
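
The new "generated" regex in boilerplate.py keys off the marker line that Go code generators conventionally emit ("... DO NOT EDIT."). As a quick sanity check of the pattern, here it is translated to Go's regexp syntax purely for illustration, with (?m) standing in for Python's re.MULTILINE:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        // mirrors regexs["generated"] from boilerplate.py
        generated := regexp.MustCompile(`(?m)^[/*#]+ +.* DO NOT EDIT\.$`)
        fmt.Println(generated.MatchString("// Code generated by protoc-gen-go. DO NOT EDIT.")) // true
        fmt.Println(generated.MatchString("# Copyright 2015 The Kubernetes Authors."))         // false
    }
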
diff --git a/repo-infra/verify/boilerplate/boilerplate_test.py b/repo-infra/verify/boilerplate/boilerplate_test.py
index 2d60156..19bf123 100644
--- a/repo-infra/verify/boilerplate/boilerplate_test.py
+++ b/repo-infra/verify/boilerplate/boilerplate_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 
 # Copyright 2016 The Kubernetes Authors.
 #
@@ -28,7 +28,7 @@ class TestBoilerplate(unittest.TestCase):
     """
     Note: run this test from the hack/boilerplate directory.
 
-    $ python -m unittest boilerplate_test
+    $ python3 -m unittest boilerplate_test
     """
 
     def test_boilerplate(self):
diff --git a/repo-infra/verify/boilerplate/test/fail.py b/repo-infra/verify/boilerplate/test/fail.py
index cbdd06f..e59627a 100644
--- a/repo-infra/verify/boilerplate/test/fail.py
+++ b/repo-infra/verify/boilerplate/test/fail.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 
 # Copyright 2015 The Kubernetes Authors.
 #
diff --git a/repo-infra/verify/boilerplate/test/pass.py b/repo-infra/verify/boilerplate/test/pass.py
index 5b7ce29..0a0f9c9 100644
--- a/repo-infra/verify/boilerplate/test/pass.py
+++ b/repo-infra/verify/boilerplate/test/pass.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 
 # Copyright 2015 The Kubernetes Authors.
 #
diff --git a/repo-infra/verify/verify-boilerplate.sh b/repo-infra/verify/verify-boilerplate.sh
index 0168d33..ad60fff 100755
--- a/repo-infra/verify/verify-boilerplate.sh
+++ b/repo-infra/verify/verify-boilerplate.sh
@@ -38,7 +38,7 @@ cleanup() {
 }
 
 pushd "${boilerDir}" >/dev/null
-if ! python -m unittest boilerplate_test 2>"${unitTestOut}"; then
+if ! python3 -m unittest boilerplate_test 2>"${unitTestOut}"; then
   echo "boilerplate_test.py failed"
   echo
   cat "${unitTestOut}"