From 0535c0d4f101a4c147a710c5fff74e81368dc7e3 Mon Sep 17 00:00:00 2001 From: Aaron Crickenberger Date: Wed, 16 Jan 2019 10:47:35 -0800 Subject: [PATCH 01/41] Initial commit from kubernetes-template-project --- CONTRIBUTING.md | 31 +++++++ LICENSE | 201 +++++++++++++++++++++++++++++++++++++++++++++ OWNERS | 6 ++ OWNERS_ALIASES | 20 +++++ README.md | 29 +++++++ RELEASE.md | 9 ++ SECURITY_CONTACTS | 14 ++++ code-of-conduct.md | 3 + 8 files changed, 313 insertions(+) create mode 100644 CONTRIBUTING.md create mode 100644 LICENSE create mode 100644 OWNERS create mode 100644 OWNERS_ALIASES create mode 100644 README.md create mode 100644 RELEASE.md create mode 100644 SECURITY_CONTACTS create mode 100644 code-of-conduct.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..de471151 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing Guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt: + +_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ + +## Getting Started + +We have full documentation on how to get started contributing here: + + + +- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests +- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) +- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers + +## Mentorship + +- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! + + diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..8dada3ed --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/OWNERS b/OWNERS new file mode 100644 index 00000000..e4c79b9d --- /dev/null +++ b/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md + +approvers: + # TODO: in your repo created from this template, you should replace the + # steering-committee with a list of project owners, see the doc linked above. 
+ - steering-committee diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES new file mode 100644 index 00000000..8bcd3a60 --- /dev/null +++ b/OWNERS_ALIASES @@ -0,0 +1,20 @@ +# See the OWNERS_ALIASES docs: https://git.k8s.io/community/contributors/guide/owners.md#owners_aliases + +aliases: + # TODO: remove this alias, it will go stale in your repo, and in your repo + # you should have your own set of approvers (see OWNERS) + # in the original template repo, we must maintain this list to approve changes + # to the template itself + steering-committee: + - bgrant0607 + - brendandburns + - derekwaynecarr + - dims + - jbeda + - michelleN + - philips + - pwittrock + - sarahnovotny + - smarterclayton + - spiffxp + - timothysc diff --git a/README.md b/README.md new file mode 100644 index 00000000..9431b7bb --- /dev/null +++ b/README.md @@ -0,0 +1,29 @@ +# Kubernetes Template Project + +The Kubernetes Template Project is a template for starting new projects in the GitHub organizations owned by Kubernetes. All Kubernetes projects, at minimum, must have the following files: + +- a `README.md` outlining the project goals, sponsoring sig, and community contact information +- an `OWNERS` with the project leads listed as approvers ([docs on `OWNERS` files][owners]) +- a `CONTRIBUTING.md` outlining how to contribute to the project +- an unmodified copy of `code-of-conduct.md` from this repo, which outlines community behavior and the consequences of breaking the code +- a `LICENSE` which must be Apache 2.0 for code projects, or [Creative Commons 4.0] for documentation repositories, without any custom content +- a `SECURITY_CONTACTS` with the contact points for the Product Security Team + to reach out to for triaging and handling of incoming issues. They must agree to abide by the + [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) + and will be removed and replaced if they violate that agreement. + +## Community, discussion, contribution, and support + +Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/). + +You can reach the maintainers of this project at: + +- [Slack](http://slack.k8s.io/) +- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-dev) + +### Code of conduct + +Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). + +[owners]: https://git.k8s.io/community/contributors/guide/owners.md +[Creative Commons 4.0]: https://git.k8s.io/website/LICENSE diff --git a/RELEASE.md b/RELEASE.md new file mode 100644 index 00000000..7274b344 --- /dev/null +++ b/RELEASE.md @@ -0,0 +1,9 @@ +# Release Process + +The Kubernetes Template Project is released on an as-needed basis. The process is as follows: + +1. An issue is proposing a new release with a changelog since the last release +1. All [OWNERS](OWNERS) must LGTM this release +1. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION` +1. The release issue is closed +1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released` diff --git a/SECURITY_CONTACTS b/SECURITY_CONTACTS new file mode 100644 index 00000000..39689175 --- /dev/null +++ b/SECURITY_CONTACTS @@ -0,0 +1,14 @@ +# Defined below are the security contacts for this repo. 
+# +# They are the contact point for the Product Security Team to reach out +# to for triaging and handling of incoming issues. +# +# The below names agree to abide by the +# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. +# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +bob +alice diff --git a/code-of-conduct.md b/code-of-conduct.md new file mode 100644 index 00000000..0d15c00c --- /dev/null +++ b/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) From b41cac350d9dd1b9c478b16c398745aeb95390a6 Mon Sep 17 00:00:00 2001 From: Aaron Crickenberger Date: Wed, 16 Jan 2019 10:51:52 -0800 Subject: [PATCH 02/41] Add files from github.com/pohly/csi-build-rules --- README.md | 79 ++-- build.make | 1211 ++++++++++++++++++++++++++++++++++++++++++++++++++++ travis.yml | 14 + 3 files changed, 1275 insertions(+), 29 deletions(-) create mode 100644 build.make create mode 100644 travis.yml diff --git a/README.md b/README.md index 9431b7bb..c67e9308 100644 --- a/README.md +++ b/README.md @@ -1,29 +1,50 @@ -# Kubernetes Template Project - -The Kubernetes Template Project is a template for starting new projects in the GitHub organizations owned by Kubernetes. All Kubernetes projects, at minimum, must have the following files: - -- a `README.md` outlining the project goals, sponsoring sig, and community contact information -- an `OWNERS` with the project leads listed as approvers ([docs on `OWNERS` files][owners]) -- a `CONTRIBUTING.md` outlining how to contribute to the project -- an unmodified copy of `code-of-conduct.md` from this repo, which outlines community behavior and the consequences of breaking the code -- a `LICENSE` which must be Apache 2.0 for code projects, or [Creative Commons 4.0] for documentation repositories, without any custom content -- a `SECURITY_CONTACTS` with the contact points for the Product Security Team - to reach out to for triaging and handling of incoming issues. They must agree to abide by the - [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) - and will be removed and replaced if they violate that agreement. - -## Community, discussion, contribution, and support - -Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/). - -You can reach the maintainers of this project at: - -- [Slack](http://slack.k8s.io/) -- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-dev) - -### Code of conduct - -Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). - -[owners]: https://git.k8s.io/community/contributors/guide/owners.md -[Creative Commons 4.0]: https://git.k8s.io/website/LICENSE +# [csi-build-rules](https://github.com/kubernetes-csi/csi-build-rules) + +These build and test rules can be shared between different Go projects +without modifications. Customization for the different projects happen +in the top-level Makefile. 
+ +The rules include support for building and pushing Docker images, with +the following features: + - one or more command and image per project + - push canary and/or tagged release images + - automatically derive the image tag(s) from repo tags + - the source code revision is stored in a "revision" image label + - never overwrites an existing release image + +Usage +----- + +The expected repository layout is: + - `cmd/*/*.go` - source code for each command + - `cmd/*/Dockerfile` - docker file for each command or + Dockerfile in the root when only building a single command + - `Makefile` - includes `build-rules/build.make` and sets + configuration variables + - `.travis.yml` - a symlink to `build-rules/.travis.yml` + +To create a release, tag a certain revision with a name that +starts with `v`, for example `v1.0.0`, then `make push` +while that commit is checked out. + +It does not matter on which branch that revision exists, i.e. it is +possible to create releases directly from master. A release branch can +still be created for maintenance releases later if needed. + +Release branches are expected to be named `release-x.y` for releases +`x.y.z`. Building from such a branch creates `x.y-canary` +images. Building from master creates the main `canary` image. + +Sharing and updating +-------------------- + +[`git subtree`](https://github.com/git/git/blob/master/contrib/subtree/git-subtree.txt) +is the recommended way of maintaining a copy of the rules inside the +`build-rules` directory of a project. This way, it is possible to make +changes also locally, test them and then push them back to the shared +repository at a later time. + +Cheat sheet: + +- `git subtree pull --prefix=build-rules https://github.com/kubernetes-csi/csi-build-rules.git master` - update local copy to latest upstream +- edit, `git commit`, `git subtree push --prefix=build-rules git@github.com:/csi-build-rules.git ` - push to a new branch before submitting a PR diff --git a/build.make b/build.make new file mode 100644 index 00000000..460ee71c --- /dev/null +++ b/build.make @@ -0,0 +1,1211 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + csi-build-rules/build.make at master · pohly/csi-build-rules · GitHub + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+
.PHONY: build-% build container-% container push-% push clean test
+
# A space-separated list of all commands in the repository, must be
# set in main Makefile of a repository.
# CMDS=
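# Example (hypothetical command names): CMDS=my-csi-driver my-csi-sidecar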
+
# This is the default. It can be overridden in the main Makefile after
# including build.make.
REGISTRY_NAME=quay.io/k8scsi
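# Example override in the including Makefile (illustrative): REGISTRY_NAME=quay.io/my-org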
+
# Revision that gets built into each binary via the main.version
# string. Uses the `git describe` output based on the most recent
# version tag with a short revision suffix or, if nothing has been
# tagged yet, just the revision.
#
# Beware that tags may also be missing in shallow clones as done by
# some CI systems (like TravisCI, which pulls only 50 commits).
REV=$(shell git describe --long --tags --match='v*' --dirty 2>/dev/null || git rev-list -n1 HEAD)
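# Example REV values (illustrative hash): "v1.0.1-2-g1a2b3c4" for two commits on
# top of the v1.0.1 tag, or "v1.0.1-0-g1a2b3c4-dirty" for a tagged tree with local changes.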
+
# A space-separated list of image tags under which the current build is to be pushed.
# Determined dynamically.
IMAGE_TAGS=
+
# A "canary" image gets built if the current commit is the head of the remote "master" branch.
# That branch does not exist when building some other branch in TravisCI.
IMAGE_TAGS+=$(shell if [ "$$(git rev-list -n1 HEAD)" = "$$(git rev-list -n1 origin/master 2>/dev/null)" ]; then echo "canary"; fi)
+
# A "X.Y.Z-canary" image gets built if the current commit is the head of a "origin/release-X.Y.Z" branch.
# The actual suffix does not matter, only the "release-" prefix is checked.
IMAGE_TAGS+=$(shell git branch -r --points-at=HEAD | grep 'origin/release-' | grep -v -e ' -> ' | sed -e 's;.*/release-\(.*\);\1-canary;')
+
# A release image "vX.Y.Z" gets built if there is a tag of that format for the current commit.
# --abbrev=0 suppresses long format, only showing the closest tag.
IMAGE_TAGS+=$(shell tagged="$$(git describe --tags --match='v*' --abbrev=0)"; if [ "$$tagged" ] && [ "$$(git rev-list -n1 HEAD)" = "$$(git rev-list -n1 $$tagged)" ]; then echo $$tagged; fi)
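# Example (illustrative): on the head of origin/master that also carries the tag
# v1.0.0, IMAGE_TAGS ends up containing both "canary" and "v1.0.0"; on the head
# of origin/release-1.0 it would contain "1.0-canary".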
+
# Images are named after the command contained in them.
IMAGE_NAME=$(REGISTRY_NAME)/$*
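# Example (illustrative): "make push-my-csi-driver" pushes quay.io/k8scsi/my-csi-driver.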
+
ifdef V
TESTARGS = -v -args -alsologtostderr -v 5
else
TESTARGS =
endif
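# Example: "make V=1 test" enables the verbose arguments above (this assumes the
# test binaries understand the glog flags).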
+
build-%:
	mkdir -p bin
	CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-X main.version=$(REV) -extldflags "-static"' -o ./bin/$* ./cmd/$*
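# The result is a static Linux binary in ./bin/<command> with REV baked into main.version.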
+
container-%: build-%
	docker build -t $*:latest -f $(shell if [ -e ./cmd/$*/Dockerfile ]; then echo ./cmd/$*/Dockerfile; else echo Dockerfile; fi) --label revision=$(REV) .
+
push-%: container-%
	set -ex; \
	push_image () { \
		docker tag $*:latest $(IMAGE_NAME):$$tag; \
		docker push $(IMAGE_NAME):$$tag; \
	}; \
	for tag in $(IMAGE_TAGS); do \
		if echo $$tag | grep -q -e '-canary$$'; then \
			: "creating or overwriting canary image"; \
			push_image; \
		elif docker pull $(IMAGE_NAME):$$tag 2>&1 | tee /dev/stderr | grep -q "manifest for $(IMAGE_NAME):$$tag not found"; then \
			: "creating release image"; \
			push_image; \
		else \
			: "release image $(IMAGE_NAME):$$tag already exists, skipping push"; \
		fi; \
	done
+
build: $(CMDS:%=build-%)
container: $(CMDS:%=container-%)
push: $(CMDS:%=push-%)
+
clean:
	-rm -rf bin
+
test:
	go test `go list ./... | grep -v 'vendor'` $(TESTARGS)
	go vet `go list ./... | grep -v vendor`
	files=$$(find . -name '*.go' | grep -v './vendor'); \
	if [ $$(gofmt -d $$files | wc -l) -ne 0 ]; then \
		echo "formatting errors:"; \
		gofmt -d $$files; \
		false; \
	fi
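# Illustrative end-to-end usage from a repository that includes this file in its
# main Makefile (command names and the version below are examples, not mandated here):
#
#   make build test        # build all CMDS and run the checks
#   git tag -s v1.0.0      # mark the current commit as a release
#   make push              # push quay.io/k8scsi/<cmd>:v1.0.0 (and :canary when on master)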
+ + + + diff --git a/travis.yml b/travis.yml new file mode 100644 index 00000000..a44b0017 --- /dev/null +++ b/travis.yml @@ -0,0 +1,14 @@ +language: go +sudo: required +services: + - docker +matrix: + include: + - go: 1.11.1 +script: +- make all test +after_success: + - if [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then + docker login -u "${DOCKER_USERNAME}" -p "${DOCKER_PASSWORD}" quay.io; + make push; + fi From d23a16cb744734b468ac4e226c98ca8aaaea9125 Mon Sep 17 00:00:00 2001 From: Aaron Crickenberger Date: Wed, 16 Jan 2019 11:19:16 -0800 Subject: [PATCH 03/41] Port over information from kubernetes/org --- OWNERS | 11 ++++++++--- OWNERS_ALIASES | 20 -------------------- RELEASE.md | 8 +------- SECURITY_CONTACTS | 4 ++-- 4 files changed, 11 insertions(+), 32 deletions(-) delete mode 100644 OWNERS_ALIASES diff --git a/OWNERS b/OWNERS index e4c79b9d..6d2f474e 100644 --- a/OWNERS +++ b/OWNERS @@ -1,6 +1,11 @@ # See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md approvers: - # TODO: in your repo created from this template, you should replace the - # steering-committee with a list of project owners, see the doc linked above. - - steering-committee +- saad-ali +- msau42 +- pohly + +reviewers: +- saad-ali +- msau42 +- pohly diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES deleted file mode 100644 index 8bcd3a60..00000000 --- a/OWNERS_ALIASES +++ /dev/null @@ -1,20 +0,0 @@ -# See the OWNERS_ALIASES docs: https://git.k8s.io/community/contributors/guide/owners.md#owners_aliases - -aliases: - # TODO: remove this alias, it will go stale in your repo, and in your repo - # you should have your own set of approvers (see OWNERS) - # in the original template repo, we must maintain this list to approve changes - # to the template itself - steering-committee: - - bgrant0607 - - brendandburns - - derekwaynecarr - - dims - - jbeda - - michelleN - - philips - - pwittrock - - sarahnovotny - - smarterclayton - - spiffxp - - timothysc diff --git a/RELEASE.md b/RELEASE.md index 7274b344..491044d2 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,9 +1,3 @@ # Release Process -The Kubernetes Template Project is released on an as-needed basis. The process is as follows: - -1. An issue is proposing a new release with a changelog since the last release -1. All [OWNERS](OWNERS) must LGTM this release -1. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION` -1. The release issue is closed -1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released` +TODO: describe the release process for this project diff --git a/SECURITY_CONTACTS b/SECURITY_CONTACTS index 39689175..2af1414e 100644 --- a/SECURITY_CONTACTS +++ b/SECURITY_CONTACTS @@ -10,5 +10,5 @@ # DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE # INSTRUCTIONS AT https://kubernetes.io/security/ -bob -alice +saad-ali +msau42 From a1136470c29f970e0af87d352a1b3cb1520d09dc Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Mon, 21 Jan 2019 10:08:51 +0100 Subject: [PATCH 04/41] build.make: initial content The repo was created with an HTML version of the build.make file from https://github.com/pohly/csi-build-rules/. Here's the raw file. 
--- build.make | 1310 ++++------------------------------------------------ 1 file changed, 99 insertions(+), 1211 deletions(-) diff --git a/build.make b/build.make index 460ee71c..42146483 100644 --- a/build.make +++ b/build.make @@ -1,1211 +1,99 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - csi-build-rules/build.make at master · pohly/csi-build-rules · GitHub - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
.PHONY: build-% build container-% container push-% push clean test
-
# A space-separated list of all commands in the repository, must be
# set in main Makefile of a repository.
# CMDS=
-
# This is the default. It can be overridden in the main Makefile after
# including build.make.
REGISTRY_NAME=quay.io/k8scsi
-
# Revision that gets built into each binary via the main.version
# string. Uses the `git describe` output based on the most recent
# version tag with a short revision suffix or, if nothing has been
# tagged yet, just the revision.
#
# Beware that tags may also be missing in shallow clones as done by
# some CI systems (like TravisCI, which pulls only 50 commits).
REV=$(shell git describe --long --tags --match='v*' --dirty 2>/dev/null || git rev-list -n1 HEAD)
-
# A space-separated list of image tags under which the current build is to be pushed.
# Determined dynamically.
IMAGE_TAGS=
-
# A "canary" image gets built if the current commit is the head of the remote "master" branch.
# That branch does not exist when building some other branch in TravisCI.
IMAGE_TAGS+=$(shell if [ "$$(git rev-list -n1 HEAD)" = "$$(git rev-list -n1 origin/master 2>/dev/null)" ]; then echo "canary"; fi)
-
# A "X.Y.Z-canary" image gets built if the current commit is the head of a "origin/release-X.Y.Z" branch.
# The actual suffix does not matter, only the "release-" prefix is checked.
IMAGE_TAGS+=$(shell git branch -r --points-at=HEAD | grep 'origin/release-' | grep -v -e ' -> ' | sed -e 's;.*/release-\(.*\);\1-canary;')
-
# A release image "vX.Y.Z" gets built if there is a tag of that format for the current commit.
# --abbrev=0 suppresses long format, only showing the closest tag.
IMAGE_TAGS+=$(shell tagged="$$(git describe --tags --match='v*' --abbrev=0)"; if [ "$$tagged" ] && [ "$$(git rev-list -n1 HEAD)" = "$$(git rev-list -n1 $$tagged)" ]; then echo $$tagged; fi)
-
# Images are named after the command contained in them.
IMAGE_NAME=$(REGISTRY_NAME)/$*
-
ifdef V
TESTARGS = -v -args -alsologtostderr -v 5
else
TESTARGS =
endif
-
build-%:
mkdir -p bin
CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-X main.version=$(REV) -extldflags "-static"' -o ./bin/$* ./cmd/$*
-
container-%: build-%
docker build -t $*:latest -f $(shell if [ -e ./cmd/$*/Dockerfile ]; then echo ./cmd/$*/Dockerfile; else echo Dockerfile; fi) --label revision=$(REV) .
-
push-%: container-%
set -ex; \
push_image () { \
docker tag $*:latest $(IMAGE_NAME):$$tag; \
docker push $(IMAGE_NAME):$$tag; \
}; \
for tag in $(IMAGE_TAGS); do \
if echo $$tag | grep -q -e '-canary$$'; then \
: "creating or overwriting canary image"; \
push_image; \
elif docker pull $(IMAGE_NAME):$$tag 2>&1 | tee /dev/stderr | grep -q "manifest for $(IMAGE_NAME):$$tag not found"; then \
: "creating release image"; \
push_image; \
else \
: "release image $(IMAGE_NAME):$$tag already exists, skipping push"; \
fi; \
done
-
build: $(CMDS:%=build-%)
container: $(CMDS:%=container-%)
push: $(CMDS:%=push-%)
-
clean:
-rm -rf bin
-
test:
go test `go list ./... | grep -v 'vendor'` $(TESTARGS)
go vet `go list ./... | grep -v vendor`
files=$$(find . -name '*.go' | grep -v './vendor'); \
if [ $$(gofmt -d $$files | wc -l) -ne 0 ]; then \
echo "formatting errors:"; \
gofmt -d $$files; \
false; \
fi
- - - - +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.PHONY: build-% build container-% container push-% push clean test + +# A space-separated list of all commands in the repository, must be +# set in main Makefile of a repository. +# CMDS= + +# This is the default. It can be overridden in the main Makefile after +# including build.make. +REGISTRY_NAME=quay.io/k8scsi + +# Revision that gets built into each binary via the main.version +# string. Uses the `git describe` output based on the most recent +# version tag with a short revision suffix or, if nothing has been +# tagged yet, just the revision. +# +# Beware that tags may also be missing in shallow clones as done by +# some CI systems (like TravisCI, which pulls only 50 commits). +REV=$(shell git describe --long --tags --match='v*' --dirty 2>/dev/null || git rev-list -n1 HEAD) + +# A space-separated list of image tags under which the current build is to be pushed. +# Determined dynamically. +IMAGE_TAGS= + +# A "canary" image gets built if the current commit is the head of the remote "master" branch. +# That branch does not exist when building some other branch in TravisCI. +IMAGE_TAGS+=$(shell if [ "$$(git rev-list -n1 HEAD)" = "$$(git rev-list -n1 origin/master 2>/dev/null)" ]; then echo "canary"; fi) + +# A "X.Y.Z-canary" image gets built if the current commit is the head of a "origin/release-X.Y.Z" branch. +# The actual suffix does not matter, only the "release-" prefix is checked. +IMAGE_TAGS+=$(shell git branch -r --points-at=HEAD | grep 'origin/release-' | grep -v -e ' -> ' | sed -e 's;.*/release-\(.*\);\1-canary;') + +# A release image "vX.Y.Z" gets built if there is a tag of that format for the current commit. +# --abbrev=0 suppresses long format, only showing the closest tag. +IMAGE_TAGS+=$(shell tagged="$$(git describe --tags --match='v*' --abbrev=0)"; if [ "$$tagged" ] && [ "$$(git rev-list -n1 HEAD)" = "$$(git rev-list -n1 $$tagged)" ]; then echo $$tagged; fi) + +# Images are named after the command contained in them. +IMAGE_NAME=$(REGISTRY_NAME)/$* + +ifdef V +TESTARGS = -v -args -alsologtostderr -v 5 +else +TESTARGS = +endif + +build-%: + mkdir -p bin + CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-X main.version=$(REV) -extldflags "-static"' -o ./bin/$* ./cmd/$* + +container-%: build-% + docker build -t $*:latest -f $(shell if [ -e ./cmd/$*/Dockerfile ]; then echo ./cmd/$*/Dockerfile; else echo Dockerfile; fi) --label revision=$(REV) . 
+ +push-%: container-% + set -ex; \ + push_image () { \ + docker tag $*:latest $(IMAGE_NAME):$$tag; \ + docker push $(IMAGE_NAME):$$tag; \ + }; \ + for tag in $(IMAGE_TAGS); do \ + if echo $$tag | grep -q -e '-canary$$'; then \ + : "creating or overwriting canary image"; \ + push_image; \ + elif docker pull $(IMAGE_NAME):$$tag 2>&1 | tee /dev/stderr | grep -q "manifest for $(IMAGE_NAME):$$tag not found"; then \ + : "creating release image"; \ + push_image; \ + else \ + : "release image $(IMAGE_NAME):$$tag already exists, skipping push"; \ + fi; \ + done + +build: $(CMDS:%=build-%) +container: $(CMDS:%=container-%) +push: $(CMDS:%=push-%) + +clean: + -rm -rf bin + +test: + go test `go list ./... | grep -v 'vendor'` $(TESTARGS) + go vet `go list ./... | grep -v vendor` + files=$$(find . -name '*.go' | grep -v './vendor'); \ + if [ $$(gofmt -d $$files | wc -l) -ne 0 ]; then \ + echo "formatting errors:"; \ + gofmt -d $$files; \ + false; \ + fi From 51e16be615359fa450d223f3a7381a85dacc62fb Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Mon, 21 Jan 2019 10:18:56 +0100 Subject: [PATCH 05/41] RELEASE.md: clarify release process It's worth calling out explicitly that only the master branch is maintained. --- RELEASE.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/RELEASE.md b/RELEASE.md index 491044d2..a0fd815b 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,3 +1,5 @@ # Release Process -TODO: describe the release process for this project +No tagged releases are planned at this point. The intention is to keep +the master branch in a state such that it can be used for all +supported branches in downstream repos which use these files. From c876547b7f4b22447c963a68cc81529e181105ba Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Mon, 21 Jan 2019 10:26:11 +0100 Subject: [PATCH 06/41] README.md: update repo name, add initial setup step The actual repository was not named like the prototype repo. --- README.md | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index c67e9308..286be88d 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# [csi-build-rules](https://github.com/kubernetes-csi/csi-build-rules) +# [csi-release-tools](https://github.com/kubernetes-csi/csi-release-tools) These build and test rules can be shared between different Go projects without modifications. Customization for the different projects happen @@ -19,9 +19,9 @@ The expected repository layout is: - `cmd/*/*.go` - source code for each command - `cmd/*/Dockerfile` - docker file for each command or Dockerfile in the root when only building a single command - - `Makefile` - includes `build-rules/build.make` and sets + - `Makefile` - includes `release-tools/build.make` and sets configuration variables - - `.travis.yml` - a symlink to `build-rules/.travis.yml` + - `.travis.yml` - a symlink to `release-tools/.travis.yml` To create a release, tag a certain revision with a name that starts with `v`, for example `v1.0.0`, then `make push` @@ -40,11 +40,12 @@ Sharing and updating [`git subtree`](https://github.com/git/git/blob/master/contrib/subtree/git-subtree.txt) is the recommended way of maintaining a copy of the rules inside the -`build-rules` directory of a project. This way, it is possible to make +`release-tools` directory of a project. This way, it is possible to make changes also locally, test them and then push them back to the shared repository at a later time. 
Cheat sheet: -- `git subtree pull --prefix=build-rules https://github.com/kubernetes-csi/csi-build-rules.git master` - update local copy to latest upstream -- edit, `git commit`, `git subtree push --prefix=build-rules git@github.com:/csi-build-rules.git ` - push to a new branch before submitting a PR +- `git subtree add --prefix=release-tools https://github.com/pohly/csi-release-tools.git master` - add release tools to a repo which does not have them yet (only once) +- `git subtree pull --prefix=release-tools https://github.com/kubernetes-csi/csi-release-tools.git master` - update local copy to latest upstream (whenever upstream changes) +- edit, `git commit`, `git subtree push --prefix=release-tools git@github.com:/csi-release-tools.git ` - push to a new branch before submitting a PR From f49080e0865b38254f20b372cf6da8aab23917b9 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Tue, 22 Jan 2019 10:28:11 +0100 Subject: [PATCH 07/41] README.md: fix repo URL for initial setup Copy-and-paste error from the time when the kubernetes-csi/csi-release-tools repo didn't have the code... --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 286be88d..56d2248c 100644 --- a/README.md +++ b/README.md @@ -46,6 +46,6 @@ repository at a later time. Cheat sheet: -- `git subtree add --prefix=release-tools https://github.com/pohly/csi-release-tools.git master` - add release tools to a repo which does not have them yet (only once) +- `git subtree add --prefix=release-tools https://github.com/kubernetes-csi/csi-release-tools.git master` - add release tools to a repo which does not have them yet (only once) - `git subtree pull --prefix=release-tools https://github.com/kubernetes-csi/csi-release-tools.git master` - update local copy to latest upstream (whenever upstream changes) - edit, `git commit`, `git subtree push --prefix=release-tools git@github.com:/csi-release-tools.git ` - push to a new branch before submitting a PR From 73db45967f57f911fbe707032c7b241af82ad5d7 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Wed, 23 Jan 2019 14:41:26 +0100 Subject: [PATCH 08/41] verify-subtree.sh: ensure that there are no local commits The goal is to enforce that changes get merged upstream first and only get into the local repo via a normal "git subtree merge". --- verify-subtree.sh | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100755 verify-subtree.sh diff --git a/verify-subtree.sh b/verify-subtree.sh new file mode 100755 index 00000000..ce8375fc --- /dev/null +++ b/verify-subtree.sh @@ -0,0 +1,41 @@ +#! /bin/sh -e +# +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script verifies that the content of a directory managed +# by "git subtree" has not been modified locally. It does that +# by looking for commits that modify the files with the +# subtree prefix (aka directory) while ignoring merge +# commits. 
Merge commits are where "git subtree" pulls the +# upstream files into the directory. +# +# Theoretically a developer can subvert this check by modifying files +# in a merge commit, but in practice that shouldn't happen. + +DIR="$1" +if [ ! "$DIR" ]; then + echo "usage: $0 " >&2 + exit 1 +fi + +REV=$(git log -n1 --format=format:%H --no-merges -- "$DIR") +if [ "$REV" ]; then + echo "Directory '$DIR' contains non-upstream changes:" + echo + git log --no-merges -- "$DIR" + exit 1 +else + echo "$DIR is a clean copy of upstream." +fi From 9132a016e9465b0f6af506a9aa372b45aa1bfbe0 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Wed, 23 Jan 2019 14:49:07 +0100 Subject: [PATCH 09/41] test: split up into individual targets, run all "make test" used to abort after the first test failure. That was partly intentional: if the simple tests already fail (for example, because of a syntax error), then there is no point in continuing to test. However, it also makes it harder to find all errors in a CI system when the errors are unrelated (first error shows up, gets fixed, next error shows up, etc.). Now "make test" still aborts early, but "make -k test" is used in the CI and will run all individual tests because they are split up into different targets. --- build.make | 15 +++++++++++++++ travis.yml | 2 +- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/build.make b/build.make index 42146483..4818b512 100644 --- a/build.make +++ b/build.make @@ -89,8 +89,23 @@ clean: -rm -rf bin test: + +.PHONY: test-go +test: test-go +test-go: + @ echo; echo $@ go test `go list ./... | grep -v 'vendor'` $(TESTARGS) + +.PHONY: test-vet +test: test-vet +test-vet: + @ echo; echo $@ go vet `go list ./... | grep -v vendor` + +.PHONY: test-fmt +test: test-fmt +test-fmt: + @ echo; echo $@ files=$$(find . -name '*.go' | grep -v './vendor'); \ if [ $$(gofmt -d $$files | wc -l) -ne 0 ]; then \ echo "formatting errors:"; \ diff --git a/travis.yml b/travis.yml index a44b0017..b5a360af 100644 --- a/travis.yml +++ b/travis.yml @@ -6,7 +6,7 @@ matrix: include: - go: 1.11.1 script: -- make all test +- make -k all test after_success: - if [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then docker login -u "${DOCKER_USERNAME}" -p "${DOCKER_PASSWORD}" quay.io; From f28c6b60aaa5f1ab61cb606744f25cc0ed8081a1 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Wed, 23 Jan 2019 14:52:09 +0100 Subject: [PATCH 10/41] test: verify that 'release-tools' subtree is clean We don't want to allow local modifications in the subtree. Everything should go to the csi-release-tools repo first. --- build.make | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/build.make b/build.make index 4818b512..f07f25e8 100644 --- a/build.make +++ b/build.make @@ -112,3 +112,9 @@ test-fmt: gofmt -d $$files; \ false; \ fi + +.PHONY: test-subtree +test: test-subtree +test-subtree: + @ echo; echo $@ + ./release-tools/verify-subtree.sh release-tools From 154e33d434e49657651e669956eac56c7d43e073 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Thu, 24 Jan 2019 16:42:23 +0100 Subject: [PATCH 11/41] build.make: clarify usage of "make V=1" This may or may not work, depending on which packages have tests and whether they contain glog. 
--- build.make | 1 + 1 file changed, 1 insertion(+) diff --git a/build.make b/build.make index f07f25e8..010dcb94 100644 --- a/build.make +++ b/build.make @@ -51,6 +51,7 @@ IMAGE_TAGS+=$(shell tagged="$$(git describe --tags --match='v*' --abbrev=0)"; if IMAGE_NAME=$(REGISTRY_NAME)/$* ifdef V +# Adding "-alsologtostderr" assumes that all test binaries contain glog. This is not guaranteed. TESTARGS = -v -args -alsologtostderr -v 5 else TESTARGS = From 147892c95429680cf2caa4a6ce32461fa50422d3 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Thu, 24 Jan 2019 16:43:24 +0100 Subject: [PATCH 12/41] build.make: support suppressing checks Individual repos may have to filter out certain packages from testing. For example, in csi-test the cmd/csi-sanity directory contains a special test that depends on additional parameters that set the CSI driver to test against. --- build.make | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/build.make b/build.make index 010dcb94..dfc405e0 100644 --- a/build.make +++ b/build.make @@ -57,6 +57,9 @@ else TESTARGS = endif +# Specific packages can be excluded from each of the tests below by setting the *_FILTER_CMD variables +# to something like "| grep -v 'github.com/kubernetes-csi/project/pkg/foobar'". See usage below. + build-%: mkdir -p bin CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-X main.version=$(REV) -extldflags "-static"' -o ./bin/$* ./cmd/$* @@ -95,19 +98,19 @@ test: test: test-go test-go: @ echo; echo $@ - go test `go list ./... | grep -v 'vendor'` $(TESTARGS) + go test `go list ./... | grep -v 'vendor' $(TEST_GO_FILTER_CMD)` $(TESTARGS) .PHONY: test-vet test: test-vet test-vet: @ echo; echo $@ - go vet `go list ./... | grep -v vendor` + go vet `go list ./... | grep -v vendor $(TEST_VET_FILTER_CMD)` .PHONY: test-fmt test: test-fmt test-fmt: @ echo; echo $@ - files=$$(find . -name '*.go' | grep -v './vendor'); \ + files=$$(find . -name '*.go' | grep -v './vendor' $(TEST_FMT_FILTER_CMD)); \ if [ $$(gofmt -d $$files | wc -l) -ne 0 ]; then \ echo "formatting errors:"; \ gofmt -d $$files; \ From 09436b9f9011cbb997f9226f243347bd4e802a3d Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 25 Jan 2019 11:48:59 +0100 Subject: [PATCH 13/41] build.make: fix pushing of "canary" image from master branch After merging into external-attacher, the next Travis CI run did not push the "canary" image because the check for "canary" only covered the case where "-canary" is used as suffix (https://travis-ci.org/kubernetes-csi/external-attacher/builds/484095261). --- build.make | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.make b/build.make index f07f25e8..a6a8916e 100644 --- a/build.make +++ b/build.make @@ -70,7 +70,7 @@ push-%: container-% docker push $(IMAGE_NAME):$$tag; \ }; \ for tag in $(IMAGE_TAGS); do \ - if echo $$tag | grep -q -e '-canary$$'; then \ + if [ "$$tag" = "canary" ] || echo "$$tag" | grep -q -e '-canary$$'; then \ : "creating or overwriting canary image"; \ push_image; \ elif docker pull $(IMAGE_NAME):$$tag 2>&1 | tee /dev/stderr | grep -q "manifest for $(IMAGE_NAME):$$tag not found"; then \ From b0336b553c3770a34f67336a320ef8d1f77b28f8 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 25 Jan 2019 11:53:36 +0100 Subject: [PATCH 14/41] build.make: more readable "make test" output The introduction for each individual test looked like an actual command: test-subtree ./release-tools/verify-subtree.sh release-tools Directory 'release-tools' contains non-upstream changes: ... 
It's better to make it look like a shell comment and increase its visibility with a longer prefix: ### test-subtree: ./release-tools/verify-subtree.sh release-tools ... --- build.make | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/build.make b/build.make index dfc405e0..4296ff47 100644 --- a/build.make +++ b/build.make @@ -97,19 +97,19 @@ test: .PHONY: test-go test: test-go test-go: - @ echo; echo $@ + @ echo; echo "### $@:" go test `go list ./... | grep -v 'vendor' $(TEST_GO_FILTER_CMD)` $(TESTARGS) .PHONY: test-vet test: test-vet test-vet: - @ echo; echo $@ + @ echo; echo "### $@:" go vet `go list ./... | grep -v vendor $(TEST_VET_FILTER_CMD)` .PHONY: test-fmt test: test-fmt test-fmt: - @ echo; echo $@ + @ echo; echo "### $@:" files=$$(find . -name '*.go' | grep -v './vendor' $(TEST_FMT_FILTER_CMD)); \ if [ $$(gofmt -d $$files | wc -l) -ne 0 ]; then \ echo "formatting errors:"; \ @@ -120,5 +120,5 @@ test-fmt: .PHONY: test-subtree test: test-subtree test-subtree: - @ echo; echo $@ + @ echo; echo "### $@:" ./release-tools/verify-subtree.sh release-tools From cc564f929a4e32a829d35272533242b94797aa22 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Wed, 6 Mar 2019 17:23:07 +0100 Subject: [PATCH 15/41] verify-subtree.sh: relax check and ignore old content If for whatever reasons a repo already had a `release-tools` directory before doing a clean import of it with `git subtree`, the check used to fail because it found those old commits. This can be fixed by telling `git log` to stop when the directory disappears from the repo. There has to be a commit with removes the old content, because otherwise `git subtree add` doesn't work. Fixes: https://github.com/kubernetes-csi/external-resizer/issues/21 --- verify-subtree.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/verify-subtree.sh b/verify-subtree.sh index ce8375fc..f04a9fa2 100755 --- a/verify-subtree.sh +++ b/verify-subtree.sh @@ -30,7 +30,7 @@ if [ ! "$DIR" ]; then exit 1 fi -REV=$(git log -n1 --format=format:%H --no-merges -- "$DIR") +REV=$(git log -n1 --remove-empty --format=format:%H --no-merges -- "$DIR") if [ "$REV" ]; then echo "Directory '$DIR' contains non-upstream changes:" echo From 94fc1e31d8acc93663992d117ecf9fc3ca9fceb0 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 15 Mar 2019 11:08:53 +0100 Subject: [PATCH 16/41] build.make: avoid unit-testing E2E test suite In repos that have a test/e2e, that test suite should be run separately because it depends on a running cluster. --- build.make | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.make b/build.make index 8ca0b2c2..6ea32795 100644 --- a/build.make +++ b/build.make @@ -98,7 +98,7 @@ test: test: test-go test-go: @ echo; echo "### $@:" - go test `go list ./... | grep -v 'vendor' $(TEST_GO_FILTER_CMD)` $(TESTARGS) + go test `go list ./... | grep -v -e 'vendor' -e '/test/e2e$$' $(TEST_GO_FILTER_CMD)` $(TESTARGS) .PHONY: test-vet test: test-vet From fb13c5198f899657baac7bb44156abc6d0f20724 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 15 Mar 2019 11:55:06 +0100 Subject: [PATCH 17/41] verify-shellcheck.sh: import from Kubernetes This is an unmodified copy of kubernetes/hack/verify-shellcheck.sh revision d5a3db003916b1d33b503ccd2e4897e094d8af77. 
--- util.sh | 839 +++++++++++++++++++++++++++++++++++++++++++ verify-shellcheck.sh | 187 ++++++++++ 2 files changed, 1026 insertions(+) create mode 100755 util.sh create mode 100755 verify-shellcheck.sh diff --git a/util.sh b/util.sh new file mode 100755 index 00000000..31ce9fc4 --- /dev/null +++ b/util.sh @@ -0,0 +1,839 @@ +#!/usr/bin/env bash + +# Copyright 2014 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +function kube::util::sourced_variable { + # Call this function to tell shellcheck that a variable is supposed to + # be used from other calling context. This helps quiet an "unused + # variable" warning from shellcheck and also document your code. + true +} + +kube::util::sortable_date() { + date "+%Y%m%d-%H%M%S" +} + +# arguments: target, item1, item2, item3, ... +# returns 0 if target is in the given items, 1 otherwise. +kube::util::array_contains() { + local search="$1" + local element + shift + for element; do + if [[ "${element}" == "${search}" ]]; then + return 0 + fi + done + return 1 +} + +kube::util::wait_for_url() { + local url=$1 + local prefix=${2:-} + local wait=${3:-1} + local times=${4:-30} + local maxtime=${5:-1} + + command -v curl >/dev/null || { + kube::log::usage "curl must be installed" + exit 1 + } + + local i + for i in $(seq 1 "${times}"); do + local out + if out=$(curl --max-time "${maxtime}" -gkfs "${url}" 2>/dev/null); then + kube::log::status "On try ${i}, ${prefix}: ${out}" + return 0 + fi + sleep "${wait}" + done + kube::log::error "Timed out waiting for ${prefix} to answer at ${url}; tried ${times} waiting ${wait} between each" + return 1 +} + +# Example: kube::util::trap_add 'echo "in trap DEBUG"' DEBUG +# See: http://stackoverflow.com/questions/3338030/multiple-bash-traps-for-the-same-signal +kube::util::trap_add() { + local trap_add_cmd + trap_add_cmd=$1 + shift + + for trap_add_name in "$@"; do + local existing_cmd + local new_cmd + + # Grab the currently defined trap commands for this trap + existing_cmd=$(trap -p "${trap_add_name}" | awk -F"'" '{print $2}') + + if [[ -z "${existing_cmd}" ]]; then + new_cmd="${trap_add_cmd}" + else + new_cmd="${trap_add_cmd};${existing_cmd}" + fi + + # Assign the test. Disable the shellcheck warning telling that trap + # commands should be single quoted to avoid evaluating them at this + # point instead evaluating them at run time. The logic of adding new + # commands to a single trap requires them to be evaluated right away. + # shellcheck disable=SC2064 + trap "${new_cmd}" "${trap_add_name}" + done +} + +# Opposite of kube::util::ensure-temp-dir() +kube::util::cleanup-temp-dir() { + rm -rf "${KUBE_TEMP}" +} + +# Create a temp dir that'll be deleted at the end of this bash session. 
+# +# Vars set: +# KUBE_TEMP +kube::util::ensure-temp-dir() { + if [[ -z ${KUBE_TEMP-} ]]; then + KUBE_TEMP=$(mktemp -d 2>/dev/null || mktemp -d -t kubernetes.XXXXXX) + kube::util::trap_add kube::util::cleanup-temp-dir EXIT + fi +} + +kube::util::host_os() { + local host_os + case "$(uname -s)" in + Darwin) + host_os=darwin + ;; + Linux) + host_os=linux + ;; + *) + kube::log::error "Unsupported host OS. Must be Linux or Mac OS X." + exit 1 + ;; + esac + echo "${host_os}" +} + +kube::util::host_arch() { + local host_arch + case "$(uname -m)" in + x86_64*) + host_arch=amd64 + ;; + i?86_64*) + host_arch=amd64 + ;; + amd64*) + host_arch=amd64 + ;; + aarch64*) + host_arch=arm64 + ;; + arm64*) + host_arch=arm64 + ;; + arm*) + host_arch=arm + ;; + i?86*) + host_arch=x86 + ;; + s390x*) + host_arch=s390x + ;; + ppc64le*) + host_arch=ppc64le + ;; + *) + kube::log::error "Unsupported host arch. Must be x86_64, 386, arm, arm64, s390x or ppc64le." + exit 1 + ;; + esac + echo "${host_arch}" +} + +# This figures out the host platform without relying on golang. We need this as +# we don't want a golang install to be a prerequisite to building yet we need +# this info to figure out where the final binaries are placed. +kube::util::host_platform() { + echo "$(kube::util::host_os)/$(kube::util::host_arch)" +} + +kube::util::find-binary-for-platform() { + local -r lookfor="$1" + local -r platform="$2" + local locations=( + "${KUBE_ROOT}/_output/bin/${lookfor}" + "${KUBE_ROOT}/_output/dockerized/bin/${platform}/${lookfor}" + "${KUBE_ROOT}/_output/local/bin/${platform}/${lookfor}" + "${KUBE_ROOT}/platforms/${platform}/${lookfor}" + ) + # Also search for binary in bazel build tree. + # The bazel go rules place some binaries in subtrees like + # "bazel-bin/source/path/linux_amd64_pure_stripped/binaryname", so make sure + # the platform name is matched in the path. + while IFS=$'\n' read -r location; do + locations+=("$location"); + done < <(find "${KUBE_ROOT}/bazel-bin/" -type f -executable \ + \( -path "*/${platform/\//_}*/${lookfor}" -o -path "*/${lookfor}" \) 2>/dev/null || true) + + # List most recently-updated location. + local -r bin=$( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 ) + echo -n "${bin}" +} + +kube::util::find-binary() { + kube::util::find-binary-for-platform "$1" "$(kube::util::host_platform)" +} + +# Run all known doc generators (today gendocs and genman for kubectl) +# $1 is the directory to put those generated documents +kube::util::gen-docs() { + local dest="$1" + + # Find binary + gendocs=$(kube::util::find-binary "gendocs") + genkubedocs=$(kube::util::find-binary "genkubedocs") + genman=$(kube::util::find-binary "genman") + genyaml=$(kube::util::find-binary "genyaml") + genfeddocs=$(kube::util::find-binary "genfeddocs") + + # TODO: If ${genfeddocs} is not used from anywhere (it isn't used at + # least from k/k tree), remove it completely. 
+ kube::util::sourced_variable "${genfeddocs}" + + mkdir -p "${dest}/docs/user-guide/kubectl/" + "${gendocs}" "${dest}/docs/user-guide/kubectl/" + mkdir -p "${dest}/docs/admin/" + "${genkubedocs}" "${dest}/docs/admin/" "kube-apiserver" + "${genkubedocs}" "${dest}/docs/admin/" "kube-controller-manager" + "${genkubedocs}" "${dest}/docs/admin/" "cloud-controller-manager" + "${genkubedocs}" "${dest}/docs/admin/" "kube-proxy" + "${genkubedocs}" "${dest}/docs/admin/" "kube-scheduler" + "${genkubedocs}" "${dest}/docs/admin/" "kubelet" + "${genkubedocs}" "${dest}/docs/admin/" "kubeadm" + + mkdir -p "${dest}/docs/man/man1/" + "${genman}" "${dest}/docs/man/man1/" "kube-apiserver" + "${genman}" "${dest}/docs/man/man1/" "kube-controller-manager" + "${genman}" "${dest}/docs/man/man1/" "cloud-controller-manager" + "${genman}" "${dest}/docs/man/man1/" "kube-proxy" + "${genman}" "${dest}/docs/man/man1/" "kube-scheduler" + "${genman}" "${dest}/docs/man/man1/" "kubelet" + "${genman}" "${dest}/docs/man/man1/" "kubectl" + "${genman}" "${dest}/docs/man/man1/" "kubeadm" + + mkdir -p "${dest}/docs/yaml/kubectl/" + "${genyaml}" "${dest}/docs/yaml/kubectl/" + + # create the list of generated files + pushd "${dest}" > /dev/null || return 1 + touch docs/.generated_docs + find . -type f | cut -sd / -f 2- | LC_ALL=C sort > docs/.generated_docs + popd > /dev/null || return 1 +} + +# Removes previously generated docs-- we don't want to check them in. $KUBE_ROOT +# must be set. +kube::util::remove-gen-docs() { + if [ -e "${KUBE_ROOT}/docs/.generated_docs" ]; then + # remove all of the old docs; we don't want to check them in. + while read -r file; do + rm "${KUBE_ROOT}/${file}" 2>/dev/null || true + done <"${KUBE_ROOT}/docs/.generated_docs" + # The docs/.generated_docs file lists itself, so we don't need to explicitly + # delete it. + fi +} + +# Takes a group/version and returns the path to its location on disk, sans +# "pkg". E.g.: +# * default behavior: extensions/v1beta1 -> apis/extensions/v1beta1 +# * default behavior for only a group: experimental -> apis/experimental +# * Special handling for empty group: v1 -> api/v1, unversioned -> api/unversioned +# * Special handling for groups suffixed with ".k8s.io": foo.k8s.io/v1 -> apis/foo/v1 +# * Very special handling for when both group and version are "": / -> api +kube::util::group-version-to-pkg-path() { + local group_version="$1" + + while IFS=$'\n' read -r api; do + if [[ "${api}" = "${group_version/.*k8s.io/}" ]]; then + echo "vendor/k8s.io/api/${group_version/.*k8s.io/}" + return + fi + done < <(cd "${KUBE_ROOT}/staging/src/k8s.io/api" && find . -name types.go -exec dirname {} \; | sed "s|\./||g" | sort) + + # "v1" is the API GroupVersion + if [[ "${group_version}" == "v1" ]]; then + echo "vendor/k8s.io/api/core/v1" + return + fi + + # Special cases first. + # TODO(lavalamp): Simplify this by moving pkg/api/v1 and splitting pkg/api, + # moving the results to pkg/apis/api. + case "${group_version}" in + # both group and version are "", this occurs when we generate deep copies for internal objects of the legacy v1 API. + __internal) + echo "pkg/apis/core" + ;; + meta/v1) + echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1" + ;; + meta/v1beta1) + echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1" + ;; + *.k8s.io) + echo "pkg/apis/${group_version%.*k8s.io}" + ;; + *.k8s.io/*) + echo "pkg/apis/${group_version/.*k8s.io/}" + ;; + *) + echo "pkg/apis/${group_version%__internal}" + ;; + esac +} + +# Takes a group/version and returns the swagger-spec file name. 
+# default behavior: extensions/v1beta1 -> extensions_v1beta1 +# special case for v1: v1 -> v1 +kube::util::gv-to-swagger-name() { + local group_version="$1" + case "${group_version}" in + v1) + echo "v1" + ;; + *) + echo "${group_version%/*}_${group_version#*/}" + ;; + esac +} + +# Returns the name of the upstream remote repository name for the local git +# repo, e.g. "upstream" or "origin". +kube::util::git_upstream_remote_name() { + git remote -v | grep fetch |\ + grep -E 'github.com[/:]kubernetes/kubernetes|k8s.io/kubernetes' |\ + head -n 1 | awk '{print $1}' +} + +# Ensures the current directory is a git tree for doing things like restoring or +# validating godeps +kube::util::create-fake-git-tree() { + local -r target_dir=${1:-$(pwd)} + + pushd "${target_dir}" >/dev/null || return 1 + git init >/dev/null + git config --local user.email "nobody@k8s.io" + git config --local user.name "$0" + git add . >/dev/null + git commit -q -m "Snapshot" >/dev/null + if (( ${KUBE_VERBOSE:-5} >= 6 )); then + kube::log::status "${target_dir} is now a git tree." + fi + popd >/dev/null || return 1 +} + +# Checks whether godep restore was run in the current GOPATH, i.e. that all referenced repos exist +# and are checked out to the referenced rev. +kube::util::godep_restored() { + local -r godeps_json=${1:-Godeps/Godeps.json} + local -r gopath=${2:-${GOPATH%:*}} + + kube::util::require-jq + + local root + local old_rev="" + while read -r path rev; do + rev="${rev//[\'\"]}" # remove quotes which are around revs sometimes + + if [[ "${rev}" == "${old_rev}" ]] && [[ "${path}" == "${root}"* ]]; then + # avoid checking the same git/hg root again + continue + fi + + root="${path}" + while [ "${root}" != "." ] && [ ! -d "${gopath}/src/${root}/.git" ] && [ ! -d "${gopath}/src/${root}/.hg" ]; do + root=$(dirname "${root}") + done + if [ "${root}" == "." ]; then + echo "No checkout of ${path} found in GOPATH \"${gopath}\"." 1>&2 + return 1 + fi + local head + if [ -d "${gopath}/src/${root}/.git" ]; then + head="$(cd "${gopath}/src/${root}" && git rev-parse HEAD)" + else + head="$(cd "${gopath}/src/${root}" && hg parent --template '{node}')" + fi + if [ "${head}" != "${rev}" ]; then + echo "Unexpected HEAD '${head}' at ${gopath}/src/${root}, expected '${rev}'." 1>&2 + return 1 + fi + old_rev="${rev}" + done < <(jq '.Deps|.[]|.ImportPath + " " + .Rev' -r < "${godeps_json}") + return 0 +} + +# Exits script if working directory is dirty. If it's run interactively in the terminal +# the user can commit changes in a second terminal. This script will wait. +kube::util::ensure_clean_working_dir() { + while ! git diff HEAD --exit-code &>/dev/null; do + echo -e "\nUnexpected dirty working directory:\n" + if tty -s; then + git status -s + else + git diff -a # be more verbose in log files without tty + exit 1 + fi | sed 's/^/ /' + echo -e "\nCommit your changes in another terminal and then continue here by pressing enter." + read -r + done 1>&2 +} + +# Ensure that the given godep version is installed and in the path. Almost +# nobody should use any version but the default. +# +# Sets: +# KUBE_GODEP: The path to the godep binary +# +kube::util::ensure_godep_version() { + local godep_target_version=${1:-"v80-k8s-r1"} # this version is known to work + + # If KUBE_GODEP is already set, and it's the right version, then use it. 
+ if [[ -n "${KUBE_GODEP:-}" && "$(${KUBE_GODEP:?} version 2>/dev/null)" == *"godep ${godep_target_version}"* ]]; then + kube::log::status "Using ${KUBE_GODEP}" + return + fi + + # Otherwise, install forked godep + kube::log::status "Installing godep version ${godep_target_version}" + GOBIN="${KUBE_OUTPUT_BINPATH}" go install k8s.io/kubernetes/third_party/forked/godep + export KUBE_GODEP="${KUBE_OUTPUT_BINPATH}/godep" + kube::log::status "Installed ${KUBE_GODEP}" + + # Verify that the installed godep from fork is what we expect + if [[ "$(${KUBE_GODEP:?} version 2>/dev/null)" != *"godep ${godep_target_version}"* ]]; then + kube::log::error "Expected godep ${godep_target_version} from ${KUBE_GODEP}, got $(${KUBE_GODEP:?} version)" + return 1 + fi +} + +# Ensure that none of the staging repos is checked out in the GOPATH because this +# easily confused godep. +kube::util::ensure_no_staging_repos_in_gopath() { + kube::util::ensure_single_dir_gopath + local error=0 + for repo_file in "${KUBE_ROOT}"/staging/src/k8s.io/*; do + if [[ ! -d "${repo_file}" ]]; then + # not a directory or there were no files + continue; + fi + repo="$(basename "${repo_file}")" + if [ -e "${GOPATH}/src/k8s.io/${repo}" ]; then + echo "k8s.io/${repo} exists in GOPATH. Remove before running godep-save.sh." 1>&2 + error=1 + fi + done + if [ "${error}" = "1" ]; then + exit 1 + fi +} + +# Checks that the GOPATH is simple, i.e. consists only of one directory, not multiple. +kube::util::ensure_single_dir_gopath() { + if [[ "${GOPATH}" == *:* ]]; then + echo "GOPATH must consist of a single directory." 1>&2 + exit 1 + fi +} + +# Find the base commit using: +# $PULL_BASE_SHA if set (from Prow) +# current ref from the remote upstream branch +kube::util::base_ref() { + local -r git_branch=$1 + + if [[ -n ${PULL_BASE_SHA:-} ]]; then + echo "${PULL_BASE_SHA}" + return + fi + + full_branch="$(kube::util::git_upstream_remote_name)/${git_branch}" + + # make sure the branch is valid, otherwise the check will pass erroneously. + if ! git describe "${full_branch}" >/dev/null; then + # abort! + exit 1 + fi + + echo "${full_branch}" +} + +# Checks whether there are any files matching pattern $2 changed between the +# current branch and upstream branch named by $1. +# Returns 1 (false) if there are no changes +# 0 (true) if there are changes detected. +kube::util::has_changes() { + local -r git_branch=$1 + local -r pattern=$2 + local -r not_pattern=${3:-totallyimpossiblepattern} + + local base_ref + base_ref=$(kube::util::base_ref "${git_branch}") + echo "Checking for '${pattern}' changes against '${base_ref}'" + + # notice this uses ... to find the first shared ancestor + if git diff --name-only "${base_ref}...HEAD" | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then + return 0 + fi + # also check for pending changes + if git status --porcelain | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then + echo "Detected '${pattern}' uncommitted changes." + return 0 + fi + echo "No '${pattern}' changes detected." + return 1 +} + +kube::util::download_file() { + local -r url=$1 + local -r destination_file=$2 + + rm "${destination_file}" 2&> /dev/null || true + + for i in $(seq 5) + do + if ! curl -fsSL --retry 3 --keepalive-time 2 "${url}" -o "${destination_file}"; then + echo "Downloading ${url} failed. $((5-i)) retries left." + sleep 1 + else + echo "Downloading ${url} succeed" + return 0 + fi + done + return 1 +} + +# Test whether openssl is installed. 
+# Sets: +# OPENSSL_BIN: The path to the openssl binary to use +function kube::util::test_openssl_installed { + if ! openssl version >& /dev/null; then + echo "Failed to run openssl. Please ensure openssl is installed" + exit 1 + fi + + OPENSSL_BIN=$(command -v openssl) +} + +# creates a client CA, args are sudo, dest-dir, ca-id, purpose +# purpose is dropped in after "key encipherment", you usually want +# '"client auth"' +# '"server auth"' +# '"client auth","server auth"' +function kube::util::create_signing_certkey { + local sudo=$1 + local dest_dir=$2 + local id=$3 + local purpose=$4 + # Create client ca + ${sudo} /usr/bin/env bash -e < "${dest_dir}/${id}-ca-config.json" +EOF +} + +# signs a client certificate: args are sudo, dest-dir, CA, filename (roughly), username, groups... +function kube::util::create_client_certkey { + local sudo=$1 + local dest_dir=$2 + local ca=$3 + local id=$4 + local cn=${5:-$4} + local groups="" + local SEP="" + shift 5 + while [ -n "${1:-}" ]; do + groups+="${SEP}{\"O\":\"$1\"}" + SEP="," + shift 1 + done + ${sudo} /usr/bin/env bash -e < /dev/null +apiVersion: v1 +kind: Config +clusters: + - cluster: + certificate-authority: ${ca_file} + server: https://${api_host}:${api_port}/ + name: local-up-cluster +users: + - user: + token: ${token} + client-certificate: ${dest_dir}/client-${client_id}.crt + client-key: ${dest_dir}/client-${client_id}.key + name: local-up-cluster +contexts: + - context: + cluster: local-up-cluster + user: local-up-cluster + name: local-up-cluster +current-context: local-up-cluster +EOF + + # flatten the kubeconfig files to make them self contained + username=$(whoami) + ${sudo} /usr/bin/env bash -e < "/tmp/${client_id}.kubeconfig" + mv -f "/tmp/${client_id}.kubeconfig" "${dest_dir}/${client_id}.kubeconfig" + chown ${username} "${dest_dir}/${client_id}.kubeconfig" +EOF +} + +# Determines if docker can be run, failures may simply require that the user be added to the docker group. +function kube::util::ensure_docker_daemon_connectivity { + IFS=" " read -ra DOCKER <<< "${DOCKER_OPTS}" + # Expand ${DOCKER[@]} only if it's not unset. This is to work around + # Bash 3 issue with unbound variable. + DOCKER=(docker ${DOCKER[@]:+"${DOCKER[@]}"}) + if ! "${DOCKER[@]}" info > /dev/null 2>&1 ; then + cat <<'EOF' >&2 +Can't connect to 'docker' daemon. please fix and retry. + +Possible causes: + - Docker Daemon not started + - Linux: confirm via your init system + - macOS w/ docker-machine: run `docker-machine ls` and `docker-machine start ` + - macOS w/ Docker for Mac: Check the menu bar and start the Docker application + - DOCKER_HOST hasn't been set or is set incorrectly + - Linux: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}` + - macOS w/ docker-machine: run `eval "$(docker-machine env )"` + - macOS w/ Docker for Mac: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}` + - Other things to check: + - Linux: User isn't in 'docker' group. Add and relogin. + - Something like 'sudo usermod -a -G docker ${USER}' + - RHEL7 bug and workaround: https://bugzilla.redhat.com/show_bug.cgi?id=1119282#c8 +EOF + return 1 + fi +} + +# Wait for background jobs to finish. Return with +# an error status if any of the jobs failed. 
+kube::util::wait-for-jobs() { + local fail=0 + local job + for job in $(jobs -p); do + wait "${job}" || fail=$((fail + 1)) + done + return ${fail} +} + +# kube::util::join +# Concatenates the list elements with the delimiter passed as first parameter +# +# Ex: kube::util::join , a b c +# -> a,b,c +function kube::util::join { + local IFS="$1" + shift + echo "$*" +} + +# Downloads cfssl/cfssljson into $1 directory if they do not already exist in PATH +# +# Assumed vars: +# $1 (cfssl directory) (optional) +# +# Sets: +# CFSSL_BIN: The path of the installed cfssl binary +# CFSSLJSON_BIN: The path of the installed cfssljson binary +# +function kube::util::ensure-cfssl { + if command -v cfssl &>/dev/null && command -v cfssljson &>/dev/null; then + CFSSL_BIN=$(command -v cfssl) + CFSSLJSON_BIN=$(command -v cfssljson) + return 0 + fi + + host_arch=$(kube::util::host_arch) + + if [[ "${host_arch}" != "amd64" ]]; then + echo "Cannot download cfssl on non-amd64 hosts and cfssl does not appear to be installed." + echo "Please install cfssl and cfssljson and verify they are in \$PATH." + echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..." + exit 1 + fi + + # Create a temp dir for cfssl if no directory was given + local cfssldir=${1:-} + if [[ -z "${cfssldir}" ]]; then + kube::util::ensure-temp-dir + cfssldir="${KUBE_TEMP}/cfssl" + fi + + mkdir -p "${cfssldir}" + pushd "${cfssldir}" > /dev/null || return 1 + + echo "Unable to successfully run 'cfssl' from ${PATH}; downloading instead..." + kernel=$(uname -s) + case "${kernel}" in + Linux) + curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 + curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 + ;; + Darwin) + curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_darwin-amd64 + curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_darwin-amd64 + ;; + *) + echo "Unknown, unsupported platform: ${kernel}." >&2 + echo "Supported platforms: Linux, Darwin." >&2 + exit 2 + esac + + chmod +x cfssl || true + chmod +x cfssljson || true + + CFSSL_BIN="${cfssldir}/cfssl" + CFSSLJSON_BIN="${cfssldir}/cfssljson" + if [[ ! -x ${CFSSL_BIN} || ! -x ${CFSSLJSON_BIN} ]]; then + echo "Failed to download 'cfssl'. Please install cfssl and cfssljson and verify they are in \$PATH." + echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..." + exit 1 + fi + popd > /dev/null || return 1 +} + +# kube::util::ensure_dockerized +# Confirms that the script is being run inside a kube-build image +# +function kube::util::ensure_dockerized { + if [[ -f /kube-build-image ]]; then + return 0 + else + echo "ERROR: This script is designed to be run inside a kube-build container" + exit 1 + fi +} + +# kube::util::ensure-gnu-sed +# Determines which sed binary is gnu-sed on linux/darwin +# +# Sets: +# SED: The name of the gnu-sed binary +# +function kube::util::ensure-gnu-sed { + if LANG=C sed --help 2>&1 | grep -q GNU; then + SED="sed" + elif command -v gsed &>/dev/null; then + SED="gsed" + else + kube::log::error "Failed to find GNU sed as sed or gsed. If you are on Mac: brew install gnu-sed." >&2 + return 1 + fi + kube::util::sourced_variable "${SED}" +} + +# kube::util::check-file-in-alphabetical-order +# Check that the file is in alphabetical order +# +function kube::util::check-file-in-alphabetical-order { + local failure_file="$1" + if ! 
diff -u "${failure_file}" <(LC_ALL=C sort "${failure_file}"); then + { + echo + echo "${failure_file} is not in alphabetical order. Please sort it:" + echo + echo " LC_ALL=C sort -o ${failure_file} ${failure_file}" + echo + } >&2 + false + fi +} + +# kube::util::require-jq +# Checks whether jq is installed. +function kube::util::require-jq { + if ! command -v jq &>/dev/null; then + echo "jq not found. Please install." 1>&2 + return 1 + fi +} + +# Some useful colors. +if [[ -z "${color_start-}" ]]; then + declare -r color_start="\033[" + declare -r color_red="${color_start}0;31m" + declare -r color_yellow="${color_start}0;33m" + declare -r color_green="${color_start}0;32m" + declare -r color_blue="${color_start}1;34m" + declare -r color_cyan="${color_start}1;36m" + declare -r color_norm="${color_start}0m" + + kube::util::sourced_variable "${color_start}" + kube::util::sourced_variable "${color_red}" + kube::util::sourced_variable "${color_yellow}" + kube::util::sourced_variable "${color_green}" + kube::util::sourced_variable "${color_blue}" + kube::util::sourced_variable "${color_cyan}" + kube::util::sourced_variable "${color_norm}" +fi + +# ex: ts=2 sw=2 et filetype=sh diff --git a/verify-shellcheck.sh b/verify-shellcheck.sh new file mode 100755 index 00000000..1b882fa8 --- /dev/null +++ b/verify-shellcheck.sh @@ -0,0 +1,187 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +source "${KUBE_ROOT}/hack/lib/init.sh" +source "${KUBE_ROOT}/hack/lib/util.sh" + +# required version for this script, if not installed on the host we will +# use the official docker image instead. keep this in sync with SHELLCHECK_IMAGE +SHELLCHECK_VERSION="0.6.0" +# upstream shellcheck latest stable image as of January 10th, 2019 +SHELLCHECK_IMAGE="koalaman/shellcheck-alpine:v0.6.0@sha256:7d4d712a2686da99d37580b4e2f45eb658b74e4b01caf67c1099adc294b96b52" + +# fixed name for the shellcheck docker container so we can reliably clean it up +SHELLCHECK_CONTAINER="k8s-shellcheck" + +# disabled lints +disabled=( + # this lint disallows non-constant source, which we use extensively without + # any known bugs + 1090 + # this lint prefers command -v to which, they are not the same + 2230 +) +# comma separate for passing to shellcheck +join_by() { + local IFS="$1"; + shift; + echo "$*"; +} +SHELLCHECK_DISABLED="$(join_by , "${disabled[@]}")" +readonly SHELLCHECK_DISABLED + +# creates the shellcheck container for later use +create_container () { + # TODO(bentheelder): this is a performance hack, we create the container with + # a sleep MAX_INT32 so that it is effectively paused. + # We then repeatedly exec to it to run each shellcheck, and later rm it when + # we're done. + # This is incredibly much faster than creating a container for each shellcheck + # call ... 
+ docker run --name "${SHELLCHECK_CONTAINER}" -d --rm -v "${KUBE_ROOT}:${KUBE_ROOT}" -w "${KUBE_ROOT}" --entrypoint="sleep" "${SHELLCHECK_IMAGE}" 2147483647 +} +# removes the shellcheck container +remove_container () { + docker rm -f "${SHELLCHECK_CONTAINER}" &> /dev/null || true +} + +# ensure we're linting the k8s source tree +cd "${KUBE_ROOT}" + +# find all shell scripts excluding ./_*, ./.git/*, ./vendor*, +# and anything git-ignored +all_shell_scripts=() +while IFS=$'\n' read -r script; + do git check-ignore -q "$script" || all_shell_scripts+=("$script"); +done < <(find . -name "*.sh" \ + -not \( \ + -path ./_\* -o \ + -path ./.git\* -o \ + -path ./vendor\* \ + \)) + +# make sure known failures are sorted +failure_file="${KUBE_ROOT}/hack/.shellcheck_failures" +kube::util::check-file-in-alphabetical-order "${failure_file}" + +# load known failure files +failing_files=() +while IFS=$'\n' read -r script; + do failing_files+=("$script"); +done < <(cat "${failure_file}") + +# detect if the host machine has the required shellcheck version installed +# if so, we will use that instead. +HAVE_SHELLCHECK=false +if which shellcheck &>/dev/null; then + detected_version="$(shellcheck --version | grep 'version: .*')" + if [[ "${detected_version}" = "version: ${SHELLCHECK_VERSION}" ]]; then + HAVE_SHELLCHECK=true + fi +fi + +# tell the user which we've selected and possibly set up the container +if ${HAVE_SHELLCHECK}; then + echo "Using host shellcheck ${SHELLCHECK_VERSION} binary." +else + echo "Using shellcheck ${SHELLCHECK_VERSION} docker image." + # remove any previous container, ensure we will attempt to cleanup on exit, + # and create the container + remove_container + kube::util::trap_add 'remove_container' EXIT + if ! output="$(create_container 2>&1)"; then + { + echo "Failed to create shellcheck container with output: " + echo "" + echo "${output}" + } >&2 + exit 1 + fi +fi + +# lint each script, tracking failures +errors=() +not_failing=() +for f in "${all_shell_scripts[@]}"; do + set +o errexit + if ${HAVE_SHELLCHECK}; then + failedLint=$(shellcheck --exclude="${SHELLCHECK_DISABLED}" "${f}") + else + failedLint=$(docker exec -t ${SHELLCHECK_CONTAINER} \ + shellcheck --exclude="${SHELLCHECK_DISABLED}" "${f}") + fi + set -o errexit + kube::util::array_contains "${f}" "${failing_files[@]}" && in_failing=$? || in_failing=$? + if [[ -n "${failedLint}" ]] && [[ "${in_failing}" -ne "0" ]]; then + errors+=( "${failedLint}" ) + fi + if [[ -z "${failedLint}" ]] && [[ "${in_failing}" -eq "0" ]]; then + not_failing+=( "${f}" ) + fi +done + +# Check to be sure all the packages that should pass lint are. +if [ ${#errors[@]} -eq 0 ]; then + echo 'Congratulations! All shell files are passing lint (excluding those in hack/.shellcheck_failures).' +else + { + echo "Errors from shellcheck:" + for err in "${errors[@]}"; do + echo "$err" + done + echo + echo 'Please review the above warnings. You can test via "./hack/verify-shellcheck"' + echo 'If the above warnings do not make sense, you can exempt this package from shellcheck' + echo 'checking by adding it to hack/.shellcheck_failures (if your reviewer is okay with it).' + echo + } >&2 + false +fi + +if [[ ${#not_failing[@]} -gt 0 ]]; then + { + echo "Some packages in hack/.shellcheck_failures are passing shellcheck. Please remove them." 
+ echo + for f in "${not_failing[@]}"; do + echo " $f" + done + echo + } >&2 + false +fi + +# Check that all failing_packages actually still exist +gone=() +for f in "${failing_files[@]}"; do + kube::util::array_contains "$f" "${all_shell_scripts[@]}" || gone+=( "$f" ) +done + +if [[ ${#gone[@]} -gt 0 ]]; then + { + echo "Some files in hack/.shellcheck_failures do not exist anymore. Please remove them." + echo + for f in "${gone[@]}"; do + echo " $f" + done + echo + } >&2 + false +fi From e6db50df7eefbc7a0f116e5dff79b01d9cc22741 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Wed, 27 Mar 2019 17:39:43 +0100 Subject: [PATCH 18/41] check vendor directory This runs "dep check" to verify that the vendor directory is up-to-date and meets expectations (= done with dep >= 0.5.0). --- build.make | 10 ++++++++++ travis.yml | 5 +++++ 2 files changed, 15 insertions(+) diff --git a/build.make b/build.make index 8ca0b2c2..b4084061 100644 --- a/build.make +++ b/build.make @@ -117,6 +117,16 @@ test-fmt: false; \ fi +# This test only runs when dep >= 0.5 is installed, which is the case for the CI setup. +.PHONY: test-vendor +test: test-vendor +test-vendor: + @ echo; echo "### $@:" + @ case "$$(dep version 2>/dev/null | grep 'version *:')" in \ + *v0.[56789]*) dep check && echo "vendor up-to-date" || false;; \ + *) echo "skipping check, dep >= 0.5 required";; \ + esac + .PHONY: test-subtree test: test-subtree test-subtree: diff --git a/travis.yml b/travis.yml index b5a360af..1c05dfd9 100644 --- a/travis.yml +++ b/travis.yml @@ -5,6 +5,11 @@ services: matrix: include: - go: 1.11.1 +before_script: +- mkdir -p bin +- wget https://github.com/golang/dep/releases/download/v0.5.1/dep-linux-amd64 -O bin/dep +- chmod u+x bin/dep +- export PATH=$PWD/bin:$PATH script: - make -k all test after_success: From 104a1ac9699f22e815c54afc2620aef85bf490e2 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 15 Mar 2019 11:08:53 +0100 Subject: [PATCH 19/41] build.make: avoid unit-testing E2E test suite In repos that have a test/e2e, that test suite should be run separately because it depends on a running cluster. --- build.make | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.make b/build.make index 8ca0b2c2..6ea32795 100644 --- a/build.make +++ b/build.make @@ -98,7 +98,7 @@ test: test: test-go test-go: @ echo; echo "### $@:" - go test `go list ./... | grep -v 'vendor' $(TEST_GO_FILTER_CMD)` $(TESTARGS) + go test `go list ./... | grep -v -e 'vendor' -e '/test/e2e$$' $(TEST_GO_FILTER_CMD)` $(TESTARGS) .PHONY: test-vet test: test-vet From b2d25d4f4d7886b1d9ad40943dd06748e0f06874 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 15 Mar 2019 16:42:10 +0100 Subject: [PATCH 20/41] verify-shellcheck.sh: make it usable in csi-release-tools These are the modifications that were necessary to call this outside of Kubernetes. The support for excluding files from checking gets removed to simplify the script. It shouldn't be needed, because linting can be enabled after fixing whatever scripts might fail the check. 
--- util.sh | 691 ------------------------------------------- verify-shellcheck.sh | 71 +---- 2 files changed, 15 insertions(+), 747 deletions(-) diff --git a/util.sh b/util.sh index 31ce9fc4..abeb1b2e 100755 --- a/util.sh +++ b/util.sh @@ -39,31 +39,6 @@ kube::util::array_contains() { return 1 } -kube::util::wait_for_url() { - local url=$1 - local prefix=${2:-} - local wait=${3:-1} - local times=${4:-30} - local maxtime=${5:-1} - - command -v curl >/dev/null || { - kube::log::usage "curl must be installed" - exit 1 - } - - local i - for i in $(seq 1 "${times}"); do - local out - if out=$(curl --max-time "${maxtime}" -gkfs "${url}" 2>/dev/null); then - kube::log::status "On try ${i}, ${prefix}: ${out}" - return 0 - fi - sleep "${wait}" - done - kube::log::error "Timed out waiting for ${prefix} to answer at ${url}; tried ${times} waiting ${wait} between each" - return 1 -} - # Example: kube::util::trap_add 'echo "in trap DEBUG"' DEBUG # See: http://stackoverflow.com/questions/3338030/multiple-bash-traps-for-the-same-signal kube::util::trap_add() { @@ -93,422 +68,6 @@ kube::util::trap_add() { done } -# Opposite of kube::util::ensure-temp-dir() -kube::util::cleanup-temp-dir() { - rm -rf "${KUBE_TEMP}" -} - -# Create a temp dir that'll be deleted at the end of this bash session. -# -# Vars set: -# KUBE_TEMP -kube::util::ensure-temp-dir() { - if [[ -z ${KUBE_TEMP-} ]]; then - KUBE_TEMP=$(mktemp -d 2>/dev/null || mktemp -d -t kubernetes.XXXXXX) - kube::util::trap_add kube::util::cleanup-temp-dir EXIT - fi -} - -kube::util::host_os() { - local host_os - case "$(uname -s)" in - Darwin) - host_os=darwin - ;; - Linux) - host_os=linux - ;; - *) - kube::log::error "Unsupported host OS. Must be Linux or Mac OS X." - exit 1 - ;; - esac - echo "${host_os}" -} - -kube::util::host_arch() { - local host_arch - case "$(uname -m)" in - x86_64*) - host_arch=amd64 - ;; - i?86_64*) - host_arch=amd64 - ;; - amd64*) - host_arch=amd64 - ;; - aarch64*) - host_arch=arm64 - ;; - arm64*) - host_arch=arm64 - ;; - arm*) - host_arch=arm - ;; - i?86*) - host_arch=x86 - ;; - s390x*) - host_arch=s390x - ;; - ppc64le*) - host_arch=ppc64le - ;; - *) - kube::log::error "Unsupported host arch. Must be x86_64, 386, arm, arm64, s390x or ppc64le." - exit 1 - ;; - esac - echo "${host_arch}" -} - -# This figures out the host platform without relying on golang. We need this as -# we don't want a golang install to be a prerequisite to building yet we need -# this info to figure out where the final binaries are placed. -kube::util::host_platform() { - echo "$(kube::util::host_os)/$(kube::util::host_arch)" -} - -kube::util::find-binary-for-platform() { - local -r lookfor="$1" - local -r platform="$2" - local locations=( - "${KUBE_ROOT}/_output/bin/${lookfor}" - "${KUBE_ROOT}/_output/dockerized/bin/${platform}/${lookfor}" - "${KUBE_ROOT}/_output/local/bin/${platform}/${lookfor}" - "${KUBE_ROOT}/platforms/${platform}/${lookfor}" - ) - # Also search for binary in bazel build tree. - # The bazel go rules place some binaries in subtrees like - # "bazel-bin/source/path/linux_amd64_pure_stripped/binaryname", so make sure - # the platform name is matched in the path. - while IFS=$'\n' read -r location; do - locations+=("$location"); - done < <(find "${KUBE_ROOT}/bazel-bin/" -type f -executable \ - \( -path "*/${platform/\//_}*/${lookfor}" -o -path "*/${lookfor}" \) 2>/dev/null || true) - - # List most recently-updated location. 
- local -r bin=$( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 ) - echo -n "${bin}" -} - -kube::util::find-binary() { - kube::util::find-binary-for-platform "$1" "$(kube::util::host_platform)" -} - -# Run all known doc generators (today gendocs and genman for kubectl) -# $1 is the directory to put those generated documents -kube::util::gen-docs() { - local dest="$1" - - # Find binary - gendocs=$(kube::util::find-binary "gendocs") - genkubedocs=$(kube::util::find-binary "genkubedocs") - genman=$(kube::util::find-binary "genman") - genyaml=$(kube::util::find-binary "genyaml") - genfeddocs=$(kube::util::find-binary "genfeddocs") - - # TODO: If ${genfeddocs} is not used from anywhere (it isn't used at - # least from k/k tree), remove it completely. - kube::util::sourced_variable "${genfeddocs}" - - mkdir -p "${dest}/docs/user-guide/kubectl/" - "${gendocs}" "${dest}/docs/user-guide/kubectl/" - mkdir -p "${dest}/docs/admin/" - "${genkubedocs}" "${dest}/docs/admin/" "kube-apiserver" - "${genkubedocs}" "${dest}/docs/admin/" "kube-controller-manager" - "${genkubedocs}" "${dest}/docs/admin/" "cloud-controller-manager" - "${genkubedocs}" "${dest}/docs/admin/" "kube-proxy" - "${genkubedocs}" "${dest}/docs/admin/" "kube-scheduler" - "${genkubedocs}" "${dest}/docs/admin/" "kubelet" - "${genkubedocs}" "${dest}/docs/admin/" "kubeadm" - - mkdir -p "${dest}/docs/man/man1/" - "${genman}" "${dest}/docs/man/man1/" "kube-apiserver" - "${genman}" "${dest}/docs/man/man1/" "kube-controller-manager" - "${genman}" "${dest}/docs/man/man1/" "cloud-controller-manager" - "${genman}" "${dest}/docs/man/man1/" "kube-proxy" - "${genman}" "${dest}/docs/man/man1/" "kube-scheduler" - "${genman}" "${dest}/docs/man/man1/" "kubelet" - "${genman}" "${dest}/docs/man/man1/" "kubectl" - "${genman}" "${dest}/docs/man/man1/" "kubeadm" - - mkdir -p "${dest}/docs/yaml/kubectl/" - "${genyaml}" "${dest}/docs/yaml/kubectl/" - - # create the list of generated files - pushd "${dest}" > /dev/null || return 1 - touch docs/.generated_docs - find . -type f | cut -sd / -f 2- | LC_ALL=C sort > docs/.generated_docs - popd > /dev/null || return 1 -} - -# Removes previously generated docs-- we don't want to check them in. $KUBE_ROOT -# must be set. -kube::util::remove-gen-docs() { - if [ -e "${KUBE_ROOT}/docs/.generated_docs" ]; then - # remove all of the old docs; we don't want to check them in. - while read -r file; do - rm "${KUBE_ROOT}/${file}" 2>/dev/null || true - done <"${KUBE_ROOT}/docs/.generated_docs" - # The docs/.generated_docs file lists itself, so we don't need to explicitly - # delete it. - fi -} - -# Takes a group/version and returns the path to its location on disk, sans -# "pkg". E.g.: -# * default behavior: extensions/v1beta1 -> apis/extensions/v1beta1 -# * default behavior for only a group: experimental -> apis/experimental -# * Special handling for empty group: v1 -> api/v1, unversioned -> api/unversioned -# * Special handling for groups suffixed with ".k8s.io": foo.k8s.io/v1 -> apis/foo/v1 -# * Very special handling for when both group and version are "": / -> api -kube::util::group-version-to-pkg-path() { - local group_version="$1" - - while IFS=$'\n' read -r api; do - if [[ "${api}" = "${group_version/.*k8s.io/}" ]]; then - echo "vendor/k8s.io/api/${group_version/.*k8s.io/}" - return - fi - done < <(cd "${KUBE_ROOT}/staging/src/k8s.io/api" && find . 
-name types.go -exec dirname {} \; | sed "s|\./||g" | sort) - - # "v1" is the API GroupVersion - if [[ "${group_version}" == "v1" ]]; then - echo "vendor/k8s.io/api/core/v1" - return - fi - - # Special cases first. - # TODO(lavalamp): Simplify this by moving pkg/api/v1 and splitting pkg/api, - # moving the results to pkg/apis/api. - case "${group_version}" in - # both group and version are "", this occurs when we generate deep copies for internal objects of the legacy v1 API. - __internal) - echo "pkg/apis/core" - ;; - meta/v1) - echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1" - ;; - meta/v1beta1) - echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1" - ;; - *.k8s.io) - echo "pkg/apis/${group_version%.*k8s.io}" - ;; - *.k8s.io/*) - echo "pkg/apis/${group_version/.*k8s.io/}" - ;; - *) - echo "pkg/apis/${group_version%__internal}" - ;; - esac -} - -# Takes a group/version and returns the swagger-spec file name. -# default behavior: extensions/v1beta1 -> extensions_v1beta1 -# special case for v1: v1 -> v1 -kube::util::gv-to-swagger-name() { - local group_version="$1" - case "${group_version}" in - v1) - echo "v1" - ;; - *) - echo "${group_version%/*}_${group_version#*/}" - ;; - esac -} - -# Returns the name of the upstream remote repository name for the local git -# repo, e.g. "upstream" or "origin". -kube::util::git_upstream_remote_name() { - git remote -v | grep fetch |\ - grep -E 'github.com[/:]kubernetes/kubernetes|k8s.io/kubernetes' |\ - head -n 1 | awk '{print $1}' -} - -# Ensures the current directory is a git tree for doing things like restoring or -# validating godeps -kube::util::create-fake-git-tree() { - local -r target_dir=${1:-$(pwd)} - - pushd "${target_dir}" >/dev/null || return 1 - git init >/dev/null - git config --local user.email "nobody@k8s.io" - git config --local user.name "$0" - git add . >/dev/null - git commit -q -m "Snapshot" >/dev/null - if (( ${KUBE_VERBOSE:-5} >= 6 )); then - kube::log::status "${target_dir} is now a git tree." - fi - popd >/dev/null || return 1 -} - -# Checks whether godep restore was run in the current GOPATH, i.e. that all referenced repos exist -# and are checked out to the referenced rev. -kube::util::godep_restored() { - local -r godeps_json=${1:-Godeps/Godeps.json} - local -r gopath=${2:-${GOPATH%:*}} - - kube::util::require-jq - - local root - local old_rev="" - while read -r path rev; do - rev="${rev//[\'\"]}" # remove quotes which are around revs sometimes - - if [[ "${rev}" == "${old_rev}" ]] && [[ "${path}" == "${root}"* ]]; then - # avoid checking the same git/hg root again - continue - fi - - root="${path}" - while [ "${root}" != "." ] && [ ! -d "${gopath}/src/${root}/.git" ] && [ ! -d "${gopath}/src/${root}/.hg" ]; do - root=$(dirname "${root}") - done - if [ "${root}" == "." ]; then - echo "No checkout of ${path} found in GOPATH \"${gopath}\"." 1>&2 - return 1 - fi - local head - if [ -d "${gopath}/src/${root}/.git" ]; then - head="$(cd "${gopath}/src/${root}" && git rev-parse HEAD)" - else - head="$(cd "${gopath}/src/${root}" && hg parent --template '{node}')" - fi - if [ "${head}" != "${rev}" ]; then - echo "Unexpected HEAD '${head}' at ${gopath}/src/${root}, expected '${rev}'." 1>&2 - return 1 - fi - old_rev="${rev}" - done < <(jq '.Deps|.[]|.ImportPath + " " + .Rev' -r < "${godeps_json}") - return 0 -} - -# Exits script if working directory is dirty. If it's run interactively in the terminal -# the user can commit changes in a second terminal. This script will wait. -kube::util::ensure_clean_working_dir() { - while ! 
git diff HEAD --exit-code &>/dev/null; do - echo -e "\nUnexpected dirty working directory:\n" - if tty -s; then - git status -s - else - git diff -a # be more verbose in log files without tty - exit 1 - fi | sed 's/^/ /' - echo -e "\nCommit your changes in another terminal and then continue here by pressing enter." - read -r - done 1>&2 -} - -# Ensure that the given godep version is installed and in the path. Almost -# nobody should use any version but the default. -# -# Sets: -# KUBE_GODEP: The path to the godep binary -# -kube::util::ensure_godep_version() { - local godep_target_version=${1:-"v80-k8s-r1"} # this version is known to work - - # If KUBE_GODEP is already set, and it's the right version, then use it. - if [[ -n "${KUBE_GODEP:-}" && "$(${KUBE_GODEP:?} version 2>/dev/null)" == *"godep ${godep_target_version}"* ]]; then - kube::log::status "Using ${KUBE_GODEP}" - return - fi - - # Otherwise, install forked godep - kube::log::status "Installing godep version ${godep_target_version}" - GOBIN="${KUBE_OUTPUT_BINPATH}" go install k8s.io/kubernetes/third_party/forked/godep - export KUBE_GODEP="${KUBE_OUTPUT_BINPATH}/godep" - kube::log::status "Installed ${KUBE_GODEP}" - - # Verify that the installed godep from fork is what we expect - if [[ "$(${KUBE_GODEP:?} version 2>/dev/null)" != *"godep ${godep_target_version}"* ]]; then - kube::log::error "Expected godep ${godep_target_version} from ${KUBE_GODEP}, got $(${KUBE_GODEP:?} version)" - return 1 - fi -} - -# Ensure that none of the staging repos is checked out in the GOPATH because this -# easily confused godep. -kube::util::ensure_no_staging_repos_in_gopath() { - kube::util::ensure_single_dir_gopath - local error=0 - for repo_file in "${KUBE_ROOT}"/staging/src/k8s.io/*; do - if [[ ! -d "${repo_file}" ]]; then - # not a directory or there were no files - continue; - fi - repo="$(basename "${repo_file}")" - if [ -e "${GOPATH}/src/k8s.io/${repo}" ]; then - echo "k8s.io/${repo} exists in GOPATH. Remove before running godep-save.sh." 1>&2 - error=1 - fi - done - if [ "${error}" = "1" ]; then - exit 1 - fi -} - -# Checks that the GOPATH is simple, i.e. consists only of one directory, not multiple. -kube::util::ensure_single_dir_gopath() { - if [[ "${GOPATH}" == *:* ]]; then - echo "GOPATH must consist of a single directory." 1>&2 - exit 1 - fi -} - -# Find the base commit using: -# $PULL_BASE_SHA if set (from Prow) -# current ref from the remote upstream branch -kube::util::base_ref() { - local -r git_branch=$1 - - if [[ -n ${PULL_BASE_SHA:-} ]]; then - echo "${PULL_BASE_SHA}" - return - fi - - full_branch="$(kube::util::git_upstream_remote_name)/${git_branch}" - - # make sure the branch is valid, otherwise the check will pass erroneously. - if ! git describe "${full_branch}" >/dev/null; then - # abort! - exit 1 - fi - - echo "${full_branch}" -} - -# Checks whether there are any files matching pattern $2 changed between the -# current branch and upstream branch named by $1. -# Returns 1 (false) if there are no changes -# 0 (true) if there are changes detected. -kube::util::has_changes() { - local -r git_branch=$1 - local -r pattern=$2 - local -r not_pattern=${3:-totallyimpossiblepattern} - - local base_ref - base_ref=$(kube::util::base_ref "${git_branch}") - echo "Checking for '${pattern}' changes against '${base_ref}'" - - # notice this uses ... 
to find the first shared ancestor - if git diff --name-only "${base_ref}...HEAD" | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then - return 0 - fi - # also check for pending changes - if git status --porcelain | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then - echo "Detected '${pattern}' uncommitted changes." - return 0 - fi - echo "No '${pattern}' changes detected." - return 1 -} - kube::util::download_file() { local -r url=$1 local -r destination_file=$2 @@ -528,152 +87,6 @@ kube::util::download_file() { return 1 } -# Test whether openssl is installed. -# Sets: -# OPENSSL_BIN: The path to the openssl binary to use -function kube::util::test_openssl_installed { - if ! openssl version >& /dev/null; then - echo "Failed to run openssl. Please ensure openssl is installed" - exit 1 - fi - - OPENSSL_BIN=$(command -v openssl) -} - -# creates a client CA, args are sudo, dest-dir, ca-id, purpose -# purpose is dropped in after "key encipherment", you usually want -# '"client auth"' -# '"server auth"' -# '"client auth","server auth"' -function kube::util::create_signing_certkey { - local sudo=$1 - local dest_dir=$2 - local id=$3 - local purpose=$4 - # Create client ca - ${sudo} /usr/bin/env bash -e < "${dest_dir}/${id}-ca-config.json" -EOF -} - -# signs a client certificate: args are sudo, dest-dir, CA, filename (roughly), username, groups... -function kube::util::create_client_certkey { - local sudo=$1 - local dest_dir=$2 - local ca=$3 - local id=$4 - local cn=${5:-$4} - local groups="" - local SEP="" - shift 5 - while [ -n "${1:-}" ]; do - groups+="${SEP}{\"O\":\"$1\"}" - SEP="," - shift 1 - done - ${sudo} /usr/bin/env bash -e < /dev/null -apiVersion: v1 -kind: Config -clusters: - - cluster: - certificate-authority: ${ca_file} - server: https://${api_host}:${api_port}/ - name: local-up-cluster -users: - - user: - token: ${token} - client-certificate: ${dest_dir}/client-${client_id}.crt - client-key: ${dest_dir}/client-${client_id}.key - name: local-up-cluster -contexts: - - context: - cluster: local-up-cluster - user: local-up-cluster - name: local-up-cluster -current-context: local-up-cluster -EOF - - # flatten the kubeconfig files to make them self contained - username=$(whoami) - ${sudo} /usr/bin/env bash -e < "/tmp/${client_id}.kubeconfig" - mv -f "/tmp/${client_id}.kubeconfig" "${dest_dir}/${client_id}.kubeconfig" - chown ${username} "${dest_dir}/${client_id}.kubeconfig" -EOF -} - -# Determines if docker can be run, failures may simply require that the user be added to the docker group. -function kube::util::ensure_docker_daemon_connectivity { - IFS=" " read -ra DOCKER <<< "${DOCKER_OPTS}" - # Expand ${DOCKER[@]} only if it's not unset. This is to work around - # Bash 3 issue with unbound variable. - DOCKER=(docker ${DOCKER[@]:+"${DOCKER[@]}"}) - if ! "${DOCKER[@]}" info > /dev/null 2>&1 ; then - cat <<'EOF' >&2 -Can't connect to 'docker' daemon. please fix and retry. - -Possible causes: - - Docker Daemon not started - - Linux: confirm via your init system - - macOS w/ docker-machine: run `docker-machine ls` and `docker-machine start ` - - macOS w/ Docker for Mac: Check the menu bar and start the Docker application - - DOCKER_HOST hasn't been set or is set incorrectly - - Linux: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}` - - macOS w/ docker-machine: run `eval "$(docker-machine env )"` - - macOS w/ Docker for Mac: domain socket is used, DOCKER_* should be unset. 
In Bash run `unset ${!DOCKER_*}` - - Other things to check: - - Linux: User isn't in 'docker' group. Add and relogin. - - Something like 'sudo usermod -a -G docker ${USER}' - - RHEL7 bug and workaround: https://bugzilla.redhat.com/show_bug.cgi?id=1119282#c8 -EOF - return 1 - fi -} - # Wait for background jobs to finish. Return with # an error status if any of the jobs failed. kube::util::wait-for-jobs() { @@ -696,101 +109,6 @@ function kube::util::join { echo "$*" } -# Downloads cfssl/cfssljson into $1 directory if they do not already exist in PATH -# -# Assumed vars: -# $1 (cfssl directory) (optional) -# -# Sets: -# CFSSL_BIN: The path of the installed cfssl binary -# CFSSLJSON_BIN: The path of the installed cfssljson binary -# -function kube::util::ensure-cfssl { - if command -v cfssl &>/dev/null && command -v cfssljson &>/dev/null; then - CFSSL_BIN=$(command -v cfssl) - CFSSLJSON_BIN=$(command -v cfssljson) - return 0 - fi - - host_arch=$(kube::util::host_arch) - - if [[ "${host_arch}" != "amd64" ]]; then - echo "Cannot download cfssl on non-amd64 hosts and cfssl does not appear to be installed." - echo "Please install cfssl and cfssljson and verify they are in \$PATH." - echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..." - exit 1 - fi - - # Create a temp dir for cfssl if no directory was given - local cfssldir=${1:-} - if [[ -z "${cfssldir}" ]]; then - kube::util::ensure-temp-dir - cfssldir="${KUBE_TEMP}/cfssl" - fi - - mkdir -p "${cfssldir}" - pushd "${cfssldir}" > /dev/null || return 1 - - echo "Unable to successfully run 'cfssl' from ${PATH}; downloading instead..." - kernel=$(uname -s) - case "${kernel}" in - Linux) - curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 - curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 - ;; - Darwin) - curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_darwin-amd64 - curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_darwin-amd64 - ;; - *) - echo "Unknown, unsupported platform: ${kernel}." >&2 - echo "Supported platforms: Linux, Darwin." >&2 - exit 2 - esac - - chmod +x cfssl || true - chmod +x cfssljson || true - - CFSSL_BIN="${cfssldir}/cfssl" - CFSSLJSON_BIN="${cfssldir}/cfssljson" - if [[ ! -x ${CFSSL_BIN} || ! -x ${CFSSLJSON_BIN} ]]; then - echo "Failed to download 'cfssl'. Please install cfssl and cfssljson and verify they are in \$PATH." - echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..." - exit 1 - fi - popd > /dev/null || return 1 -} - -# kube::util::ensure_dockerized -# Confirms that the script is being run inside a kube-build image -# -function kube::util::ensure_dockerized { - if [[ -f /kube-build-image ]]; then - return 0 - else - echo "ERROR: This script is designed to be run inside a kube-build container" - exit 1 - fi -} - -# kube::util::ensure-gnu-sed -# Determines which sed binary is gnu-sed on linux/darwin -# -# Sets: -# SED: The name of the gnu-sed binary -# -function kube::util::ensure-gnu-sed { - if LANG=C sed --help 2>&1 | grep -q GNU; then - SED="sed" - elif command -v gsed &>/dev/null; then - SED="gsed" - else - kube::log::error "Failed to find GNU sed as sed or gsed. If you are on Mac: brew install gnu-sed." 
>&2 - return 1 - fi - kube::util::sourced_variable "${SED}" -} - # kube::util::check-file-in-alphabetical-order # Check that the file is in alphabetical order # @@ -808,15 +126,6 @@ function kube::util::check-file-in-alphabetical-order { fi } -# kube::util::require-jq -# Checks whether jq is installed. -function kube::util::require-jq { - if ! command -v jq &>/dev/null; then - echo "jq not found. Please install." 1>&2 - return 1 - fi -} - # Some useful colors. if [[ -z "${color_start-}" ]]; then declare -r color_start="\033[" diff --git a/verify-shellcheck.sh b/verify-shellcheck.sh index 1b882fa8..fd28021a 100755 --- a/verify-shellcheck.sh +++ b/verify-shellcheck.sh @@ -18,9 +18,12 @@ set -o errexit set -o nounset set -o pipefail -KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. -source "${KUBE_ROOT}/hack/lib/init.sh" -source "${KUBE_ROOT}/hack/lib/util.sh" +# The csi-release-tools directory. +TOOLS="$(dirname "${BASH_SOURCE[0]}")" +. "${TOOLS}/util.sh" + +# Directory to check. Default is the parent of the tools themselves. +ROOT="${1:-${TOOLS}/..}" # required version for this script, if not installed on the host we will # use the official docker image instead. keep this in sync with SHELLCHECK_IMAGE @@ -56,15 +59,15 @@ create_container () { # we're done. # This is incredibly much faster than creating a container for each shellcheck # call ... - docker run --name "${SHELLCHECK_CONTAINER}" -d --rm -v "${KUBE_ROOT}:${KUBE_ROOT}" -w "${KUBE_ROOT}" --entrypoint="sleep" "${SHELLCHECK_IMAGE}" 2147483647 + docker run --name "${SHELLCHECK_CONTAINER}" -d --rm -v "${ROOT}:${ROOT}" -w "${ROOT}" --entrypoint="sleep" "${SHELLCHECK_IMAGE}" 2147483647 } # removes the shellcheck container remove_container () { docker rm -f "${SHELLCHECK_CONTAINER}" &> /dev/null || true } -# ensure we're linting the k8s source tree -cd "${KUBE_ROOT}" +# ensure we're linting the source tree +cd "${ROOT}" # find all shell scripts excluding ./_*, ./.git/*, ./vendor*, # and anything git-ignored @@ -78,16 +81,6 @@ done < <(find . -name "*.sh" \ -path ./vendor\* \ \)) -# make sure known failures are sorted -failure_file="${KUBE_ROOT}/hack/.shellcheck_failures" -kube::util::check-file-in-alphabetical-order "${failure_file}" - -# load known failure files -failing_files=() -while IFS=$'\n' read -r script; - do failing_files+=("$script"); -done < <(cat "${failure_file}") - # detect if the host machine has the required shellcheck version installed # if so, we will use that instead. HAVE_SHELLCHECK=false @@ -119,7 +112,6 @@ fi # lint each script, tracking failures errors=() -not_failing=() for f in "${all_shell_scripts[@]}"; do set +o errexit if ${HAVE_SHELLCHECK}; then @@ -129,18 +121,14 @@ for f in "${all_shell_scripts[@]}"; do shellcheck --exclude="${SHELLCHECK_DISABLED}" "${f}") fi set -o errexit - kube::util::array_contains "${f}" "${failing_files[@]}" && in_failing=$? || in_failing=$? - if [[ -n "${failedLint}" ]] && [[ "${in_failing}" -ne "0" ]]; then - errors+=( "${failedLint}" ) - fi - if [[ -z "${failedLint}" ]] && [[ "${in_failing}" -eq "0" ]]; then - not_failing+=( "${f}" ) + if [[ -n "${failedLint}" ]]; then + errors+=( "${failedLint}" ) fi done # Check to be sure all the packages that should pass lint are. if [ ${#errors[@]} -eq 0 ]; then - echo 'Congratulations! All shell files are passing lint (excluding those in hack/.shellcheck_failures).' + echo 'Congratulations! All shell files are passing lint.' else { echo "Errors from shellcheck:" @@ -149,38 +137,9 @@ else done echo echo 'Please review the above warnings. 
You can test via "./hack/verify-shellcheck"' - echo 'If the above warnings do not make sense, you can exempt this package from shellcheck' - echo 'checking by adding it to hack/.shellcheck_failures (if your reviewer is okay with it).' - echo - } >&2 - false -fi - -if [[ ${#not_failing[@]} -gt 0 ]]; then - { - echo "Some packages in hack/.shellcheck_failures are passing shellcheck. Please remove them." - echo - for f in "${not_failing[@]}"; do - echo " $f" - done - echo - } >&2 - false -fi - -# Check that all failing_packages actually still exist -gone=() -for f in "${failing_files[@]}"; do - kube::util::array_contains "$f" "${all_shell_scripts[@]}" || gone+=( "$f" ) -done - -if [[ ${#gone[@]} -gt 0 ]]; then - { - echo "Some files in hack/.shellcheck_failures do not exist anymore. Please remove them." - echo - for f in "${gone[@]}"; do - echo " $f" - done + echo 'If the above warnings do not make sense, you can exempt them from shellcheck' + echo 'checking by adding the "shellcheck disable" directive' + echo '(https://github.com/koalaman/shellcheck/wiki/Directive#disable).' echo } >&2 false From 6c7ba1be0fbb012b6b3d6d204fcc363164bce12d Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Tue, 26 Mar 2019 13:09:00 +0100 Subject: [PATCH 21/41] build.make: integrate shellcheck into "make test" By default this only tests the scripts inside the "release-tools" directory, which is useful when making experimental changes to them in a component that uses csi-release-tools. But a component can also enable checking for other directories. --- README.md | 12 ++++++++++++ build.make | 15 +++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/README.md b/README.md index 56d2248c..cb028154 100644 --- a/README.md +++ b/README.md @@ -49,3 +49,15 @@ Cheat sheet: - `git subtree add --prefix=release-tools https://github.com/kubernetes-csi/csi-release-tools.git master` - add release tools to a repo which does not have them yet (only once) - `git subtree pull --prefix=release-tools https://github.com/kubernetes-csi/csi-release-tools.git master` - update local copy to latest upstream (whenever upstream changes) - edit, `git commit`, `git subtree push --prefix=release-tools git@github.com:/csi-release-tools.git ` - push to a new branch before submitting a PR + +verify-shellcheck.sh +-------------------- + +The [verify-shellcheck.sh](./verify-shellcheck.sh) script in this repo +is a stripped down copy of the [corresponding +script](https://github.com/kubernetes/kubernetes/blob/release-1.14/hack/verify-shellcheck.sh) +in the Kubernetes repository. It can be used to check for certain +errors shell scripts, like missing quotation marks. The default +`test-shellcheck` target in [build.make](./build.make) only checks the +scripts in this directory. Components can add more directories to +`TEST_SHELLCHECK_DIRS` to check also other scripts. diff --git a/build.make b/build.make index 6ea32795..3cac9d30 100644 --- a/build.make +++ b/build.make @@ -122,3 +122,18 @@ test: test-subtree test-subtree: @ echo; echo "### $@:" ./release-tools/verify-subtree.sh release-tools + +# Components can extend the set of directories which must pass shellcheck. +# The default is to check only the release-tools directory itself. 
+TEST_SHELLCHECK_DIRS=release-tools +.PHONY: test-shellcheck +test: test-shellcheck +test-shellcheck: + @ echo; echo "### $@:" + @ ret=0; \ + for dir in $(abspath $(TEST_SHELLCHECK_DIRS)); do \ + echo; \ + echo "$$dir:"; \ + ./release-tools/verify-shellcheck.sh "$$dir" || ret=1; \ + done; \ + return $$ret From 55212ff2b4f6a83c22cf2344b101d31cd11ec271 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 15 Mar 2019 16:45:16 +0100 Subject: [PATCH 22/41] initial Prow test job This enables testing of other repos and of this repo itself inside Prow. Currently supported is unit testing ("make test") and E2E testing (either via a local test suite or the Kubernetes E2E test suite applied to the hostpath driver example deployment). The script passes shellcheck and uses Prow to verify that for future PRs. --- .prow.sh | 7 + README.md | 45 +++ filter-junit.go | 133 +++++++ prow.sh | 916 ++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 1101 insertions(+) create mode 100755 .prow.sh create mode 100644 filter-junit.go create mode 100755 prow.sh diff --git a/.prow.sh b/.prow.sh new file mode 100755 index 00000000..b18c5358 --- /dev/null +++ b/.prow.sh @@ -0,0 +1,7 @@ +#! /bin/bash -e +# +# This is for testing csi-release-tools itself in Prow. All other +# repos use prow.sh for that, but as csi-release-tools isn't a normal +# repo with some Go code in it, it has a custom Prow test script. + +./verify-shellcheck.sh "$(pwd)" diff --git a/README.md b/README.md index cb028154..bc061aee 100644 --- a/README.md +++ b/README.md @@ -61,3 +61,48 @@ errors shell scripts, like missing quotation marks. The default `test-shellcheck` target in [build.make](./build.make) only checks the scripts in this directory. Components can add more directories to `TEST_SHELLCHECK_DIRS` to check also other scripts. + +End-to-end testing +------------------ + +A repo that wants to opt into testing via Prow must set up a top-level +`.prow.sh`. Typically that will source `prow.sh` and then transfer +control to it: + +``` bash +#! /bin/bash -e + +. release-tools/prow.sh +main +``` + +All Kubernetes-CSI repos are expected to switch to Prow. For details +on what is enabled in Prow, see +https://github.com/kubernetes/test-infra/tree/master/config/jobs/kubernetes-csi + +Test results for periodic jobs are visible in +https://testgrid.k8s.io/sig-storage-csi + +It is possible to reproduce the Prow testing locally on a suitable machine: +- Linux host +- Docker installed +- code to be tested checkout out in `$GOPATH/src/` +- `cd $GOPATH/src/ && ./.prow.sh` + +Beware that the script intentionally doesn't clean up after itself and +modifies the content of `$GOPATH`, in particular the `kubernetes` and +`kind` repositories there. Better run it in an empty, disposable +`$GOPATH`. + +When it terminates, the following command can be used to get access to +the Kubernetes cluster that was brought up for testing (assuming that +this step succeeded): + + export KUBECONFIG="$(kind get kubeconfig-path --name="csi-prow")" + +It is possible to control the execution via environment variables. See +`prow.sh` for details. Particularly useful is testing against different +Kubernetes releases: + + CSI_PROW_KUBERNETES_VERSION=1.13.3 ./.prow.sh + CSI_PROW_KUBERNETES_VERSION=latest ./.prow.sh diff --git a/filter-junit.go b/filter-junit.go new file mode 100644 index 00000000..2f51be00 --- /dev/null +++ b/filter-junit.go @@ -0,0 +1,133 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* + * This command filters a JUnit file such that only tests with a name + * matching a regular expression are passed through. By concatenating + * multiple input files it is possible to merge them into a single file. + */ +package main + +import ( + "encoding/xml" + "flag" + "io/ioutil" + "os" + "regexp" +) + +var ( + output = flag.String("o", "-", "junit file to write, - for stdout") + tests = flag.String("t", "", "regular expression matching the test names that are to be included in the output") +) + +/* + * TestSuite represents a JUnit file. Due to how encoding/xml works, we have + * represent all fields that we want to be passed through. It's therefore + * not a complete solution, but good enough for Ginkgo + Spyglass. + */ +type TestSuite struct { + XMLName string `xml:"testsuite"` + TestCases []TestCase `xml:"testcase"` +} + +type TestCase struct { + Name string `xml:"name,attr"` + Time string `xml:"time,attr"` + SystemOut string `xml:"system-out,omitempty"` + Failure string `xml:"failure,omitempty"` + Skipped SkipReason `xml:"skipped,omitempty"` +} + +// SkipReason deals with the special : +// if present, we must re-encode it, even if empty. +type SkipReason string + +func (s *SkipReason) UnmarshalText(text []byte) error { + *s = SkipReason(text) + if *s == "" { + *s = " " + } + return nil +} + +func (s SkipReason) MarshalText() ([]byte, error) { + if s == " " { + return []byte{}, nil + } + return []byte(s), nil +} + +func main() { + var junit TestSuite + var data []byte + + flag.Parse() + + re := regexp.MustCompile(*tests) + + // Read all input files. + for _, input := range flag.Args() { + if input == "-" { + if _, err := os.Stdin.Read(data); err != nil { + panic(err) + } + } else { + var err error + data, err = ioutil.ReadFile(input) + if err != nil { + panic(err) + } + } + if err := xml.Unmarshal(data, &junit); err != nil { + panic(err) + } + } + + // Keep only matching testcases. Testcases skipped in all test runs are only stored once. + filtered := map[string]TestCase{} + for _, testcase := range junit.TestCases { + if !re.MatchString(testcase.Name) { + continue + } + entry, ok := filtered[testcase.Name] + if !ok || // not present yet + entry.Skipped != "" && testcase.Skipped == "" { // replaced skipped test with real test run + filtered[testcase.Name] = testcase + } + } + junit.TestCases = nil + for _, testcase := range filtered { + junit.TestCases = append(junit.TestCases, testcase) + } + + // Re-encode. + data, err := xml.MarshalIndent(junit, "", " ") + if err != nil { + panic(err) + } + + // Write to output. + if *output == "-" { + if _, err := os.Stdout.Write(data); err != nil { + panic(err) + } + } else { + if err := ioutil.WriteFile(*output, data, 0644); err != nil { + panic(err) + } + } +} diff --git a/prow.sh b/prow.sh new file mode 100755 index 00000000..17e8ae4a --- /dev/null +++ b/prow.sh @@ -0,0 +1,916 @@ +#! /bin/bash +# +# Copyright 2019 The Kubernetes Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This script runs inside a Prow job. It runs unit tests ("make test") +# and E2E testing. This E2E testing covers different scenarios (see +# https://github.com/kubernetes/enhancements/pull/807): +# - running the stable hostpath example against a Kubernetes release +# - running the canary hostpath example against a Kubernetes release +# - building the component in the current repo and running the +# stable hostpath example with that one component replaced against +# a Kubernetes release +# +# The intended usage of this script is that individual repos import +# csi-release-tools, then link their top-level prow.sh to this or +# include it in that file. When including it, several of the variables +# can be overridden in the top-level prow.sh to customize the script +# for the repo. +# +# The expected environment is: +# - $GOPATH/src/ for the repository that is to be tested, +# with PR branch merged (when testing a PR) +# - running on linux-amd64 +# - bazel installed (when testing against Kubernetes master), must be recent +# enough for Kubernetes master +# - kind (https://github.com/kubernetes-sigs/kind) installed +# - optional: Go already installed + +# Sets the default value for a variable if not set already and logs the value. +# Any variable set this way is usually something that a repo's .prow.sh +# or the job can set. +configvar () { + # Ignore: Word is of the form "A"B"C" (B indicated). Did you mean "ABC" or "A\"B\"C"? + # shellcheck disable=SC2140 + eval : \$\{"$1":="\$2"\} + eval echo "\$3:" "$1=\${$1}" +} + +# Go versions can be specified seperately for different tasks +# If the pre-installed Go is missing or a different +# version, the required version here will get installed +# from https://golang.org/dl/. +configvar CSI_PROW_GO_VERSION_BUILD 1.11.4 "Go version for building the component" # depends on component's source code +configvar CSI_PROW_GO_VERSION_K8S 1.12.1 "Go version for building Kubernetes for the test cluster" # depends on Kubernetes version +configvar CSI_PROW_GO_VERSION_E2E 1.12.1 "Go version for building the Kubernetes E2E test suite" # depends on CSI_PROW_E2E settings below +configvar CSI_PROW_GO_VERSION_SANITY "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building the csi-sanity test suite" # depends on CSI_PROW_SANITY settings below +configvar CSI_PROW_GO_VERSION_KIND "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building 'kind'" # depends on CSI_PROW_KIND_VERSION below +configvar CSI_PROW_GO_VERSION_GINKGO "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building ginkgo" # depends on CSI_PROW_GINKGO_VERSION below + +# kind version to use. If the pre-installed version is different, +# the desired version is downloaded from https://github.com/kubernetes-sigs/kind/releases/download/ +# (if available), otherwise it is built from source. +configvar CSI_PROW_KIND_VERSION 0.2.1 "kind" + +# ginkgo test runner version to use. If the pre-installed version is +# different, the desired version is built from source. 
+configvar CSI_PROW_GINKGO_VERSION v1.7.0 "Ginkgo" + +# Ginkgo runs the E2E test in parallel. The default is based on the number +# of CPUs, but typically this can be set to something higher in the job. +configvar CSI_PROW_GINKO_PARALLEL "-p" "Ginko parallelism parameter(s)" + +# Enables building the code in the repository. On by default, can be +# disabled in jobs which only use pre-built components. +configvar CSI_PROW_BUILD_JOB true "building code in repo enabled" + +# Kubernetes version to test against. This must be a version number +# (like 1.13.3) for which there is a pre-built kind image (see +# https://hub.docker.com/r/kindest/node/tags), "latest" (builds +# Kubernetes from the master branch) or "release-x.yy" (builds +# Kubernetes from a release branch). +# +# This can also be a version that was not released yet at the time +# that the settings below were chose. The script will then +# use the same settings as for "latest" Kubernetes. This works +# as long as there are no breaking changes in Kubernetes, like +# deprecating or changing the implementation of an alpha feature. +configvar CSI_PROW_KUBERNETES_VERSION 1.13.3 "Kubernetes" + +# CSI_PROW_KUBERNETES_VERSION reduced to first two version numbers and +# with underscore (1_13 instead of 1.13.3) and in uppercase (LATEST +# instead of latest). +# +# This is used to derive the right defaults for the variables below +# when a Prow job just defines the Kubernetes version. +csi_prow_kubernetes_version_suffix="$(echo "${CSI_PROW_KUBERNETES_VERSION}" | tr . _ | tr '[:lower:]' '[:upper:]' | sed -e 's/^RELEASE-//' -e 's/\([0-9]*\)_\([0-9]*\).*/\1_\2/')" + +# Work directory. It has to allow running executables, therefore /tmp +# is avoided. Cleaning up after the script is intentionally left to +# the caller. +configvar CSI_PROW_WORK "$(mkdir -p "$GOPATH/pkg" && mktemp -d "$GOPATH/pkg/csiprow.XXXXXXXXXX")" "work directory" + +# The hostpath deployment script is searched for in several places. +# +# - The "deploy" directory in the current repository: this is useful +# for the situation that a component becomes incompatible with the +# shared deployment, because then it can (temporarily!) provide its +# own example until the shared one can be updated; it's also how +# csi-driver-host-path itself provides the example. +# +# - CSI_PROW_HOSTPATH_VERSION of the CSI_PROW_HOSTPATH_REPO is checked +# out: this allows other repos to reference a version of the example +# that is known to be compatible. +# +# - The csi-driver-host-path/deploy directory has multiple sub-directories, +# each with different deployments (stable set of images for Kubernetes 1.13, +# stable set of images for Kubernetes 1.14, canary for latest Kubernetes, etc.). +# This is necessary because there may be incompatible changes in the +# "API" of a component (for example, its command line options or RBAC rules) +# or in its support for different Kubernetes versions (CSIDriverInfo as +# CRD in Kubernetes 1.13 vs builtin API in Kubernetes 1.14). +# +# When testing an update for a component in a PR job, the +# CSI_PROW_DEPLOYMENT variable can be set in the +# .prow.sh of each component when there are breaking changes +# that require using a non-default deployment. The default +# is a deployment named "kubernetes-x.yy" (if available), +# otherwise "kubernetes-latest". +# "none" disables the deployment of the hostpath driver. +# +# When no deploy script is found (nothing in `deploy` directory, +# CSI_PROW_HOSTPATH_REPO=none), nothing gets deployed. 
+# +# TODO: merge https://github.com/kubernetes-csi/csi-driver-host-path/pull/29, switch to revision from master +configvar CSI_PROW_HOSTPATH_VERSION deployment "hostpath driver" +configvar CSI_PROW_HOSTPATH_REPO https://github.com/pohly/csi-driver-host-path "hostpath repo" +# Ignore 'See if you can use ${variable//search/replace} instead.' +# shellcheck disable=SC2001 +configvar CSI_PROW_DEPLOYMENT "" "deployment" + +# If CSI_PROW_HOSTPATH_CANARY is set (typically to "canary", but also +# "1.0-canary"), then all image versions are replaced with that +# version tag. +configvar CSI_PROW_HOSTPATH_CANARY "" "hostpath image" + +# The E2E testing can come from an arbitrary repo. The expectation is that +# the repo supports "go test ./test/e2e -args --storage.testdriver" (https://github.com/kubernetes/kubernetes/pull/72836) +# after setting KUBECONFIG. As a special case, if the repository is Kubernetes, +# then `make WHAT=test/e2e/e2e.test` is called first to ensure that +# all generated files are present. +# +# CSI_PROW_E2E_REPO=none disables E2E testing. +configvar CSI_PROW_E2E_VERSION v1.14.0 "E2E version" +configvar CSI_PROW_E2E_REPO https://github.com/kubernetes/kubernetes "E2E repo" +configvar CSI_PROW_E2E_IMPORT_PATH k8s.io/kubernetes "E2E package" + +# csi-sanity testing from the csi-test repo can be run against the installed +# CSI driver. For this to work, deploying the driver must expose the Unix domain +# csi.sock as a TCP service for use by the csi-sanity command, which runs outside +# of the cluster. The alternative would have been to (cross-)compile csi-sanity +# and install it inside the cluster, which is not necessarily easier. +configvar CSI_PROW_SANITY_REPO https://github.com/kubernetes-csi/csi-test "csi-test repo" +configvar CSI_PROW_SANITY_VERSION 5421d9f3c37be3b95b241b44a094a3db11bee789 "csi-test version" # latest master +configvar CSI_PROW_SANITY_IMPORT_PATH github.com/kubernetes-csi/csi-test "csi-test package" +configvar CSI_PROW_SANITY_SERVICE "hostpath-service" "Kubernetes TCP service name that exposes csi.sock" +configvar CSI_PROW_SANITY_POD "csi-hostpathplugin-0" "Kubernetes pod with CSI driver" +configvar CSI_PROW_SANITY_CONTAINER "hostpath" "Kubernetes container with CSI driver" + +# Each job can run one or more of the following tests, identified by +# a single word: +# - unit testing +# - parallel excluding alpha features +# - serial excluding alpha features +# - parallel, only alpha feature +# - serial, only alpha features +# - sanity +# +# Sanity testing with csi-sanity only covers the CSI driver itself and thus +# is off by default. A CSI driver can change that default in its .prow.sh +# by setting CSI_PROW_TESTS_SANITY. +configvar CSI_PROW_TESTS "unit parallel serial parallel-alpha serial-alpha ${CSI_PROW_TESTS_SANITY}" "tests to run" +test_enabled () { + echo "${CSI_PROW_TESTS}" | grep -q -w -e "$1" +} + +# Serial vs. parallel is always determined by these regular expressions. +# Individual regular expressions are seperated by spaces for readability +# and expected to not contain spaces. Use dots instead. The complete +# regex for Ginkgo will be created by joining the individual terms. +configvar CSI_PROW_E2E_SERIAL '\[Serial\] \[Disruptive\]' "tags for serial E2E tests" +regex_join () { + echo "$@" | sed -e 's/ */|/g' -e 's/^|*//' -e 's/|*$//' -e 's/^$/this-matches-nothing/g' +} + +# Prints the value of a variable + version suffix, falling back to variable + "LATEST". 
+get_versioned_variable () { + local var="$1" + local version="$2" + local value + + eval value="\${${var}_${version}}" + if ! [ "$value" ]; then + eval value="\${${var}_LATEST}" + fi + echo "$value" +} + +# Which tests are alpha depends on the Kubernetes version. The same +# E2E test suite is used for all Kubernetes versions, including older +# Kubernetes. +# +# Feature tags in the test are set based on what is an alpha +# feature in the Kubernetes version that contains the E2E test suite's +# source code. +# +# So when testing against an older Kubernetes release, some tests +# might be enabled by default which don't pass for that older +# Kubernetes version. In that case, the +# CSI_PROW_E2E_ALPHA_ variable cannot just +# be based on the `Feature` tag, it also must contain the names +# of tests that used to be alpha but no longer have that tag. +# +# is just major+minor version separated by +# underscore. "latest" matches master. It is also used for +# unknown versions, so when master gets released there is +# no need to add a new versioned variable. That only needs to be +# done when updating something in the configuration that leads +# to "latest" no longer being suitable. +configvar CSI_PROW_E2E_ALPHA_1_13 '\[Feature: \[Testpattern:.Dynamic.PV..block.volmode.\] should.create.and.delete.block.persistent.volumes' "alpha tests for Kubernetes 1.13" # Raw block was an alpha feature in 1.13. +configvar CSI_PROW_E2E_ALPHA_LATEST '\[Feature:' "alpha tests for Kubernetes master" +configvar CSI_PROW_E2E_ALPHA "$(get_versioned_variable CSI_PROW_E2E_ALPHA "${csi_prow_kubernetes_version_suffix}")" "alpha tests" + +# After the parallel E2E test without alpha features, a test cluster +# with alpha features is brought up and tests that were previously +# disabled are run. The alpha gates in each release have to be listed +# explicitly. If none are set (= variable empty), alpha testing +# is skipped. +# +# Testing against "latest" Kubernetes is problematic because some alpha +# feature which used to work might stop working or change their behavior +# such that the current tests no longer pass. If that happens, +# kubernetes-csi components must be updated, either by disabling +# the failing test for "latest" or by updating the test and not running +# it anymore for older releases. +configvar CSI_PROW_E2E_ALPHA_GATES_1_13 'VolumeSnapshotDataSource=true,BlockVolume=true,CSIBlockVolume=true' "alpha feature gates for Kubernetes 1.13" +configvar CSI_PROW_E2E_ALPHA_GATES_LATEST 'VolumeSnapshotDataSource=true' "alpha feature gates for latest Kubernetes" +configvar CSI_PROW_E2E_ALPHA_GATES "$(get_versioned_variable CSI_PROW_E2E_ALPHA_GATES "${csi_prow_kubernetes_version_suffix}")" "alpha E2E feature gates" + +# Some tests are known to be unusable in a KinD cluster. For example, +# stopping kubelet with "ssh systemctl stop kubelet" simply +# doesn't work. Such tests should be written in a way that they verify +# whether they can run with the current cluster provider, but until +# they are, we filter them out by name. Like the other test selection +# variables, this is again a space separated list of regular expressions. +configvar CSI_PROW_E2E_SKIP 'while.kubelet.is.down.*Disruptive' "tests that need to be skipped" + +# This is the directory for additional result files. Usually set by Prow, but +# if not (for example, when invoking manually) it defaults to the work directory. 
+configvar ARTIFACTS "${CSI_PROW_WORK}/artifacts" "artifacts" +mkdir -p "${ARTIFACTS}" + +RELEASE_TOOLS_ROOT="$(realpath "$(dirname "${BASH_SOURCE[0]}")")" +REPO_DIR="$(pwd)" + +run () { + echo "$(date) $(go version | sed -e 's/.*version \(go[^ ]*\).*/\1/') $(if [ "$(pwd)" != "${REPO_DIR}" ]; then pwd; fi)\$" "$@" >&2 + "$@" +} + +info () { + echo >&2 INFO: "$@" +} + +warn () { + echo >&2 WARNING: "$@" +} + +die () { + echo >&2 ERROR: "$@" + exit 1 +} + +# For additional tools. +CSI_PROW_BIN="${CSI_PROW_WORK}/bin" +mkdir -p "${CSI_PROW_BIN}" +PATH="${CSI_PROW_BIN}:$PATH" + +# Ensure that PATH has the desired version of the Go tools, then run command given as argument. +run_with_go () { + local version + version="$1" + shift + + if go version 2>/dev/null | grep -q "go$version"; then + run "$@" + else + if ! [ -d "${CSI_PROW_WORK}/go-$version" ]; then + run curl --fail --location "https://dl.google.com/go/go$version.linux-amd64.tar.gz" | tar -C "${CSI_PROW_WORK}" -zxf - || die "installation of Go $version failed" + mv "${CSI_PROW_WORK}/go" "${CSI_PROW_WORK}/go-$version" + fi + PATH="${CSI_PROW_WORK}/go-$version/bin:$PATH" run "$@" + fi +} + +# Ensure that we have the desired version of kind. +install_kind () { + if kind --version 2>/dev/null | grep -q " ${CSI_PROW_KIND_VERSION}$"; then + return + fi + if run curl --fail --location -o "${CSI_PROW_WORK}/bin/kind" "https://github.com/kubernetes-sigs/kind/releases/download/${CSI_PROW_KIND_VERSION}/kind-linux-amd64"; then + chmod u+x "${CSI_PROW_WORK}/bin/kind" + else + git_checkout https://github.com/kubernetes-sigs/kind "$GOPATH/src/sigs.k8s.io/kind" "${CSI_PROW_KIND_VERSION}" --depth=1 && + run_with_go "${CSI_PROW_GO_VERSION_KIND}" go build -o "${CSI_PROW_WORK}/bin/kind" sigs.k8s.io/kind + fi +} + +# Ensure that we have the desired version of the ginkgo test runner. +install_ginkgo () { + # CSI_PROW_GINKGO_VERSION contains the tag with v prefix, the command line output does not. + if [ "v$(ginkgo version 2>/dev/null | sed -e 's/.* //')" = "${CSI_PROW_GINKGO_VERSION}" ]; then + return + fi + git_checkout https://github.com/onsi/ginkgo "$GOPATH/src/github.com/onsi/ginkgo" "${CSI_PROW_GINKGO_VERSION}" --depth=1 && + # We have to get dependencies and hence can't call just "go build". + run_with_go "${CSI_PROW_GO_VERSION_GINKGO}" go get github.com/onsi/ginkgo/ginkgo || die "building ginkgo failed" && + mv "$GOPATH/bin/ginkgo" "${CSI_PROW_BIN}" +} + +# This checks out a repo ("https://github.com/kubernetes/kubernetes") +# in a certain location ("$GOPATH/src/k8s.io/kubernetes") at +# a certain revision (a hex commit hash, v1.13.1, master). It's okay +# for that directory to exist already. +git_checkout () { + local repo path revision + repo="$1" + shift + path="$1" + shift + revision="$1" + shift + + mkdir -p "$path" + if ! [ -d "$path/.git" ]; then + run git init "$path" + fi + if (cd "$path" && run git fetch "$@" "$repo" "$revision"); then + (cd "$path" && run git checkout FETCH_HEAD) || die "checking out $repo $revision failed" + else + # Might have been because fetching by revision is not + # supported by GitHub (https://github.com/isaacs/github/issues/436). + # Fall back to fetching everything. + (cd "$path" && run git fetch "$repo" '+refs/heads/*:refs/remotes/csiprow/heads/*' '+refs/tags/*:refs/tags/*') || die "fetching $repo failed" + (cd "$path" && run git checkout "$revision") || die "checking out $repo $revision failed" + fi +} + +list_gates () ( + set -f; IFS=',' + # Ignore: Double quote to prevent globbing and word splitting. 
+ # shellcheck disable=SC2086 + set -- $1 + while [ "$1" ]; do + # Ignore: See if you can use ${variable//search/replace} instead. + # shellcheck disable=SC2001 + echo "$1" | sed -e 's/ *\([^ =]*\) *= *\([^ ]*\) */ \1: \2/' + shift + done +) + +csi_prow_kind_have_kubernetes=false +# Brings up a Kubernetes cluster and sets KUBECONFIG. +# Accepts additional feature gates in the form gate1=true|false,gate2=... +start_cluster () { + local image gates + gates="$1" + + if kind get clusters | grep -q csi-prow; then + run kind delete cluster --name=csi-prow || die "kind delete failed" + fi + + # Build from source? + if [[ "${CSI_PROW_KUBERNETES_VERSION}" =~ ^release-|^latest$ ]]; then + if ! ${csi_prow_kind_have_kubernetes}; then + local version="${CSI_PROW_KUBERNETES_VERSION}" + if [ "$version" = "latest" ]; then + version=master + fi + git_checkout https://github.com/kubernetes/kubernetes "$GOPATH/src/k8s.io/kubernetes" "$version" --depth=1 || die "checking out Kubernetes $version failed" + + # "kind build" and/or the Kubernetes build rules need at least one tag, which we don't have + # when doing a shallow fetch. Therefore we fake one. For some reason, v1.0.0 didn't work + # while v1.14.0-fake.1 did. + (cd "$GOPATH/src/k8s.io/kubernetes" && run git tag -f v1.14.0-fake.1) || die "git tag failed" + + run_with_go "${CSI_PROW_GO_VERSION_K8S}" kind build node-image --type bazel --image csiprow/node:latest --kube-root "$GOPATH/src/k8s.io/kubernetes" || die "'kind build node-image' failed" + csi_prow_kind_have_kubernetes=true + fi + image="csiprow/node:latest" + else + image="kindest/node:v${CSI_PROW_KUBERNETES_VERSION}" + fi + cat >"${CSI_PROW_WORK}/kind-config.yaml" </dev/null; wait) + info "For container output see job artifacts." + die "deploying the hostpath driver with ${deploy_hostpath} failed" + fi +} + +# collect logs and cluster status (like the version of all components, Kubernetes version, test version) +collect_cluster_info () { + cat <>"${ARTIFACTS}/$pod/$container.log" & + echo "$!" + done + done +} + +# Makes the E2E test suite binary available as "${CSI_PROW_WORK}/e2e.test". +install_e2e () { + if [ -e "${CSI_PROW_WORK}/e2e.test" ]; then + return + fi + + git_checkout "${CSI_PROW_E2E_REPO}" "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" "${CSI_PROW_E2E_VERSION}" --depth=1 && + if [ "${CSI_PROW_E2E_IMPORT_PATH}" = "k8s.io/kubernetes" ]; then + run_with_go "${CSI_PROW_GO_VERSION_E2E}" make WHAT=test/e2e/e2e.test "-C${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" && + ln -s "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}/_output/bin/e2e.test" "${CSI_PROW_WORK}" + else + run_with_go "${CSI_PROW_GO_VERSION_E2E}" go test -c -o "${CSI_PROW_WORK}/e2e.test" "${CSI_PROW_E2E_IMPORT_PATH}/test/e2e" + fi +} + +# Makes the csi-sanity test suite binary available as +# "${CSI_PROW_WORK}/csi-sanity". +install_sanity () ( + if [ -e "${CSI_PROW_WORK}/csi-sanity" ]; then + return + fi + + git_checkout "${CSI_PROW_SANITY_REPO}" "${GOPATH}/src/${CSI_PROW_SANITY_IMPORT_PATH}" "${CSI_PROW_SANITY_VERSION}" --depth=1 || die "checking out csi-sanity failed" + run_with_go "${CSI_PROW_GO_VERSION_SANITY}" go test -c -o "${CSI_PROW_WORK}/csi-sanity" "${CSI_PROW_SANITY_IMPORT_PATH}/cmd/csi-sanity" || die "building csi-sanity failed" +) + +# Whether the hostpath driver supports raw block devices depends on which version +# we are testing. It would be much nicer if we could determine that by querying the +# installed driver. 
+hostpath_supports_block () { + if [ -e "cmd/hostpathplugin" ] && ${CSI_PROW_BUILD_JOB}; then + # The assumption is that if we build the hostpath driver, then it is + # a current version with support. + echo true + return + fi + + case "${CSI_PROW_DEPLOYMENT}" in kubernetes-1.13) echo false;; # wasn't supported and probably won't be backported + *) echo true;; # probably all other deployments have a recent driver + esac +} + +# Captures pod output while running some other command. +run_with_loggers () ( + loggers=$(start_loggers -f) + trap 'kill $loggers' EXIT + + run "$@" +) + +# Invokes the filter-junit.go tool. +run_filter_junit () { + run_with_go "${CSI_PROW_GO_VERSION_BUILD}" go run "${RELEASE_TOOLS_ROOT}/filter-junit.go" "$@" +} + +# Runs the E2E test suite in a sub-shell. +run_e2e () ( + name="$1" + shift + + install_e2e || die "building e2e.test failed" + install_ginkgo || die "installing ginkgo failed" + + # TODO (?): multi-node cluster (depends on https://github.com/kubernetes-csi/csi-driver-host-path/pull/14). + # When running on a multi-node cluster, we need to figure out where the + # hostpath driver was deployed and set ClientNodeName accordingly. + + # The content of this file depends on both what the E2E suite expects and + # what the installed hostpath driver supports. Generating it here seems + # prone to breakage, but it is uncertain where a better place might be. + cat >"${CSI_PROW_WORK}/hostpath-test-driver.yaml" </dev/null >/dev/null; then + run_filter_junit -t="External Storage" -o "${ARTIFACTS}/junit_${name}.xml" "${ARTIFACTS}"/junit_[0-9]*.xml && rm -f "${ARTIFACTS}"/junit_[0-9]*.xml + fi + } + trap move_junit EXIT + + cd "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" && + run_with_loggers ginkgo -v "$@" "${CSI_PROW_WORK}/e2e.test" -- -report-dir "${ARTIFACTS}" -storage.testdriver="${CSI_PROW_WORK}/hostpath-test-driver.yaml" +) + +# Run csi-sanity against installed CSI driver. +run_sanity () ( + install_sanity || die "installing csi-sanity failed" + + cat >"${CSI_PROW_WORK}/mkdir_in_pod.sh" <"${CSI_PROW_WORK}/rmdir_in_pod.sh" </\>/g' -e 's/\x1B...//g' +} + +# The "make test" output starts each test with "### :" +# and then ends when the next test starts or with "make: *** +# [] Error 1" when there was a failure. Here we read each +# line of that output, split it up into individual tests and generate +# a make-test.xml file in JUnit format. +make_test_to_junit () { + local ret out testname testoutput + ret=0 + # Plain make-test.xml was not delivered as text/xml by the web + # server and ignored by spyglass. It seems that the name has to + # match junit*.xml. + out="${ARTIFACTS}/junit_make_test.xml" + testname= + echo "" >>"$out" + + while IFS= read -r line; do + echo "$line" # pass through + if echo "$line" | grep -q "^### [^ ]*:$"; then + if [ "$testname" ]; then + # previous test succesful + echo " " >>"$out" + echo " " >>"$out" + fi + # Ignore: See if you can use ${variable//search/replace} instead. + # shellcheck disable=SC2001 + # + # start new test + testname="$(echo "$line" | sed -e 's/^### \([^ ]*\):$/\1/')" + testoutput= + echo " " >>"$out" + echo " " >>"$out" + elif echo "$line" | grep -q '^make: .*Error [0-9]*$'; then + if [ "$testname" ]; then + # Ignore: Consider using { cmd1; cmd2; } >> file instead of individual redirects. 
+ # shellcheck disable=SC2129 + # + # end test with failure + echo " " >>"$out" + # Include the same text as in also in , + # because then it is easier to view in spyglass (shown directly + # instead of having to click through to stdout). + echo " " >>"$out" + echo -n "$testoutput" | ascii_to_xml >>"$out" + echo " " >>"$out" + echo " " >>"$out" + fi + # remember failure for exit code + ret=1 + # not currently inside a test + testname= + else + if [ "$testname" ]; then + # Test output. + echo "$line" | ascii_to_xml >>"$out" + testoutput="$testoutput$line +" + fi + fi + done + # if still in a test, close it now + if [ "$testname" ]; then + echo " " >>"$out" + echo " " >>"$out" + fi + echo "" >>"$out" + + # this makes the error more visible in spyglass + if [ "$ret" -ne 0 ]; then + echo "ERROR: 'make test' failed" + return 1 + fi +} + +main () { + local images ret + ret=0 + + images= + if ${CSI_PROW_BUILD_JOB}; then + # A successful build is required for testing. + run_with_go "${CSI_PROW_GO_VERSION_BUILD}" make all || die "'make all' failed" + # We don't want test failures to prevent E2E testing below, because the failure + # might have been minor or unavoidable, for example when experimenting with + # changes in "release-tools" in a PR (that fails the "is release-tools unmodified" + # test). + if test_enabled "unit"; then + if ! run_with_go "${CSI_PROW_GO_VERSION_BUILD}" make -k test 2>&1 | make_test_to_junit; then + warn "'make test' failed, proceeding anyway" + ret=1 + fi + fi + # Required for E2E testing. + run_with_go "${CSI_PROW_GO_VERSION_BUILD}" make container || die "'make container' failed" + fi + + install_kind || die "installing kind failed" + start_cluster || die "starting the cluster failed" + + if ${CSI_PROW_BUILD_JOB}; then + cmds="$(grep '^\s*CMDS\s*=' Makefile | sed -e 's/\s*CMDS\s*=//')" + # Get the image that was just built (if any) from the + # top-level Makefile CMDS variable and set the + # deploy-hostpath.sh env variables for it. We also need to + # side-load those images into the cluster. + for i in $cmds; do + e=$(echo "$i" | tr '[:lower:]' '[:upper:]' | tr - _) + images="$images ${e}_REGISTRY=none ${e}_TAG=csiprow" + + # We must avoid the tag "latest" because that implies + # always pulling the image + # (https://github.com/kubernetes-sigs/kind/issues/328). + docker tag "$i:latest" "$i:csiprow" || die "tagging the locally built container image for $i failed" + kind load docker-image --name csi-prow "$i:csiprow" || die "could not load the $i:latest image into the kind cluster" + done + + if [ -e deploy/kubernetes/rbac.yaml ]; then + # This is one of those components which has its own RBAC rules (like external-provisioner). + # We are testing a locally built image and also want to test with the the current, + # potentially modified RBAC rules. + if [ "$(echo "$cmds" | wc -w)" != 1 ]; then + die "ambiguous deploy/kubernetes/rbac.yaml: need exactly one command, got: $cmds" + fi + e=$(echo "$cmds" | tr '[:lower:]' '[:upper:]' | tr - _) + images="$images ${e}_RBAC=$(pwd)/deploy/kubernetes/rbac.yaml" + fi + fi + + # Installing the driver might be disabled, in which case we bail out early. + if ! install_hostpath "$images"; then + info "hostpath driver installation disabled, skipping E2E testing" + return "$ret" + fi + + collect_cluster_info + + if test_enabled "sanity"; then + if ! run_sanity; then + ret=1 + fi + fi + + if test_enabled "parallel"; then + # Ignore: Double quote to prevent globbing and word splitting. + # shellcheck disable=SC2086 + if ! 
run_e2e parallel ${CSI_PROW_GINKO_PARALLEL} \ + -focus="External.Storage" \ + -skip="$(regex_join "${CSI_PROW_E2E_SERIAL}" "${CSI_PROW_E2E_ALPHA}" "${CSI_PROW_E2E_SKIP}")"; then + warn "E2E parallel failed" + ret=1 + fi + fi + + if test_enabled "serial"; then + if ! run_e2e serial \ + -focus="External.Storage.*($(regex_join "${CSI_PROW_E2E_SERIAL}"))" \ + -skip="$(regex_join "${CSI_PROW_E2E_ALPHA}" "${CSI_PROW_E2E_SKIP}")"; then + warn "E2E serial failed" + ret=1 + fi + fi + + if (test_enabled "parallel-alpha" || test_enabled "serial-alpha") && [ "${CSI_PROW_E2E_ALPHA_GATES}" ]; then + # Need to (re)create the cluster. + start_cluster "${CSI_PROW_E2E_ALPHA_GATES}" || die "starting alpha cluster failed" + if ${CSI_PROW_BUILD_JOB}; then + # Ignore: Double quote to prevent globbing and word splitting. + # Ignore: To read lines rather than words, pipe/redirect to a 'while read' loop. + # shellcheck disable=SC2086 disable=SC2013 + for i in $(grep '^\s*CMDS\s*=' Makefile | sed -e 's/\s*CMDS\s*=//'); do + kind load docker-image --name csi-prow $i:csiprow || die "could not load the $i:latest image into the kind cluster" + done + fi + install_hostpath "$images" || die "hostpath driver installation failed unexpectedly on alpha cluster" + + if test_enabled "parallel-alpha"; then + # Ignore: Double quote to prevent globbing and word splitting. + # shellcheck disable=SC2086 + if ! run_e2e parallel-alpha ${CSI_PROW_GINKO_PARALLEL} \ + -focus="External.Storage.*($(regex_join "${CSI_PROW_E2E_ALPHA}"))" \ + -skip="$(regex_join "${CSI_PROW_E2E_SERIAL}" "${CSI_PROW_E2E_SKIP}")"; then + warn "E2E parallel alpha failed" + ret=1 + fi + fi + + if test_enabled "serial-alpha"; then + if ! run_e2e serial-alpha \ + -focus="External.Storage.*(($(regex_join "${CSI_PROW_E2E_SERIAL}")).*($(regex_join "${CSI_PROW_E2E_ALPHA}"))|($(regex_join "${CSI_PROW_E2E_ALPHA}")).*($(regex_join "${CSI_PROW_E2E_SERIAL}")))" \ + -skip="$(regex_join "${CSI_PROW_E2E_SKIP}")"; then + warn "E2E serial alpha failed" + ret=1 + fi + fi + fi + + # Merge all junit files into one. This gets rid of duplicated "skipped" tests. + if ls "${ARTIFACTS}"/junit_*.xml 2>/dev/null >&2; then + run_filter_junit -o "${CSI_PROW_WORK}/junit_final.xml" "${ARTIFACTS}"/junit_*.xml && rm "${ARTIFACTS}"/junit_*.xml && mv "${CSI_PROW_WORK}/junit_final.xml" "${ARTIFACTS}" + fi + + return "$ret" +} From 0a0fd49b8b146c21e9c90ffbfd696b83b6ed86b7 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Tue, 2 Apr 2019 20:53:08 +0200 Subject: [PATCH 23/41] prow.sh: comment clarification --- prow.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prow.sh b/prow.sh index 17e8ae4a..faa5c361 100755 --- a/prow.sh +++ b/prow.sh @@ -15,7 +15,7 @@ # limitations under the License. -# This script runs inside a Prow job. It runs unit tests ("make test") +# This script runs inside a Prow job. It can run unit tests ("make test") # and E2E testing. This E2E testing covers different scenarios (see # https://github.com/kubernetes/enhancements/pull/807): # - running the stable hostpath example against a Kubernetes release From 429581c52d1cdbd1d1c3f9c25411fe6aa8c66610 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Tue, 2 Apr 2019 20:53:12 +0200 Subject: [PATCH 24/41] prow.sh: pull Go version from travis.yml The travis.yml is now the only place where the Go version for the component itself needs to be configured. 
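For illustration, the extraction is just a small grep/sed pipeline. The following
standalone sketch shows the idea; the travis.yml fragment and the /tmp path are
made up for the example and the real file in a component may look different:

``` bash
#!/bin/sh
# Hypothetical travis.yml fragment, only to demonstrate the extraction.
cat > /tmp/travis.yml <<'EOF'
language: go
matrix:
  include:
    - go: 1.11.4
EOF

# Keep the "- go:" line and strip everything up to the version number,
# the same pipeline that go_from_travis_yml uses below.
go_version="$(grep '^ *- go:' /tmp/travis.yml | sed -e 's/.*go: *//')"
echo "Go version for building the component: ${go_version}" # prints 1.11.4
```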
--- prow.sh | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/prow.sh b/prow.sh index faa5c361..bab7eb04 100755 --- a/prow.sh +++ b/prow.sh @@ -39,6 +39,9 @@ # - kind (https://github.com/kubernetes-sigs/kind) installed # - optional: Go already installed +RELEASE_TOOLS_ROOT="$(realpath "$(dirname "${BASH_SOURCE[0]}")")" +REPO_DIR="$(pwd)" + # Sets the default value for a variable if not set already and logs the value. # Any variable set this way is usually something that a repo's .prow.sh # or the job can set. @@ -53,7 +56,10 @@ configvar () { # If the pre-installed Go is missing or a different # version, the required version here will get installed # from https://golang.org/dl/. -configvar CSI_PROW_GO_VERSION_BUILD 1.11.4 "Go version for building the component" # depends on component's source code +go_from_travis_yml () { + grep "^ *- go:" "${RELEASE_TOOLS_ROOT}/travis.yml" | sed -e 's/.*go: *//' +} +configvar CSI_PROW_GO_VERSION_BUILD "$(go_from_travis_yml)" "Go version for building the component" # depends on component's source code configvar CSI_PROW_GO_VERSION_K8S 1.12.1 "Go version for building Kubernetes for the test cluster" # depends on Kubernetes version configvar CSI_PROW_GO_VERSION_E2E 1.12.1 "Go version for building the Kubernetes E2E test suite" # depends on CSI_PROW_E2E settings below configvar CSI_PROW_GO_VERSION_SANITY "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building the csi-sanity test suite" # depends on CSI_PROW_SANITY settings below @@ -262,9 +268,6 @@ configvar CSI_PROW_E2E_SKIP 'while.kubelet.is.down.*Disruptive' "tests that need configvar ARTIFACTS "${CSI_PROW_WORK}/artifacts" "artifacts" mkdir -p "${ARTIFACTS}" -RELEASE_TOOLS_ROOT="$(realpath "$(dirname "${BASH_SOURCE[0]}")")" -REPO_DIR="$(pwd)" - run () { echo "$(date) $(go version | sed -e 's/.*version \(go[^ ]*\).*/\1/') $(if [ "$(pwd)" != "${REPO_DIR}" ]; then pwd; fi)\$" "$@" >&2 "$@" From 29545bb012b6cbeab74ffbced61951e9b36137d2 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Wed, 3 Apr 2019 12:38:33 +0200 Subject: [PATCH 25/41] prow.sh: take Go version from Kubernetes source Using the same (recent) Go version for all Kubernetes versions can break for older versions when there are incompatible changes in Go. To avoid that, we use exactly the minimum version of Go required for each Kubernetes version. This is based on the assumption that this combination was tested successfully. When building the E2E suite from Kubernetes (the default) we do the same, but still allow building it from elsewhere. We allow the Go version to be empty when it doesn't matter. 
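The lookup described above boils down to another grep/sed one-liner. A minimal
sketch, assuming that hack/lib/golang.sh contains a minimum_go_version=goX.Y.Z
assignment; the file created under /tmp is a made-up stand-in for a real
Kubernetes checkout:

``` bash
#!/bin/sh
# Hypothetical stand-in for a Kubernetes checkout's hack/lib/golang.sh.
mkdir -p /tmp/kubernetes/hack/lib
cat > /tmp/kubernetes/hack/lib/golang.sh <<'EOF'
minimum_go_version=go1.12.1
EOF

# Take minimum_go_version, strip the "go" prefix (as in
# go_version_for_kubernetes below) and fail loudly if it is missing.
go_version="$(grep minimum_go_version= /tmp/kubernetes/hack/lib/golang.sh | sed -e 's/.*=go//')"
if ! [ "$go_version" ]; then
    echo "unable to determine Go version for this Kubernetes checkout" >&2
    exit 1
fi
echo "building Kubernetes with Go ${go_version}" # prints 1.12.1
```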
--- prow.sh | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/prow.sh b/prow.sh index bab7eb04..6fd55cc2 100755 --- a/prow.sh +++ b/prow.sh @@ -60,8 +60,7 @@ go_from_travis_yml () { grep "^ *- go:" "${RELEASE_TOOLS_ROOT}/travis.yml" | sed -e 's/.*go: *//' } configvar CSI_PROW_GO_VERSION_BUILD "$(go_from_travis_yml)" "Go version for building the component" # depends on component's source code -configvar CSI_PROW_GO_VERSION_K8S 1.12.1 "Go version for building Kubernetes for the test cluster" # depends on Kubernetes version -configvar CSI_PROW_GO_VERSION_E2E 1.12.1 "Go version for building the Kubernetes E2E test suite" # depends on CSI_PROW_E2E settings below +configvar CSI_PROW_GO_VERSION_E2E "" "override Go version for building the Kubernetes E2E test suite" # normally doesn't need to be set, see install_e2e configvar CSI_PROW_GO_VERSION_SANITY "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building the csi-sanity test suite" # depends on CSI_PROW_SANITY settings below configvar CSI_PROW_GO_VERSION_KIND "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building 'kind'" # depends on CSI_PROW_KIND_VERSION below configvar CSI_PROW_GO_VERSION_GINKGO "${CSI_PROW_GO_VERSION_BUILD}" "Go version for building ginkgo" # depends on CSI_PROW_GINKGO_VERSION below @@ -292,12 +291,14 @@ mkdir -p "${CSI_PROW_BIN}" PATH="${CSI_PROW_BIN}:$PATH" # Ensure that PATH has the desired version of the Go tools, then run command given as argument. +# Empty parameter uses the already installed Go. In Prow, that version is kept up-to-date by +# bumping the container image regularly. run_with_go () { local version version="$1" shift - if go version 2>/dev/null | grep -q "go$version"; then + if ! [ "$version" ] || go version 2>/dev/null | grep -q "go$version"; then run "$@" else if ! [ -d "${CSI_PROW_WORK}/go-$version" ]; then @@ -374,6 +375,20 @@ list_gates () ( done ) +go_version_for_kubernetes () ( + local path="$1" + local version="$2" + local go_version + + # We use the minimal Go version specified for each K8S release (= minimum_go_version in hack/lib/golang.sh). + # More recent versions might also work, but we don't want to count on that. + go_version="$(grep minimum_go_version= "$path/hack/lib/golang.sh" | sed -e 's/.*=go//')" + if ! [ "$go_version" ]; then + die "Unable to determine Go version for Kubernetes $version from hack/lib/golang.sh." + fi + echo "$go_version" +) + csi_prow_kind_have_kubernetes=false # Brings up a Kubernetes cluster and sets KUBECONFIG. # Accepts additional feature gates in the form gate1=true|false,gate2=... @@ -399,7 +414,8 @@ start_cluster () { # while v1.14.0-fake.1 did. 
(cd "$GOPATH/src/k8s.io/kubernetes" && run git tag -f v1.14.0-fake.1) || die "git tag failed" - run_with_go "${CSI_PROW_GO_VERSION_K8S}" kind build node-image --type bazel --image csiprow/node:latest --kube-root "$GOPATH/src/k8s.io/kubernetes" || die "'kind build node-image' failed" + go_version="$(go_version_for_kubernetes "$GOPATH/src/k8s.io/kubernetes" "$version")" || die "cannot proceed without knowing Go version for Kubernetes" + run_with_go "$go_version" kind build node-image --type bazel --image csiprow/node:latest --kube-root "$GOPATH/src/k8s.io/kubernetes" || die "'kind build node-image' failed" csi_prow_kind_have_kubernetes=true fi image="csiprow/node:latest" @@ -563,7 +579,8 @@ install_e2e () { git_checkout "${CSI_PROW_E2E_REPO}" "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" "${CSI_PROW_E2E_VERSION}" --depth=1 && if [ "${CSI_PROW_E2E_IMPORT_PATH}" = "k8s.io/kubernetes" ]; then - run_with_go "${CSI_PROW_GO_VERSION_E2E}" make WHAT=test/e2e/e2e.test "-C${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" && + go_version="${CSI_PROW_GO_VERSION_E2E:-$(go_version_for_kubernetes "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" "${CSI_PROW_E2E_VERSION}")}" && + run_with_go "$go_version" make WHAT=test/e2e/e2e.test "-C${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}" && ln -s "${GOPATH}/src/${CSI_PROW_E2E_IMPORT_PATH}/_output/bin/e2e.test" "${CSI_PROW_WORK}" else run_with_go "${CSI_PROW_GO_VERSION_E2E}" go test -c -o "${CSI_PROW_WORK}/e2e.test" "${CSI_PROW_E2E_IMPORT_PATH}/test/e2e" From 741319bd308c10b6c7b16c3cce71ef941cb7086b Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Wed, 3 Apr 2019 12:42:33 +0200 Subject: [PATCH 26/41] prow.sh: improve building Kubernetes from source While switching back and forth between release-1.13 and release-1.14 locally, there was the problem that the local kind image kept using the wrong kubelet binary despite rebuilding from source. The problem went away after cleaning the Bazel cache. Exact root cause unknown, but perhaps using unique tags and properly cleaning the repo helps. If not, then the unique tags at least will show what the problem is. --- prow.sh | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/prow.sh b/prow.sh index 6fd55cc2..a7ea66f2 100755 --- a/prow.sh +++ b/prow.sh @@ -360,6 +360,9 @@ git_checkout () { (cd "$path" && run git fetch "$repo" '+refs/heads/*:refs/remotes/csiprow/heads/*' '+refs/tags/*:refs/tags/*') || die "fetching $repo failed" (cd "$path" && run git checkout "$revision") || die "checking out $repo $revision failed" fi + # This is useful for local testing or when switching between different revisions in the same + # repo. + (cd "$path" && run git clean -fdx) || die "failed to clean $path" } list_gates () ( @@ -410,10 +413,20 @@ start_cluster () { git_checkout https://github.com/kubernetes/kubernetes "$GOPATH/src/k8s.io/kubernetes" "$version" --depth=1 || die "checking out Kubernetes $version failed" # "kind build" and/or the Kubernetes build rules need at least one tag, which we don't have - # when doing a shallow fetch. Therefore we fake one. For some reason, v1.0.0 didn't work - # while v1.14.0-fake.1 did. - (cd "$GOPATH/src/k8s.io/kubernetes" && run git tag -f v1.14.0-fake.1) || die "git tag failed" - + # when doing a shallow fetch. Therefore we fake one: + # release-1.12 -> v1.12.0-release..csiprow + # latest or -> v1.14.0-.csiprow + case "${CSI_PROW_KUBERNETES_VERSION}" in + release-*) + # Ignore: See if you can use ${variable//search/replace} instead. 
+ # shellcheck disable=SC2001 + tag="$(echo "${CSI_PROW_KUBERNETES_VERSION}" | sed -e 's/release-\(.*\)/v\1.0-release./')";; + *) + # We have to make something up. v1.0.0 did not work for some reasons. + tag="v1.14.0-";; + esac + tag="$tag$(cd "$GOPATH/src/k8s.io/kubernetes" && git rev-list --abbrev-commit HEAD).csiprow" + (cd "$GOPATH/src/k8s.io/kubernetes" && run git tag -f "$tag") || die "git tag failed" go_version="$(go_version_for_kubernetes "$GOPATH/src/k8s.io/kubernetes" "$version")" || die "cannot proceed without knowing Go version for Kubernetes" run_with_go "$go_version" kind build node-image --type bazel --image csiprow/node:latest --kube-root "$GOPATH/src/k8s.io/kubernetes" || die "'kind build node-image' failed" csi_prow_kind_have_kubernetes=true From 6602d38bfbce6a526f9225c39aa6b86adbd86978 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Wed, 3 Apr 2019 13:54:01 +0200 Subject: [PATCH 27/41] prow.sh: different E2E suite depending on Kubernetes version Instead of always using the latest E2E tests for all Kubernetes versions, the plan now is to use the tests that match the Kubernetes version. However, for 1.13 we have to make an exception because the suite for that version did not support the necessary --storage.testdriver parameter. --- prow.sh | 72 ++++++++++++++++++++++++++++----------------------------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/prow.sh b/prow.sh index a7ea66f2..c54a81b4 100755 --- a/prow.sh +++ b/prow.sh @@ -52,6 +52,19 @@ configvar () { eval echo "\$3:" "$1=\${$1}" } +# Prints the value of a variable + version suffix, falling back to variable + "LATEST". +get_versioned_variable () { + local var="$1" + local version="$2" + local value + + eval value="\${${var}_${version}}" + if ! [ "$value" ]; then + eval value="\${${var}_LATEST}" + fi + echo "$value" +} + # Go versions can be specified seperately for different tasks # If the pre-installed Go is missing or a different # version, the required version here will get installed @@ -158,9 +171,15 @@ configvar CSI_PROW_HOSTPATH_CANARY "" "hostpath image" # all generated files are present. # # CSI_PROW_E2E_REPO=none disables E2E testing. 
-configvar CSI_PROW_E2E_VERSION v1.14.0 "E2E version" -configvar CSI_PROW_E2E_REPO https://github.com/kubernetes/kubernetes "E2E repo" -configvar CSI_PROW_E2E_IMPORT_PATH k8s.io/kubernetes "E2E package" +configvar CSI_PROW_E2E_VERSION_1_13 v1.14.0 "E2E version for Kubernetes 1.13.x" # we can't use the one from 1.13.x because it didn't have --storage.testdriver +configvar CSI_PROW_E2E_VERSION_1_14 v1.14.0 "E2E version for Kubernetes 1.14.x" +# TODO: add new CSI_PROW_E2E_VERSION entry for future Kubernetes releases +configvar CSI_PROW_E2E_VERSION_LATEST master "E2E version for Kubernetes master" # testing against Kubernetes master is already tracking a moving target, so we might as well use a moving E2E version +configvar CSI_PROW_E2E_REPO_LATEST https://github.com/kubernetes/kubernetes "E2E repo for Kubernetes >= 1.13.x" # currently the same for all versions +configvar CSI_PROW_E2E_IMPORT_PATH_LATEST k8s.io/kubernetes "E2E package for Kubernetes >= 1.13.x" # currently the same for all versions +configvar CSI_PROW_E2E_VERSION "$(get_versioned_variable CSI_PROW_E2E_VERSION "${csi_prow_kubernetes_version_suffix}")" "E2E version" +configvar CSI_PROW_E2E_REPO "$(get_versioned_variable CSI_PROW_E2E_REPO "${csi_prow_kubernetes_version_suffix}")" "E2E repo" +configvar CSI_PROW_E2E_IMPORT_PATH "$(get_versioned_variable CSI_PROW_E2E_IMPORT_PATH "${csi_prow_kubernetes_version_suffix}")" "E2E package" # csi-sanity testing from the csi-test repo can be run against the installed # CSI driver. For this to work, deploying the driver must expose the Unix domain @@ -200,42 +219,22 @@ regex_join () { echo "$@" | sed -e 's/ */|/g' -e 's/^|*//' -e 's/|*$//' -e 's/^$/this-matches-nothing/g' } -# Prints the value of a variable + version suffix, falling back to variable + "LATEST". -get_versioned_variable () { - local var="$1" - local version="$2" - local value - - eval value="\${${var}_${version}}" - if ! [ "$value" ]; then - eval value="\${${var}_LATEST}" - fi - echo "$value" -} - -# Which tests are alpha depends on the Kubernetes version. The same -# E2E test suite is used for all Kubernetes versions, including older -# Kubernetes. -# -# Feature tags in the test are set based on what is an alpha -# feature in the Kubernetes version that contains the E2E test suite's -# source code. +# Which tests are alpha depends on the Kubernetes version. We could +# use the same E2E test for all Kubernetes version. This would have +# the advantage that new tests can be applied to older versions +# without having to backport tests. # -# So when testing against an older Kubernetes release, some tests -# might be enabled by default which don't pass for that older -# Kubernetes version. In that case, the -# CSI_PROW_E2E_ALPHA_ variable cannot just -# be based on the `Feature` tag, it also must contain the names -# of tests that used to be alpha but no longer have that tag. +# But the feature tag gets removed from E2E tests when the corresponding +# feature becomes beta, so we would have to track which tests were +# alpha in previous Kubernetes releases. This was considered too +# error prone. Therefore we use E2E tests that match the Kubernetes +# version that is getting tested. # -# is just major+minor version separated by -# underscore. "latest" matches master. It is also used for -# unknown versions, so when master gets released there is -# no need to add a new versioned variable. That only needs to be -# done when updating something in the configuration that leads -# to "latest" no longer being suitable. 
+# However, for 1.13.x testing we have to use the E2E tests from 1.14 +# because 1.13 didn't have --storage.testdriver yet, so for that (and only +# that version) we have to define alpha tests differently. configvar CSI_PROW_E2E_ALPHA_1_13 '\[Feature: \[Testpattern:.Dynamic.PV..block.volmode.\] should.create.and.delete.block.persistent.volumes' "alpha tests for Kubernetes 1.13" # Raw block was an alpha feature in 1.13. -configvar CSI_PROW_E2E_ALPHA_LATEST '\[Feature:' "alpha tests for Kubernetes master" +configvar CSI_PROW_E2E_ALPHA_LATEST '\[Feature:' "alpha tests for Kubernetes >= 1.14" # there's no need to update this, adding a new case for CSI_PROW_E2E for a new Kubernetes is enough configvar CSI_PROW_E2E_ALPHA "$(get_versioned_variable CSI_PROW_E2E_ALPHA "${csi_prow_kubernetes_version_suffix}")" "alpha tests" # After the parallel E2E test without alpha features, a test cluster @@ -251,6 +250,7 @@ configvar CSI_PROW_E2E_ALPHA "$(get_versioned_variable CSI_PROW_E2E_ALPHA "${csi # the failing test for "latest" or by updating the test and not running # it anymore for older releases. configvar CSI_PROW_E2E_ALPHA_GATES_1_13 'VolumeSnapshotDataSource=true,BlockVolume=true,CSIBlockVolume=true' "alpha feature gates for Kubernetes 1.13" +# TODO: add new CSI_PROW_ALPHA_GATES entry for future Kubernetes releases configvar CSI_PROW_E2E_ALPHA_GATES_LATEST 'VolumeSnapshotDataSource=true' "alpha feature gates for latest Kubernetes" configvar CSI_PROW_E2E_ALPHA_GATES "$(get_versioned_variable CSI_PROW_E2E_ALPHA_GATES "${csi_prow_kubernetes_version_suffix}")" "alpha E2E feature gates" From d87eccb46dca47496f7f03bce1909e9ba548f00e Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Wed, 3 Apr 2019 14:59:33 +0200 Subject: [PATCH 28/41] prow.sh: switch back to upstream csi-driver-host-path The temporary fork was merged, we can use the upstream repo again. --- prow.sh | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/prow.sh b/prow.sh index c54a81b4..490fe99f 100755 --- a/prow.sh +++ b/prow.sh @@ -151,12 +151,8 @@ configvar CSI_PROW_WORK "$(mkdir -p "$GOPATH/pkg" && mktemp -d "$GOPATH/pkg/csip # # When no deploy script is found (nothing in `deploy` directory, # CSI_PROW_HOSTPATH_REPO=none), nothing gets deployed. -# -# TODO: merge https://github.com/kubernetes-csi/csi-driver-host-path/pull/29, switch to revision from master -configvar CSI_PROW_HOSTPATH_VERSION deployment "hostpath driver" -configvar CSI_PROW_HOSTPATH_REPO https://github.com/pohly/csi-driver-host-path "hostpath repo" -# Ignore 'See if you can use ${variable//search/replace} instead.' -# shellcheck disable=SC2001 +configvar CSI_PROW_HOSTPATH_VERSION 486074dc3beef59955faf7bb5210418d9844e0a7 "hostpath driver" # pre-1.1.0 +configvar CSI_PROW_HOSTPATH_REPO https://github.com/kubernetes-csi/csi-driver-host-path "hostpath repo" configvar CSI_PROW_DEPLOYMENT "" "deployment" # If CSI_PROW_HOSTPATH_CANARY is set (typically to "canary", but also From f5014439fc2b977a357ff3b49fd6aab052302f7c Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 5 Apr 2019 08:44:53 +0200 Subject: [PATCH 29/41] prow.sh: AllAlpha=true for unknown Kubernetes versions This ensures that also new, currently unknown alpha gates are enabled when testing against a future Kubernetes versions. For all currently known Kubernetes versions we just use the minimal set of alpha gates, which ensures that we don't miss any of them in our documentation. 
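The reason this also covers releases that are not listed explicitly is the
LATEST fallback in get_versioned_variable. A minimal self-contained sketch:
the function body is the one from prow.sh, the gate values are the ones from
this patch, and 1_15 merely stands in for some future, not yet configured
release:

``` bash
#!/bin/bash
# Fallback logic as in prow.sh's get_versioned_variable.
get_versioned_variable () {
    local var="$1"
    local version="$2"
    local value

    eval value="\${${var}_${version}}"
    if ! [ "$value" ]; then
        eval value="\${${var}_LATEST}"
    fi
    echo "$value"
}

CSI_PROW_E2E_ALPHA_GATES_1_13='VolumeSnapshotDataSource=true,BlockVolume=true,CSIBlockVolume=true'
CSI_PROW_E2E_ALPHA_GATES_1_14='VolumeSnapshotDataSource=true'
CSI_PROW_E2E_ALPHA_GATES_LATEST='AllAlpha=true'

# Known release: the minimal, documented set of alpha gates.
get_versioned_variable CSI_PROW_E2E_ALPHA_GATES 1_13 # VolumeSnapshotDataSource=true,BlockVolume=true,CSIBlockVolume=true
# Unknown or future release: falls back to LATEST and enables all alpha gates.
get_versioned_variable CSI_PROW_E2E_ALPHA_GATES 1_15 # AllAlpha=true
```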
--- prow.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/prow.sh b/prow.sh index 490fe99f..5a636bc3 100755 --- a/prow.sh +++ b/prow.sh @@ -246,8 +246,9 @@ configvar CSI_PROW_E2E_ALPHA "$(get_versioned_variable CSI_PROW_E2E_ALPHA "${csi # the failing test for "latest" or by updating the test and not running # it anymore for older releases. configvar CSI_PROW_E2E_ALPHA_GATES_1_13 'VolumeSnapshotDataSource=true,BlockVolume=true,CSIBlockVolume=true' "alpha feature gates for Kubernetes 1.13" +configvar CSI_PROW_E2E_ALPHA_GATES_1_14 'VolumeSnapshotDataSource=true' "alpha feature gates for Kubernetes 1.14" # TODO: add new CSI_PROW_ALPHA_GATES entry for future Kubernetes releases -configvar CSI_PROW_E2E_ALPHA_GATES_LATEST 'VolumeSnapshotDataSource=true' "alpha feature gates for latest Kubernetes" +configvar CSI_PROW_E2E_ALPHA_GATES_LATEST 'AllAlpha=true' "alpha feature gates for latest Kubernetes" configvar CSI_PROW_E2E_ALPHA_GATES "$(get_versioned_variable CSI_PROW_E2E_ALPHA_GATES "${csi_prow_kubernetes_version_suffix}")" "alpha E2E feature gates" # Some tests are known to be unusable in a KinD cluster. For example, From 31dfaf31dc6fb132255f9696a4562ab3e8a741ae Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Mon, 8 Apr 2019 08:44:10 +0200 Subject: [PATCH 30/41] prow.sh: fix running of just "alpha" tests "grep -w" treated "serial-alpha" as two words and therefore CSI_PROW_TESTS sometimes ran too many tests. --- prow.sh | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/prow.sh b/prow.sh index 5a636bc3..f39b3394 100755 --- a/prow.sh +++ b/prow.sh @@ -203,7 +203,16 @@ configvar CSI_PROW_SANITY_CONTAINER "hostpath" "Kubernetes container with CSI dr # by setting CSI_PROW_TESTS_SANITY. configvar CSI_PROW_TESTS "unit parallel serial parallel-alpha serial-alpha ${CSI_PROW_TESTS_SANITY}" "tests to run" test_enabled () { - echo "${CSI_PROW_TESTS}" | grep -q -w -e "$1" + local test="$1" + # We want word-splitting here, so ignore: Double quote to prevent globbing and word splitting. + # shellcheck disable=SC2086 + set ${CSI_PROW_TESTS} + for t in "$@"; do + if [ "$t" = "$test" ]; then + return + fi + done + return 1 } # Serial vs. parallel is always determined by these regular expressions. From f3d1d2df5c85dede9cf25201c69634551a538b92 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Mon, 8 Apr 2019 08:47:15 +0200 Subject: [PATCH 31/41] prow.sh: fix hostpath driver version check The previous logic failed for canary jobs, those also deploy a recent driver. Instead of guessing what driver gets installed based on job parameters, check what really runs in the cluster and base the decision on that. We only need to maintain this blacklist for 1.0.x until we replace it with 1.1.0, then this entire hostpath_supports_block can be removed. --- prow.sh | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/prow.sh b/prow.sh index f39b3394..cd041319 100755 --- a/prow.sh +++ b/prow.sh @@ -619,18 +619,18 @@ install_sanity () ( # Whether the hostpath driver supports raw block devices depends on which version # we are testing. It would be much nicer if we could determine that by querying the -# installed driver. +# installed driver's capabilities instead of having to do a version check. hostpath_supports_block () { - if [ -e "cmd/hostpathplugin" ] && ${CSI_PROW_BUILD_JOB}; then - # The assumption is that if we build the hostpath driver, then it is - # a current version with support. 
- echo true - return - fi - - case "${CSI_PROW_DEPLOYMENT}" in kubernetes-1.13) echo false;; # wasn't supported and probably won't be backported - *) echo true;; # probably all other deployments have a recent driver - esac + local result + result="$(docker exec csi-prow-control-plane docker image ls --format='{{.Repository}} {{.Tag}} {{.ID}}' | grep hostpath | while read -r repo tag id; do + if [ "$tag" == "v1.0.1" ]; then + # Old version because the revision label is missing: didn't have support yet. + echo "false" + return + fi + done)" + # If not set, then it must be a newer driver with support. + echo "${result:-true}" } # Captures pod output while running some other command. From aa45a1cd9b9fafde60f7d8a8e314287e3b8a7794 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Mon, 8 Apr 2019 08:51:11 +0200 Subject: [PATCH 32/41] prow.sh: more efficient execution of individual tests When running only some tests, sometimes extra, unnecessarily work was done, like bringing up the cluster without alpha gates. --- prow.sh | 214 ++++++++++++++++++++++++++++++-------------------------- 1 file changed, 116 insertions(+), 98 deletions(-) diff --git a/prow.sh b/prow.sh index cd041319..72be16f5 100755 --- a/prow.sh +++ b/prow.sh @@ -202,18 +202,30 @@ configvar CSI_PROW_SANITY_CONTAINER "hostpath" "Kubernetes container with CSI dr # is off by default. A CSI driver can change that default in its .prow.sh # by setting CSI_PROW_TESTS_SANITY. configvar CSI_PROW_TESTS "unit parallel serial parallel-alpha serial-alpha ${CSI_PROW_TESTS_SANITY}" "tests to run" -test_enabled () { - local test="$1" - # We want word-splitting here, so ignore: Double quote to prevent globbing and word splitting. - # shellcheck disable=SC2086 - set ${CSI_PROW_TESTS} - for t in "$@"; do - if [ "$t" = "$test" ]; then - return - fi +tests_enabled () { + local t1 t2 + # We want word-splitting here, so ignore: Quote to prevent word splitting, or split robustly with mapfile or read -a. + # shellcheck disable=SC2206 + local tests=(${CSI_PROW_TESTS}) + for t1 in "$@"; do + for t2 in "${tests[@]}"; do + if [ "$t1" = "$t2" ]; then + return + fi + done done return 1 } +tests_need_kind () { + tests_enabled "sanity" "parallel" "serial" "serial-alpha" "parallel-alpha" +} +tests_need_non_alpha_cluster () { + tests_enabled "sanity" "parallel" "serial" +} +tests_need_alpha_cluster () { + tests_enabled "parallel-alpha" "serial-alpha" +} + # Serial vs. parallel is always determined by these regular expressions. # Individual regular expressions are seperated by spaces for readability @@ -521,6 +533,15 @@ install_hostpath () { return 1 fi + if ${CSI_PROW_BUILD_JOB}; then + # Ignore: Double quote to prevent globbing and word splitting. + # Ignore: To read lines rather than words, pipe/redirect to a 'while read' loop. + # shellcheck disable=SC2086 disable=SC2013 + for i in $(grep '^\s*CMDS\s*=' Makefile | sed -e 's/\s*CMDS\s*=//'); do + kind load docker-image --name csi-prow $i:csiprow || die "could not load the $i:latest image into the kind cluster" + done + fi + if deploy_hostpath="$(find_deployment "$(pwd)/deploy")"; then : elif [ "${CSI_PROW_HOSTPATH_REPO}" = "none" ]; then @@ -836,7 +857,7 @@ main () { # might have been minor or unavoidable, for example when experimenting with # changes in "release-tools" in a PR (that fails the "is release-tools unmodified" # test). - if test_enabled "unit"; then + if tests_enabled "unit"; then if ! 
run_with_go "${CSI_PROW_GO_VERSION_BUILD}" make -k test 2>&1 | make_test_to_junit; then warn "'make test' failed, proceeding anyway" ret=1 @@ -846,102 +867,99 @@ main () { run_with_go "${CSI_PROW_GO_VERSION_BUILD}" make container || die "'make container' failed" fi - install_kind || die "installing kind failed" - start_cluster || die "starting the cluster failed" + if tests_need_kind; then + install_kind || die "installing kind failed" - if ${CSI_PROW_BUILD_JOB}; then - cmds="$(grep '^\s*CMDS\s*=' Makefile | sed -e 's/\s*CMDS\s*=//')" - # Get the image that was just built (if any) from the - # top-level Makefile CMDS variable and set the - # deploy-hostpath.sh env variables for it. We also need to - # side-load those images into the cluster. - for i in $cmds; do - e=$(echo "$i" | tr '[:lower:]' '[:upper:]' | tr - _) - images="$images ${e}_REGISTRY=none ${e}_TAG=csiprow" - - # We must avoid the tag "latest" because that implies - # always pulling the image - # (https://github.com/kubernetes-sigs/kind/issues/328). - docker tag "$i:latest" "$i:csiprow" || die "tagging the locally built container image for $i failed" - kind load docker-image --name csi-prow "$i:csiprow" || die "could not load the $i:latest image into the kind cluster" - done + if ${CSI_PROW_BUILD_JOB}; then + cmds="$(grep '^\s*CMDS\s*=' Makefile | sed -e 's/\s*CMDS\s*=//')" + # Get the image that was just built (if any) from the + # top-level Makefile CMDS variable and set the + # deploy-hostpath.sh env variables for it. We also need to + # side-load those images into the cluster. + for i in $cmds; do + e=$(echo "$i" | tr '[:lower:]' '[:upper:]' | tr - _) + images="$images ${e}_REGISTRY=none ${e}_TAG=csiprow" + + # We must avoid the tag "latest" because that implies + # always pulling the image + # (https://github.com/kubernetes-sigs/kind/issues/328). + docker tag "$i:latest" "$i:csiprow" || die "tagging the locally built container image for $i failed" + done - if [ -e deploy/kubernetes/rbac.yaml ]; then - # This is one of those components which has its own RBAC rules (like external-provisioner). - # We are testing a locally built image and also want to test with the the current, - # potentially modified RBAC rules. - if [ "$(echo "$cmds" | wc -w)" != 1 ]; then - die "ambiguous deploy/kubernetes/rbac.yaml: need exactly one command, got: $cmds" + if [ -e deploy/kubernetes/rbac.yaml ]; then + # This is one of those components which has its own RBAC rules (like external-provisioner). + # We are testing a locally built image and also want to test with the the current, + # potentially modified RBAC rules. + if [ "$(echo "$cmds" | wc -w)" != 1 ]; then + die "ambiguous deploy/kubernetes/rbac.yaml: need exactly one command, got: $cmds" + fi + e=$(echo "$cmds" | tr '[:lower:]' '[:upper:]' | tr - _) + images="$images ${e}_RBAC=$(pwd)/deploy/kubernetes/rbac.yaml" fi - e=$(echo "$cmds" | tr '[:lower:]' '[:upper:]' | tr - _) - images="$images ${e}_RBAC=$(pwd)/deploy/kubernetes/rbac.yaml" - fi - fi - - # Installing the driver might be disabled, in which case we bail out early. - if ! install_hostpath "$images"; then - info "hostpath driver installation disabled, skipping E2E testing" - return "$ret" - fi - - collect_cluster_info - - if test_enabled "sanity"; then - if ! run_sanity; then - ret=1 - fi - fi - - if test_enabled "parallel"; then - # Ignore: Double quote to prevent globbing and word splitting. - # shellcheck disable=SC2086 - if ! 
run_e2e parallel ${CSI_PROW_GINKO_PARALLEL} \ - -focus="External.Storage" \ - -skip="$(regex_join "${CSI_PROW_E2E_SERIAL}" "${CSI_PROW_E2E_ALPHA}" "${CSI_PROW_E2E_SKIP}")"; then - warn "E2E parallel failed" - ret=1 fi - fi - - if test_enabled "serial"; then - if ! run_e2e serial \ - -focus="External.Storage.*($(regex_join "${CSI_PROW_E2E_SERIAL}"))" \ - -skip="$(regex_join "${CSI_PROW_E2E_ALPHA}" "${CSI_PROW_E2E_SKIP}")"; then - warn "E2E serial failed" - ret=1 - fi - fi - if (test_enabled "parallel-alpha" || test_enabled "serial-alpha") && [ "${CSI_PROW_E2E_ALPHA_GATES}" ]; then - # Need to (re)create the cluster. - start_cluster "${CSI_PROW_E2E_ALPHA_GATES}" || die "starting alpha cluster failed" - if ${CSI_PROW_BUILD_JOB}; then - # Ignore: Double quote to prevent globbing and word splitting. - # Ignore: To read lines rather than words, pipe/redirect to a 'while read' loop. - # shellcheck disable=SC2086 disable=SC2013 - for i in $(grep '^\s*CMDS\s*=' Makefile | sed -e 's/\s*CMDS\s*=//'); do - kind load docker-image --name csi-prow $i:csiprow || die "could not load the $i:latest image into the kind cluster" - done - fi - install_hostpath "$images" || die "hostpath driver installation failed unexpectedly on alpha cluster" - - if test_enabled "parallel-alpha"; then - # Ignore: Double quote to prevent globbing and word splitting. - # shellcheck disable=SC2086 - if ! run_e2e parallel-alpha ${CSI_PROW_GINKO_PARALLEL} \ - -focus="External.Storage.*($(regex_join "${CSI_PROW_E2E_ALPHA}"))" \ - -skip="$(regex_join "${CSI_PROW_E2E_SERIAL}" "${CSI_PROW_E2E_SKIP}")"; then - warn "E2E parallel alpha failed" - ret=1 + if tests_need_non_alpha_cluster; then + start_cluster || die "starting the non-alpha cluster failed" + + # Installing the driver might be disabled. + if install_hostpath "$images"; then + collect_cluster_info + + if tests_enabled "sanity"; then + if ! run_sanity; then + ret=1 + fi + fi + + if tests_enabled "parallel"; then + # Ignore: Double quote to prevent globbing and word splitting. + # shellcheck disable=SC2086 + if ! run_e2e parallel ${CSI_PROW_GINKO_PARALLEL} \ + -focus="External.Storage" \ + -skip="$(regex_join "${CSI_PROW_E2E_SERIAL}" "${CSI_PROW_E2E_ALPHA}" "${CSI_PROW_E2E_SKIP}")"; then + warn "E2E parallel failed" + ret=1 + fi + fi + + if tests_enabled "serial"; then + if ! run_e2e serial \ + -focus="External.Storage.*($(regex_join "${CSI_PROW_E2E_SERIAL}"))" \ + -skip="$(regex_join "${CSI_PROW_E2E_ALPHA}" "${CSI_PROW_E2E_SKIP}")"; then + warn "E2E serial failed" + ret=1 + fi + fi fi fi - if test_enabled "serial-alpha"; then - if ! run_e2e serial-alpha \ - -focus="External.Storage.*(($(regex_join "${CSI_PROW_E2E_SERIAL}")).*($(regex_join "${CSI_PROW_E2E_ALPHA}"))|($(regex_join "${CSI_PROW_E2E_ALPHA}")).*($(regex_join "${CSI_PROW_E2E_SERIAL}")))" \ - -skip="$(regex_join "${CSI_PROW_E2E_SKIP}")"; then - warn "E2E serial alpha failed" - ret=1 + if tests_need_alpha_cluster && [ "${CSI_PROW_E2E_ALPHA_GATES}" ]; then + # Need to (re)create the cluster. + start_cluster "${CSI_PROW_E2E_ALPHA_GATES}" || die "starting alpha cluster failed" + + # Installing the driver might be disabled. + if install_hostpath "$images"; then + collect_cluster_info + + if tests_enabled "parallel-alpha"; then + # Ignore: Double quote to prevent globbing and word splitting. + # shellcheck disable=SC2086 + if ! 
run_e2e parallel-alpha ${CSI_PROW_GINKO_PARALLEL} \ + -focus="External.Storage.*($(regex_join "${CSI_PROW_E2E_ALPHA}"))" \ + -skip="$(regex_join "${CSI_PROW_E2E_SERIAL}" "${CSI_PROW_E2E_SKIP}")"; then + warn "E2E parallel alpha failed" + ret=1 + fi + fi + + if tests_enabled "serial-alpha"; then + if ! run_e2e serial-alpha \ + -focus="External.Storage.*(($(regex_join "${CSI_PROW_E2E_SERIAL}")).*($(regex_join "${CSI_PROW_E2E_ALPHA}"))|($(regex_join "${CSI_PROW_E2E_ALPHA}")).*($(regex_join "${CSI_PROW_E2E_SERIAL}")))" \ + -skip="$(regex_join "${CSI_PROW_E2E_SKIP}")"; then + warn "E2E serial alpha failed" + ret=1 + fi + fi fi fi fi From 9b0d9cd74370bff4dfaf86c4ab75973ec9aa56a2 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Mon, 8 Apr 2019 11:37:01 +0200 Subject: [PATCH 33/41] build.make: skip shellcheck if Docker is not available Not all environments have Docker. The simplifying assumption here is that if the Docker command is available, it's also usable. --- build.make | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/build.make b/build.make index 078100c9..e5769c58 100644 --- a/build.make +++ b/build.make @@ -141,6 +141,10 @@ test: test-shellcheck test-shellcheck: @ echo; echo "### $@:" @ ret=0; \ + if ! command -v docker; then \ + echo "skipped, no Docker"; \ + return 0; \ + fi; \ for dir in $(abspath $(TEST_SHELLCHECK_DIRS)); do \ echo; \ echo "$$dir:"; \ From 546d5504a1902276561ee1bfc3811afb61346855 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Mon, 8 Apr 2019 19:42:38 +0200 Subject: [PATCH 34/41] prow.sh: debug failing KinD cluster creation When KinD fails in a Prow job, we need additional information to understand why it failed. --- prow.sh | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/prow.sh b/prow.sh index 72be16f5..c2075685 100755 --- a/prow.sh +++ b/prow.sh @@ -488,7 +488,17 @@ $(list_gates "$gates") featureGates: $(list_gates "$gates") EOF - run kind create cluster --name csi-prow --config "${CSI_PROW_WORK}/kind-config.yaml" --wait 5m --image "$image" || die "'kind create cluster' failed" + info "kind-config.yaml:" + cat "${CSI_PROW_WORK}/kind-config.yaml" + if ! run kind create cluster --name csi-prow --config "${CSI_PROW_WORK}/kind-config.yaml" --wait 5m --image "$image"; then + warn "Cluster creation failed. Will try again with higher verbosity." + info "Available Docker images:" + docker image ls + if ! run kind --loglevel debug create cluster --retain --name csi-prow --config "${CSI_PROW_WORK}/kind-config.yaml" --wait 5m --image "$image"; then + run kind export logs --name csi-prow "$ARTIFACTS/kind-cluster" + die "Cluster creation failed again, giving up. See the 'kind-cluster' artifact directory for additional logs." + fi + fi KUBECONFIG="$(kind get kubeconfig-path --name=csi-prow)" export KUBECONFIG } From cda2fc5874b42ed68b68cbb60e92fff6ab7ba4c0 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Mon, 8 Apr 2019 19:45:44 +0200 Subject: [PATCH 35/41] prow.sh: avoid AllAlpha=true It turned out to not work. Instead of reverting the commit which introduced this, let's better document this explicitly. --- prow.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/prow.sh b/prow.sh index c2075685..0a2b37d7 100755 --- a/prow.sh +++ b/prow.sh @@ -267,9 +267,9 @@ configvar CSI_PROW_E2E_ALPHA "$(get_versioned_variable CSI_PROW_E2E_ALPHA "${csi # the failing test for "latest" or by updating the test and not running # it anymore for older releases. 
 configvar CSI_PROW_E2E_ALPHA_GATES_1_13 'VolumeSnapshotDataSource=true,BlockVolume=true,CSIBlockVolume=true' "alpha feature gates for Kubernetes 1.13"
-configvar CSI_PROW_E2E_ALPHA_GATES_1_14 'VolumeSnapshotDataSource=true' "alpha feature gates for Kubernetes 1.14"
+configvar CSI_PROW_E2E_ALPHA_GATES_1_14 'VolumeSnapshotDataSource=true,ExpandCSIVolumes=true' "alpha feature gates for Kubernetes 1.14"
 # TODO: add new CSI_PROW_ALPHA_GATES entry for future Kubernetes releases
-configvar CSI_PROW_E2E_ALPHA_GATES_LATEST 'AllAlpha=true' "alpha feature gates for latest Kubernetes"
+configvar CSI_PROW_E2E_ALPHA_GATES_LATEST 'AllAlpha=true,ExpandCSIVolumes=true' "alpha feature gates for latest Kubernetes"
 configvar CSI_PROW_E2E_ALPHA_GATES "$(get_versioned_variable CSI_PROW_E2E_ALPHA_GATES "${csi_prow_kubernetes_version_suffix}")" "alpha E2E feature gates"

 # Some tests are known to be unusable in a KinD cluster. For example,

From 7aaac225719beb59cf984b885bd8241c699723e7 Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Wed, 10 Apr 2019 08:44:54 +0200
Subject: [PATCH 36/41] prow.sh: remove AllAlpha=all, part II

This was already meant to be done earlier in cda2fc5874b42ed68b. While
at it, extend the permanent TODO with guidance on future feature gates.
---
 prow.sh | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/prow.sh b/prow.sh
index 0a2b37d7..57415d43 100755
--- a/prow.sh
+++ b/prow.sh
@@ -268,8 +268,9 @@ configvar CSI_PROW_E2E_ALPHA "$(get_versioned_variable CSI_PROW_E2E_ALPHA "${csi
 # it anymore for older releases.
 configvar CSI_PROW_E2E_ALPHA_GATES_1_13 'VolumeSnapshotDataSource=true,BlockVolume=true,CSIBlockVolume=true' "alpha feature gates for Kubernetes 1.13"
 configvar CSI_PROW_E2E_ALPHA_GATES_1_14 'VolumeSnapshotDataSource=true,ExpandCSIVolumes=true' "alpha feature gates for Kubernetes 1.14"
-# TODO: add new CSI_PROW_ALPHA_GATES entry for future Kubernetes releases
-configvar CSI_PROW_E2E_ALPHA_GATES_LATEST 'AllAlpha=true,ExpandCSIVolumes=true' "alpha feature gates for latest Kubernetes"
+# TODO: add new CSI_PROW_ALPHA_GATES_xxx entry for future Kubernetes releases and
+# add new gates to CSI_PROW_E2E_ALPHA_GATES_LATEST.
+configvar CSI_PROW_E2E_ALPHA_GATES_LATEST 'VolumeSnapshotDataSource=true,ExpandCSIVolumes=true' "alpha feature gates for latest Kubernetes"
 configvar CSI_PROW_E2E_ALPHA_GATES "$(get_versioned_variable CSI_PROW_E2E_ALPHA_GATES "${csi_prow_kubernetes_version_suffix}")" "alpha E2E feature gates"

 # Some tests are known to be unusable in a KinD cluster. For example,

From bb5336c7832c185a4f4c18dbfface79528f44d09 Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Thu, 11 Apr 2019 11:34:09 +0200
Subject: [PATCH 37/41] .prow.sh: enable Prow testing

---
 .prow.sh | 5 +++++
 1 file changed, 5 insertions(+)
 create mode 100755 .prow.sh

diff --git a/.prow.sh b/.prow.sh
new file mode 100755
index 00000000..40dc1c3d
--- /dev/null
+++ b/.prow.sh
@@ -0,0 +1,5 @@
+#! /bin/bash
+
+. release-tools/prow.sh
+
+main

From ff9bce4a71feb31e69d3e5a8ed9874da542066f1 Mon Sep 17 00:00:00 2001
From: Pengzhi Sun
Date: Thu, 11 Apr 2019 17:42:44 +0800
Subject: [PATCH 38/41] Replace 'return' with 'exit' to fix shellcheck error

---
 build.make | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/build.make b/build.make
index e5769c58..e3d4795d 100644
--- a/build.make
+++ b/build.make
@@ -143,11 +143,11 @@ test-shellcheck:
 	@ ret=0; \
 	if ! 
command -v docker; then \ echo "skipped, no Docker"; \ - return 0; \ + exit 0; \ fi; \ for dir in $(abspath $(TEST_SHELLCHECK_DIRS)); do \ echo; \ echo "$$dir:"; \ ./release-tools/verify-shellcheck.sh "$$dir" || ret=1; \ done; \ - return $$ret + exit $$ret From 232c56a6714cfbe54ea5ed6819fe9714dca8c704 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Thu, 11 Apr 2019 11:43:11 +0200 Subject: [PATCH 39/41] build: use csi-release-tools This makes the build process consistent with the other sidecars. --- .gitignore | 50 +------------------------------------------------- .travis.yml | 20 +------------------- Makefile | 37 +++---------------------------------- 3 files changed, 5 insertions(+), 102 deletions(-) mode change 100644 => 120000 .travis.yml diff --git a/.gitignore b/.gitignore index 8e957675..5e56e040 100644 --- a/.gitignore +++ b/.gitignore @@ -1,49 +1 @@ -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ - -# OSX leaves these everywhere on SMB shares -._* - -# OSX trash -.DS_Store - -# Eclipse files -.classpath -.project -.settings/** - -# Files generated by JetBrains IDEs, e.g. IntelliJ IDEA -.idea/ -*.iml - -# Vscode files -.vscode - -# Emacs save files -*~ -\#*\# -.\#* - -# Vim-related files -[._]*.s[a-w][a-z] -[._]s[a-w][a-z] -*.un~ -Session.vim -.netrwhist - -# Ignore vi backup files -*~ -# Ignore patches. -*.patch +/bin diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index edfa58a0..00000000 --- a/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -language: go - -services: - - docker - -# Needed for e2e tests -sudo: true -go: 1.10.x -script: -- go fmt $(go list ./... | grep -v vendor) | wc -l | grep 0 -- go vet $(go list ./... | grep -v vendor) -- go test $(go list ./... | grep -v vendor) -- make livenessprobe -- ./hack/e2e-livenessprobe.sh -after_success: - - if [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then - docker login -u "${DOCKER_USERNAME}" -p "${DOCKER_PASSWORD}" quay.io; - make push; - fi diff --git a/.travis.yml b/.travis.yml new file mode 120000 index 00000000..a554dfc7 --- /dev/null +++ b/.travis.yml @@ -0,0 +1 @@ +release-tools/travis.yml \ No newline at end of file diff --git a/Makefile b/Makefile index 8a68710f..286853c5 100644 --- a/Makefile +++ b/Makefile @@ -12,38 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -REGISTRY_NAME = quay.io/k8scsi -IMAGE_VERSION = canary -IMAGE_NAME=livenessprobe -IMAGE_TAG=$(REGISTRY_NAME)/$(IMAGE_NAME):$(IMAGE_VERSION) +CMDS=livenessprobe +all: build -.PHONY: all liveness clean test - -ifdef V -TESTARGS = -v -args -alsologtostderr -v 5 -else -TESTARGS = -endif - -all: livenessprobe - -livenessprobe: - mkdir -p bin - CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o ./bin/livenessprobe ./cmd/livenessprobe - -macos-livenessprobe: - mkdir -p bin - CGO_ENABLED=0 GOOS=darwin go build -a -ldflags '-extldflags "-static"' -o ./bin/livenessprobe.osx ./cmd/livenessprobe - -livenessprobe-container: livenessprobe - docker build -t $(IMAGE_TAG) -f ./Dockerfile . - -push: livenessprobe-container - docker push $(IMAGE_TAG) - -clean: - rm -rf bin - -test: - go test `go list ./... | grep -v 'vendor'` $(TESTARGS) - go vet `go list ./... 
| grep -v vendor` +include release-tools/build.make From 0b10f6a4b026a52bdda5d8d63b80a45b79b84ff3 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Thu, 11 Apr 2019 13:37:43 +0200 Subject: [PATCH 40/41] prow.sh: update csi-driver-host-path This pulls in https://github.com/kubernetes-csi/csi-driver-host-path/pull/37, which turned out to be necessary for pre-submit testing of the livenessprobe. --- prow.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prow.sh b/prow.sh index 57415d43..ab3a1a57 100755 --- a/prow.sh +++ b/prow.sh @@ -151,7 +151,7 @@ configvar CSI_PROW_WORK "$(mkdir -p "$GOPATH/pkg" && mktemp -d "$GOPATH/pkg/csip # # When no deploy script is found (nothing in `deploy` directory, # CSI_PROW_HOSTPATH_REPO=none), nothing gets deployed. -configvar CSI_PROW_HOSTPATH_VERSION 486074dc3beef59955faf7bb5210418d9844e0a7 "hostpath driver" # pre-1.1.0 +configvar CSI_PROW_HOSTPATH_VERSION fc52d13ba07922c80555a24616a5b16480350c3f "hostpath driver" # pre-1.1.0 configvar CSI_PROW_HOSTPATH_REPO https://github.com/kubernetes-csi/csi-driver-host-path "hostpath repo" configvar CSI_PROW_DEPLOYMENT "" "deployment" From 0fafc663dc8c0d43cbc411b69ad68a74ecacbc8b Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Thu, 11 Apr 2019 22:32:01 +0200 Subject: [PATCH 41/41] prow.sh: skip sanity testing if component doesn't support it Whether a component supports sanity testing depends on the component. For example, csi-driver-host-path enables it because it makes sense there (and only there). Letting the prow.sh script decide whether it actually runs simplifies the job definitions in test-infra. --- prow.sh | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/prow.sh b/prow.sh index ab3a1a57..204cc294 100755 --- a/prow.sh +++ b/prow.sh @@ -198,10 +198,13 @@ configvar CSI_PROW_SANITY_CONTAINER "hostpath" "Kubernetes container with CSI dr # - serial, only alpha features # - sanity # -# Sanity testing with csi-sanity only covers the CSI driver itself and thus -# is off by default. A CSI driver can change that default in its .prow.sh -# by setting CSI_PROW_TESTS_SANITY. -configvar CSI_PROW_TESTS "unit parallel serial parallel-alpha serial-alpha ${CSI_PROW_TESTS_SANITY}" "tests to run" +# Unknown or unsupported entries are ignored. +# +# Sanity testing with csi-sanity only covers the CSI driver itself and +# thus only makes sense in repos which provide their own CSI +# driver. Repos can enable sanity testing by setting +# CSI_PROW_TESTS_SANITY=sanity. +configvar CSI_PROW_TESTS "unit parallel serial parallel-alpha serial-alpha sanity" "tests to run" tests_enabled () { local t1 t2 # We want word-splitting here, so ignore: Quote to prevent word splitting, or split robustly with mapfile or read -a. @@ -216,11 +219,16 @@ tests_enabled () { done return 1 } +sanity_enabled () { + [ "${CSI_PROW_TESTS_SANITY}" = "sanity" ] && tests_enabled "sanity" +} tests_need_kind () { - tests_enabled "sanity" "parallel" "serial" "serial-alpha" "parallel-alpha" + tests_enabled "parallel" "serial" "serial-alpha" "parallel-alpha" || + sanity_enabled } tests_need_non_alpha_cluster () { - tests_enabled "sanity" "parallel" "serial" + tests_enabled "parallel" "serial" || + sanity_enabled } tests_need_alpha_cluster () { tests_enabled "parallel-alpha" "serial-alpha" @@ -916,7 +924,7 @@ main () { if install_hostpath "$images"; then collect_cluster_info - if tests_enabled "sanity"; then + if sanity_enabled; then if ! run_sanity; then ret=1 fi
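For a repo that ships its own CSI driver, opting in then becomes a one-line
change in its .prow.sh. A minimal sketch, extending the .prow.sh added in
PATCH 37 and assuming that configvar leaves already-set variables alone, so
the assignment only has to happen before prow.sh is sourced:

    #! /bin/bash

    # Hypothetical .prow.sh for a repo that provides its own CSI driver:
    # opt in to csi-sanity in addition to the default tests. Setting the
    # variable before sourcing prow.sh means configvar keeps this value
    # instead of applying its default.
    CSI_PROW_TESTS_SANITY=sanity

    . release-tools/prow.sh

    main

Components without their own driver simply omit the variable and the sanity
test is skipped, which keeps the Prow job definitions identical across repos.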