From 932e0f20f577bbe39a6240f789c611538f22a86b Mon Sep 17 00:00:00 2001 From: Enxebre Date: Tue, 17 Sep 2019 12:36:51 +0200 Subject: [PATCH] UPSTREAM: : openshift: Revendor to bring https://github.com/openshift/cluster-api/pull/72 --- Gopkg.lock | 4 +- .../openshift/cluster-api/Gopkg.lock | 67 +- .../openshift/cluster-api/Gopkg.toml | 5 +- .../github.com/openshift/cluster-api/Makefile | 4 + .../config/crds/machine_v1beta1_machine.yaml | 31 + .../crds/machine_v1beta1_machineset.yaml | 21 + .../openshift/cluster-api/hack/goimports.sh | 29 + .../pkg/apis/machine/v1beta1/machine_types.go | 7 + .../apis/machine/v1beta1/machineset_types.go | 5 + .../cluster-api/pkg/controller/BUILD.bazel | 2 - .../cluster-api/pkg/controller/add_node.go | 26 - .../pkg/controller/machine/BUILD.bazel | 2 +- .../pkg/controller/machine/controller.go | 76 ++- .../controller/machinedeployment/BUILD.bazel | 1 + .../machinedeployment/controller.go | 7 + .../pkg/controller/machinedeployment/sync.go | 6 + .../pkg/controller/machineset/controller.go | 6 + .../cluster-api/pkg/drain/BUILD.bazel | 45 ++ .../openshift/cluster-api/pkg/drain/drain.go | 602 ++++++++++++++++++ 19 files changed, 860 insertions(+), 86 deletions(-) create mode 100755 vendor/github.com/openshift/cluster-api/hack/goimports.sh delete mode 100644 vendor/github.com/openshift/cluster-api/pkg/controller/add_node.go create mode 100644 vendor/github.com/openshift/cluster-api/pkg/drain/BUILD.bazel create mode 100644 vendor/github.com/openshift/cluster-api/pkg/drain/drain.go diff --git a/Gopkg.lock b/Gopkg.lock index f3fadb7ffe..527b295a83 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -434,7 +434,7 @@ [[projects]] branch = "openshift-4.2-cluster-api-0.1.0" - digest = "1:39b28a2133488606e41fd0f6f13ec4239169424432fb0eb0dadb9d6f8ee4bf91" + digest = "1:3981936e2b498f98977288a69942aded35aec7465b3dd87c301d8a25466e9910" name = "github.com/openshift/cluster-api" packages = [ "cmd/clusterctl/clientcmd", @@ -465,7 +465,7 @@ "pkg/util", ] pruneopts = "T" - revision = "e911af77e9a33cfb45f82ba9778876d715cdb4ea" + revision = "655e2d6ccdd5774442da004081f933cd8f1f3273" [[projects]] branch = "master" diff --git a/vendor/github.com/openshift/cluster-api/Gopkg.lock b/vendor/github.com/openshift/cluster-api/Gopkg.lock index 9c4dee3608..0dcdd90256 100644 --- a/vendor/github.com/openshift/cluster-api/Gopkg.lock +++ b/vendor/github.com/openshift/cluster-api/Gopkg.lock @@ -106,10 +106,11 @@ [[projects]] branch = "master" - digest = "1:eaa7c96baf38f6abde2f720aac540a49dfc2229b74c3c591c3f84d2ff7e84269" + digest = "1:ebbb7da8a60c10db59cbc3bb5a874b0a6a9f441bc8bef66b90085111f71c5b49" name = "github.com/go-log/log" packages = [ ".", + "capture", "info", ] pruneopts = "UT" @@ -348,14 +349,6 @@ revision = "65fb64232476ad9046e57c26cd0bff3d3a8dc6cd" version = "v1.4.3" -[[projects]] - branch = "master" - digest = "1:f7646c654e93258958dba300641f8f674d5a9ed015c11119793ba1156e2acbe9" - name = "github.com/openshift/kubernetes-drain" - packages = ["."] - pruneopts = "UT" - revision = "c2e51be1758efa30d71a4d30dc4e2db86b70a4df" - [[projects]] digest = "1:e5d0bd87abc2781d14e274807a470acd180f0499f8bf5bb18606e9ec22ad9de9" name = "github.com/pborman/uuid" @@ -845,7 +838,7 @@ revision = "b6aa1175dafa586b8042c7bfdcd1585f9ecfaa08" [[projects]] - digest = "1:dfe595505c4f0f5dbd85f9a7b3d01052fde13cdcc587ac7ac3c7dfd79d6a4170" + digest = "1:4884c17ab853174d45b2c3776b3aae4672c0aaa98d663d97599cf992aacc4265" name = "k8s.io/client-go" packages = [ "discovery", @@ -903,43 +896,80 @@ "informers/storage/v1alpha1", 
"informers/storage/v1beta1", "kubernetes", + "kubernetes/fake", "kubernetes/scheme", "kubernetes/typed/admissionregistration/v1beta1", + "kubernetes/typed/admissionregistration/v1beta1/fake", "kubernetes/typed/apps/v1", + "kubernetes/typed/apps/v1/fake", "kubernetes/typed/apps/v1beta1", + "kubernetes/typed/apps/v1beta1/fake", "kubernetes/typed/apps/v1beta2", + "kubernetes/typed/apps/v1beta2/fake", "kubernetes/typed/auditregistration/v1alpha1", + "kubernetes/typed/auditregistration/v1alpha1/fake", "kubernetes/typed/authentication/v1", + "kubernetes/typed/authentication/v1/fake", "kubernetes/typed/authentication/v1beta1", + "kubernetes/typed/authentication/v1beta1/fake", "kubernetes/typed/authorization/v1", + "kubernetes/typed/authorization/v1/fake", "kubernetes/typed/authorization/v1beta1", + "kubernetes/typed/authorization/v1beta1/fake", "kubernetes/typed/autoscaling/v1", + "kubernetes/typed/autoscaling/v1/fake", "kubernetes/typed/autoscaling/v2beta1", + "kubernetes/typed/autoscaling/v2beta1/fake", "kubernetes/typed/autoscaling/v2beta2", + "kubernetes/typed/autoscaling/v2beta2/fake", "kubernetes/typed/batch/v1", + "kubernetes/typed/batch/v1/fake", "kubernetes/typed/batch/v1beta1", + "kubernetes/typed/batch/v1beta1/fake", "kubernetes/typed/batch/v2alpha1", + "kubernetes/typed/batch/v2alpha1/fake", "kubernetes/typed/certificates/v1beta1", + "kubernetes/typed/certificates/v1beta1/fake", "kubernetes/typed/coordination/v1", + "kubernetes/typed/coordination/v1/fake", "kubernetes/typed/coordination/v1beta1", + "kubernetes/typed/coordination/v1beta1/fake", "kubernetes/typed/core/v1", + "kubernetes/typed/core/v1/fake", "kubernetes/typed/events/v1beta1", + "kubernetes/typed/events/v1beta1/fake", "kubernetes/typed/extensions/v1beta1", + "kubernetes/typed/extensions/v1beta1/fake", "kubernetes/typed/networking/v1", + "kubernetes/typed/networking/v1/fake", "kubernetes/typed/networking/v1beta1", + "kubernetes/typed/networking/v1beta1/fake", "kubernetes/typed/node/v1alpha1", + "kubernetes/typed/node/v1alpha1/fake", "kubernetes/typed/node/v1beta1", + "kubernetes/typed/node/v1beta1/fake", "kubernetes/typed/policy/v1beta1", + "kubernetes/typed/policy/v1beta1/fake", "kubernetes/typed/rbac/v1", + "kubernetes/typed/rbac/v1/fake", "kubernetes/typed/rbac/v1alpha1", + "kubernetes/typed/rbac/v1alpha1/fake", "kubernetes/typed/rbac/v1beta1", + "kubernetes/typed/rbac/v1beta1/fake", "kubernetes/typed/scheduling/v1", + "kubernetes/typed/scheduling/v1/fake", "kubernetes/typed/scheduling/v1alpha1", + "kubernetes/typed/scheduling/v1alpha1/fake", "kubernetes/typed/scheduling/v1beta1", + "kubernetes/typed/scheduling/v1beta1/fake", "kubernetes/typed/settings/v1alpha1", + "kubernetes/typed/settings/v1alpha1/fake", "kubernetes/typed/storage/v1", + "kubernetes/typed/storage/v1/fake", "kubernetes/typed/storage/v1alpha1", + "kubernetes/typed/storage/v1alpha1/fake", "kubernetes/typed/storage/v1beta1", + "kubernetes/typed/storage/v1beta1/fake", "listers/admissionregistration/v1beta1", "listers/apps/v1", "listers/apps/v1beta1", @@ -1086,7 +1116,8 @@ revision = "324c5df7d3f0fafd8ab337871d9f2c86afd1fe64" [[projects]] - digest = "1:5bfbcd19f444fc4bf9ae6cc938a8e9bc0a98050c8357e8b038ec03c3482c974c" + branch = "release-0.2" + digest = "1:2212c932307779541b20f07c46728a2ff0caca9ada465a1072d4d83187174566" name = "sigs.k8s.io/controller-runtime" packages = [ "pkg/cache", @@ -1126,7 +1157,7 @@ "pkg/webhook/internal/metrics", ] pruneopts = "UT" - revision = "96b67f231945a8e8550dbdd8bfbd550a0c68097b" + revision = 
"f60c87ec713cb8da81257228530605457ebf7220" [[projects]] digest = "1:77a19ea61ca4e01817ad2bc3e91689c5097b4b439668127d1fb5d8b95c3aca03" @@ -1172,10 +1203,11 @@ input-imports = [ "github.com/davecgh/go-spew/spew", "github.com/emicklei/go-restful", + "github.com/go-log/log", + "github.com/go-log/log/capture", "github.com/go-log/log/info", "github.com/onsi/ginkgo", "github.com/onsi/gomega", - "github.com/openshift/kubernetes-drain", "github.com/pkg/errors", "github.com/sergi/go-diff/diffmatchpatch", "github.com/spf13/cobra", @@ -1183,7 +1215,10 @@ "golang.org/x/net/context", "k8s.io/api/apps/v1", "k8s.io/api/autoscaling/v1", + "k8s.io/api/batch/v1", "k8s.io/api/core/v1", + "k8s.io/api/extensions/v1beta1", + "k8s.io/api/policy/v1beta1", "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", "k8s.io/apimachinery/pkg/api/equality", "k8s.io/apimachinery/pkg/api/errors", @@ -1191,15 +1226,18 @@ "k8s.io/apimachinery/pkg/apis/meta/v1", "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", "k8s.io/apimachinery/pkg/apis/meta/v1/validation", + "k8s.io/apimachinery/pkg/fields", "k8s.io/apimachinery/pkg/labels", "k8s.io/apimachinery/pkg/runtime", "k8s.io/apimachinery/pkg/runtime/schema", "k8s.io/apimachinery/pkg/runtime/serializer", "k8s.io/apimachinery/pkg/types", + "k8s.io/apimachinery/pkg/util/errors", "k8s.io/apimachinery/pkg/util/intstr", "k8s.io/apimachinery/pkg/util/json", "k8s.io/apimachinery/pkg/util/rand", "k8s.io/apimachinery/pkg/util/runtime", + "k8s.io/apimachinery/pkg/util/sets", "k8s.io/apimachinery/pkg/util/validation/field", "k8s.io/apimachinery/pkg/util/wait", "k8s.io/apimachinery/pkg/util/yaml", @@ -1209,8 +1247,11 @@ "k8s.io/client-go/discovery/fake", "k8s.io/client-go/informers", "k8s.io/client-go/kubernetes", + "k8s.io/client-go/kubernetes/fake", "k8s.io/client-go/kubernetes/scheme", + "k8s.io/client-go/kubernetes/typed/apps/v1", "k8s.io/client-go/kubernetes/typed/core/v1", + "k8s.io/client-go/kubernetes/typed/policy/v1beta1", "k8s.io/client-go/plugin/pkg/client/auth", "k8s.io/client-go/plugin/pkg/client/auth/gcp", "k8s.io/client-go/rest", diff --git a/vendor/github.com/openshift/cluster-api/Gopkg.toml b/vendor/github.com/openshift/cluster-api/Gopkg.toml index d132d1d66e..b3c4116eb3 100644 --- a/vendor/github.com/openshift/cluster-api/Gopkg.toml +++ b/vendor/github.com/openshift/cluster-api/Gopkg.toml @@ -43,8 +43,9 @@ required = [ # STANZAS BELOW ARE GENERATED AND MAY BE WRITTEN - DO NOT MODIFY BELOW THIS LINE. [[constraint]] - name="sigs.k8s.io/controller-runtime" - revision="96b67f231945a8e8550dbdd8bfbd550a0c68097b" + name = "sigs.k8s.io/controller-runtime" + # release-0.2 since it imports kubernetes 1.14 + branch="release-0.2" [[constraint]] name="sigs.k8s.io/controller-tools" diff --git a/vendor/github.com/openshift/cluster-api/Makefile b/vendor/github.com/openshift/cluster-api/Makefile index 73eb4450ec..60c850fc3a 100644 --- a/vendor/github.com/openshift/cluster-api/Makefile +++ b/vendor/github.com/openshift/cluster-api/Makefile @@ -72,6 +72,10 @@ manifests: ## Generate manifests e.g. CRD, RBAC etc. fmt: ## Run go fmt against code go fmt ./pkg/... ./cmd/... +.PHONY: goimports +goimports: ## Go fmt your code + hack/goimports.sh . + .PHONY: vet vet: ## Run go vet against code go vet ./pkg/... ./cmd/... 
diff --git a/vendor/github.com/openshift/cluster-api/config/crds/machine_v1beta1_machine.yaml b/vendor/github.com/openshift/cluster-api/config/crds/machine_v1beta1_machine.yaml index 35dd1f4af4..b5cc2a4714 100644 --- a/vendor/github.com/openshift/cluster-api/config/crds/machine_v1beta1_machine.yaml +++ b/vendor/github.com/openshift/cluster-api/config/crds/machine_v1beta1_machine.yaml @@ -6,6 +6,37 @@ metadata: controller-tools.k8s.io: "1.0" name: machines.machine.openshift.io spec: + additionalPrinterColumns: + - JSONPath: .metadata.annotations['machine\.openshift\.io/instance-state'] + description: State of instance + name: State + type: string + - JSONPath: .metadata.labels['machine\.openshift\.io/instance-type'] + description: Type of instance + name: Type + type: string + - JSONPath: .metadata.labels['machine\.openshift\.io/region'] + description: Region associated with machine + name: Region + type: string + - JSONPath: .metadata.labels['machine\.openshift\.io/zone'] + description: Zone associated with machine + name: Zone + type: string + - JSONPath: .metadata.creationTimestamp + description: Machine age + name: Age + type: date + - JSONPath: .status.nodeRef.name + description: Node associated with machine + name: Node + priority: 1 + type: string + - JSONPath: .spec.providerID + description: Provider ID of machine created in cloud provider + name: ProviderID + priority: 1 + type: string group: machine.openshift.io names: kind: Machine diff --git a/vendor/github.com/openshift/cluster-api/config/crds/machine_v1beta1_machineset.yaml b/vendor/github.com/openshift/cluster-api/config/crds/machine_v1beta1_machineset.yaml index 4e88e2b694..0cbdf333b9 100644 --- a/vendor/github.com/openshift/cluster-api/config/crds/machine_v1beta1_machineset.yaml +++ b/vendor/github.com/openshift/cluster-api/config/crds/machine_v1beta1_machineset.yaml @@ -6,6 +6,27 @@ metadata: controller-tools.k8s.io: "1.0" name: machinesets.machine.openshift.io spec: + additionalPrinterColumns: + - JSONPath: .spec.replicas + description: Desired Replicas + name: Desired + type: integer + - JSONPath: .status.replicas + description: Current Replicas + name: Current + type: integer + - JSONPath: .status.readyReplicas + description: Ready Replicas + name: Ready + type: integer + - JSONPath: .status.availableReplicas + description: Observed number of available replicas + name: Available + type: string + - JSONPath: .metadata.creationTimestamp + description: Machineset age + name: Age + type: date group: machine.openshift.io names: kind: MachineSet diff --git a/vendor/github.com/openshift/cluster-api/hack/goimports.sh b/vendor/github.com/openshift/cluster-api/hack/goimports.sh new file mode 100755 index 0000000000..1c1573291f --- /dev/null +++ b/vendor/github.com/openshift/cluster-api/hack/goimports.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +REPO_NAME=$(basename "${PWD}") +if [ "$IS_CONTAINER" != "" ]; then + for TARGET in "${@}"; do + find "${TARGET}" -name '*.go' ! -path '*/vendor/*' ! -path '*/.build/*' -exec goimports -w {} \+ + done + git diff --exit-code +else + docker run -it --rm \ + --env IS_CONTAINER=TRUE \ + --volume "${PWD}:/go/src/sigs.k8s.io/${REPO_NAME}:z" \ + --workdir "/go/src/sigs.k8s.io/${REPO_NAME}" \ + openshift/origin-release:golang-1.12 \ + ./hack/goimports.sh "${@}" +fi diff --git a/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machine_types.go b/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machine_types.go index 62ec393376..0bbc48f5a7 100644 --- a/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machine_types.go +++ b/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machine_types.go @@ -46,6 +46,13 @@ const ( // Machine is the Schema for the machines API // +k8s:openapi-gen=true // +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".metadata.annotations['machine\.openshift\.io/instance-state']",description="State of instance" +// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".metadata.labels['machine\.openshift\.io/instance-type']",description="Type of instance" +// +kubebuilder:printcolumn:name="Region",type="string",JSONPath=".metadata.labels['machine\.openshift\.io/region']",description="Region associated with machine" +// +kubebuilder:printcolumn:name="Zone",type="string",JSONPath=".metadata.labels['machine\.openshift\.io/zone']",description="Zone associated with machine" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Machine age" +// +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.nodeRef.name",description="Node associated with machine",priority=1 +// +kubebuilder:printcolumn:name="ProviderID",type="string",JSONPath=".spec.providerID",description="Provider ID of machine created in cloud provider",priority=1 type Machine struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` diff --git a/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machineset_types.go b/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machineset_types.go index b4edb7a303..413f598fe0 100644 --- a/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machineset_types.go +++ b/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machineset_types.go @@ -35,6 +35,11 @@ import ( // +k8s:openapi-gen=true // +kubebuilder:subresource:status // +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.labelSelector +// +kubebuilder:printcolumn:name="Desired",type="integer",JSONPath=".spec.replicas",description="Desired Replicas" +// +kubebuilder:printcolumn:name="Current",type="integer",JSONPath=".status.replicas",description="Current Replicas" +// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.readyReplicas",description="Ready Replicas" +// +kubebuilder:printcolumn:name="Available",type="string",JSONPath=".status.availableReplicas",description="Observed number of available replicas" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Machineset age" type MachineSet struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` diff --git 
a/vendor/github.com/openshift/cluster-api/pkg/controller/BUILD.bazel b/vendor/github.com/openshift/cluster-api/pkg/controller/BUILD.bazel index fec58b2796..dfeb46e5bd 100644 --- a/vendor/github.com/openshift/cluster-api/pkg/controller/BUILD.bazel +++ b/vendor/github.com/openshift/cluster-api/pkg/controller/BUILD.bazel @@ -4,14 +4,12 @@ go_library( name = "go_default_library", srcs = [ "add_machineset.go", - "add_node.go", "controller.go", ], importpath = "github.com/openshift/cluster-api/pkg/controller", visibility = ["//visibility:public"], deps = [ "//pkg/controller/machineset:go_default_library", - "//pkg/controller/node:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/manager:go_default_library", ], ) diff --git a/vendor/github.com/openshift/cluster-api/pkg/controller/add_node.go b/vendor/github.com/openshift/cluster-api/pkg/controller/add_node.go deleted file mode 100644 index 34c00cf72e..0000000000 --- a/vendor/github.com/openshift/cluster-api/pkg/controller/add_node.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "github.com/openshift/cluster-api/pkg/controller/node" -) - -func init() { - // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. 
- AddToManagerFuncs = append(AddToManagerFuncs, node.Add) -} diff --git a/vendor/github.com/openshift/cluster-api/pkg/controller/machine/BUILD.bazel b/vendor/github.com/openshift/cluster-api/pkg/controller/machine/BUILD.bazel index 20a5e72841..5ce3bd7f60 100644 --- a/vendor/github.com/openshift/cluster-api/pkg/controller/machine/BUILD.bazel +++ b/vendor/github.com/openshift/cluster-api/pkg/controller/machine/BUILD.bazel @@ -13,9 +13,9 @@ go_library( "//pkg/apis/cluster/v1alpha1:go_default_library", "//pkg/apis/machine/v1beta1:go_default_library", "//pkg/controller/error:go_default_library", + "//pkg/drain:go_default_library", "//pkg/util:go_default_library", "//vendor/github.com/go-log/log/info:go_default_library", - "//vendor/github.com/openshift/kubernetes-drain:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/github.com/openshift/cluster-api/pkg/controller/machine/controller.go b/vendor/github.com/openshift/cluster-api/pkg/controller/machine/controller.go index f2f1711997..3a6214566c 100644 --- a/vendor/github.com/openshift/cluster-api/pkg/controller/machine/controller.go +++ b/vendor/github.com/openshift/cluster-api/pkg/controller/machine/controller.go @@ -19,15 +19,16 @@ package machine import ( "context" "fmt" - "os" "time" "github.com/go-log/log/info" clusterv1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" + commonerrors "github.com/openshift/cluster-api/pkg/apis/machine/common" machinev1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" controllerError "github.com/openshift/cluster-api/pkg/controller/error" + kubedrain "github.com/openshift/cluster-api/pkg/drain" + clusterapiError "github.com/openshift/cluster-api/pkg/errors" "github.com/openshift/cluster-api/pkg/util" - kubedrain "github.com/openshift/kubernetes-drain" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -49,6 +50,18 @@ const ( // ExcludeNodeDrainingAnnotation annotation explicitly skips node draining if set ExcludeNodeDrainingAnnotation = "machine.openshift.io/exclude-node-draining" + + // MachineRegionLabelName as annotation name for a machine region + MachineRegionLabelName = "machine.openshift.io/region" + + // MachineAZLabelName as annotation name for a machine AZ + MachineAZLabelName = "machine.openshift.io/zone" + + // MachineInstanceStateAnnotationName as annotation name for a machine instance state + MachineInstanceStateAnnotationName = "machine.openshift.io/instance-state" + + // MachineInstanceTypeLabelName as annotation name for a machine instance type + MachineInstanceTypeLabelName = "machine.openshift.io/instance-type" ) var DefaultActuator Actuator @@ -64,14 +77,8 @@ func newReconciler(mgr manager.Manager, actuator Actuator) reconcile.Reconciler eventRecorder: mgr.GetEventRecorderFor("machine-controller"), config: mgr.GetConfig(), scheme: mgr.GetScheme(), - nodeName: os.Getenv(NodeNameEnvVar), actuator: actuator, } - - if r.nodeName == "" { - klog.Warningf("Environment variable %q is not set, this controller will not protect against deleting its own machine", NodeNameEnvVar) - } - return r } @@ -99,9 +106,6 @@ type ReconcileMachine struct { eventRecorder record.EventRecorder actuator Actuator - - // nodeName is the name of the node on which the machine controller is running, if not present, it is loaded from NODE_NAME. 
- nodeName string } // Reconcile reads that state of the cluster for a Machine object and makes changes based on the state read @@ -185,13 +189,7 @@ func (r *ReconcileMachine) Reconcile(request reconcile.Request) (reconcile.Resul return reconcile.Result{}, nil } - if !r.isDeleteAllowed(m) { - klog.Infof("Deleting machine hosting this controller is not allowed. Skipping reconciliation of machine %q", name) - return reconcile.Result{}, nil - } - klog.Infof("Reconciling machine %q triggers delete", name) - // Drain node before deletion // If a machine is not linked to a node, just delete the machine. Since a node // can be unlinked from a machine when the node goes NotReady and is removed @@ -205,8 +203,16 @@ func (r *ReconcileMachine) Reconcile(request reconcile.Request) (reconcile.Resul } if err := r.actuator.Delete(ctx, cluster, m); err != nil { - klog.Errorf("Failed to delete machine %q: %v", name, err) - return delayIfRequeueAfterError(err) + // isInvalidMachineConfiguration will take care of the case where the + // configuration is invalid from the beginning. len(m.Status.Addresses) > 0 + // will handle the case when a machine configuration was invalidated + // after an instance was created. So only a small window is left when + // we can loose instances, e.g. right after request to create one + // was sent and before a list of node addresses was set. + if len(m.Status.Addresses) > 0 || !isInvalidMachineConfigurationError(err) { + klog.Errorf("Failed to delete machine %q: %v", name, err) + return delayIfRequeueAfterError(err) + } } if m.Status.NodeRef != nil { @@ -312,27 +318,6 @@ func (r *ReconcileMachine) getCluster(ctx context.Context, machine *machinev1.Ma return cluster, nil } -func (r *ReconcileMachine) isDeleteAllowed(machine *machinev1.Machine) bool { - if r.nodeName == "" || machine.Status.NodeRef == nil { - return true - } - - if machine.Status.NodeRef.Name != r.nodeName { - return true - } - - node := &corev1.Node{} - if err := r.Client.Get(context.Background(), client.ObjectKey{Name: r.nodeName}, node); err != nil { - klog.Infof("Failed to determine if controller's node %q is associated with machine %q: %v", r.nodeName, machine.Name, err) - return true - } - - // When the UID of the machine's node reference and this controller's actual node match then then the request is to - // delete the machine this machine-controller is running on. Return false to not allow machine controller to delete its - // own machine. 
- return node.UID != machine.Status.NodeRef.UID -} - func (r *ReconcileMachine) deleteNode(ctx context.Context, name string) error { var node corev1.Node if err := r.Client.Get(ctx, client.ObjectKey{Name: name}, &node); err != nil { @@ -354,3 +339,14 @@ func delayIfRequeueAfterError(err error) (reconcile.Result, error) { } return reconcile.Result{}, err } + +func isInvalidMachineConfigurationError(err error) bool { + switch t := err.(type) { + case *clusterapiError.MachineError: + if t.Reason == commonerrors.InvalidConfigurationMachineError { + klog.Infof("Actuator returned invalid configuration error: %v", err) + return true + } + } + return false +} diff --git a/vendor/github.com/openshift/cluster-api/pkg/controller/machinedeployment/BUILD.bazel b/vendor/github.com/openshift/cluster-api/pkg/controller/machinedeployment/BUILD.bazel index 62de605cdb..ab939364e8 100644 --- a/vendor/github.com/openshift/cluster-api/pkg/controller/machinedeployment/BUILD.bazel +++ b/vendor/github.com/openshift/cluster-api/pkg/controller/machinedeployment/BUILD.bazel @@ -24,6 +24,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", "//vendor/k8s.io/client-go/util/retry:go_default_library", "//vendor/k8s.io/klog:go_default_library", diff --git a/vendor/github.com/openshift/cluster-api/pkg/controller/machinedeployment/controller.go b/vendor/github.com/openshift/cluster-api/pkg/controller/machinedeployment/controller.go index e47bd3e88a..a77e7a6299 100644 --- a/vendor/github.com/openshift/cluster-api/pkg/controller/machinedeployment/controller.go +++ b/vendor/github.com/openshift/cluster-api/pkg/controller/machinedeployment/controller.go @@ -173,6 +173,13 @@ func (r *ReconcileMachineDeployment) Reconcile(request reconcile.Request) (recon // Error reading the object - requeue the request. 
return reconcile.Result{}, err } + + // Ignore deleted MachineDeployments, this can happen when foregroundDeletion + // is enabled + if d.DeletionTimestamp != nil { + return reconcile.Result{}, nil + } + result, err := r.reconcile(ctx, d) if err != nil { klog.Errorf("Failed to reconcile MachineDeployment %q: %v", request.NamespacedName, err) diff --git a/vendor/github.com/openshift/cluster-api/pkg/controller/machinedeployment/sync.go b/vendor/github.com/openshift/cluster-api/pkg/controller/machinedeployment/sync.go index 497167f8fd..40bca89603 100644 --- a/vendor/github.com/openshift/cluster-api/pkg/controller/machinedeployment/sync.go +++ b/vendor/github.com/openshift/cluster-api/pkg/controller/machinedeployment/sync.go @@ -31,6 +31,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" apirand "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/util/retry" "k8s.io/klog" "sigs.k8s.io/controller-runtime/pkg/client" @@ -154,6 +155,11 @@ func (r *ReconcileMachineDeployment) getNewMachineSet(d *machinev1beta1.MachineD }, } + // Add foregroundDeletion finalizer to MachineSet if the MachineDeployment has it + if sets.NewString(d.Finalizers...).Has(metav1.FinalizerDeleteDependents) { + newMS.Finalizers = []string{metav1.FinalizerDeleteDependents} + } + allMSs := append(oldMSs, &newMS) newReplicasCount, err := dutil.NewMSNewReplicas(d, allMSs, &newMS) if err != nil { diff --git a/vendor/github.com/openshift/cluster-api/pkg/controller/machineset/controller.go b/vendor/github.com/openshift/cluster-api/pkg/controller/machineset/controller.go index db2678c5a2..17c44f763e 100644 --- a/vendor/github.com/openshift/cluster-api/pkg/controller/machineset/controller.go +++ b/vendor/github.com/openshift/cluster-api/pkg/controller/machineset/controller.go @@ -158,6 +158,12 @@ func (r *ReconcileMachineSet) Reconcile(request reconcile.Request) (reconcile.Re return reconcile.Result{}, err } + // Ignore deleted MachineSets, this can happen when foregroundDeletion + // is enabled + if machineSet.DeletionTimestamp != nil { + return reconcile.Result{}, nil + } + result, err := r.reconcile(ctx, machineSet) if err != nil { klog.Errorf("Failed to reconcile MachineSet %q: %v", request.NamespacedName, err) diff --git a/vendor/github.com/openshift/cluster-api/pkg/drain/BUILD.bazel b/vendor/github.com/openshift/cluster-api/pkg/drain/BUILD.bazel new file mode 100644 index 0000000000..39893e2c11 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api/pkg/drain/BUILD.bazel @@ -0,0 +1,45 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["drain.go"], + importpath = "github.com/openshift/cluster-api/pkg/drain", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/go-log/log:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/policy/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + 
"//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["drain_test.go"], + embed = [":go_default_library"], + deps = [ + "//vendor/github.com/go-log/log/capture:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", + "//vendor/k8s.io/api/batch/v1:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", + ], +) diff --git a/vendor/github.com/openshift/cluster-api/pkg/drain/drain.go b/vendor/github.com/openshift/cluster-api/pkg/drain/drain.go new file mode 100644 index 0000000000..3f5b274e53 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api/pkg/drain/drain.go @@ -0,0 +1,602 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package drain + +import ( + "errors" + "fmt" + "math" + "sort" + "strings" + "time" + + golog "github.com/go-log/log" + + corev1 "k8s.io/api/core/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + typedpolicyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" +) + +type DrainOptions struct { + // Continue even if there are pods not managed by a ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet. + Force bool + + // Ignore DaemonSet-managed pods. + IgnoreDaemonsets bool + + // Period of time in seconds given to each pod to terminate + // gracefully. If negative, the default value specified in the pod + // will be used. + GracePeriodSeconds int + + // The length of time to wait before giving up on deletion or + // eviction. Zero means infinite. + Timeout time.Duration + + // Continue even if there are pods using emptyDir (local data that + // will be deleted when the node is drained). + DeleteLocalData bool + + // Namespace to filter pods on the node. 
+ Namespace string + + // Label selector to filter pods on the node. + Selector labels.Selector + + // Logger allows callers to plug in their preferred logger. + Logger golog.Logger +} + +// Takes a pod and returns a bool indicating whether or not to operate on the +// pod, an optional warning message, and an optional fatal error. +type podFilter func(corev1.Pod) (include bool, w *warning, f *fatal) +type warning struct { + string +} +type fatal struct { + string +} + +const ( + EvictionKind = "Eviction" + EvictionSubresource = "pods/eviction" + + kDaemonsetFatal = "DaemonSet-managed pods (use IgnoreDaemonsets to ignore)" + kDaemonsetWarning = "ignoring DaemonSet-managed pods" + kLocalStorageFatal = "pods with local storage (use DeleteLocalData to override)" + kLocalStorageWarning = "deleting pods with local storage" + kUnmanagedFatal = "pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet (use Force to override)" + kUnmanagedWarning = "deleting pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet" +) + +// GetNodes looks up the nodes (either given by name as arguments or +// by the Selector option). +func GetNodes(client typedcorev1.NodeInterface, nodes []string, selector string) (out []*corev1.Node, err error) { + if len(nodes) == 0 && len(selector) == 0 { + return nil, nil + } + + if len(selector) > 0 && len(nodes) > 0 { + return nil, errors.New("cannot specify both node names and a selector option") + } + + out = []*corev1.Node{} + + for _, node := range nodes { + node, err := client.Get(node, metav1.GetOptions{}) + if err != nil { + return nil, err + } + out = append(out, node) + } + + if len(selector) > 0 { + nodes, err := client.List(metav1.ListOptions{ + LabelSelector: selector, + }) + if err != nil { + return nil, err + } + for _, node := range nodes.Items { + out = append(out, &node) + } + } + + return out, nil +} + +// Drain nodes in preparation for maintenance. +// +// The given nodes will be marked unschedulable to prevent new pods from arriving. +// Drain evicts the pods if the APIServer supports eviction +// (http://kubernetes.io/docs/admin/disruptions/). Otherwise, it will use normal DELETE +// to delete the pods. +// Drain evicts or deletes all pods except mirror pods (which cannot be deleted through +// the API server). If there are DaemonSet-managed pods, Drain will not proceed +// without IgnoreDaemonsets, and regardless it will not delete any +// DaemonSet-managed pods, because those pods would be immediately replaced by the +// DaemonSet controller, which ignores unschedulable markings. If there are any +// pods that are neither mirror pods nor managed by ReplicationController, +// ReplicaSet, DaemonSet, StatefulSet or Job, then Drain will not delete any pods unless you +// use Force. Force will also allow deletion to proceed if the managing resource of one +// or more pods is missing. +// +// Drain waits for graceful termination. You should not operate on the machine until +// the command completes. +// +// When you are ready to put the nodes back into service, use Uncordon, which +// will make the nodes schedulable again. 
+// +// ![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg) +func Drain(client kubernetes.Interface, nodes []*corev1.Node, options *DrainOptions) (err error) { + nodeInterface := client.CoreV1().Nodes() + for _, node := range nodes { + if err := Cordon(nodeInterface, node, options.Logger); err != nil { + return err + } + } + + drainedNodes := sets.NewString() + var fatal error + + for _, node := range nodes { + err := DeleteOrEvictPods(client, node, options) + if err == nil { + drainedNodes.Insert(node.Name) + logf(options.Logger, "drained node %q", node.Name) + } else { + log(options.Logger, err) + logf(options.Logger, "unable to drain node %q", node.Name) + remainingNodes := []string{} + fatal = err + for _, remainingNode := range nodes { + if drainedNodes.Has(remainingNode.Name) { + continue + } + remainingNodes = append(remainingNodes, remainingNode.Name) + } + + if len(remainingNodes) > 0 { + sort.Strings(remainingNodes) + logf(options.Logger, "there are pending nodes to be drained: %s", strings.Join(remainingNodes, ",")) + } + } + } + + return fatal +} + +// DeleteOrEvictPods deletes or (where supported) evicts pods from the +// target node and waits until the deletion/eviction completes, +// Timeout elapses, or an error occurs. +func DeleteOrEvictPods(client kubernetes.Interface, node *corev1.Node, options *DrainOptions) error { + pods, err := getPodsForDeletion(client, node, options) + if err != nil { + return err + } + + err = deleteOrEvictPods(client, pods, options) + if err != nil { + pendingPods, newErr := getPodsForDeletion(client, node, options) + if newErr != nil { + return newErr + } + pendingNames := make([]string, len(pendingPods)) + for i, pendingPod := range pendingPods { + pendingNames[i] = pendingPod.Name + } + sort.Strings(pendingNames) + logf(options.Logger, "failed to evict pods from node %q (pending pods: %s): %v", node.Name, strings.Join(pendingNames, ","), err) + } + return err +} + +func getPodController(pod corev1.Pod) *metav1.OwnerReference { + return metav1.GetControllerOf(&pod) +} + +func (o *DrainOptions) unreplicatedFilter(pod corev1.Pod) (bool, *warning, *fatal) { + // any finished pod can be removed + if pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed { + return true, nil, nil + } + + controllerRef := getPodController(pod) + if controllerRef != nil { + return true, nil, nil + } + if o.Force { + return true, &warning{kUnmanagedWarning}, nil + } + + return false, nil, &fatal{kUnmanagedFatal} +} + +type DaemonSetFilterOptions struct { + client typedappsv1.AppsV1Interface + force bool + ignoreDaemonSets bool +} + +func (o *DaemonSetFilterOptions) daemonSetFilter(pod corev1.Pod) (bool, *warning, *fatal) { + // Note that we return false in cases where the pod is DaemonSet managed, + // regardless of flags. We never delete them, the only question is whether + // their presence constitutes an error. + // + // The exception is for pods that are orphaned (the referencing + // management resource - including DaemonSet - is not found). + // Such pods will be deleted if Force is used. 
+ controllerRef := getPodController(pod) + if controllerRef == nil || controllerRef.Kind != "DaemonSet" { + return true, nil, nil + } + + if _, err := o.client.DaemonSets(pod.Namespace).Get(controllerRef.Name, metav1.GetOptions{}); err != nil { + // remove orphaned pods with a warning if Force is used + if apierrors.IsNotFound(err) && o.force { + return true, &warning{err.Error()}, nil + } + return false, nil, &fatal{err.Error()} + } + + if !o.ignoreDaemonSets { + return false, nil, &fatal{kDaemonsetFatal} + } + + return false, &warning{kDaemonsetWarning}, nil +} + +func mirrorPodFilter(pod corev1.Pod) (bool, *warning, *fatal) { + if _, found := pod.ObjectMeta.Annotations[corev1.MirrorPodAnnotationKey]; found { + return false, nil, nil + } + return true, nil, nil +} + +func hasLocalStorage(pod corev1.Pod) bool { + for _, volume := range pod.Spec.Volumes { + if volume.EmptyDir != nil { + return true + } + } + + return false +} + +func (o *DrainOptions) localStorageFilter(pod corev1.Pod) (bool, *warning, *fatal) { + if !hasLocalStorage(pod) { + return true, nil, nil + } + if !o.DeleteLocalData { + return false, nil, &fatal{kLocalStorageFatal} + } + return true, &warning{kLocalStorageWarning}, nil +} + +// Map of status message to a list of pod names having that status. +type podStatuses map[string][]string + +func (ps podStatuses) message() string { + msgs := []string{} + + for key, pods := range ps { + msgs = append(msgs, fmt.Sprintf("%s: %s", key, strings.Join(pods, ", "))) + } + return strings.Join(msgs, "; ") +} + +// getPodsForDeletion receives resource info for a node, and returns all the pods from the given node that we +// are planning on deleting. If there are any pods preventing us from deleting, we return that list in an error. +func getPodsForDeletion(client kubernetes.Interface, node *corev1.Node, options *DrainOptions) (pods []corev1.Pod, err error) { + listOptions := metav1.ListOptions{ + FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": node.Name}).String(), + } + if options.Selector != nil { + listOptions.LabelSelector = options.Selector.String() + } + podList, err := client.CoreV1().Pods(options.Namespace).List(listOptions) + if err != nil { + return pods, err + } + + ws := podStatuses{} + fs := podStatuses{} + + daemonSetOptions := &DaemonSetFilterOptions{ + client: client.AppsV1(), + force: options.Force, + ignoreDaemonSets: options.IgnoreDaemonsets, + } + + for _, pod := range podList.Items { + podOk := true + for _, filt := range []podFilter{daemonSetOptions.daemonSetFilter, mirrorPodFilter, options.localStorageFilter, options.unreplicatedFilter} { + filterOk, w, f := filt(pod) + + podOk = podOk && filterOk + if w != nil { + ws[w.string] = append(ws[w.string], pod.Name) + } + if f != nil { + fs[f.string] = append(fs[f.string], pod.Name) + } + + // short-circuit as soon as pod not ok + // at that point, there is no reason to run pod + // through any additional filters + if !podOk { + break + } + } + if podOk { + pods = append(pods, pod) + } + } + + if len(fs) > 0 { + return []corev1.Pod{}, errors.New(fs.message()) + } + if len(ws) > 0 { + log(options.Logger, ws.message()) + } + return pods, nil +} + +func evictPod(client typedpolicyv1beta1.PolicyV1beta1Interface, pod corev1.Pod, policyGroupVersion string, gracePeriodSeconds int) error { + deleteOptions := &metav1.DeleteOptions{} + if gracePeriodSeconds >= 0 { + gracePeriod := int64(gracePeriodSeconds) + deleteOptions.GracePeriodSeconds = &gracePeriod + } + eviction := &policyv1beta1.Eviction{ + TypeMeta: 
metav1.TypeMeta{ + APIVersion: policyGroupVersion, + Kind: EvictionKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: pod.Name, + Namespace: pod.Namespace, + }, + DeleteOptions: deleteOptions, + } + return client.Evictions(eviction.Namespace).Evict(eviction) +} + +// deleteOrEvictPods deletes or evicts the pods on the api server +func deleteOrEvictPods(client kubernetes.Interface, pods []corev1.Pod, options *DrainOptions) error { + if len(pods) == 0 { + return nil + } + + policyGroupVersion, err := SupportEviction(client) + if err != nil { + return err + } + + getPodFn := func(namespace, name string) (*corev1.Pod, error) { + return client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) + } + + if len(policyGroupVersion) > 0 { + // Remember to change change the URL manipulation func when Evction's version change + return evictPods(client.PolicyV1beta1(), pods, policyGroupVersion, options, getPodFn) + } else { + return deletePods(client.CoreV1(), pods, options, getPodFn) + } +} + +func evictPods(client typedpolicyv1beta1.PolicyV1beta1Interface, pods []corev1.Pod, policyGroupVersion string, options *DrainOptions, getPodFn func(namespace, name string) (*corev1.Pod, error)) error { + returnCh := make(chan error, 1) + stopCh := make(chan struct{}) + // 0 timeout means infinite, we use MaxInt64 to represent it. + var globalTimeout time.Duration + if options.Timeout == 0 { + globalTimeout = time.Duration(math.MaxInt64) + } else { + globalTimeout = options.Timeout + } + + for _, pod := range pods { + go func(pod corev1.Pod, returnCh chan error, stopCh chan struct{}) { + var err error + for { + err = evictPod(client, pod, policyGroupVersion, options.GracePeriodSeconds) + if err == nil { + break + } else if apierrors.IsNotFound(err) { + returnCh <- nil + return + } else if apierrors.IsTooManyRequests(err) { + select { + case <-stopCh: + returnCh <- fmt.Errorf("global timeout!! Skip eviction retries for pod %q", pod.Name) + return + default: + logf(options.Logger, "error when evicting pod %q (will retry after 5s): %v", pod.Name, err) + time.Sleep(5 * time.Second) + } + } else { + returnCh <- fmt.Errorf("error when evicting pod %q: %v", pod.Name, err) + return + } + } + podArray := []corev1.Pod{pod} + _, err = waitForDelete(podArray, 1*time.Second, time.Duration(globalTimeout), true, options.Logger, getPodFn) + if err == nil { + returnCh <- nil + } else { + returnCh <- fmt.Errorf("error when waiting for pod %q terminating: %v", pod.Name, err) + } + }(pod, returnCh, stopCh) + } + + doneCount := 0 + var errors []error + + globalTimeoutCh := time.After(globalTimeout) + numPods := len(pods) + for doneCount < numPods { + select { + case err := <-returnCh: + doneCount++ + if err != nil { + errors = append(errors, err) + } + case <-globalTimeoutCh: + logf(options.Logger, "Closing stopCh") + close(stopCh) + } + } + return utilerrors.NewAggregate(errors) +} + +func deletePods(client typedcorev1.CoreV1Interface, pods []corev1.Pod, options *DrainOptions, getPodFn func(namespace, name string) (*corev1.Pod, error)) error { + // 0 timeout means infinite, we use MaxInt64 to represent it. 
+ var globalTimeout time.Duration + if options.Timeout == 0 { + globalTimeout = time.Duration(math.MaxInt64) + } else { + globalTimeout = options.Timeout + } + deleteOptions := &metav1.DeleteOptions{} + if options.GracePeriodSeconds >= 0 { + gracePeriodSeconds := int64(options.GracePeriodSeconds) + deleteOptions.GracePeriodSeconds = &gracePeriodSeconds + } + for _, pod := range pods { + err := client.Pods(pod.Namespace).Delete(pod.Name, deleteOptions) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + } + _, err := waitForDelete(pods, 1*time.Second, globalTimeout, false, options.Logger, getPodFn) + return err +} + +func waitForDelete(pods []corev1.Pod, interval, timeout time.Duration, usingEviction bool, logger golog.Logger, getPodFn func(string, string) (*corev1.Pod, error)) ([]corev1.Pod, error) { + var verbStr string + if usingEviction { + verbStr = "evicted" + } else { + verbStr = "deleted" + } + + err := wait.PollImmediate(interval, timeout, func() (bool, error) { + pendingPods := []corev1.Pod{} + for i, pod := range pods { + p, err := getPodFn(pod.Namespace, pod.Name) + if apierrors.IsNotFound(err) || (p != nil && p.ObjectMeta.UID != pod.ObjectMeta.UID) { + logf(logger, "pod %q removed (%s)", pod.Name, verbStr) + continue + } else if err != nil { + return false, err + } else { + pendingPods = append(pendingPods, pods[i]) + } + } + pods = pendingPods + if len(pendingPods) > 0 { + return false, nil + } + return true, nil + }) + return pods, err +} + +// SupportEviction uses Discovery API to find out if the server +// supports the eviction subresource. If supported, it will return +// its groupVersion; otherwise it will return an empty string. +func SupportEviction(clientset kubernetes.Interface) (string, error) { + discoveryClient := clientset.Discovery() + groupList, err := discoveryClient.ServerGroups() + if err != nil { + return "", err + } + foundPolicyGroup := false + var policyGroupVersion string + for _, group := range groupList.Groups { + if group.Name == "policy" { + foundPolicyGroup = true + policyGroupVersion = group.PreferredVersion.GroupVersion + break + } + } + if !foundPolicyGroup { + return "", nil + } + resourceList, err := discoveryClient.ServerResourcesForGroupVersion("v1") + if err != nil { + return "", err + } + for _, resource := range resourceList.APIResources { + if resource.Name == EvictionSubresource && resource.Kind == EvictionKind { + return policyGroupVersion, nil + } + } + return "", nil +} + +// Cordon marks a node "Unschedulable". This method is idempotent. +func Cordon(client typedcorev1.NodeInterface, node *corev1.Node, logger golog.Logger) error { + return cordonOrUncordon(client, node, logger, true) +} + +// Uncordon marks a node "Schedulable". This method is idempotent. +func Uncordon(client typedcorev1.NodeInterface, node *corev1.Node, logger golog.Logger) error { + return cordonOrUncordon(client, node, logger, false) +} + +func cordonOrUncordon(client typedcorev1.NodeInterface, node *corev1.Node, logger golog.Logger, desired bool) error { + unsched := node.Spec.Unschedulable + if unsched == desired { + return nil + } + + patch := []byte(fmt.Sprintf("{\"spec\":{\"unschedulable\":%t}}", desired)) + _, err := client.Patch(node.Name, types.StrategicMergePatchType, patch) + if err == nil { + verbStr := "cordoned" + if !desired { + verbStr = "un" + verbStr + } + logf(logger, "%s node %q", verbStr, node.Name) + } + return err +} + +func log(logger golog.Logger, v ...interface{}) { + if logger != nil { + logger.Log(v...) 
+ } +} + +func logf(logger golog.Logger, format string, v ...interface{}) { + if logger != nil { + logger.Logf(format, v...) + } +}
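
For reference, a minimal sketch of how a caller might use the newly vendored pkg/drain helpers that replace github.com/openshift/kubernetes-drain. The function name drainNode and the option values are illustrative assumptions, not part of this patch; the Drain and DrainOptions signatures are taken from the vendored drain.go above, and the client-go Get call uses the pre-context-argument signature that this vendor tree ships.

package main

import (
	"fmt"
	"time"

	kubedrain "github.com/openshift/cluster-api/pkg/drain"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// drainNode cordons the named node and evicts (or, where eviction is not
// supported by the API server, deletes) its pods, mirroring how the machine
// controller drains a node before deleting a machine.
func drainNode(client kubernetes.Interface, nodeName string) error {
	node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("unable to get node %q: %v", nodeName, err)
	}

	return kubedrain.Drain(
		client,
		[]*corev1.Node{node},
		&kubedrain.DrainOptions{
			Force:              true,             // also remove pods with no managing controller
			IgnoreDaemonsets:   true,             // DaemonSet-managed pods are never deleted anyway
			DeleteLocalData:    true,             // allow pods using emptyDir volumes
			GracePeriodSeconds: -1,               // honour each pod's own termination grace period
			Timeout:            60 * time.Second, // give up after a minute; 0 would mean wait forever
			// Logger is optional; the package's log helpers tolerate a nil logger.
		},
	)
}

Drain first cordons every node it is given and only then evicts pods, so even a partially failed drain leaves the node unschedulable; a caller that wants to retry can simply invoke it again, since Cordon and the eviction loop are idempotent.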