diff --git a/cmd/clusterctl/examples/openstack/cluster.yaml b/cmd/clusterctl/examples/openstack/cluster.yaml index 5c797b32f3..4624a9cbc5 100644 --- a/cmd/clusterctl/examples/openstack/cluster.yaml +++ b/cmd/clusterctl/examples/openstack/cluster.yaml @@ -15,4 +15,7 @@ spec: kind: "OpenstackProviderSpec" tags: - a_cluster_wide_tag - masterIP: + clusterConfiguration: + controlPlaneEndpoint: :6443 + kubernetesVersion: 1.15.0 + diff --git a/cmd/clusterctl/examples/openstack/provider-component/user-data/centos/templates/master-user-data.sh b/cmd/clusterctl/examples/openstack/provider-component/user-data/centos/templates/master-user-data.sh index 8b0431cb2d..9ca8e1da5c 100644 --- a/cmd/clusterctl/examples/openstack/provider-component/user-data/centos/templates/master-user-data.sh +++ b/cmd/clusterctl/examples/openstack/provider-component/user-data/centos/templates/master-user-data.sh @@ -7,45 +7,22 @@ NAMESPACE={{ .Machine.ObjectMeta.Namespace }} MACHINE=$NAMESPACE MACHINE+="/" MACHINE+={{ .Machine.ObjectMeta.Name }} -CONTROL_PLANE_VERSION={{ .Machine.Spec.Versions.ControlPlane }} -CLUSTER_DNS_DOMAIN={{ .Cluster.Spec.ClusterNetwork.ServiceDomain }} -POD_CIDR={{ .PodCIDR }} -SERVICE_CIDR={{ .ServiceCIDR }} ARCH=amd64 - swapoff -a # disable swap in fstab sed -i.bak -r 's/(.+ swap .+)/#\1/' /etc/fstab -# Getting master ip from the metadata of the node. By default we try the public-ipv4 -# If we don't get any, we fall back to local-ipv4 and in the worst case to localhost -MASTER="" + +# Getting local ip from the metadata of the node. +echo "Getting local ip from metadata" for i in $(seq 60); do - echo "trying to get public-ipv4 $i / 60" - MASTER=$(curl --fail -s http://169.254.169.254/2009-04-04/meta-data/public-ipv4) - if [[ $? == 0 ]] && [[ -n "$MASTER" ]]; then + echo "trying to get local-ipv4 $i / 60" + OPENSTACK_IPV4_LOCAL=$(curl --fail -s http://169.254.169.254/latest/meta-data/local-ipv4) + if [[ $? == 0 ]] && [[ -n "$OPENSTACK_IPV4_LOCAL" ]]; then break fi sleep 1 done -if [[ -z "$MASTER" ]]; then - echo "falling back to local-ipv4" - for i in $(seq 60); do - echo "trying to get local-ipv4 $i / 60" - MASTER=$(curl --fail -s http://169.254.169.254/2009-04-04/meta-data/local-ipv4) - if [[ $? == 0 ]] && [[ -n "$MASTER" ]]; then - break - fi - sleep 1 - done -fi - -if [[ -z "$MASTER" ]]; then - echo "falling back to localhost" - MASTER="localhost" -fi -MASTER="${MASTER}:443" - cat < /etc/yum.repos.d/kubernetes.repo [kubernetes] name=Kubernetes @@ -124,81 +101,17 @@ EOF # Set up kubeadm config file to pass parameters to kubeadm init. cat > /etc/kubernetes/kubeadm_config.yaml < /etc/yum.repos.d/kubernetes.repo [kubernetes] @@ -54,21 +51,7 @@ echo $OPENSTACK_CLOUD_CACERT_CONFIG | base64 -d > /etc/certs/cacert # Set up kubeadm config file to pass to kubeadm join. 
cat > /etc/kubernetes/kubeadm_config.yaml <&1 | tee /var/log/startup.log diff --git a/cmd/clusterctl/examples/openstack/provider-component/user-data/coreos/templates/master.yaml b/cmd/clusterctl/examples/openstack/provider-component/user-data/coreos/templates/master.yaml index c977e0d63c..7b1049cea1 100644 --- a/cmd/clusterctl/examples/openstack/provider-component/user-data/coreos/templates/master.yaml +++ b/cmd/clusterctl/examples/openstack/provider-component/user-data/coreos/templates/master.yaml @@ -85,51 +85,7 @@ storage: - path: /etc/kubernetes/kubeadm_config.yaml filesystem: root contents: - inline: | - apiVersion: kubeadm.k8s.io/v1beta1 - kind: InitConfiguration - localAPIEndpoint: - bindPort: 443 - nodeRegistration: - kubeletExtraArgs: - cloud-provider: "openstack" - cloud-config: "/etc/kubernetes/cloud.conf" - --- - apiVersion: kubeadm.k8s.io/v1beta1 - kind: ClusterConfiguration - kubernetesVersion: v{{ .Machine.Spec.Versions.ControlPlane }} - networking: - serviceSubnet: {{ .ServiceCIDR }} - podSubnet: {{ .PodCIDR }} - dns: - type: CoreDNS - clusterName: kubernetes - controlPlaneEndpoint: ${MASTER} - apiServer: - extraArgs: - cloud-provider: "openstack" - cloud-config: "/etc/kubernetes/cloud.conf" - extraVolumes: - - name: cloud - hostPath: "/etc/kubernetes/cloud.conf" - mountPath: "/etc/kubernetes/cloud.conf" - - name: cacert - hostPath: "/etc/certs/cacert" - mountPath: "/etc/certs/cacert" - controllerManager: - extraArgs: - cluster-cidr: {{ .PodCIDR }} - service-cluster-ip-range: {{ .ServiceCIDR }} - allocate-node-cidrs: "true" - cloud-provider: "openstack" - cloud-config: "/etc/kubernetes/cloud.conf" - extraVolumes: - - name: cloud - hostPath: "/etc/kubernetes/cloud.conf" - mountPath: "/etc/kubernetes/cloud.conf" - - name: cacert - hostPath: "/etc/certs/cacert" - mountPath: "/etc/certs/cacert" + inline: '{{ .KubeadmConfig | EscapeNewLines }}' user: id: 0 group: @@ -143,28 +99,8 @@ storage: . /run/metadata/coreos - MASTER="" - - echo "Trying to get the public IPv4 address." - if [[ -n "$COREOS_OPENSTACK_IPV4_PUBLIC" ]]; then - MASTER="$COREOS_OPENSTACK_IPV4_PUBLIC" - fi - - if [[ -z "$MASTER" ]]; then - echo "Trying to get the local IPv4 address. (Try $i/60)" - if [[ -n "$COREOS_OPENSTACK_IPV4_LOCAL" ]]; then - MASTER="$COREOS_OPENSTACK_IPV4_LOCAL" - fi - fi - - if [[ -z "$MASTER" ]]; then - echo "Falling back to localhost." 
- MASTER="localhost" - fi - - MASTER="${MASTER}:443" - - /usr/bin/sed -i "s#\${MASTER}#$MASTER#" /etc/kubernetes/kubeadm_config.yaml + echo "Replacing OPENSTACK_IPV4_LOCAL in kubeadm_config through ${COREOS_OPENSTACK_IPV4_LOCAL}" + /usr/bin/sed -i "s#\${OPENSTACK_IPV4_LOCAL}#${COREOS_OPENSTACK_IPV4_LOCAL}#" /etc/kubernetes/kubeadm_config.yaml user: id: 0 group: diff --git a/cmd/clusterctl/examples/openstack/provider-component/user-data/coreos/templates/worker.yaml b/cmd/clusterctl/examples/openstack/provider-component/user-data/coreos/templates/worker.yaml index c397654b42..b5a013994d 100644 --- a/cmd/clusterctl/examples/openstack/provider-component/user-data/coreos/templates/worker.yaml +++ b/cmd/clusterctl/examples/openstack/provider-component/user-data/coreos/templates/worker.yaml @@ -3,19 +3,7 @@ storage: - path: /etc/kubernetes/kubeadm_config.yaml filesystem: root contents: - inline: | - apiVersion: kubeadm.k8s.io/v1beta1 - kind: JoinConfiguration - nodeRegistration: - kubeletExtraArgs: - cloud-provider: "openstack" - cloud-config: "/etc/kubernetes/cloud.conf" - discovery: - bootstrapToken: - apiServerEndpoint: {{ call .GetMasterEndpoint }} - token: {{ .Token }} - unsafeSkipCAVerification: true - tlsBootstrapToken: {{ .Token }} + inline: '{{ .KubeadmConfig | EscapeNewLines }}' user: id: 0 group: diff --git a/cmd/clusterctl/examples/openstack/provider-component/user-data/ubuntu/templates/master-user-data.sh b/cmd/clusterctl/examples/openstack/provider-component/user-data/ubuntu/templates/master-user-data.sh index 0a2c8aa596..c6a9493b81 100644 --- a/cmd/clusterctl/examples/openstack/provider-component/user-data/ubuntu/templates/master-user-data.sh +++ b/cmd/clusterctl/examples/openstack/provider-component/user-data/ubuntu/templates/master-user-data.sh @@ -8,10 +8,6 @@ NAMESPACE={{ .Machine.ObjectMeta.Namespace }} MACHINE=$NAMESPACE MACHINE+="/" MACHINE+={{ .Machine.ObjectMeta.Name }} -CONTROL_PLANE_VERSION={{ .Machine.Spec.Versions.ControlPlane }} -CLUSTER_DNS_DOMAIN={{ .Cluster.Spec.ClusterNetwork.ServiceDomain }} -POD_CIDR={{ .PodCIDR }} -SERVICE_CIDR={{ .ServiceCIDR }} ARCH=amd64 swapoff -a # disable swap in fstab @@ -23,36 +19,17 @@ apt-get update -y apt-get install -y \ prips -# Getting master ip from the metadata of the node. By default we try the public-ipv4 -# If we don't get any, we fall back to local-ipv4 and in the worst case to localhost -MASTER="" +# Getting local ip from the metadata of the node. +echo "Getting local ip from metadata" for i in $(seq 60); do - echo "trying to get public-ipv4 $i / 60" - MASTER=$(curl --fail -s http://169.254.169.254/2009-04-04/meta-data/public-ipv4) - if [[ $? == 0 ]] && [[ -n "$MASTER" ]]; then + echo "trying to get local-ipv4 $i / 60" + OPENSTACK_IPV4_LOCAL=$(curl --fail -s http://169.254.169.254/latest/meta-data/local-ipv4) + if [[ $? == 0 ]] && [[ -n "$OPENSTACK_IPV4_LOCAL" ]]; then break fi sleep 1 done -if [[ -z "$MASTER" ]]; then - echo "falling back to local-ipv4" - for i in $(seq 60); do - echo "trying to get local-ipv4 $i / 60" - MASTER=$(curl --fail -s http://169.254.169.254/2009-04-04/meta-data/local-ipv4) - if [[ $? 
== 0 ]] && [[ -n "$MASTER" ]]; then - break - fi - sleep 1 - done -fi - -if [[ -z "$MASTER" ]]; then - echo "falling back to localhost" - MASTER="localhost" -fi -MASTER="${MASTER}:443" - function install_configure_docker () { # prevent docker from auto-starting echo "exit 101" > /usr/sbin/policy-rc.d @@ -78,8 +55,7 @@ install_configure_docker curl -sSL https://dl.k8s.io/release/${VERSION}/bin/linux/${ARCH}/kubeadm > /usr/bin/kubeadm.dl chmod a+rx /usr/bin/kubeadm.dl -# kubeadm uses 10th IP as DNS server -CLUSTER_DNS_SERVER=$(prips ${SERVICE_CIDR} | head -n 11 | tail -n 1) + # Our Debian packages have versions like "1.8.0-00" or "1.8.0-01". Do a prefix # search based on our SemVer to find the right (newest) package version. function getversion() { @@ -103,11 +79,6 @@ apt-get install -y \ mv /usr/bin/kubeadm.dl /usr/bin/kubeadm chmod a+rx /usr/bin/kubeadm -cat > /etc/systemd/system/kubelet.service.d/20-kubenet.conf < /etc/kubernetes/cloud.conf chmod 600 /etc/kubernetes/cloud.conf mkdir /etc/certs @@ -153,78 +124,13 @@ cat > /etc/kubernetes/pki/sa.key < /etc/kubernetes/kubeadm_config.yaml < /etc/certs/cacert # Set up kubeadm config file to pass to kubeadm join. cat > /etc/kubernetes/kubeadm_config.yaml <:", + where the only currently supported type is "sha256". This + is a hex-encoded SHA-256 hash of the Subject Public Key + Info (SPKI) object in DER-encoded ASN.1. These hashes + can be calculated using, for example, OpenSSL: openssl + x509 -pubkey -in ca.crt openssl rsa -pubin -outform der + 2>&/dev/null | openssl dgst -sha256 -hex' + items: + type: string + type: array + token: + description: Token is a token used to validate cluster information + fetched from the master. + type: string + unsafeSkipCAVerification: + description: UnsafeSkipCAVerification allows token-based + discovery without CA verification via CACertHashes. This + can weaken the security of kubeadm since other nodes can + impersonate the master. + type: boolean + required: + - token + - unsafeSkipCAVerification + type: object + file: + description: File is used to specify a file or URL to a kubeconfig + file from which to load cluster information BootstrapToken + and File are mutually exclusive + properties: + kubeConfigPath: + description: KubeConfigPath is used to specify the actual + file path or URL to the kubeconfig file from which to + load cluster information + type: string + required: + - kubeConfigPath + type: object + timeout: + description: Timeout modifies the discovery timeout + type: string + tlsBootstrapToken: + description: TLSBootstrapToken is a token used for TLS bootstrapping. + If .BootstrapToken is set, this field is defaulted to .BootstrapToken.Token, + but can be overridden. If .File is set, this field **must + be set** in case the KubeConfigFile does not contain any other + authentication information + type: string + required: + - tlsBootstrapToken + type: object + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + nodeRegistration: + description: NodeRegistration holds fields that relate to registering + the new master node to the cluster + properties: + criSocket: + description: CRISocket is used to retrieve container runtime + info. 
This information will be annotated to the Node API object, + for later re-use + type: string + kubeletExtraArgs: + description: KubeletExtraArgs passes through extra arguments + to the kubelet. The arguments here are passed to the kubelet + command line via the environment file kubeadm writes at runtime + for the kubelet to source. This overrides the generic base-level + configuration in the kubelet-config-1.X ConfigMap Flags have + higher priority when parsing. These values are local and specific + to the node kubeadm is executing on. + type: object + name: + description: Name is the `.Metadata.Name` field of the Node + API object that will be created in this `kubeadm init` or + `kubeadm joiń` operation. This field is also used in the CommonName + field of the kubelet's client certificate to the API server. + Defaults to the hostname of the node if not provided. + type: string + taints: + description: 'Taints specifies the taints the Node API object + should be registered with. If this field is unset, i.e. nil, + in the `kubeadm init` process it will be defaulted to []v1.Taint{''node-role.kubernetes.io/master=""''}. + If you don''t want to taint your master node, set this field + to an empty slice, i.e. `taints: {}` in the YAML file. This + field is solely used for Node registration.' + items: + type: object + type: array + type: object + required: + - nodeRegistration + - caCertPath + - discovery + type: object + type: object metadata: type: object networks: diff --git a/go.mod b/go.mod index 3e1a69f69c..b026fece6e 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,6 @@ require ( github.com/gophercloud/gophercloud v0.3.0 github.com/gophercloud/utils v0.0.0-20190527093828-25f1b77b8c03 github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect - github.com/grpc-ecosystem/grpc-gateway v1.9.1 // indirect github.com/imdario/mergo v0.3.7 // indirect github.com/markbates/inflect v1.0.4 // indirect github.com/pborman/uuid v1.2.0 // indirect @@ -32,7 +31,6 @@ require ( github.com/pkg/errors v0.8.1 github.com/prometheus/client_golang v0.9.4 // indirect github.com/rogpeppe/go-internal v1.3.0 // indirect - github.com/spf13/afero v1.2.2 // indirect github.com/spf13/cobra v0.0.5 // indirect github.com/vincent-petithory/dataurl v0.0.0-20160330182126-9a301d65acbb // indirect go.uber.org/atomic v1.4.0 // indirect @@ -45,20 +43,20 @@ require ( golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59 // indirect google.golang.org/appengine v1.6.1 // indirect - google.golang.org/genproto v0.0.0-20190611190212-a7e196e89fd3 // indirect - google.golang.org/grpc v1.21.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.2.2 - k8s.io/api v0.0.0-20190612125737-db0771252981 - k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236 // indirect - k8s.io/apimachinery v0.0.0-20190612125636-6a5db36e93ad - k8s.io/client-go v0.0.0-20190228174230-b40b2a5939e4 - k8s.io/cluster-bootstrap v0.0.0-20190612131323-aa3fd9f69a09 - k8s.io/code-generator v0.0.0-20190612125529-c522cb6c26aa - k8s.io/component-base v0.0.0-20190615090122-7edc45fbd48e // indirect + k8s.io/api v0.0.0 + k8s.io/apiextensions-apiserver v0.0.0-00010101000000-000000000000 // indirect + k8s.io/apimachinery v0.0.0 + k8s.io/client-go v0.0.0 + k8s.io/cluster-bootstrap v0.0.0 + k8s.io/code-generator v0.0.0 + k8s.io/component-base v0.0.0-00010101000000-000000000000 // indirect k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a // indirect k8s.io/klog v0.3.3 
k8s.io/kube-openapi v0.0.0-20190603182131-db7b694dc208 // indirect + k8s.io/kubernetes v1.13.4 + k8s.io/utils v0.0.0-20190801114015-581e00157fb1 // indirect sigs.k8s.io/cluster-api v0.1.4 sigs.k8s.io/controller-runtime v0.1.12 sigs.k8s.io/controller-tools v0.1.11 @@ -68,5 +66,23 @@ require ( replace ( k8s.io/api => k8s.io/api v0.0.0-20190222213804-5cb15d344471 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236 k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628 + k8s.io/apiserver => k8s.io/apiserver v0.0.0-20190228174905-79427f02047f + k8s.io/cli-runtime => k8s.io/cli-runtime v0.0.0-20190228180923-a9e421a79326 + k8s.io/client-go => k8s.io/client-go v0.0.0-20190228174230-b40b2a5939e4 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.0.0-20190228181926-f531db154915 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.0.0-20190228181738-e96ff33745e4 + k8s.io/code-generator => k8s.io/code-generator v0.0.0-20181117043124-c2090bec4d9b + k8s.io/component-base => k8s.io/component-base v0.0.0-20190620085130-185d68e6e6ea + k8s.io/cri-api => k8s.io/cri-api v0.0.0-20190531030430-6117653b35f1 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.0.0-20190620090116-299a7b270edc + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.0.0-20190228175259-3e0149950b0e + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.0.0-20190228181625-2d4c0aa6bcf3 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.0.0-20190228181220-93400f02525d + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.0.0-20190228181501-284499a24813 + k8s.io/kubelet => k8s.io/kubelet v0.0.0-20190228181339-b2baec5a487a + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.0.0-20190620090156-2138f2c9de18 + k8s.io/metrics => k8s.io/metrics v0.0.0-20190228180609-34472d076c30 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.0.0-20190228175645-7167784fdc46 ) diff --git a/go.sum b/go.sum index a32d26014a..80137b534f 100644 --- a/go.sum +++ b/go.sum @@ -18,7 +18,6 @@ github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6L github.com/Azure/go-autorest/tracing v0.1.0 h1:TRBxC5Pj/fIuh4Qob0ZpkggbfT8RC0SubHbpV3p4/Vc= github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= @@ -85,7 +84,6 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= github.com/gobuffalo/envy v1.7.0 h1:GlXgaiBkmrYMHco6t4j7SacKO4XUjvh5pwXh0f4uxXU= github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1 
h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= @@ -128,9 +126,8 @@ github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.1 h1:LgwPEIdyJmF9Ug9nINVNspG6Z6P8/TM0yKdQ5h3VQaQ= -github.com/grpc-ecosystem/grpc-gateway v1.9.1/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= @@ -212,7 +209,6 @@ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0 h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk= @@ -261,15 +257,11 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -306,7 +298,6 @@ golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -325,19 +316,13 @@ golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59 h1:QjA/9ArTfVTLfEhClDCG7SGrZkZixxWpwNCDiwJfh88= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.6.0 h1:2tJEkRfnZL5g1GeBUlITh/rqT5HG3sFcoVCUUxmgJ2g= @@ -351,15 +336,13 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 
+google.golang.org/genproto v0.0.0-20190530194941-fb225487d101 h1:wuGevabY6r+ivPNagjUXGGxF+GqgMd+dBhjsxW4q9u4= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190611190212-a7e196e89fd3 h1:0LGHEA/u5XLibPOx6D7D8FBT/ax6wT57vNKY0QckCwo= -google.golang.org/genproto v0.0.0-20190611190212-a7e196e89fd3/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= @@ -388,28 +371,28 @@ k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628 h1:UYfHH+KEF88OTg+GojQUwF k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= k8s.io/client-go v0.0.0-20190228174230-b40b2a5939e4 h1:aE8wOCKuoRs2aU0OP/Rz8SXiAB0FTTku3VtGhhrkSmc= k8s.io/client-go v0.0.0-20190228174230-b40b2a5939e4/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= -k8s.io/cluster-bootstrap v0.0.0-20190612131323-aa3fd9f69a09 h1:XVDUMJYMVyNZmt6OjS1fCB9VKRR58iRWe49+pgdDZtA= -k8s.io/cluster-bootstrap v0.0.0-20190612131323-aa3fd9f69a09/go.mod h1:FVfUVPEEWnJVBpdMyfafv4LbLJKMf9he6hIRBWsoS44= -k8s.io/code-generator v0.0.0-20190612125529-c522cb6c26aa h1:R/ZQEUP8jVryCMdJDSiHqx00/u9k2oRt0LEZq/qK+tE= -k8s.io/code-generator v0.0.0-20190612125529-c522cb6c26aa/go.mod h1:G8bQwmHm2eafm5bgtX67XDZQ8CWKSGu9DekI+yN4Y5I= -k8s.io/component-base v0.0.0-20190615090122-7edc45fbd48e h1:vQPub++/Sh/fXLU0F8NHfMBTHQ371ZxFKF3iKMFawZw= -k8s.io/component-base v0.0.0-20190615090122-7edc45fbd48e/go.mod h1:MmIDXnint3qMN0cqXHKrSiJ2XQKo3J1BPIz7in7NvO0= -k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/cluster-bootstrap v0.0.0-20190228181738-e96ff33745e4 h1:C6Q0O7LHTsykhy2KMOfov/wJGeGOLOEMmVSn5bOxfxI= +k8s.io/cluster-bootstrap v0.0.0-20190228181738-e96ff33745e4/go.mod h1:iBSm2nwo3OaiuW8VDvc3ySDXK5SKfUrxwPvBloKG7zg= +k8s.io/code-generator v0.0.0-20181117043124-c2090bec4d9b h1:KH0fUlgdFZH8UMxJ/FDCYHpczfSQKefetq5NjL6BVF0= +k8s.io/code-generator v0.0.0-20181117043124-c2090bec4d9b/go.mod h1:MYiN+ZJZ9HkETbgVZdWw2AsuAi9PZ4V80cwfuf2axe8= +k8s.io/component-base v0.0.0-20190620085130-185d68e6e6ea h1:CAN9Jo6bb5+mp3s1/1w8TbaWIxYK1WGQwwYvlacSet4= +k8s.io/component-base v0.0.0-20190620085130-185d68e6e6ea/go.mod h1:VLedAFwENz2swOjm0zmUXpAP2mV55c49xgaOzPBI/QQ= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a h1:QoHVuRquf80YZ+/bovwxoMO3Q/A3nt3yTgS0/0nejuk= k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog 
v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.3 h1:niceAagH1tzskmaie/icWd7ci1wbG7Bf2c6YGcQv+3c= k8s.io/klog v0.3.3/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20190603182131-db7b694dc208 h1:5sW+fEHvlJI3Ngolx30CmubFulwH28DhKjGf70Xmtco= k8s.io/kube-openapi v0.0.0-20190603182131-db7b694dc208/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= +k8s.io/kubernetes v1.13.4 h1:gQqFv/pH8hlbznLXQUsi8s5zqYnv0slmUDl/yVA0EWc= +k8s.io/kubernetes v1.13.4/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4= k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= -modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= -modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= -modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= -modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= -modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +k8s.io/utils v0.0.0-20190801114015-581e00157fb1 h1:+ySTxfHnfzZb9ys375PXNlLhkJPLKgHajBU0N62BDvE= +k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= sigs.k8s.io/cluster-api v0.1.4 h1:QKY3hJMFTKP8N7sZIELR8jVFw9yGm8eeZMnj/1E36u4= sigs.k8s.io/cluster-api v0.1.4/go.mod h1:aEXstFx3krpj6/AOcV/rOP2VLKdrtSNUMDR2Kmt6YSs= diff --git a/pkg/apis/openstackproviderconfig/v1alpha1/types.go b/pkg/apis/openstackproviderconfig/v1alpha1/types.go index f01ff947d4..7ff385019f 100644 --- a/pkg/apis/openstackproviderconfig/v1alpha1/types.go +++ b/pkg/apis/openstackproviderconfig/v1alpha1/types.go @@ -19,6 +19,8 @@ package v1alpha1 import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + kubeadmv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" ) // +genclient @@ -79,6 +81,21 @@ type OpenstackProviderSpec struct { // The volume metadata to boot from RootVolume *RootVolume `json:"rootVolume,omitempty"` + + // KubeadmConfiguration holds the kubeadm configuration options + // +optional + KubeadmConfiguration KubeadmConfiguration `json:"kubeadmConfiguration,omitempty"` +} + +// KubeadmConfiguration holds the various configurations that kubeadm uses +type KubeadmConfiguration struct { + // JoinConfiguration is used to customize any kubeadm join configuration + // parameters. + Join kubeadmv1beta1.JoinConfiguration `json:"join,omitempty"` + + // InitConfiguration is used to customize any kubeadm init configuration + // parameters. + Init kubeadmv1beta1.InitConfiguration `json:"init,omitempty"` } type SecurityGroupParam struct { @@ -228,6 +245,10 @@ type OpenstackClusterProviderSpec struct { // machines belonging to that group. In the future, we could make this more flexible. 
ManagedSecurityGroups bool `json:"managedSecurityGroups"` + // DisablePortSecurity disables the port security of the network created for the + // Kubernetes cluster, which also disables SecurityGroups + DisablePortSecurity bool `json:"disablePortSecurity,omitempty"` + // Tags for all resources in cluster Tags []string `json:"tags,omitempty"` @@ -246,9 +267,9 @@ type OpenstackClusterProviderSpec struct { // SAKeyPair is the service account key pair. SAKeyPair KeyPair `json:"saKeyPair,omitempty"` - // MasterIP is the ip of the master, if there is Loadbalancer, should be LB - // ip and no LB should be floating ip address - MasterIP string `json:"masterIP,omitempty"` + // ClusterConfiguration holds the cluster-wide information used during a + // kubeadm init call. + ClusterConfiguration kubeadmv1beta1.ClusterConfiguration `json:"clusterConfiguration,omitempty"` } // KeyPair is how operators can supply custom keypairs for kubeadm to use. diff --git a/pkg/apis/openstackproviderconfig/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/openstackproviderconfig/v1alpha1/zz_generated.deepcopy.go index 63f73659ef..df3c9eac61 100644 --- a/pkg/apis/openstackproviderconfig/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/openstackproviderconfig/v1alpha1/zz_generated.deepcopy.go @@ -94,6 +94,24 @@ func (in *KeyPair) DeepCopy() *KeyPair { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeadmConfiguration) DeepCopyInto(out *KubeadmConfiguration) { + *out = *in + in.Join.DeepCopyInto(&out.Join) + in.Init.DeepCopyInto(&out.Init) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeadmConfiguration. +func (in *KubeadmConfiguration) DeepCopy() *KubeadmConfiguration { + if in == nil { + return nil + } + out := new(KubeadmConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LoadBalancer) DeepCopyInto(out *LoadBalancer) { *out = *in @@ -201,6 +219,7 @@ func (in *OpenstackClusterProviderSpec) DeepCopyInto(out *OpenstackClusterProvid in.EtcdCAKeyPair.DeepCopyInto(&out.EtcdCAKeyPair) in.FrontProxyCAKeyPair.DeepCopyInto(&out.FrontProxyCAKeyPair) in.SAKeyPair.DeepCopyInto(&out.SAKeyPair) + in.ClusterConfiguration.DeepCopyInto(&out.ClusterConfiguration) return } @@ -338,6 +357,7 @@ func (in *OpenstackProviderSpec) DeepCopyInto(out *OpenstackProviderSpec) { *out = new(RootVolume) **out = **in } + in.KubeadmConfiguration.DeepCopyInto(&out.KubeadmConfiguration) return } diff --git a/pkg/cloud/openstack/cluster/actuator.go b/pkg/cloud/openstack/cluster/actuator.go index 29daa3e4a3..c16785d463 100644 --- a/pkg/cloud/openstack/cluster/actuator.go +++ b/pkg/cloud/openstack/cluster/actuator.go @@ -24,6 +24,8 @@ import ( "sigs.k8s.io/cluster-api/pkg/controller/remote" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/patch" + "strconv" + "strings" ) // Actuator controls cluster related infrastructure. @@ -224,18 +226,14 @@ func (a *Actuator) storeCluster(cluster *clusterv1.Cluster, clusterCopy *cluster cluster.ResourceVersion = result.ResourceVersion } - // set to APIServerLoadBalancer IP & Port for now. With generated kubeadm - // this can be changed to clusterConfiguration.controlPlaneEndpoint for the - // non-LoadBalancer case. For now it doesn't matter because the Status is not - // used yet. 
- apiServerHost := clusterProviderSpec.APIServerLoadBalancerFloatingIP - apiServerPort := clusterProviderSpec.APIServerLoadBalancerPort - if clusterProviderSpec.ManagedAPIServerLoadBalancer { - if clusterProviderStatus.Network != nil && clusterProviderStatus.Network.APIServerLoadBalancer != nil && - apiServerHost != clusterProviderStatus.Network.APIServerLoadBalancer.IP { - return fmt.Errorf("APIServerLoadBalancer has IP %s instead of IP %s", clusterProviderStatus.Network.APIServerLoadBalancer.IP, apiServerHost) - } + controlPlaneURI := strings.Split(clusterProviderSpec.ClusterConfiguration.ControlPlaneEndpoint, ":") + apiServerHost := controlPlaneURI[0] + apiServerPortStr := controlPlaneURI[1] + apiServerPort, err := strconv.Atoi(apiServerPortStr) + if err != nil { + return fmt.Errorf("could not parse port of controlPlaneEndpoint %s: %v", clusterProviderSpec.ClusterConfiguration.ControlPlaneEndpoint, err) } + // Check if API endpoints is not set or has changed. if len(cluster.Status.APIEndpoints) == 0 || cluster.Status.APIEndpoints[0].Host != apiServerHost { cluster.Status.APIEndpoints = []clusterv1.APIEndpoint{ diff --git a/pkg/cloud/openstack/services/kubeadm/bootstraptoken.go b/pkg/cloud/openstack/services/kubeadm/bootstraptoken.go new file mode 100644 index 0000000000..01fc2852ab --- /dev/null +++ b/pkg/cloud/openstack/services/kubeadm/bootstraptoken.go @@ -0,0 +1,54 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeadm + +import ( + kubeadmv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" +) + +// BootstrapTokenDiscoveryOption will modify a kubeadm BootstrapTokenDiscovery. +type BootstrapTokenDiscoveryOption func(*kubeadmv1beta1.BootstrapTokenDiscovery) + +// NewBootstrapTokenDiscovery will create a new kubeadm bootstrap token discovery and run options against it. +func NewBootstrapTokenDiscovery(opts ...BootstrapTokenDiscoveryOption) *kubeadmv1beta1.BootstrapTokenDiscovery { + discovery := &kubeadmv1beta1.BootstrapTokenDiscovery{} + for _, opt := range opts { + opt(discovery) + } + return discovery +} + +// WithAPIServerEndpoint will set the kube-apiserver's endpoint for the bootstrap token discovery. +func WithAPIServerEndpoint(endpoint string) BootstrapTokenDiscoveryOption { + return func(b *kubeadmv1beta1.BootstrapTokenDiscovery) { + b.APIServerEndpoint = endpoint + } +} + +// WithToken will set the bootstrap token. +func WithToken(token string) BootstrapTokenDiscoveryOption { + return func(b *kubeadmv1beta1.BootstrapTokenDiscovery) { + b.Token = token + } +} + +// WitCACertificateHash sets the hash of CA for the bootstrap token to use. 
+func WithCACertificateHash(caCertHash string) BootstrapTokenDiscoveryOption { + return func(b *kubeadmv1beta1.BootstrapTokenDiscovery) { + b.CACertHashes = append(b.CACertHashes, caCertHash) + } +} diff --git a/pkg/cloud/openstack/services/kubeadm/bootstraptoken_test.go b/pkg/cloud/openstack/services/kubeadm/bootstraptoken_test.go new file mode 100644 index 0000000000..7d604a79a6 --- /dev/null +++ b/pkg/cloud/openstack/services/kubeadm/bootstraptoken_test.go @@ -0,0 +1,55 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeadm_test + +import ( + "reflect" + "testing" + + kubeadmv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/kubeadm" +) + +func TestNewBootstrapToken(t *testing.T) { + testcases := []struct { + name string + expected *kubeadmv1beta1.BootstrapTokenDiscovery + actual *kubeadmv1beta1.BootstrapTokenDiscovery + }{ + { + name: "simple test", + expected: &kubeadmv1beta1.BootstrapTokenDiscovery{ + APIServerEndpoint: "test apiserver endpoint", + Token: "test token", + CACertHashes: []string{"test ca cert hash"}, + }, + actual: kubeadm.NewBootstrapTokenDiscovery( + kubeadm.WithAPIServerEndpoint("test apiserver endpoint"), + kubeadm.WithToken("test token"), + kubeadm.WithCACertificateHash("test ca cert hash"), + ), + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + if !reflect.DeepEqual(tc.expected, tc.actual) { + t.Fatalf("Expected and actual: \n%v\n%v", tc.expected, tc.actual) + } + }) + } +} diff --git a/pkg/cloud/openstack/services/kubeadm/configs.go b/pkg/cloud/openstack/services/kubeadm/configs.go new file mode 100644 index 0000000000..548b2c1800 --- /dev/null +++ b/pkg/cloud/openstack/services/kubeadm/configs.go @@ -0,0 +1,179 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeadm + +import ( + kubeadmv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" + "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" +) + +// InitConfigurationOption will set various options on kubeadm's InitConfiguration type. +type InitConfigurationOption func(*kubeadmv1beta1.InitConfiguration) + +// SetInitConfigurationOptions will run all the options provided against some existing InitConfiguration. +// Use this to customize an existing InitConfiguration. 
+func SetInitConfigurationOptions(base *kubeadmv1beta1.InitConfiguration, opts ...InitConfigurationOption) { + for _, opt := range opts { + opt(base) + } +} + +// WithNodeRegistrationOptions will set the node registration options. +func WithNodeRegistrationOptions(nro kubeadmv1beta1.NodeRegistrationOptions) InitConfigurationOption { + return func(c *kubeadmv1beta1.InitConfiguration) { + c.NodeRegistration = nro + } +} + +// WithInitLocalAPIEndpointAndPort sets the local advertise address and port the kube-apiserver will advertise it is listening on. +func WithInitLocalAPIEndpointAndPort(endpoint string, port int) InitConfigurationOption { + return func(j *kubeadmv1beta1.InitConfiguration) { + j.LocalAPIEndpoint.AdvertiseAddress = endpoint + j.LocalAPIEndpoint.BindPort = int32(port) + } +} + +// ClusterConfigurationOption is a type of function that sets options on ak kubeadm ClusterConfiguration. +type ClusterConfigurationOption func(*kubeadmv1beta1.ClusterConfiguration) + +// SetClusterConfigurationOptions will run all options on the passed in ClusterConfiguration. +// Use this to customize an existing ClusterConfiguration. +func SetClusterConfigurationOptions(base *kubeadmv1beta1.ClusterConfiguration, opts ...ClusterConfigurationOption) { + for _, opt := range opts { + opt(base) + } +} + +// WithClusterName sets the cluster name on a ClusterConfiguration. +func WithClusterName(name string) ClusterConfigurationOption { + return func(c *kubeadmv1beta1.ClusterConfiguration) { + c.ClusterName = name + } +} + +// WithKubernetesVersion sets the kubernetes version on a ClusterConfiguration. +func WithKubernetesVersion(version string) ClusterConfigurationOption { + return func(c *kubeadmv1beta1.ClusterConfiguration) { + c.KubernetesVersion = version + } +} + +// WithAPIServerExtraArgs sets the kube-apiserver's extra arguments in the ClusterConfiguration. +func WithAPIServerExtraArgs(args map[string]string) ClusterConfigurationOption { + return func(c *kubeadmv1beta1.ClusterConfiguration) { + if c.APIServer.ControlPlaneComponent.ExtraArgs == nil { + c.APIServer.ControlPlaneComponent.ExtraArgs = map[string]string{} + } + for key, value := range args { + c.APIServer.ControlPlaneComponent.ExtraArgs[key] = value + } + } +} + +// WithAPIServerExtraVolumes sets the kube-apiserver's extra arguments in the ClusterConfiguration. +func WithAPIServerExtraVolumes(args []kubeadmv1beta1.HostPathMount) ClusterConfigurationOption { + return func(c *kubeadmv1beta1.ClusterConfiguration) { + c.APIServer.ExtraVolumes = append(c.APIServer.ExtraVolumes, args...) + } +} + +// WithControllerManagerExtraArgs sets the controller manager's extra arguments in the ClusterConfiguration. +func WithControllerManagerExtraArgs(args map[string]string) ClusterConfigurationOption { + return func(c *kubeadmv1beta1.ClusterConfiguration) { + if c.ControllerManager.ExtraArgs == nil { + c.ControllerManager.ExtraArgs = map[string]string{} + } + for key, value := range args { + c.ControllerManager.ExtraArgs[key] = value + } + } +} + +// WithControllerManagerExtraVolumes sets the kube-apiserver's extra arguments in the ClusterConfiguration. +func WithControllerManagerExtraVolumes(args []kubeadmv1beta1.HostPathMount) ClusterConfigurationOption { + return func(c *kubeadmv1beta1.ClusterConfiguration) { + c.ControllerManager.ExtraVolumes = append(c.ControllerManager.ExtraVolumes, args...) + } +} + +// WithClusterNetworkFromClusterNetworkingConfig maps a cluster api ClusterNetworkingConfig to a kubeadm Networking object. 
+func WithClusterNetworkFromClusterNetworkingConfig(networkingConfig v1alpha1.ClusterNetworkingConfig) ClusterConfigurationOption { + return func(c *kubeadmv1beta1.ClusterConfiguration) { + c.Networking.DNSDomain = networkingConfig.ServiceDomain + c.Networking.PodSubnet = networkingConfig.Pods.CIDRBlocks[0] + c.Networking.ServiceSubnet = networkingConfig.Services.CIDRBlocks[0] + } +} + +// WithControlPlaneEndpoint sets the control plane endpoint, usually the load balancer in front of the kube-apiserver(s). +func WithControlPlaneEndpoint(endpoint string) ClusterConfigurationOption { + return func(c *kubeadmv1beta1.ClusterConfiguration) { + if c.ControlPlaneEndpoint == "" { + c.ControlPlaneEndpoint = endpoint + } + } +} + +// WithAPIServerCertificateSANs will add the names provided to the kube-apiserver's list of certificate SANs. +func WithAPIServerCertificateSANs(names ...string) ClusterConfigurationOption { + return func(c *kubeadmv1beta1.ClusterConfiguration) { + c.APIServer.CertSANs = append(c.APIServer.CertSANs, names...) + } +} + +// JoinConfigurationOption is a type of function that sets options on a kubeadm JoinConfiguration. +type JoinConfigurationOption func(*kubeadmv1beta1.JoinConfiguration) + +// SetJoinConfigurationOptions runs a set of options against a JoinConfiguration. +// Use this to customize an existing JoinConfiguration. +func SetJoinConfigurationOptions(base *kubeadmv1beta1.JoinConfiguration, opts ...JoinConfigurationOption) { + for _, opt := range opts { + opt(base) + } +} + +// WithBootstrapTokenDiscovery sets the BootstrapToken on the Discovery field to the passed in value. +func WithBootstrapTokenDiscovery(bootstrapTokenDiscovery *kubeadmv1beta1.BootstrapTokenDiscovery) JoinConfigurationOption { + return func(j *kubeadmv1beta1.JoinConfiguration) { + j.Discovery.BootstrapToken = bootstrapTokenDiscovery + } +} + +// WithTLSBootstrapToken sets the tlsBootstrapToken on the Discovery field to the passed in value. +func WithTLSBootstrapToken(tlsBootstrapToken string) JoinConfigurationOption { + return func(j *kubeadmv1beta1.JoinConfiguration) { + j.Discovery.TLSBootstrapToken = tlsBootstrapToken + } +} + +// WithLocalAPIEndpointAndPort sets the local advertise address and port the kube-apiserver will advertise it is listening on. +func WithLocalAPIEndpointAndPort(endpoint string, port int) JoinConfigurationOption { + return func(j *kubeadmv1beta1.JoinConfiguration) { + if j.ControlPlane == nil { + j.ControlPlane = &kubeadmv1beta1.JoinControlPlane{} + } + j.ControlPlane.LocalAPIEndpoint.AdvertiseAddress = endpoint + j.ControlPlane.LocalAPIEndpoint.BindPort = int32(port) + } +} + +// WithJoinNodeRegistrationOptions will set the NodeRegistrationOptions on the JoinConfiguration. +func WithJoinNodeRegistrationOptions(nro kubeadmv1beta1.NodeRegistrationOptions) JoinConfigurationOption { + return func(j *kubeadmv1beta1.JoinConfiguration) { + j.NodeRegistration = nro + } +} diff --git a/pkg/cloud/openstack/services/kubeadm/configs_test.go b/pkg/cloud/openstack/services/kubeadm/configs_test.go new file mode 100644 index 0000000000..eb405f4bf6 --- /dev/null +++ b/pkg/cloud/openstack/services/kubeadm/configs_test.go @@ -0,0 +1,180 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeadm_test + +import ( + "encoding/json" + "reflect" + "testing" + + kubeadmv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/kubeadm" + clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" +) + +func TestInitConfiguration(t *testing.T) { + testcases := []struct { + name string + expected *kubeadmv1beta1.InitConfiguration + options []kubeadm.InitConfigurationOption + }{ + { + name: "simple", + expected: &kubeadmv1beta1.InitConfiguration{ + NodeRegistration: kubeadmv1beta1.NodeRegistrationOptions{}, + }, + options: []kubeadm.InitConfigurationOption{ + kubeadm.WithNodeRegistrationOptions( + kubeadm.NewNodeRegistration(), + ), + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + actual := &kubeadmv1beta1.InitConfiguration{} + kubeadm.SetInitConfigurationOptions(actual, tc.options...) + if !reflect.DeepEqual(actual, tc.expected) { + e, _ := json.Marshal(tc.expected) + a, _ := json.Marshal(actual) + t.Fatalf("expected vs actual:\n%+v\n%+v", string(e), string(a)) + } + }) + } +} + +func TestClusterConfiguration(t *testing.T) { + testcases := []struct { + name string + expected *kubeadmv1beta1.ClusterConfiguration + options []kubeadm.ClusterConfigurationOption + }{ + { + name: "simple", + expected: &kubeadmv1beta1.ClusterConfiguration{ + ClusterName: "test name", + KubernetesVersion: "test version", + APIServer: kubeadmv1beta1.APIServer{ + ControlPlaneComponent: kubeadmv1beta1.ControlPlaneComponent{ + ExtraArgs: map[string]string{ + "test key": "test value", + }, + }, + CertSANs: []string{ + "test san one", + "test san two", + }, + }, + ControllerManager: kubeadmv1beta1.ControlPlaneComponent{ + ExtraArgs: map[string]string{"test cm key": "test cm value"}, + }, + Networking: kubeadmv1beta1.Networking{ + DNSDomain: "test dns domain", + PodSubnet: "test pod subnet", + ServiceSubnet: "test service subnet", + }, + ControlPlaneEndpoint: "test control plane endpoint", + }, + options: []kubeadm.ClusterConfigurationOption{ + kubeadm.WithClusterName("test name"), + kubeadm.WithKubernetesVersion("test version"), + kubeadm.WithAPIServerExtraArgs(map[string]string{"test key": "test value"}), + kubeadm.WithControllerManagerExtraArgs(map[string]string{"test cm key": "test cm value"}), + kubeadm.WithClusterNetworkFromClusterNetworkingConfig(clusterv1.ClusterNetworkingConfig{ + ServiceDomain: "test dns domain", + Pods: clusterv1.NetworkRanges{ + CIDRBlocks: []string{ + "test pod subnet", + "second test pod subnet", + }, + }, + Services: clusterv1.NetworkRanges{ + CIDRBlocks: []string{ + "test service subnet", + "second test service subnet", + }, + }, + }), + kubeadm.WithControlPlaneEndpoint("test control plane endpoint"), + kubeadm.WithAPIServerCertificateSANs("test san one", "test san two"), + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + actual := &kubeadmv1beta1.ClusterConfiguration{} + kubeadm.SetClusterConfigurationOptions(actual, tc.options...) 
+			if !reflect.DeepEqual(actual, tc.expected) {
+				e, _ := json.Marshal(tc.expected)
+				a, _ := json.Marshal(actual)
+				t.Fatalf("expected vs actual:\n%+v\n%+v", string(e), string(a))
+			}
+		})
+	}
+
+}
+
+func TestJoinConfiguration(t *testing.T) {
+	testcases := []struct {
+		name     string
+		expected *kubeadmv1beta1.JoinConfiguration
+		options  []kubeadm.JoinConfigurationOption
+	}{
+		{
+			name: "simple",
+			expected: &kubeadmv1beta1.JoinConfiguration{
+				Discovery: kubeadmv1beta1.Discovery{
+					BootstrapToken: &kubeadmv1beta1.BootstrapTokenDiscovery{},
+				},
+				ControlPlane: &kubeadmv1beta1.JoinControlPlane{
+					LocalAPIEndpoint: kubeadmv1beta1.APIEndpoint{
+						AdvertiseAddress: "test endpoint",
+						BindPort:         int32(1234),
+					},
+				},
+				NodeRegistration: kubeadmv1beta1.NodeRegistrationOptions{
+					Name: "test name",
+				},
+			},
+			options: []kubeadm.JoinConfigurationOption{
+				kubeadm.WithBootstrapTokenDiscovery(
+					kubeadm.NewBootstrapTokenDiscovery(),
+				),
+				kubeadm.WithLocalAPIEndpointAndPort("test endpoint", 1234),
+				kubeadm.WithJoinNodeRegistrationOptions(
+					kubeadmv1beta1.NodeRegistrationOptions{
+						Name: "test name",
+					},
+				),
+			},
+		},
+	}
+
+	for _, tc := range testcases {
+		t.Run(tc.name, func(t *testing.T) {
+			actual := &kubeadmv1beta1.JoinConfiguration{}
+			kubeadm.SetJoinConfigurationOptions(actual, tc.options...)
+			if !reflect.DeepEqual(actual, tc.expected) {
+				e, _ := json.Marshal(tc.expected)
+				a, _ := json.Marshal(actual)
+				t.Fatalf("expected vs actual:\n%+v\n%+v", string(e), string(a))
+			}
+		})
+	}
+}
diff --git a/pkg/cloud/openstack/services/kubeadm/noderegistration.go b/pkg/cloud/openstack/services/kubeadm/noderegistration.go
new file mode 100644
index 0000000000..493d672b55
--- /dev/null
+++ b/pkg/cloud/openstack/services/kubeadm/noderegistration.go
@@ -0,0 +1,78 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubeadm
+
+import (
+	"strings"
+
+	corev1 "k8s.io/api/core/v1"
+	kubeadmv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
+)
+
+// NodeRegistrationOption is a function that sets a value on a kubeadm NodeRegistrationOptions.
+type NodeRegistrationOption func(*kubeadmv1beta1.NodeRegistrationOptions)
+
+// NewNodeRegistration will create a kubeadmv1beta1.NodeRegistrationOptions with some defaults and the ability to provide customizations.
+func NewNodeRegistration(opts ...NodeRegistrationOption) kubeadmv1beta1.NodeRegistrationOptions {
+	nro := &kubeadmv1beta1.NodeRegistrationOptions{}
+	for _, opt := range opts {
+		opt(nro)
+	}
+	return *nro
+}
+
+// WithTaints will set the taints on the NodeRegistration.
+func WithTaints(taints []corev1.Taint) NodeRegistrationOption {
+	return func(n *kubeadmv1beta1.NodeRegistrationOptions) {
+		n.Taints = taints
+	}
+}
+
+// WithCRISocket sets the location of the container runtime socket.
+func WithCRISocket(socket string) NodeRegistrationOption {
+	return func(n *kubeadmv1beta1.NodeRegistrationOptions) {
+		n.CRISocket = socket
+	}
+}
+
+// WithNodeRegistrationName sets the name of the provided node registration options struct.
+func WithNodeRegistrationName(name string) NodeRegistrationOption {
+	return func(n *kubeadmv1beta1.NodeRegistrationOptions) {
+		n.Name = name
+	}
+}
+
+// WithKubeletExtraArgs sets the extra args on the Kubelet.
+// `node-labels` will be appended to existing node-labels.
+// All other args will overwrite existing args.
+func WithKubeletExtraArgs(args map[string]string) NodeRegistrationOption {
+	return func(n *kubeadmv1beta1.NodeRegistrationOptions) {
+		if n.KubeletExtraArgs == nil {
+			n.KubeletExtraArgs = map[string]string{}
+		}
+		for key, value := range args {
+			if key == "node-labels" {
+				if val, ok := n.KubeletExtraArgs[key]; ok {
+					nodeLabels := strings.Split(val, ",")
+					nodeLabels = append(nodeLabels, value)
+					value = strings.Join(nodeLabels, ",")
+				}
+			}
+			n.KubeletExtraArgs[key] = value
+		}
+	}
+}
diff --git a/pkg/cloud/openstack/services/kubeadm/noderegistration_test.go b/pkg/cloud/openstack/services/kubeadm/noderegistration_test.go
new file mode 100644
index 0000000000..447404c6fc
--- /dev/null
+++ b/pkg/cloud/openstack/services/kubeadm/noderegistration_test.go
@@ -0,0 +1,84 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubeadm_test
+
+import (
+	"reflect"
+	"testing"
+
+	corev1 "k8s.io/api/core/v1"
+	kubeadmv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
+	"sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/kubeadm"
+)
+
+func TestNewNodeRegistration(t *testing.T) {
+	testcases := []struct {
+		name     string
+		expected kubeadmv1beta1.NodeRegistrationOptions
+		actual   kubeadmv1beta1.NodeRegistrationOptions
+	}{
+		{
+			name: "simple test",
+			expected: kubeadmv1beta1.NodeRegistrationOptions{
+				Name:      "test name",
+				CRISocket: "/test/path/to/socket.sock",
+				KubeletExtraArgs: map[string]string{
+					"test key": "test value",
+				},
+				Taints: []corev1.Taint{
+					{
+						Key:    "test",
+						Value:  "test value",
+						Effect: "test effect",
+					},
+				},
+			},
+			actual: kubeadm.NewNodeRegistration(
+				kubeadm.WithNodeRegistrationName("test name"),
+				kubeadm.WithCRISocket("/test/path/to/socket.sock"),
+				kubeadm.WithTaints([]corev1.Taint{
+					{
+						Key:    "test",
+						Value:  "test value",
+						Effect: "test effect",
+					},
+				}),
+				kubeadm.WithKubeletExtraArgs(map[string]string{"test key": "test value"}),
+			),
+		},
+		{
+			name: "test node-label appending",
+			expected: kubeadmv1beta1.NodeRegistrationOptions{
+				KubeletExtraArgs: map[string]string{
+					"node-labels": "test value one,test value two",
+				},
+			},
+			actual: kubeadm.NewNodeRegistration(
+				kubeadm.WithKubeletExtraArgs(map[string]string{"node-labels": "test value one"}),
+				kubeadm.WithKubeletExtraArgs(map[string]string{"node-labels": "test value two"}),
+			),
+		},
+	}
+
+	for _, tc := range testcases {
+		t.Run(tc.name, func(t *testing.T) {
+			if !reflect.DeepEqual(tc.expected, tc.actual) {
+				t.Fatalf("Expected and actual: \n%v\n%v", tc.expected, tc.actual)
+			}
+		})
+	}
+}
diff --git a/pkg/cloud/openstack/services/kubeadm/scheme.go b/pkg/cloud/openstack/services/kubeadm/scheme.go
new file mode 100644
index 0000000000..99ac40f87e
--- /dev/null
+++ b/pkg/cloud/openstack/services/kubeadm/scheme.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubeadm
+
+import (
+	"github.com/pkg/errors"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	kubeadmv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
+	"k8s.io/kubernetes/cmd/kubeadm/app/util"
+	"sigs.k8s.io/controller-runtime/pkg/runtime/scheme"
+)
+
+// GetCodecs returns a type that can be used to deserialize most kubeadm
+// configuration types.
+func GetCodecs() serializer.CodecFactory {
+	sb := &scheme.Builder{GroupVersion: kubeadmv1beta1.SchemeGroupVersion}
+
+	sb.Register(&kubeadmv1beta1.JoinConfiguration{}, &kubeadmv1beta1.InitConfiguration{}, &kubeadmv1beta1.ClusterConfiguration{})
+	kubeadmScheme, err := sb.Build()
+	if err != nil {
+		panic(err)
+	}
+	return serializer.NewCodecFactory(kubeadmScheme)
+}
+
+// ConfigurationToYAML converts a kubeadm configuration type to its YAML
+// representation.
+func ConfigurationToYAML(obj runtime.Object) (string, error) { + initcfg, err := util.MarshalToYamlForCodecs(obj, kubeadmv1beta1.SchemeGroupVersion, GetCodecs()) + if err != nil { + return "", errors.Wrap(err, "failed to marshal configuration") + } + return string(initcfg), nil +} diff --git a/pkg/cloud/openstack/services/networking/network.go b/pkg/cloud/openstack/services/networking/network.go index 69299e2bbf..20078725d1 100644 --- a/pkg/cloud/openstack/services/networking/network.go +++ b/pkg/cloud/openstack/services/networking/network.go @@ -27,6 +27,16 @@ import ( openstackconfigv1 "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1" ) +type createOpts struct { + AdminStateUp *bool `json:"admin_state_up,omitempty"` + Name string `json:"name,omitempty"` + PortSecurityEnabled *bool `json:"port_security_enabled,omitempty"` +} + +func (c createOpts) ToNetworkCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(c, "network") +} + func (s *Service) ReconcileNetwork(clusterName string, clusterProviderSpec *openstackconfigv1.OpenstackClusterProviderSpec, clusterProviderStatus *openstackconfigv1.OpenstackClusterProviderStatus) error { networkName := fmt.Sprintf("%s-cluster-%s", networkPrefix, clusterName) @@ -46,9 +56,16 @@ func (s *Service) ReconcileNetwork(clusterName string, clusterProviderSpec *open return nil } - opts := networks.CreateOpts{ - AdminStateUp: gophercloud.Enabled, - Name: networkName, + var portSecurityEnabled gophercloud.EnabledState + if clusterProviderSpec.DisablePortSecurity { + portSecurityEnabled = gophercloud.Disabled + } else { + portSecurityEnabled = gophercloud.Enabled + } + opts := createOpts{ + AdminStateUp: gophercloud.Enabled, + Name: networkName, + PortSecurityEnabled: portSecurityEnabled, } network, err := networks.Create(s.client, opts).Extract() if err != nil { diff --git a/pkg/cloud/openstack/services/userdata/kubeadm.go b/pkg/cloud/openstack/services/userdata/kubeadm.go new file mode 100644 index 0000000000..2f2627e35f --- /dev/null +++ b/pkg/cloud/openstack/services/userdata/kubeadm.go @@ -0,0 +1,168 @@ +package userdata + +import ( + "fmt" + "k8s.io/klog" + kubeadmv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" + providerv1 "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/certificates" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/kubeadm" + clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" +) + +const ( + localIPV4Lookup = "${OPENSTACK_IPV4_LOCAL}" + + cloudProvider = "openstack" + cloudProviderConfig = "/etc/kubernetes/cloud.conf" + + nodeRole = "node-role.kubernetes.io/node=" +) + +func generateKubeadmConfig(isControlPlane bool, bootstrapToken string, cluster *clusterv1.Cluster, machine *clusterv1.Machine, machineProviderSpec *providerv1.OpenstackProviderSpec, clusterProviderSpec *providerv1.OpenstackClusterProviderSpec) (string, error) { + + caCertHash, err := certificates.GenerateCertificateHash(clusterProviderSpec.CAKeyPair.Cert) + if err != nil { + return "", err + } + + if bootstrapToken != "" && len(cluster.Status.APIEndpoints) == 0 { + return "", fmt.Errorf("no APIEndpoint set yet, waiting until APIEndpoints is set") + } + + if isControlPlane { + if bootstrapToken == "" { + klog.Info("Machine is the first control plane machine for the cluster") + + if !clusterProviderSpec.CAKeyPair.HasCertAndKey() { + return 
"", fmt.Errorf("failed to create controlplane kubeadm config, missing CAKeyPair") + } + + clusterConfigurationCopy := clusterProviderSpec.ClusterConfiguration.DeepCopy() + // Set default values. If they are not set, these two properties are added with an + // empty string and the CoreOS ignition postprocesser fails with "could not find expected key" + if clusterConfigurationCopy.CertificatesDir == "" { + clusterConfigurationCopy.CertificatesDir = "/etc/kubernetes/pki" + } + if string(clusterConfigurationCopy.DNS.Type) == "" { + clusterConfigurationCopy.DNS.Type = kubeadmv1beta1.CoreDNS + } + kubeadm.SetClusterConfigurationOptions( + clusterConfigurationCopy, + kubeadm.WithKubernetesVersion(machine.Spec.Versions.ControlPlane), + kubeadm.WithAPIServerCertificateSANs(localIPV4Lookup), + kubeadm.WithAPIServerExtraArgs(map[string]string{"cloud-provider": cloudProvider}), + kubeadm.WithAPIServerExtraArgs(map[string]string{"cloud-config": cloudProviderConfig}), + kubeadm.WithAPIServerExtraVolumes([]kubeadmv1beta1.HostPathMount{ + { + Name: "cloud", + HostPath: "/etc/kubernetes/cloud.conf", + MountPath: "/etc/kubernetes/cloud.conf", + }, + }), + kubeadm.WithControllerManagerExtraArgs(map[string]string{"cloud-provider": cloudProvider}), + kubeadm.WithControllerManagerExtraArgs(map[string]string{"cloud-config": cloudProviderConfig}), + kubeadm.WithControllerManagerExtraVolumes([]kubeadmv1beta1.HostPathMount{ + { + Name: "cloud", + HostPath: "/etc/kubernetes/cloud.conf", + MountPath: "/etc/kubernetes/cloud.conf", + }, { + Name: "cacert", + HostPath: "/etc/certs/cacert", + MountPath: "/etc/certs/cacert", + }, + }), + kubeadm.WithClusterNetworkFromClusterNetworkingConfig(cluster.Spec.ClusterNetwork), + kubeadm.WithKubernetesVersion(machine.Spec.Versions.ControlPlane), + ) + clusterConfigYAML, err := kubeadm.ConfigurationToYAML(clusterConfigurationCopy) + if err != nil { + return "", err + } + + kubeadm.SetInitConfigurationOptions( + &machineProviderSpec.KubeadmConfiguration.Init, + kubeadm.WithNodeRegistrationOptions( + kubeadm.NewNodeRegistration( + kubeadm.WithTaints(machine.Spec.Taints), + kubeadm.WithKubeletExtraArgs(map[string]string{"cloud-provider": cloudProvider}), + kubeadm.WithKubeletExtraArgs(map[string]string{"cloud-config": cloudProviderConfig}), + ), + ), + kubeadm.WithInitLocalAPIEndpointAndPort(localIPV4Lookup, cluster.Status.APIEndpoints[0].Port), + ) + initConfigYAML, err := kubeadm.ConfigurationToYAML(&machineProviderSpec.KubeadmConfiguration.Init) + if err != nil { + return "", err + } + return fmt.Sprintf("%s\n---\n%s", clusterConfigYAML, initConfigYAML), nil + + } else { + klog.Info("Allowing a machine to join the control plane") + + joinConfigurationCopy := machineProviderSpec.KubeadmConfiguration.Join + // Set default values. 
If they are not set, these two properties are added with an + // empty string and the CoreOS ignition postprocesser fails with "could not find expected key" + joinConfigurationCopy.CACertPath = "/etc/kubernetes/pki/ca.crt" + kubeadm.SetJoinConfigurationOptions( + &joinConfigurationCopy, + kubeadm.WithBootstrapTokenDiscovery( + kubeadm.NewBootstrapTokenDiscovery( + kubeadm.WithAPIServerEndpoint(clusterProviderSpec.ClusterConfiguration.ControlPlaneEndpoint), + kubeadm.WithToken(bootstrapToken), + kubeadm.WithCACertificateHash(caCertHash), + ), + ), + kubeadm.WithTLSBootstrapToken(bootstrapToken), + kubeadm.WithJoinNodeRegistrationOptions( + kubeadm.NewNodeRegistration( + kubeadm.WithTaints(machine.Spec.Taints), + kubeadm.WithKubeletExtraArgs(map[string]string{"cloud-provider": cloudProvider}), + kubeadm.WithKubeletExtraArgs(map[string]string{"cloud-config": cloudProviderConfig}), + ), + ), + // this also creates .controlPlane + kubeadm.WithLocalAPIEndpointAndPort(localIPV4Lookup, cluster.Status.APIEndpoints[0].Port), + ) + joinConfigurationYAML, err := kubeadm.ConfigurationToYAML(&joinConfigurationCopy) + if err != nil { + return "", err + } + + return joinConfigurationYAML, nil + } + } else { + klog.Info("Joining a worker node to the cluster") + + joinConfigurationCopy := machineProviderSpec.KubeadmConfiguration.Join + // Set default values. If they are not set, these two properties are added with an + // empty string and the CoreOS ignition postprocesser fails with "could not find expected key" + joinConfigurationCopy.CACertPath = "/etc/kubernetes/pki/ca.crt" + kubeadm.SetJoinConfigurationOptions( + &joinConfigurationCopy, + kubeadm.WithBootstrapTokenDiscovery( + kubeadm.NewBootstrapTokenDiscovery( + kubeadm.WithAPIServerEndpoint(clusterProviderSpec.ClusterConfiguration.ControlPlaneEndpoint), + kubeadm.WithToken(bootstrapToken), + kubeadm.WithCACertificateHash(caCertHash), + ), + ), + kubeadm.WithTLSBootstrapToken(bootstrapToken), + kubeadm.WithJoinNodeRegistrationOptions( + kubeadm.NewNodeRegistration( + kubeadm.WithTaints(machine.Spec.Taints), + kubeadm.WithKubeletExtraArgs(map[string]string{"cloud-provider": cloudProvider}), + kubeadm.WithKubeletExtraArgs(map[string]string{"cloud-config": cloudProviderConfig}), + kubeadm.WithKubeletExtraArgs(map[string]string{"node-labels": nodeRole}), + ), + ), + ) + joinConfigurationYAML, err := kubeadm.ConfigurationToYAML(&joinConfigurationCopy) + if err != nil { + return "", err + } + + return joinConfigurationYAML, nil + } +} diff --git a/pkg/cloud/openstack/services/userdata/machinescript.go b/pkg/cloud/openstack/services/userdata/machinescript.go index 5bcb2031ef..6e49e5321e 100644 --- a/pkg/cloud/openstack/services/userdata/machinescript.go +++ b/pkg/cloud/openstack/services/userdata/machinescript.go @@ -27,6 +27,8 @@ import ( "path" "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1" "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/options" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/compute" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/provider" "sigs.k8s.io/cluster-api-provider-openstack/pkg/deployer" "sigs.k8s.io/cluster-api/pkg/util" "sigs.k8s.io/controller-runtime/pkg/client" @@ -42,7 +44,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" tokenapi "k8s.io/cluster-bootstrap/token/api" tokenutil "k8s.io/cluster-bootstrap/token/util" - openstackconfigv1 
"sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1" providerv1 "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1" "sigs.k8s.io/cluster-api-provider-openstack/pkg/bootstrap" clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" @@ -56,10 +57,7 @@ const ( ) type setupParams struct { - Token string - Cluster *clusterv1.Cluster - Machine *clusterv1.Machine - MachineSpec *openstackconfigv1.OpenstackProviderSpec + Machine *clusterv1.Machine CACert string CAKey string @@ -70,9 +68,7 @@ type setupParams struct { SaCert string SaKey string - PodCIDR string - ServiceCIDR string - GetMasterEndpoint func() (string, error) + KubeadmConfig string } func GetUserData(controllerClient client.Client, kubeClient kubernetes.Interface, machineProviderSpec *providerv1.OpenstackProviderSpec, cluster *clusterv1.Cluster, machine *clusterv1.Machine) (string, error) { @@ -124,22 +120,30 @@ func GetUserData(controllerClient client.Client, kubeClient kubernetes.Interface var userDataRendered string if len(userData) > 0 && !disableTemplating { - if util.IsControlPlaneMachine(machine) { - userDataRendered, err = masterStartupScript(cluster, machine, string(userData)) - if err != nil { - return "", apierrors.CreateMachine("error creating Openstack instance: %v", err) - } - } else { + isNodeJoin, err := isNodeJoin(controllerClient, kubeClient, cluster, machine) + if err != nil { + return "", apierrors.CreateMachine("error creating Openstack instance: %v", err) + } + + var bootstrapToken string + if isNodeJoin { klog.Info("Creating bootstrap token") - token, err := createBootstrapToken(controllerClient) - if err != nil { - return "", apierrors.CreateMachine("error creating Openstack instance: %v", err) - } - userDataRendered, err = nodeStartupScript(cluster, machine, token, string(userData)) + bootstrapToken, err = createBootstrapToken(controllerClient) if err != nil { return "", apierrors.CreateMachine("error creating Openstack instance: %v", err) } } + + userDataRendered, err = startupScript(cluster, machine, machineProviderSpec, string(userData), bootstrapToken) + if err != nil { + return "", apierrors.CreateMachine("error creating Openstack instance: %v", err) + } + if util.IsControlPlaneMachine(machine) && isNodeJoin { + // A little bit hacky but maybe good enough until v1alpha2. The alternative would be to template + // either the kubeadm command or the whole kubeadm service file. But I think the 2nd option would + // reduce the flexibility too much. 
+ userDataRendered = strings.ReplaceAll(userDataRendered, "kubeadm init", "kubeadm join") + } } else { userDataRendered = string(userData) } @@ -172,6 +176,72 @@ func GetUserData(controllerClient client.Client, kubeClient kubernetes.Interface return userDataRendered, nil } +func isNodeJoin(controllerClient client.Client, kubeClient kubernetes.Interface, cluster *clusterv1.Cluster, machine *clusterv1.Machine) (bool, error) { + + // Worker machines always join + if !util.IsControlPlaneMachine(machine) { + klog.Infof("Worker machine %s is joining the cluster\n", machine.Name) + return true, nil + } + + // Get control plane machines and return false if none found + controlPlaneMachines, err := getControlPlaneMachines(controllerClient) + if err != nil { + return false, apierrors.CreateMachine("error retrieving control plane machines: %v", err) + } + if len(controlPlaneMachines) == 0 { + klog.Infof("Could not find control plane machine: creating first control plane machine %s\n", machine.Name) + return false, nil + } + + // Get control plane machine instances and return false if none found + osProviderClient, clientOpts, err := provider.NewClientFromCluster(kubeClient, cluster) + if err != nil { + return false, err + } + computeService, err := compute.NewService(osProviderClient, clientOpts) + if err != nil { + return false, err + } + instanceList, err := computeService.GetInstanceList(&compute.InstanceListOpts{}) + if err != nil { + return false, err + } + if len(instanceList) == 0 { + klog.Infof("Could not find control plane machine: creating first control plane machine %s\n", machine.Name) + return false, nil + } + + for _, controlPlaneMachine := range controlPlaneMachines { + for _, instance := range instanceList { + if controlPlaneMachine.Name == instance.Name { + klog.Infof("Found control plane machine %s: control plane machine %s is joining the cluster\n", controlPlaneMachine.Name, machine.Name) + return true, nil + } + } + } + klog.Infof("Could not find control plane machine: creating first control plane machine %s\n", machine.Name) + return false, nil +} + +func getControlPlaneMachines(controllerClient client.Client) ([]*clusterv1.Machine, error) { + var controlPlaneMachines []*clusterv1.Machine + msList := &clusterv1.MachineList{} + listOptions := &client.ListOptions{} + err := controllerClient.List(context.TODO(), listOptions, msList) + if err != nil { + return nil, fmt.Errorf("error retrieving machines: %v", err) + } + for _, m := range msList.Items { + if util.IsControlPlaneMachine(&m) { + // we need DeepCopy because if we append the Pointer it will all be + // the same machine + controlPlaneMachines = append(controlPlaneMachines, m.DeepCopy()) + } + } + return controlPlaneMachines, nil +} + func createBootstrapToken(controllerClient client.Client) (string, error) { token, err := tokenutil.GenerateBootstrapToken() if err != nil { @@ -201,32 +271,25 @@ func createBootstrapToken(controllerClient client.Client) (string, error) { } func getWorkloadClusterKubeClient(controllerClient client.Client) (client.Client, error) { - var master *clusterv1.Machine var cluster *clusterv1.Cluster - msList := &clusterv1.MachineList{} - listOptions := &client.ListOptions{} - err := controllerClient.List(context.TODO(), listOptions, msList) + controlPlaneMachines, err := getControlPlaneMachines(controllerClient) if err != nil { - return nil, fmt.Errorf("error retrieving machines: %v", err) - } - for _, m := range msList.Items { - if util.IsControlPlaneMachine(&m) { - master = &m - break - } + return 
nil, err } - if master == nil { - return nil, fmt.Errorf("could not find master node") + + if len(controlPlaneMachines) == 0 { + return nil, fmt.Errorf("could not find control plane nodes") } clusterList := &clusterv1.ClusterList{} + listOptions := &client.ListOptions{} err = controllerClient.List(context.TODO(), listOptions, clusterList) if err != nil { return nil, fmt.Errorf("error retrieving clusters: %v", err) } for _, c := range clusterList.Items { - if clusterName, ok := master.Labels[clusterv1.MachineClusterLabelName]; ok && clusterName == c.Name { + if clusterName, ok := controlPlaneMachines[0].Labels[clusterv1.MachineClusterLabelName]; ok && clusterName == c.Name { cluster = &c } } @@ -234,7 +297,7 @@ func getWorkloadClusterKubeClient(controllerClient client.Client) (client.Client return nil, fmt.Errorf("could not find cluster") } - kubeConfig, err := deployer.New().GetKubeConfig(cluster, master) + kubeConfig, err := deployer.New().GetKubeConfig(cluster, controlPlaneMachines[0]) if err != nil { return nil, err } @@ -257,8 +320,8 @@ func getWorkloadClusterKubeClient(controllerClient client.Client) (client.Client return mgr.GetClient(), nil } -func masterStartupScript(cluster *clusterv1.Cluster, machine *clusterv1.Machine, script string) (string, error) { - clusterProviderSpec, err := openstackconfigv1.ClusterSpecFromProviderSpec(cluster.Spec.ProviderSpec) +func startupScript(cluster *clusterv1.Cluster, machine *clusterv1.Machine, machineProviderSpec *providerv1.OpenstackProviderSpec, userdata, bootstrapToken string) (string, error) { + clusterProviderSpec, err := providerv1.ClusterSpecFromProviderSpec(cluster.Spec.ProviderSpec) if err != nil { return "", err } @@ -267,13 +330,12 @@ func masterStartupScript(cluster *clusterv1.Cluster, machine *clusterv1.Machine, return "", err } - machineProviderSpec, err := openstackconfigv1.MachineSpecFromProviderSpec(machine.Spec.ProviderSpec) + kubeadmConfig, err := generateKubeadmConfig(util.IsControlPlaneMachine(machine), bootstrapToken, cluster, machine, machineProviderSpec, clusterProviderSpec) if err != nil { return "", err } params := setupParams{ - Cluster: cluster, CACert: string(clusterProviderSpec.CAKeyPair.Cert), CAKey: string(clusterProviderSpec.CAKeyPair.Key), EtcdCACert: string(clusterProviderSpec.EtcdCAKeyPair.Cert), @@ -283,52 +345,17 @@ func masterStartupScript(cluster *clusterv1.Cluster, machine *clusterv1.Machine, SaCert: string(clusterProviderSpec.SAKeyPair.Cert), SaKey: string(clusterProviderSpec.SAKeyPair.Key), Machine: machine, - MachineSpec: machineProviderSpec, - PodCIDR: getSubnet(cluster.Spec.ClusterNetwork.Pods), - ServiceCIDR: getSubnet(cluster.Spec.ClusterNetwork.Services), + KubeadmConfig: kubeadmConfig, } fMap := map[string]interface{}{ "EscapeNewLines": templateEscapeNewLines, "Indent": templateYAMLIndent, } - - masterStartUpScript := template.Must(template.New("masterStartUp").Funcs(fMap).Parse(script)) + startUpScript := template.Must(template.New("startUp").Funcs(fMap).Parse(userdata)) var buf bytes.Buffer - if err := masterStartUpScript.Execute(&buf, params); err != nil { - return "", err - } - return buf.String(), nil -} - -func nodeStartupScript(cluster *clusterv1.Cluster, machine *clusterv1.Machine, token, script string) (string, error) { - machineProviderSpec, err := openstackconfigv1.MachineSpecFromProviderSpec(machine.Spec.ProviderSpec) - if err != nil { - return "", err - } - - GetMasterEndpoint := func() (string, error) { - if len(cluster.Status.APIEndpoints) == 0 { - return "", errors.New("no cluster 
status found") - } - return getEndpoint(cluster.Status.APIEndpoints[0]), nil - } - - params := setupParams{ - Token: token, - Cluster: cluster, - Machine: machine, - MachineSpec: machineProviderSpec, - PodCIDR: getSubnet(cluster.Spec.ClusterNetwork.Pods), - ServiceCIDR: getSubnet(cluster.Spec.ClusterNetwork.Services), - GetMasterEndpoint: GetMasterEndpoint, - } - - nodeStartUpScript := template.Must(template.New("nodeStartUp").Parse(script)) - - var buf bytes.Buffer - if err := nodeStartUpScript.Execute(&buf, params); err != nil { + if err := startUpScript.Execute(&buf, params); err != nil { return "", err } return buf.String(), nil @@ -357,18 +384,6 @@ func isKeyPairValid(cert, key []byte) bool { return len(cert) > 0 && len(key) > 0 } -func getEndpoint(apiEndpoint clusterv1.APIEndpoint) string { - return fmt.Sprintf("%s:%d", apiEndpoint.Host, apiEndpoint.Port) -} - -// Just a temporary hack to grab a single range from the config. -func getSubnet(netRange clusterv1.NetworkRanges) string { - if len(netRange.CIDRBlocks) == 0 { - return "" - } - return netRange.CIDRBlocks[0] -} - func templateEscapeNewLines(s string) string { return strings.ReplaceAll(s, "\n", "\\n") } diff --git a/pkg/cloud/openstack/services/userdata/machinescript_test.go b/pkg/cloud/openstack/services/userdata/machinescript_test.go deleted file mode 100644 index d922bc9512..0000000000 --- a/pkg/cloud/openstack/services/userdata/machinescript_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package userdata - -import ( - "testing" - - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - "sigs.k8s.io/yaml" -) - -const providerSpecYAML = ` -value: - apiVersion: "openstackproviderconfig/v1alpha1" - kind: "OpenstackProviderSpec" -` - -func TestNodeStartupScriptEmpty(t *testing.T) { - cluster := &clusterv1.Cluster{} - machine := &clusterv1.Machine{} - err := yaml.Unmarshal([]byte(providerSpecYAML), &machine.Spec.ProviderSpec) - if err != nil { - t.Errorf("%v", err) - return - } - - token := "" - script_template := "" - - // `machine` has no endpoint specified so having `call - // .GetMasterEndpoint` in the script template would fail. But we - // don't, so this should succeed. - script, err := nodeStartupScript(cluster, machine, token, script_template) - if err != nil { - t.Errorf("%v", err) - return - } - - if script != "" { - t.Errorf("Expected script, found %q instead", script) - } -} - -func TestNodeStartupScriptEndpointError(t *testing.T) { - cluster := &clusterv1.Cluster{} - machine := &clusterv1.Machine{} - err := yaml.Unmarshal([]byte(providerSpecYAML), &machine.Spec.ProviderSpec) - if err != nil { - t.Errorf("%v", err) - return - } - - token := "" - script_template := "{{ call .GetMasterEndpoint }}" - // `machine` has no endpoint specified so having `call - // .GetMasterEndpoint` in the template should fail. - script, err := nodeStartupScript(cluster, machine, token, script_template) - if err == nil { - t.Errorf("Expected GetMasterEndpoint to fail, but it succeeded. 
Startup script %q", script) - } -} - -func TestNodeStartupScriptWithEndpoint(t *testing.T) { - cluster := clusterv1.Cluster{} - cluster.Status.APIEndpoints = make([]clusterv1.APIEndpoint, 1) - cluster.Status.APIEndpoints[0] = clusterv1.APIEndpoint{ - Host: "example.com", - Port: 8080, - } - - machine := &clusterv1.Machine{} - err := yaml.Unmarshal([]byte(providerSpecYAML), &machine.Spec.ProviderSpec) - if err != nil { - t.Errorf("%v", err) - return - } - - token := "" - script_template := "{{ call .GetMasterEndpoint }}" - script, err := nodeStartupScript(&cluster, machine, token, script_template) - if err != nil { - t.Errorf("%v", err) - return - } - - expected := "example.com:8080" - if script != expected { - t.Errorf("Expected %q master endpoint, found %q instead", expected, script) - } -} diff --git a/pkg/deployer/deployer.go b/pkg/deployer/deployer.go index 706e777dc4..efa3614100 100644 --- a/pkg/deployer/deployer.go +++ b/pkg/deployer/deployer.go @@ -4,9 +4,7 @@ import ( "fmt" "github.com/pkg/errors" "k8s.io/client-go/tools/clientcmd" - "k8s.io/klog" providerv1 "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1" - constants "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/contants" "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/certificates" clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" ) @@ -20,28 +18,14 @@ func New() *Deployer { return &Deployer{} } -// GetIP returns the IP of a machine, but this is going away. +// GetIP returns the controlPlaneEndpoint of the Kubernetes cluster, which makes sense in the current +// contexts where this method is called. Seems to be going away anyway with v1alpha2. func (d *Deployer) GetIP(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (string, error) { - if machine.ObjectMeta.Annotations != nil { - if ip, ok := machine.ObjectMeta.Annotations[constants.OpenstackIPAnnotationKey]; ok { - clusterProviderSpec, err := providerv1.ClusterSpecFromProviderSpec(cluster.Spec.ProviderSpec) - if err != nil { - return "", fmt.Errorf("could not get IP: %v", err) - } - var endpoint string - if clusterProviderSpec.ManagedAPIServerLoadBalancer { - endpoint = fmt.Sprintf("%s:%d", ip, clusterProviderSpec.APIServerLoadBalancerPort) - } else { - // TODO: replace hardcoded port 443 as soon as controlPlaneEndpoint is specified via - // ClusterConfiguration (in Cluster CRD) - endpoint = fmt.Sprintf("%s:443", ip) - } - klog.Infof("Returning endpoint from machine annotation %s", endpoint) - return endpoint, nil - } + clusterProviderSpec, err := providerv1.ClusterSpecFromProviderSpec(cluster.Spec.ProviderSpec) + if err != nil { + return "", fmt.Errorf("could not get IP: %v", err) } - - return "", errors.New("could not get IP") + return clusterProviderSpec.ClusterConfiguration.ControlPlaneEndpoint, nil } // GetKubeConfig returns the kubeConfig after the bootstrap process is complete. 
@@ -67,27 +51,12 @@ func (d *Deployer) GetKubeConfig(cluster *clusterv1.Cluster, master *clusterv1.M return "", errors.New("key not found in clusterProviderSpec") } - var apiServerEndpoint string - if clusterProviderSpec.ManagedAPIServerLoadBalancer { - apiServerEndpoint = fmt.Sprintf("https://%s:%d", clusterProviderSpec.APIServerLoadBalancerFloatingIP, clusterProviderSpec.APIServerLoadBalancerPort) - } else { - var endpoint string - if master != nil { - endpoint, err = d.GetIP(cluster, master) - if err != nil { - return "", err - } - } else { - // This case means no master has been created yet, we need get from cluster info anyway - if len(clusterProviderSpec.MasterIP) == 0 { - return "", errors.New("MasterIP in cluster spec not set") - } - endpoint = clusterProviderSpec.MasterIP - } - apiServerEndpoint = fmt.Sprintf("https://%s", endpoint) + apiServerEndpoint, err := d.GetIP(cluster, master) + if err != nil { + return "", err } - cfg, err := certificates.NewKubeconfig(cluster.Name, apiServerEndpoint, cert, key) + cfg, err := certificates.NewKubeconfig(cluster.Name, fmt.Sprintf("https://%s", apiServerEndpoint), cert, key) if err != nil { return "", errors.Wrap(err, "failed to generate a kubeconfig") } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel index 20862228ef..c99f83e585 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel @@ -11,7 +11,6 @@ go_library( "errors.go", "fieldmask.go", "handler.go", - "marshal_httpbodyproto.go", "marshal_json.go", "marshal_jsonpb.go", "marshal_proto.go", @@ -30,7 +29,6 @@ go_library( "@com_github_golang_protobuf//jsonpb:go_default_library_gen", "@com_github_golang_protobuf//proto:go_default_library", "@com_github_golang_protobuf//protoc-gen-go/generator:go_default_library_gen", - "@go_googleapis//google/api:httpbody_go_proto", "@io_bazel_rules_go//proto/wkt:any_go_proto", "@io_bazel_rules_go//proto/wkt:duration_go_proto", "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", @@ -51,7 +49,6 @@ go_test( "errors_test.go", "fieldmask_test.go", "handler_test.go", - "marshal_httpbodyproto_test.go", "marshal_json_test.go", "marshal_jsonpb_test.go", "marshal_proto_test.go", @@ -68,7 +65,6 @@ go_test( "@com_github_golang_protobuf//jsonpb:go_default_library_gen", "@com_github_golang_protobuf//proto:go_default_library", "@com_github_golang_protobuf//ptypes:go_default_library_gen", - "@go_googleapis//google/api:httpbody_go_proto", "@go_googleapis//google/rpc:errdetails_go_proto", "@io_bazel_rules_go//proto/wkt:duration_go_proto", "@io_bazel_rules_go//proto/wkt:empty_go_proto", diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go index 2af900650d..1fc63f7f58 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go @@ -1,7 +1,6 @@ package runtime import ( - "errors" "fmt" "io" "net/http" @@ -9,12 +8,13 @@ import ( "context" "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" "github.com/grpc-ecosystem/grpc-gateway/internal" + "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" ) -var errEmptyResponse = errors.New("empty response") - // ForwardResponseStream forwards the stream from gRPC server to REST client. 
func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { f, ok := w.(http.Flusher) @@ -53,18 +53,18 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal return } if err != nil { - handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) + handleForwardResponseStreamError(wroteHeader, marshaler, w, err) return } if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { - handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) + handleForwardResponseStreamError(wroteHeader, marshaler, w, err) return } - buf, err := marshaler.Marshal(streamChunk(ctx, resp, mux.streamErrorHandler)) + buf, err := marshaler.Marshal(streamChunk(resp, nil)) if err != nil { grpclog.Infof("Failed to marshal response chunk: %v", err) - handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) + handleForwardResponseStreamError(wroteHeader, marshaler, w, err) return } if _, err = w.Write(buf); err != nil { @@ -124,7 +124,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha contentType := marshaler.ContentType() // Check marshaler on run time in order to keep backwards compatability - // An interface param needs to be added to the ContentType() function on + // An interface param needs to be added to the ContentType() function on // the Marshal interface to be able to remove this check if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok { contentType = httpBodyMarshaler.ContentTypeFromMessage(resp) @@ -168,42 +168,48 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re return nil } -func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) { - serr := streamError(ctx, mux.streamErrorHandler, err) - if !wroteHeader { - w.WriteHeader(int(serr.HttpCode)) - } - buf, merr := marshaler.Marshal(errorChunk(serr)) +func handleForwardResponseStreamError(wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, err error) { + buf, merr := marshaler.Marshal(streamChunk(nil, err)) if merr != nil { grpclog.Infof("Failed to marshal an error: %v", merr) return } + if !wroteHeader { + s, ok := status.FromError(err) + if !ok { + s = status.New(codes.Unknown, err.Error()) + } + w.WriteHeader(HTTPStatusFromCode(s.Code())) + } if _, werr := w.Write(buf); werr != nil { grpclog.Infof("Failed to notify error to client: %v", werr) return } } -// streamChunk returns a chunk in a response stream for the given result. The -// given errHandler is used to render an error chunk if result is nil. 
-func streamChunk(ctx context.Context, result proto.Message, errHandler StreamErrorHandlerFunc) map[string]proto.Message { +func streamChunk(result proto.Message, err error) map[string]proto.Message { + if err != nil { + grpcCode := codes.Unknown + grpcMessage := err.Error() + var grpcDetails []*any.Any + if s, ok := status.FromError(err); ok { + grpcCode = s.Code() + grpcMessage = s.Message() + grpcDetails = s.Proto().GetDetails() + } + httpCode := HTTPStatusFromCode(grpcCode) + return map[string]proto.Message{ + "error": &internal.StreamError{ + GrpcCode: int32(grpcCode), + HttpCode: int32(httpCode), + Message: grpcMessage, + HttpStatus: http.StatusText(httpCode), + Details: grpcDetails, + }, + } + } if result == nil { - return errorChunk(streamError(ctx, errHandler, errEmptyResponse)) + return streamChunk(nil, fmt.Errorf("empty response")) } return map[string]proto.Message{"result": result} } - -// streamError returns the payload for the final message in a response stream -// that represents the given err. -func streamError(ctx context.Context, errHandler StreamErrorHandlerFunc, err error) *StreamError { - serr := errHandler(ctx, err) - if serr != nil { - return serr - } - // TODO: log about misbehaving stream error handler? - return DefaultHTTPStreamErrorHandler(ctx, err) -} - -func errorChunk(err *StreamError) map[string]proto.Message { - return map[string]proto.Message{"error": (*internal.StreamError)(err)} -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go index 2fbb262872..3530dddd0a 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go @@ -151,15 +151,7 @@ func (d DecoderWrapper) Decode(v interface{}) error { // NewEncoder returns an Encoder which writes JSON stream into "w". func (j *JSONPb) NewEncoder(w io.Writer) Encoder { - return EncoderFunc(func(v interface{}) error { - if err := j.marshalTo(w, v); err != nil { - return err - } - // mimic json.Encoder by adding a newline (makes output - // easier to read when it contains multiple encoded items) - _, err := w.Write(j.Delimiter()) - return err - }) + return EncoderFunc(func(v interface{}) error { return j.marshalTo(w, v) }) } func unmarshalJSONPb(data []byte, v interface{}) error { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go index 093373a204..ec81e55b5e 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go @@ -16,14 +16,6 @@ import ( // A HandlerFunc handles a specific pair of path pattern and HTTP method. type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) -// ErrUnknownURI is the error supplied to a custom ProtoErrorHandlerFunc when -// a request is received with a URI path that does not match any registered -// service method. -// -// Since gRPC servers return an "Unimplemented" code for requests with an -// unrecognized URI path, this error also has a gRPC "Unimplemented" code. -var ErrUnknownURI = status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) - // ServeMux is a request multiplexer for grpc-gateway. // It matches http requests to patterns and invokes the corresponding handler. 
type ServeMux struct { @@ -34,7 +26,6 @@ type ServeMux struct { incomingHeaderMatcher HeaderMatcherFunc outgoingHeaderMatcher HeaderMatcherFunc metadataAnnotators []func(context.Context, *http.Request) metadata.MD - streamErrorHandler StreamErrorHandlerFunc protoErrorHandler ProtoErrorHandlerFunc disablePathLengthFallback bool } @@ -119,27 +110,12 @@ func WithDisablePathLengthFallback() ServeMuxOption { } } -// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream -// error handler, which allows for customizing the error trailer for server-streaming -// calls. -// -// For stream errors that occur before any response has been written, the mux's -// ProtoErrorHandler will be invoked. However, once data has been written, the errors must -// be handled differently: they must be included in the response body. The response body's -// final message will include the error details returned by the stream error handler. -func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption { - return func(serveMux *ServeMux) { - serveMux.streamErrorHandler = fn - } -} - // NewServeMux returns a new ServeMux whose internal mapping is empty. func NewServeMux(opts ...ServeMuxOption) *ServeMux { serveMux := &ServeMux{ handlers: make(map[string][]handler), forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), marshalers: makeMarshalerMIMERegistry(), - streamErrorHandler: DefaultHTTPStreamErrorHandler, } for _, opt := range opts { @@ -198,7 +174,8 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { if idx := strings.LastIndex(components[l-1], ":"); idx == 0 { if s.protoErrorHandler != nil { _, outboundMarshaler := MarshalerForRequest(s, r) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) + sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) } else { OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) } @@ -258,7 +235,8 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { } if s.protoErrorHandler != nil { _, outboundMarshaler := MarshalerForRequest(s, r) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) + sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusMethodNotAllowed)) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) } else { OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) } @@ -268,7 +246,8 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { if s.protoErrorHandler != nil { _, outboundMarshaler := MarshalerForRequest(s, r) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) + sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) } else { OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go index ca76324efb..b7fa32e45d 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go @@ -1,25 +1,15 @@ package runtime import ( - "context" "io" "net/http" - "github.com/golang/protobuf/ptypes/any" - 
"github.com/grpc-ecosystem/grpc-gateway/internal" + "context" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" ) -// StreamErrorHandlerFunc accepts an error as a gRPC error generated via status package and translates it into a -// a proto struct used to represent error at the end of a stream. -type StreamErrorHandlerFunc func(context.Context, error) *StreamError - -// StreamError is the payload for the final message in a server stream in the event that the server returns an -// error after a response message has already been sent. -type StreamError internal.StreamError - // ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request. type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error) @@ -45,7 +35,7 @@ func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler contentType := marshaler.ContentType() // Check marshaler on run time in order to keep backwards compatability - // An interface param needs to be added to the ContentType() function on + // An interface param needs to be added to the ContentType() function on // the Marshal interface to be able to remove this check if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok { pb := s.Proto() @@ -78,29 +68,3 @@ func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler handleForwardResponseTrailer(w, md) } - -// DefaultHTTPStreamErrorHandler converts the given err into a *StreamError via -// default logic. -// -// It extracts the gRPC status from err if possible. The fields of the status are -// used to populate the returned StreamError, and the HTTP status code is derived -// from the gRPC code via HTTPStatusFromCode. If the given err does not contain a -// gRPC status, an "Unknown" gRPC code is used and "Internal Server Error" HTTP code. -func DefaultHTTPStreamErrorHandler(_ context.Context, err error) *StreamError { - grpcCode := codes.Unknown - grpcMessage := err.Error() - var grpcDetails []*any.Any - if s, ok := status.FromError(err); ok { - grpcCode = s.Code() - grpcMessage = s.Message() - grpcDetails = s.Proto().GetDetails() - } - httpCode := HTTPStatusFromCode(grpcCode) - return &StreamError{ - GrpcCode: int32(grpcCode), - HttpCode: int32(httpCode), - Message: grpcMessage, - HttpStatus: http.StatusText(httpCode), - Details: grpcDetails, - } -} diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 6e69b28c27..ca34e8aa0d 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -11,46 +11,22 @@ In order to protect both you and ourselves, you will need to sign the ## Guidelines for Pull Requests How to get your contributions merged smoothly and quickly. - -- Create **small PRs** that are narrowly focused on **addressing a single - concern**. We often times receive PRs that are trying to fix several things at - a time, but only one fix is considered acceptable, nothing gets merged and - both author's & review's time is wasted. Create more PRs to address different - concerns and everyone will be happy. - -- The grpc package should only depend on standard Go packages and a small number - of exceptions. If your contribution introduces new dependencies which are NOT - in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a - discussion with gRPC-Go authors and consultants. 
- -- For speculative changes, consider opening an issue and discussing it first. If - you are suggesting a behavioral or API change, consider starting with a [gRFC - proposal](https://github.com/grpc/proposal). - -- Provide a good **PR description** as a record of **what** change is being made - and **why** it was made. Link to a github issue if it exists. - -- Don't fix code style and formatting unless you are already changing that line - to address an issue. PRs with irrelevant changes won't be merged. If you do - want to fix formatting or style, do that in a separate PR. - -- Unless your PR is trivial, you should expect there will be reviewer comments - that you'll need to address before merging. We expect you to be reasonably - responsive to those comments, otherwise the PR will be closed after 2-3 weeks - of inactivity. - -- Maintain **clean commit history** and use **meaningful commit messages**. PRs - with messy commit history are difficult to review and won't be merged. Use - `rebase -i upstream/master` to curate your commit history and/or to bring in - latest changes from master (but avoid rebasing in the middle of a code - review). - -- Keep your PR up to date with upstream/master (if there are merge conflicts, we - can't really merge your change). - -- **All tests need to be passing** before your change can be merged. We - recommend you **run tests locally** before creating your PR to catch breakages - early on. + +- Create **small PRs** that are narrowly focused on **addressing a single concern**. We often times receive PRs that are trying to fix several things at a time, but only one fix is considered acceptable, nothing gets merged and both author's & review's time is wasted. Create more PRs to address different concerns and everyone will be happy. + +- For speculative changes, consider opening an issue and discussing it first. If you are suggesting a behavioral or API change, consider starting with a [gRFC proposal](https://github.com/grpc/proposal). + +- Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a github issue if it exists. + +- Don't fix code style and formatting unless you are already changing that line to address an issue. PRs with irrelevant changes won't be merged. If you do want to fix formatting or style, do that in a separate PR. + +- Unless your PR is trivial, you should expect there will be reviewer comments that you'll need to address before merging. We expect you to be reasonably responsive to those comments, otherwise the PR will be closed after 2-3 weeks of inactivity. + +- Maintain **clean commit history** and use **meaningful commit messages**. PRs with messy commit history are difficult to review and won't be merged. Use `rebase -i upstream/master` to curate your commit history and/or to bring in latest changes from master (but avoid rebasing in the middle of a code review). + +- Keep your PR up to date with upstream/master (if there are merge conflicts, we can't really merge your change). + +- **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. - `make all` to test everything, OR - `make vet` to catch vet errors - `make test` to run the tests @@ -58,3 +34,4 @@ How to get your contributions merged smoothly and quickly. - optional `make testappengine` to run tests with appengine - Exceptions to the rules can be made if there's a compelling reason for doing so. 
+ diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index afbc43db51..f5eec6717f 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -1,96 +1,42 @@ # gRPC-Go -[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) -[![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) -[![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) +[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) -The Go implementation of [gRPC](https://grpc.io/): A high performance, open -source, general RPC framework that puts mobile and HTTP/2 first. For more -information see the [gRPC Quick Start: -Go](https://grpc.io/docs/quickstart/go.html) guide. +The Go implementation of [gRPC](https://grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start: Go](https://grpc.io/docs/quickstart/go.html) guide. Installation ------------ -To install this package, you need to install Go and setup your Go workspace on -your computer. The simplest way to install the library is to run: +To install this package, you need to install Go and setup your Go workspace on your computer. The simplest way to install the library is to run: ``` $ go get -u google.golang.org/grpc ``` -With Go module support (Go 1.11+), simply `import "google.golang.org/grpc"` in -your source code and `go [build|run|test]` will automatically download the -necessary dependencies ([Go modules -ref](https://github.com/golang/go/wiki/Modules)). - -If you are trying to access grpc-go from within China, please see the -[FAQ](#FAQ) below. - Prerequisites ------------- + gRPC-Go requires Go 1.9 or later. +Constraints +----------- +The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants. + Documentation ------------- -- See [godoc](https://godoc.org/google.golang.org/grpc) for package and API - descriptions. -- Documentation on specific topics can be found in the [Documentation - directory](Documentation/). -- Examples can be found in the [examples directory](examples/). +See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/). Performance ----------- -Performance benchmark data for grpc-go and other languages is maintained in -[this -dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696). +See the current benchmarks for some of the languages supported in [this dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696). Status ------ -General Availability [Google Cloud Platform Launch -Stages](https://cloud.google.com/terms/launch-stages). 
+General Availability [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). FAQ --- -#### I/O Timeout Errors - -The `golang.org` domain may be blocked from some countries. `go get` usually -produces an error like the following when this happens: - -``` -$ go get -u google.golang.org/grpc -package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout) -``` - -To build Go code, there are several options: - -- Set up a VPN and access google.golang.org through that. - -- Without Go module support: `git clone` the repo manually: - - ``` - git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc - ``` - - You will need to do the same for all of grpc's dependencies in `golang.org`, - e.g. `golang.org/x/net`. - -- With Go module support: it is possible to use the `replace` feature of `go - mod` to create aliases for golang.org packages. In your project's directory: - - ``` - go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest - go mod tidy - go mod vendor - go build -mod=vendor - ``` - - Again, this will need to be done for all transitive dependencies hosted on - golang.org as well. Please refer to [this - issue](https://github.com/golang/go/issues/28652) in the golang repo regarding - this concern. - #### Compiling error, undefined: grpc.SupportPackageIsVersion Please update proto package, gRPC package and rebuild the proto files: diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 4b72daa8bb..fafede238c 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -138,8 +138,6 @@ type ClientConn interface { ResolveNow(resolver.ResolveNowOption) // Target returns the dial target for this ClientConn. - // - // Deprecated: Use the Target field in the BuildOptions instead. Target() string } @@ -157,10 +155,6 @@ type BuildOptions struct { Dialer func(context.Context, string) (net.Conn, error) // ChannelzParentID is the entity parent's channelz unique identification number. ChannelzParentID int64 - // Target contains the parsed address info of the dial target. It is the same resolver.Target as - // passed to the resolver. - // See the documentation for the resolver.Target type for details about what it contains. - Target resolver.Target } // Builder creates a balancer. 
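The `BuildOptions` change above has a practical consequence for balancer implementations: with the parsed `Target` field gone, a balancer can no longer read the endpoint from its build options and has to recover it from `cc.Target()`, which the wrapper diff below does by splitting on the `:///` scheme separator. A minimal, self-contained sketch of that parsing (the function name is mine, not from the patch):

```go
package main

import (
	"fmt"
	"strings"
)

// endpointFromTarget recovers the endpoint part of a gRPC dial target such as
// "dns:///example.com:443" once BuildOptions.Target is no longer available.
// Targets without a scheme separator are returned unchanged.
func endpointFromTarget(target string) string {
	parts := strings.Split(target, ":///")
	if len(parts) >= 2 {
		return parts[1]
	}
	return target
}

func main() {
	fmt.Println(endpointFromTarget("dns:///example.com:443")) // example.com:443
	fmt.Println(endpointFromTarget("example.com:443"))        // example.com:443
}
```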
diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go index 66e9a44ac4..29bda6353d 100644 --- a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go @@ -20,6 +20,7 @@ package grpc import ( "context" + "strings" "sync" "google.golang.org/grpc/balancer" @@ -33,7 +34,13 @@ type balancerWrapperBuilder struct { } func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - bwb.b.Start(opts.Target.Endpoint, BalancerConfig{ + targetAddr := cc.Target() + targetSplitted := strings.Split(targetAddr, ":///") + if len(targetSplitted) >= 2 { + targetAddr = targetSplitted[1] + } + + bwb.b.Start(targetAddr, BalancerConfig{ DialCreds: opts.DialCreds, Dialer: opts.Dialer, }) @@ -42,7 +49,7 @@ func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.B balancer: bwb.b, pickfirst: pickfirst, cc: cc, - targetAddr: opts.Target.Endpoint, + targetAddr: targetAddr, startCh: make(chan struct{}), conns: make(map[resolver.Address]balancer.SubConn), connSt: make(map[balancer.SubConn]*scState), @@ -113,7 +120,7 @@ func (bw *balancerWrapper) lbWatcher() { } for addrs := range notifyCh { - grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs) + grpclog.Infof("balancerWrapper: got update addr from Notify: %v\n", addrs) if bw.pickfirst { var ( oldA resolver.Address diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 78e6d178a1..bd2d2b3177 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -137,9 +137,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * opt.apply(&cc.dopts) } - chainUnaryClientInterceptors(cc) - chainStreamClientInterceptors(cc) - defer func() { if err != nil { cc.Close() @@ -293,7 +290,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * CredsBundle: cc.dopts.copts.CredsBundle, Dialer: cc.dopts.copts.Dialer, ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, } // Build the resolver. @@ -331,68 +327,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * return cc, nil } -// chainUnaryClientInterceptors chains all unary client interceptors into one. -func chainUnaryClientInterceptors(cc *ClientConn) { - interceptors := cc.dopts.chainUnaryInts - // Prepend dopts.unaryInt to the chaining interceptors if it exists, since unaryInt will - // be executed before any other chained interceptors. - if cc.dopts.unaryInt != nil { - interceptors = append([]UnaryClientInterceptor{cc.dopts.unaryInt}, interceptors...) - } - var chainedInt UnaryClientInterceptor - if len(interceptors) == 0 { - chainedInt = nil - } else if len(interceptors) == 1 { - chainedInt = interceptors[0] - } else { - chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { - return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) - } - } - cc.dopts.unaryInt = chainedInt -} - -// getChainUnaryInvoker recursively generate the chained unary invoker. 
-func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, finalInvoker UnaryInvoker) UnaryInvoker { - if curr == len(interceptors)-1 { - return finalInvoker - } - return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { - return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) - } -} - -// chainStreamClientInterceptors chains all stream client interceptors into one. -func chainStreamClientInterceptors(cc *ClientConn) { - interceptors := cc.dopts.chainStreamInts - // Prepend dopts.streamInt to the chaining interceptors if it exists, since streamInt will - // be executed before any other chained interceptors. - if cc.dopts.streamInt != nil { - interceptors = append([]StreamClientInterceptor{cc.dopts.streamInt}, interceptors...) - } - var chainedInt StreamClientInterceptor - if len(interceptors) == 0 { - chainedInt = nil - } else if len(interceptors) == 1 { - chainedInt = interceptors[0] - } else { - chainedInt = func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) { - return interceptors[0](ctx, desc, cc, method, getChainStreamer(interceptors, 0, streamer), opts...) - } - } - cc.dopts.streamInt = chainedInt -} - -// getChainStreamer recursively generate the chained client stream constructor. -func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStreamer Streamer) Streamer { - if curr == len(interceptors)-1 { - return finalStreamer - } - return func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { - return interceptors[curr+1](ctx, desc, cc, method, getChainStreamer(interceptors, curr+1, finalStreamer), opts...) - } -} - // connectivityStateManager keeps the connectivity.State of ClientConn. // This struct will eventually be exported so the balancers can access it. type connectivityStateManager struct { @@ -603,7 +537,6 @@ func (cc *ClientConn) updateResolverState(s resolver.State) error { } else if cc.balancerWrapper == nil { // Balancer dial option was set, and this is the first time handling // resolved addresses. Build a balancer with dopts.balancerBuilder. - cc.curBalancerName = cc.dopts.balancerBuilder.Name() cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) } diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 02738839dd..d9b9d5782e 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -132,8 +132,7 @@ const ( // Unavailable indicates the service is currently unavailable. // This is a most likely a transient condition and may be corrected - // by retrying with a backoff. Note that it is not always safe to retry - // non-idempotent operations. + // by retrying with a backoff. // // See litmus test above for deciding between FailedPrecondition, // Aborted, and Unavailable. diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 69c003159d..e114fecbb7 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -39,12 +39,8 @@ import ( // dialOptions configure a Dial call. dialOptions are set by the DialOption // values passed to Dial. 
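The sentence dropped from the `Unavailable` comment is still the operative caveat for anyone retrying on that code. A hedged sketch of the retry-with-backoff pattern the comment alludes to (helper name and backoff constants are illustrative):

```go
package retry

import (
	"context"
	"time"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// retryUnavailable retries fn with exponential backoff while it fails with
// codes.Unavailable. Use only for idempotent RPCs: Unavailable does not
// guarantee the server never began processing the request.
func retryUnavailable(ctx context.Context, attempts int, fn func(context.Context) error) error {
	backoff := 100 * time.Millisecond
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(ctx); status.Code(err) != codes.Unavailable {
			return err
		}
		select {
		case <-time.After(backoff):
			backoff *= 2
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	return err
}
```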
type dialOptions struct { - unaryInt UnaryClientInterceptor - streamInt StreamClientInterceptor - - chainUnaryInts []UnaryClientInterceptor - chainStreamInts []StreamClientInterceptor - + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor cp Compressor dc Decompressor bs backoff.Strategy @@ -418,17 +414,6 @@ func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { }) } -// WithChainUnaryInterceptor returns a DialOption that specifies the chained -// interceptor for unary RPCs. The first interceptor will be the outer most, -// while the last interceptor will be the inner most wrapper around the real call. -// All interceptors added by this method will be chained, and the interceptor -// defined by WithUnaryInterceptor will always be prepended to the chain. -func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) - }) -} - // WithStreamInterceptor returns a DialOption that specifies the interceptor for // streaming RPCs. func WithStreamInterceptor(f StreamClientInterceptor) DialOption { @@ -437,17 +422,6 @@ func WithStreamInterceptor(f StreamClientInterceptor) DialOption { }) } -// WithChainStreamInterceptor returns a DialOption that specifies the chained -// interceptor for unary RPCs. The first interceptor will be the outer most, -// while the last interceptor will be the inner most wrapper around the real call. -// All interceptors added by this method will be chained, and the interceptor -// defined by WithStreamInterceptor will always be prepended to the chain. -func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.chainStreamInts = append(o.chainStreamInts, interceptors...) - }) -} - // WithAuthority returns a DialOption that specifies the value to be used as the // :authority pseudo-header. This value only works with WithInsecure and has no // effect if TransportCredentials are present. @@ -466,12 +440,12 @@ func WithChannelzParentID(id int64) DialOption { }) } -// WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any +// WithDisableServiceConfig returns a DialOption that causes grpc to ignore any // service config provided by the resolver and provides a hint to the resolver // to not fetch service configs. // -// Note that this dial option only disables service config from resolver. If -// default service config is provided, gRPC will use the default service config. +// Note that, this dial option only disables service config from resolver. If +// default service config is provided, grpc will use the default service config. 
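As the reverted comment notes, `WithDisableServiceConfig` only ignores resolver-provided config; a client-supplied default still applies. A dial sketch under that assumption (the target and policy are placeholders, and `WithDefaultServiceConfig` is an experimental option in this vintage of grpc-go):

```go
package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial(
		"dns:///example.invalid:443", // placeholder target
		grpc.WithInsecure(),
		grpc.WithDisableServiceConfig(), // ignore config published by the resolver
		grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```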
func WithDisableServiceConfig() DialOption { return newFuncDialOption(func(o *dialOptions) { o.disableServiceConfig = true diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod index b75c069aac..9f3ef3a539 100644 --- a/vendor/google.golang.org/grpc/go.mod +++ b/vendor/google.golang.org/grpc/go.mod @@ -7,7 +7,6 @@ require ( github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/golang/mock v1.1.1 github.com/golang/protobuf v1.2.0 - github.com/google/go-cmp v0.2.0 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 golang.org/x/net v0.0.0-20190311183353-d8887717615a golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum index 2a17234748..b8638ce769 100644 --- a/vendor/google.golang.org/grpc/go.sum +++ b/vendor/google.golang.org/grpc/go.sum @@ -10,8 +10,6 @@ github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index f0744f9937..041520d351 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -24,7 +24,6 @@ package channelz import ( - "fmt" "sort" "sync" "sync/atomic" @@ -96,14 +95,9 @@ func (d *dbWrapper) get() *channelMap { // NewChannelzStorage initializes channelz data storage and id generator. // -// This function returns a cleanup function to wait for all channelz state to be reset by the -// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests -// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen -// to remove some entity just register by the new test, since the id space is the same. -// // Note: This function is exported for testing purpose only. User should not call // it in most cases. -func NewChannelzStorage() (cleanup func() error) { +func NewChannelzStorage() { db.set(&channelMap{ topLevelChannels: make(map[int64]struct{}), channels: make(map[int64]*channel), @@ -113,28 +107,6 @@ func NewChannelzStorage() (cleanup func() error) { subChannels: make(map[int64]*subChannel), }) idGen.reset() - return func() error { - var err error - cm := db.get() - if cm == nil { - return nil - } - for i := 0; i < 1000; i++ { - cm.mu.Lock() - if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 { - cm.mu.Unlock() - // all things stored in the channelz map have been cleared. 
- return nil - } - cm.mu.Unlock() - time.Sleep(10 * time.Millisecond) - } - - cm.mu.Lock() - err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)) - cm.mu.Unlock() - return err - } } // GetTopChannels returns a slice of top channel's ChannelMetric, along with a diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index e26e28141e..9dee6db61d 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -549,7 +549,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea s.write(recvMsg{err: err}) close(s.done) // If headerChan isn't closed, then close it. - if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + if atomic.SwapUint32(&s.headerDone, 1) == 0 { close(s.headerChan) } @@ -713,7 +713,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. s.write(recvMsg{err: err}) } // If headerChan isn't closed, then close it. - if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + if atomic.SwapUint32(&s.headerDone, 1) == 0 { s.noHeaders = true close(s.headerChan) } @@ -794,21 +794,21 @@ func (t *http2Client) Close() error { // stream is closed. If there are no active streams, the transport is closed // immediately. This does nothing if the transport is already draining or // closing. -func (t *http2Client) GracefulClose() { +func (t *http2Client) GracefulClose() error { t.mu.Lock() // Make sure we move to draining only from active. if t.state == draining || t.state == closing { t.mu.Unlock() - return + return nil } t.state = draining active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - t.Close() - return + return t.Close() } t.controlBuf.put(&incomingGoAway{}) + return nil } // Write formats the data into HTTP2 data frame(s) and sends it out. The caller @@ -1142,24 +1142,26 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { } endStream := frame.StreamEnded() atomic.StoreUint32(&s.bytesReceived, 1) - initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0 + initialHeader := atomic.SwapUint32(&s.headerDone, 1) == 0 if !initialHeader && !endStream { - // As specified by gRPC over HTTP2, a HEADERS frame (and associated CONTINUATION frames) can only appear at the start or end of a stream. Therefore, second HEADERS frame must have EOS bit set. + // As specified by RFC 7540, a HEADERS frame (and associated CONTINUATION frames) can only appear + // at the start or end of a stream. Therefore, second HEADERS frame must have EOS bit set. st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream") t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false) return } state := &decodeState{} - // Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode. + // Initialize isGRPC value to be !initialHeader, since if a gRPC ResponseHeader has been received + // which indicates peer speaking gRPC, we are in gRPC mode. 
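Both the old and new guards close `headerChan` exactly once; `atomic.SwapUint32(&s.headerDone, 1) == 0` asks whether the previous value was zero, which is equivalent here to the `CompareAndSwapUint32` form. A standalone sketch of the close-once idiom:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var (
		headerDone uint32 // set once headerChan has been closed
		headerChan = make(chan struct{})
		wg         sync.WaitGroup
	)
	closeOnce := func() {
		// Only the goroutine that observes the old value 0 performs the
		// close, so concurrent callers cannot close the channel twice.
		if atomic.SwapUint32(&headerDone, 1) == 0 {
			close(headerChan)
		}
	}
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); closeOnce() }()
	}
	wg.Wait()
	<-headerChan
	fmt.Println("closed exactly once")
}
```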
state.data.isGRPC = !initialHeader if err := state.decodeHeader(frame); err != nil { t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream) return } - isHeader := false + var isHeader bool defer func() { if t.statsHandler != nil { if isHeader { @@ -1178,10 +1180,10 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { } }() - // If headerChan hasn't been closed yet - if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + // If headers haven't been received yet. + if initialHeader { if !endStream { - // HEADERS frame block carries a Response-Headers. + // Headers frame is ResponseHeader. isHeader = true // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed @@ -1190,17 +1192,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if len(state.data.mdata) > 0 { s.header = state.data.mdata } - } else { - // HEADERS frame block carries a Trailers-Only. - s.noHeaders = true + close(s.headerChan) + return } + // Headers frame is Trailers-only. + s.noHeaders = true close(s.headerChan) } - if !endStream { - return - } - // if client received END_STREAM from server while stream was still active, send RST_STREAM rst := s.getState() == streamActive t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true) diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 4bf583efc2..7f82cbb080 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -204,8 +204,8 @@ type Stream struct { // is used to adjust flow control, if needed. requestRead func(int) - headerChan chan struct{} // closed to indicate the end of header metadata. - headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + headerChan chan struct{} // closed to indicate the end of header metadata. + headerDone uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. // hdrMu protects header and trailer metadata on the server-side. hdrMu sync.Mutex @@ -578,12 +578,9 @@ type ClientTransport interface { // is called only once. Close() error - // GracefulClose starts to tear down the transport: the transport will stop - // accepting new RPCs and NewStream will return error. Once all streams are - // finished, the transport will close. - // - // It does not block. - GracefulClose() + // GracefulClose starts to tear down the transport. It stops accepting + // new RPCs and wait the completion of the pending RPCs. + GracefulClose() error // Write sends the data for the given stream. A nil stream indicates // the write is to be performed on the transport as a whole. diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 45baa2ae13..f9625496c4 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -120,14 +120,6 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer. 
bp.mu.Unlock() select { case <-ctx.Done(): - if connectionErr := bp.connectionError(); connectionErr != nil { - switch ctx.Err() { - case context.DeadlineExceeded: - return nil, nil, status.Errorf(codes.DeadlineExceeded, "latest connection error: %v", connectionErr) - case context.Canceled: - return nil, nil, status.Errorf(codes.Canceled, "latest connection error: %v", connectionErr) - } - } return nil, nil, ctx.Err() case <-ch: } diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go deleted file mode 100644 index 76acbbcc93..0000000000 --- a/vendor/google.golang.org/grpc/preloader.go +++ /dev/null @@ -1,64 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// PreparedMsg is responsible for creating a Marshalled and Compressed object. -// -// This API is EXPERIMENTAL. -type PreparedMsg struct { - // Struct for preparing msg before sending them - encodedData []byte - hdr []byte - payload []byte -} - -// Encode marshalls and compresses the message using the codec and compressor for the stream. -func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { - ctx := s.Context() - rpcInfo, ok := rpcInfoFromContext(ctx) - if !ok { - return status.Errorf(codes.Internal, "grpc: unable to get rpcInfo") - } - - // check if the context has the relevant information to prepareMsg - if rpcInfo.preloaderInfo == nil { - return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil") - } - if rpcInfo.preloaderInfo.codec == nil { - return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil") - } - - // prepare the msg - data, err := encode(rpcInfo.preloaderInfo.codec, msg) - if err != nil { - return err - } - p.encodedData = data - compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp) - if err != nil { - return err - } - p.hdr, p.payload = msgHeader(data, compData) - return nil -} diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go index 297492e87a..5835599077 100644 --- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -66,9 +66,6 @@ var ( var ( defaultResolver netResolver = net.DefaultResolver - // To prevent excessive re-resolution, we enforce a rate limit on DNS - // resolution requests. - minDNSResRate = 30 * time.Second ) var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { @@ -244,13 +241,7 @@ func (d *dnsResolver) watcher() { return case <-d.t.C: case <-d.rn: - if !d.t.Stop() { - // Before resetting a timer, it should be stopped to prevent racing with - // reads on it's channel. - <-d.t.C - } } - result, sc := d.lookup() // Next lookup should happen within an interval defined by d.freq. 
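The deleted `Stop`/drain sequence is the standard guard against racing a `time.Timer`'s channel when another select case fired first; without it, a stale tick can be consumed on the next receive. A standalone sketch of the idiom the watcher loses:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.NewTimer(50 * time.Millisecond)
	rn := make(chan struct{}, 1) // stands in for the resolver's re-resolve channel
	rn <- struct{}{}

	select {
	case <-t.C:
		fmt.Println("periodic tick")
	case <-rn:
		// Before reusing the timer, stop it and drain the channel if it
		// already fired, so the next receive doesn't see a stale tick.
		if !t.Stop() {
			<-t.C
		}
		fmt.Println("explicit re-resolve handled")
	}
	t.Reset(50 * time.Millisecond)
	<-t.C
}
```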
It may be // more often due to exponential retry on empty address list. @@ -263,16 +254,6 @@ func (d *dnsResolver) watcher() { } d.cc.NewServiceConfig(sc) d.cc.NewAddress(result) - - // Sleep to prevent excessive re-resolutions. Incoming resolution requests - // will be queued in d.rn. - t := time.NewTimer(minDNSResRate) - select { - case <-t.C: - case <-d.ctx.Done(): - t.Stop() - return - } } } diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index f6f6934d9b..52ec603daa 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -132,21 +132,6 @@ type ClientConn interface { // Target represents a target for gRPC, as specified in: // https://github.com/grpc/grpc/blob/master/doc/naming.md. -// It is parsed from the target string that gets passed into Dial or DialContext by the user. And -// grpc passes it to the resolver and the balancer. -// -// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will -// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed -// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} -// -// If the target does not contain a scheme, we will apply the default scheme, and set the Target to -// be the full target string. e.g. "foo.bar" will be parsed into -// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}. -// -// If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the -// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target -// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into -// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}. 
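The comment removed here was the only written-down description of target parsing, e.g. that `"dns://some_authority/foo.bar"` becomes `&Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}`. A simplified restatement of those rules as code (this toy parser does not model scheme registration, which the real implementation also checks):

```go
package main

import (
	"fmt"
	"strings"
)

// parseTarget mimics the documented split of "scheme://authority/endpoint".
// Targets without a parseable scheme keep the default scheme and the whole
// string as the endpoint. Illustration only, not gRPC's parser.
func parseTarget(target, defaultScheme string) (scheme, authority, endpoint string) {
	if i := strings.Index(target, "://"); i >= 0 {
		rest := target[i+3:]
		if j := strings.Index(rest, "/"); j >= 0 {
			return target[:i], rest[:j], rest[j+1:]
		}
	}
	return defaultScheme, "", target
}

func main() {
	fmt.Println(parseTarget("dns://some_authority/foo.bar", "passthrough"))
	// dns some_authority foo.bar
	fmt.Println(parseTarget("foo.bar", "passthrough"))
	// passthrough  foo.bar
}
```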
type Target struct { Scheme string Authority string diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 088c3f1b25..2a595622d3 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -694,34 +694,14 @@ func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interf return nil } -// Information about RPC type rpcInfo struct { - failfast bool - preloaderInfo *compressorInfo -} - -// Information about Preloader -// Responsible for storing codec, and compressors -// If stream (s) has context s.Context which stores rpcInfo that has non nil -// pointers to codec, and compressors, then we can use preparedMsg for Async message prep -// and reuse marshalled bytes -type compressorInfo struct { - codec baseCodec - cp Compressor - comp encoding.Compressor + failfast bool } type rpcInfoContextKey struct{} -func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context { - return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{ - failfast: failfast, - preloaderInfo: &compressorInfo{ - codec: codec, - cp: cp, - comp: comp, - }, - }) +func newContextWithRPCInfo(ctx context.Context, failfast bool) context.Context { + return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{failfast: failfast}) } func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 495a4f9b82..8115828fdf 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -86,7 +86,7 @@ type service struct { // Server is a gRPC server to serve RPC requests. type Server struct { - opts serverOptions + opts options mu sync.Mutex // guards following lis map[net.Listener]bool @@ -108,7 +108,7 @@ type Server struct { czData *channelzData } -type serverOptions struct { +type options struct { creds credentials.TransportCredentials codec baseCodec cp Compressor @@ -131,7 +131,7 @@ type serverOptions struct { maxHeaderListSize *uint32 } -var defaultServerOptions = serverOptions{ +var defaultServerOptions = options{ maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, maxSendMessageSize: defaultServerMaxSendMessageSize, connectionTimeout: 120 * time.Second, @@ -140,33 +140,7 @@ var defaultServerOptions = serverOptions{ } // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. -type ServerOption interface { - apply(*serverOptions) -} - -// EmptyServerOption does not alter the server configuration. It can be embedded -// in another structure to build custom server options. -// -// This API is EXPERIMENTAL. -type EmptyServerOption struct{} - -func (EmptyServerOption) apply(*serverOptions) {} - -// funcServerOption wraps a function that modifies serverOptions into an -// implementation of the ServerOption interface. -type funcServerOption struct { - f func(*serverOptions) -} - -func (fdo *funcServerOption) apply(do *serverOptions) { - fdo.f(do) -} - -func newFuncServerOption(f func(*serverOptions)) *funcServerOption { - return &funcServerOption{ - f: f, - } -} +type ServerOption func(*options) // WriteBufferSize determines how much data can be batched before doing a write on the wire. // The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. 
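`rpcInfoContextKey struct{}` in the hunk above is the usual unexported-key pattern for `context` values: no other package can construct the key type, so lookups cannot collide. The same pattern in isolation:

```go
package rpcinfo

import "context"

type rpcInfo struct {
	failfast bool
}

// An unexported struct type as the key makes collisions with other
// packages' context values impossible.
type rpcInfoContextKey struct{}

func newContextWithRPCInfo(ctx context.Context, failfast bool) context.Context {
	return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{failfast: failfast})
}

func rpcInfoFromContext(ctx context.Context) (*rpcInfo, bool) {
	s, ok := ctx.Value(rpcInfoContextKey{}).(*rpcInfo)
	return s, ok
}
```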
@@ -174,9 +148,9 @@ func newFuncServerOption(f func(*serverOptions)) *funcServerOption { // Zero will disable the write buffer such that each write will be on underlying connection. // Note: A Send call may not directly translate to a write. func WriteBufferSize(s int) ServerOption { - return newFuncServerOption(func(o *serverOptions) { + return func(o *options) { o.writeBufferSize = s - }) + } } // ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most @@ -185,25 +159,25 @@ func WriteBufferSize(s int) ServerOption { // Zero will disable read buffer for a connection so data framer can access the underlying // conn directly. func ReadBufferSize(s int) ServerOption { - return newFuncServerOption(func(o *serverOptions) { + return func(o *options) { o.readBufferSize = s - }) + } } // InitialWindowSize returns a ServerOption that sets window size for stream. // The lower bound for window size is 64K and any value smaller than that will be ignored. func InitialWindowSize(s int32) ServerOption { - return newFuncServerOption(func(o *serverOptions) { + return func(o *options) { o.initialWindowSize = s - }) + } } // InitialConnWindowSize returns a ServerOption that sets window size for a connection. // The lower bound for window size is 64K and any value smaller than that will be ignored. func InitialConnWindowSize(s int32) ServerOption { - return newFuncServerOption(func(o *serverOptions) { + return func(o *options) { o.initialConnWindowSize = s - }) + } } // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. @@ -213,25 +187,25 @@ func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { kp.Time = time.Second } - return newFuncServerOption(func(o *serverOptions) { + return func(o *options) { o.keepaliveParams = kp - }) + } } // KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server. func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { - return newFuncServerOption(func(o *serverOptions) { + return func(o *options) { o.keepalivePolicy = kep - }) + } } // CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. // // This will override any lookups by content-subtype for Codecs registered with RegisterCodec. func CustomCodec(codec Codec) ServerOption { - return newFuncServerOption(func(o *serverOptions) { + return func(o *options) { o.codec = codec - }) + } } // RPCCompressor returns a ServerOption that sets a compressor for outbound @@ -242,9 +216,9 @@ func CustomCodec(codec Codec) ServerOption { // // Deprecated: use encoding.RegisterCompressor instead. func RPCCompressor(cp Compressor) ServerOption { - return newFuncServerOption(func(o *serverOptions) { + return func(o *options) { o.cp = cp - }) + } } // RPCDecompressor returns a ServerOption that sets a decompressor for inbound @@ -253,9 +227,9 @@ func RPCCompressor(cp Compressor) ServerOption { // // Deprecated: use encoding.RegisterCompressor instead. func RPCDecompressor(dc Decompressor) ServerOption { - return newFuncServerOption(func(o *serverOptions) { + return func(o *options) { o.dc = dc - }) + } } // MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. @@ -269,73 +243,73 @@ func MaxMsgSize(m int) ServerOption { // MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. // If this is not set, gRPC uses the default 4MB. 
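This hunk reverts `ServerOption` from an interface back to the plain functional-options shape, `func(*options)`. A minimal standalone sketch of that pattern (names illustrative):

```go
package main

import "fmt"

type options struct {
	readBufferSize  int
	writeBufferSize int
}

// Option mirrors the reverted `type ServerOption func(*options)` shape.
type Option func(*options)

func ReadBufferSize(s int) Option  { return func(o *options) { o.readBufferSize = s } }
func WriteBufferSize(s int) Option { return func(o *options) { o.writeBufferSize = s } }

func newServer(opts ...Option) options {
	o := options{readBufferSize: 32 * 1024, writeBufferSize: 32 * 1024} // defaults
	for _, opt := range opts {
		opt(&o)
	}
	return o
}

func main() {
	fmt.Printf("%+v\n", newServer(ReadBufferSize(64 * 1024)))
}
```

The interface form being removed exists upstream so options can later be extended (e.g. by embedding `EmptyServerOption`); the plain function type is simpler but closed to that kind of evolution.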
func MaxRecvMsgSize(m int) ServerOption { - return newFuncServerOption(func(o *serverOptions) { + return func(o *options) { o.maxReceiveMessageSize = m - }) + } } // MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. // If this is not set, gRPC uses the default `math.MaxInt32`. func MaxSendMsgSize(m int) ServerOption { - return newFuncServerOption(func(o *serverOptions) { + return func(o *options) { o.maxSendMessageSize = m - }) + } } // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number // of concurrent streams to each ServerTransport. func MaxConcurrentStreams(n uint32) ServerOption { - return newFuncServerOption(func(o *serverOptions) { + return func(o *options) { o.maxConcurrentStreams = n - }) + } } // Creds returns a ServerOption that sets credentials for server connections. func Creds(c credentials.TransportCredentials) ServerOption { - return newFuncServerOption(func(o *serverOptions) { + return func(o *options) { o.creds = c - }) + } } // UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the // server. Only one unary interceptor can be installed. The construction of multiple // interceptors (e.g., chaining) can be implemented at the caller. func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { - return newFuncServerOption(func(o *serverOptions) { + return func(o *options) { if o.unaryInt != nil { panic("The unary server interceptor was already set and may not be reset.") } o.unaryInt = i - }) + } } // StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the // server. Only one stream interceptor can be installed. func StreamInterceptor(i StreamServerInterceptor) ServerOption { - return newFuncServerOption(func(o *serverOptions) { + return func(o *options) { if o.streamInt != nil { panic("The stream server interceptor was already set and may not be reset.") } o.streamInt = i - }) + } } // InTapHandle returns a ServerOption that sets the tap handle for all the server // transport to be created. Only one can be installed. func InTapHandle(h tap.ServerInHandle) ServerOption { - return newFuncServerOption(func(o *serverOptions) { + return func(o *options) { if o.inTapHandle != nil { panic("The tap handle was already set and may not be reset.") } o.inTapHandle = h - }) + } } // StatsHandler returns a ServerOption that sets the stats handler for the server. func StatsHandler(h stats.Handler) ServerOption { - return newFuncServerOption(func(o *serverOptions) { + return func(o *options) { o.statsHandler = h - }) + } } // UnknownServiceHandler returns a ServerOption that allows for adding a custom @@ -345,7 +319,7 @@ func StatsHandler(h stats.Handler) ServerOption { // The handling function has full access to the Context of the request and the // stream, and the invocation bypasses interceptors. func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { - return newFuncServerOption(func(o *serverOptions) { + return func(o *options) { o.unknownStreamDesc = &StreamDesc{ StreamName: "unknown_service_handler", Handler: streamHandler, @@ -353,7 +327,7 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { ClientStreams: true, ServerStreams: true, } - }) + } } // ConnectionTimeout returns a ServerOption that sets the timeout for @@ -363,17 +337,17 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { // // This API is EXPERIMENTAL. 
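For orientation, these options are consumed unchanged at call sites; only their internal representation differs. A small server-construction sketch using options touched in this hunk (address and limits are placeholders):

```go
package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	srv := grpc.NewServer(
		grpc.MaxRecvMsgSize(8<<20), // raise the 4 MiB receive default to 8 MiB
		grpc.MaxSendMsgSize(8<<20),
		grpc.MaxConcurrentStreams(128),
	)
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	log.Printf("serving on %s", lis.Addr())
	if err := srv.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}
```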
func ConnectionTimeout(d time.Duration) ServerOption { - return newFuncServerOption(func(o *serverOptions) { + return func(o *options) { o.connectionTimeout = d - }) + } } // MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size // of header list that the server is prepared to accept. func MaxHeaderListSize(s uint32) ServerOption { - return newFuncServerOption(func(o *serverOptions) { + return func(o *options) { o.maxHeaderListSize = &s - }) + } } // NewServer creates a gRPC server which has no service registered and has not @@ -381,7 +355,7 @@ func MaxHeaderListSize(s uint32) ServerOption { func NewServer(opt ...ServerOption) *Server { opts := defaultServerOptions for _, o := range opt { - o.apply(&opts) + o(&opts) } s := &Server{ lis: make(map[net.Listener]bool), diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index e10e62317d..6e2bf51e0a 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -245,7 +245,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth trInfo.tr.LazyLog(&trInfo.firstLine, false) ctx = trace.NewContext(ctx, trInfo.tr) } - ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp) + ctx = newContextWithRPCInfo(ctx, c.failFast) sh := cc.dopts.copts.StatsHandler var beginTime time.Time if sh != nil { @@ -677,13 +677,15 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { if !cs.desc.ClientStreams { cs.sentLast = true } - - // load hdr, payload, data - hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp) + data, err := encode(cs.codec, m) if err != nil { return err } - + compData, err := compress(data, cs.cp, cs.comp) + if err != nil { + return err + } + hdr, payload := msgHeader(data, compData) // TODO(dfawley): should we be checking len(data) instead? if len(payload) > *cs.callInfo.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) @@ -1148,13 +1150,15 @@ func (as *addrConnStream) SendMsg(m interface{}) (err error) { if !as.desc.ClientStreams { as.sentLast = true } - - // load hdr, payload, data - hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp) + data, err := encode(as.codec, m) if err != nil { return err } - + compData, err := compress(data, as.cp, as.comp) + if err != nil { + return err + } + hdr, payld := msgHeader(data, compData) // TODO(dfawley): should we be checking len(data) instead? if len(payld) > *as.callInfo.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) @@ -1391,13 +1395,15 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { ss.t.IncrMsgSent() } }() - - // load hdr, payload, data - hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) + data, err := encode(ss.codec, m) if err != nil { return err } - + compData, err := compress(data, ss.cp, ss.comp) + if err != nil { + return err + } + hdr, payload := msgHeader(data, compData) // TODO(dfawley): should we be checking len(data) instead? if len(payload) > ss.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", len(payload), ss.maxSendMessageSize) @@ -1490,24 +1496,3 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { func MethodFromServerStream(stream ServerStream) (string, bool) { return Method(stream.Context()) } - -// prepareMsg returns the hdr, payload and data -// using the compressors passed or using the -// passed preparedmsg -func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { - if preparedMsg, ok := m.(*PreparedMsg); ok { - return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil - } - // The input interface is not a prepared msg. - // Marshal and Compress the data at this point - data, err = encode(codec, m) - if err != nil { - return nil, nil, nil, err - } - compData, err := compress(data, cp, comp) - if err != nil { - return nil, nil, nil, err - } - hdr, payload = msgHeader(data, compData) - return hdr, payload, data, nil -} diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 5c8cb709a0..092e088258 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.21.1" +const Version = "1.20.1" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index 11037b94dc..bd5c8de90c 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -75,7 +75,7 @@ git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO # - Do not import math/rand for real library code. Use internal/grpcrand for # thread safety. -git ls-files "*.go" | xargs grep -l '"math/rand"' 2>&1 | (! grep -v '^examples\|^stress\|grpcrand\|wrr_test') +git ls-files "*.go" | xargs grep -l '"math/rand"' 2>&1 | (! grep -v '^examples\|^stress\|grpcrand') # - Ensure all ptypes proto packages are renamed when importing. git ls-files "*.go" | (! xargs grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/") @@ -88,7 +88,7 @@ go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | (! grep -vE "(_mock|\.pb)\.go:") | fail_on_output golint ./... 2>&1 | (! grep -vE "(_mock|\.pb)\.go:") -go vet -all . +go tool vet -all . # - Check that generated proto files are up to date. if [[ -z "${VET_SKIP_PROTO}" ]]; then diff --git a/vendor/k8s.io/apimachinery/pkg/apis/config/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/config/OWNERS new file mode 100644 index 0000000000..2f7b10df4b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/config/OWNERS @@ -0,0 +1,7 @@ +approvers: +- api-approvers +- sttts +- luxas +reviewers: +- api-reviewers +- hanxiaoshuai diff --git a/vendor/k8s.io/apimachinery/pkg/apis/config/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/config/doc.go new file mode 100644 index 0000000000..d849c7aa3b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/config/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +package config // import "k8s.io/apimachinery/pkg/apis/config" diff --git a/vendor/k8s.io/apimachinery/pkg/apis/config/types.go b/vendor/k8s.io/apimachinery/pkg/apis/config/types.go new file mode 100644 index 0000000000..b32fc8a281 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/config/types.go @@ -0,0 +1,33 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +// ClientConnectionConfiguration contains details for constructing a client. +type ClientConnectionConfiguration struct { + // kubeconfig is the path to a KubeConfig file. + Kubeconfig string + // acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the + // default value of 'application/json'. This field will control all connections to the server used by a particular + // client. + AcceptContentTypes string + // contentType is the content type used when sending data to the server from this client. + ContentType string + // qps controls the number of queries per second allowed for this connection. + QPS float32 + // burst allows extra queries to accumulate when a client is exceeding its rate. + Burst int32 +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/config/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/config/zz_generated.deepcopy.go new file mode 100644 index 0000000000..f09beb0e38 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/config/zz_generated.deepcopy.go @@ -0,0 +1,37 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package config + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientConnectionConfiguration) DeepCopyInto(out *ClientConnectionConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionConfiguration. 
+func (in *ClientConnectionConfiguration) DeepCopy() *ClientConnectionConfiguration { + if in == nil { + return nil + } + out := new(ClientConnectionConfiguration) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/version/doc.go b/vendor/k8s.io/apimachinery/pkg/util/version/doc.go new file mode 100644 index 0000000000..5b2b22b6d0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/version/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package version provides utilities for version number comparisons +package version // import "k8s.io/apimachinery/pkg/util/version" diff --git a/vendor/k8s.io/apimachinery/pkg/util/version/version.go b/vendor/k8s.io/apimachinery/pkg/util/version/version.go new file mode 100644 index 0000000000..24724e50ac --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/version/version.go @@ -0,0 +1,285 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
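A usage sketch for the two files added above: the generated `DeepCopy` is a plain `*out = *in` because `ClientConnectionConfiguration` holds only value fields (the kubeconfig path below is illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/config"
)

func main() {
	cfg := config.ClientConnectionConfiguration{
		Kubeconfig:         "/etc/kubernetes/admin.conf", // illustrative path
		AcceptContentTypes: "application/json",
		ContentType:        "application/json",
		QPS:                5,
		Burst:              10,
	}
	// Safe as a shallow copy: the struct is flat, so the generated
	// DeepCopy's `*out = *in` cannot share any mutable state.
	cp := cfg.DeepCopy()
	fmt.Println(cp.Kubeconfig)
}
```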
+*/ + +package version + +import ( + "bytes" + "fmt" + "regexp" + "strconv" + "strings" +) + +// Version is an opqaue representation of a version number +type Version struct { + components []uint + semver bool + preRelease string + buildMetadata string +} + +var ( + // versionMatchRE splits a version string into numeric and "extra" parts + versionMatchRE = regexp.MustCompile(`^\s*v?([0-9]+(?:\.[0-9]+)*)(.*)*$`) + // extraMatchRE splits the "extra" part of versionMatchRE into semver pre-release and build metadata; it does not validate the "no leading zeroes" constraint for pre-release + extraMatchRE = regexp.MustCompile(`^(?:-([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?(?:\+([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?\s*$`) +) + +func parse(str string, semver bool) (*Version, error) { + parts := versionMatchRE.FindStringSubmatch(str) + if parts == nil { + return nil, fmt.Errorf("could not parse %q as version", str) + } + numbers, extra := parts[1], parts[2] + + components := strings.Split(numbers, ".") + if (semver && len(components) != 3) || (!semver && len(components) < 2) { + return nil, fmt.Errorf("illegal version string %q", str) + } + + v := &Version{ + components: make([]uint, len(components)), + semver: semver, + } + for i, comp := range components { + if (i == 0 || semver) && strings.HasPrefix(comp, "0") && comp != "0" { + return nil, fmt.Errorf("illegal zero-prefixed version component %q in %q", comp, str) + } + num, err := strconv.ParseUint(comp, 10, 0) + if err != nil { + return nil, fmt.Errorf("illegal non-numeric version component %q in %q: %v", comp, str, err) + } + v.components[i] = uint(num) + } + + if semver && extra != "" { + extraParts := extraMatchRE.FindStringSubmatch(extra) + if extraParts == nil { + return nil, fmt.Errorf("could not parse pre-release/metadata (%s) in version %q", extra, str) + } + v.preRelease, v.buildMetadata = extraParts[1], extraParts[2] + + for _, comp := range strings.Split(v.preRelease, ".") { + if _, err := strconv.ParseUint(comp, 10, 0); err == nil { + if strings.HasPrefix(comp, "0") && comp != "0" { + return nil, fmt.Errorf("illegal zero-prefixed version component %q in %q", comp, str) + } + } + } + } + + return v, nil +} + +// ParseGeneric parses a "generic" version string. The version string must consist of two +// or more dot-separated numeric fields (the first of which can't have leading zeroes), +// followed by arbitrary uninterpreted data (which need not be separated from the final +// numeric field by punctuation). For convenience, leading and trailing whitespace is +// ignored, and the version can be preceded by the letter "v". See also ParseSemantic. +func ParseGeneric(str string) (*Version, error) { + return parse(str, false) +} + +// MustParseGeneric is like ParseGeneric except that it panics on error +func MustParseGeneric(str string) *Version { + v, err := ParseGeneric(str) + if err != nil { + panic(err) + } + return v +} + +// ParseSemantic parses a version string that exactly obeys the syntax and semantics of +// the "Semantic Versioning" specification (http://semver.org/) (although it ignores +// leading and trailing whitespace, and allows the version to be preceded by "v"). For +// version strings that are not guaranteed to obey the Semantic Versioning syntax, use +// ParseGeneric. 
+func ParseSemantic(str string) (*Version, error) { + return parse(str, true) +} + +// MustParseSemantic is like ParseSemantic except that it panics on error +func MustParseSemantic(str string) *Version { + v, err := ParseSemantic(str) + if err != nil { + panic(err) + } + return v +} + +// Major returns the major release number +func (v *Version) Major() uint { + return v.components[0] +} + +// Minor returns the minor release number +func (v *Version) Minor() uint { + return v.components[1] +} + +// Patch returns the patch release number if v is a Semantic Version, or 0 +func (v *Version) Patch() uint { + if len(v.components) < 3 { + return 0 + } + return v.components[2] +} + +// BuildMetadata returns the build metadata, if v is a Semantic Version, or "" +func (v *Version) BuildMetadata() string { + return v.buildMetadata +} + +// PreRelease returns the prerelease metadata, if v is a Semantic Version, or "" +func (v *Version) PreRelease() string { + return v.preRelease +} + +// Components returns the version number components +func (v *Version) Components() []uint { + return v.components +} + +// String converts a Version back to a string; note that for versions parsed with +// ParseGeneric, this will not include the trailing uninterpreted portion of the version +// number. +func (v *Version) String() string { + var buffer bytes.Buffer + + for i, comp := range v.components { + if i > 0 { + buffer.WriteString(".") + } + buffer.WriteString(fmt.Sprintf("%d", comp)) + } + if v.preRelease != "" { + buffer.WriteString("-") + buffer.WriteString(v.preRelease) + } + if v.buildMetadata != "" { + buffer.WriteString("+") + buffer.WriteString(v.buildMetadata) + } + + return buffer.String() +} + +// compareInternal returns -1 if v is less than other, 1 if it is greater than other, or 0 +// if they are equal +func (v *Version) compareInternal(other *Version) int { + + vLen := len(v.components) + oLen := len(other.components) + for i := 0; i < vLen && i < oLen; i++ { + switch { + case other.components[i] < v.components[i]: + return 1 + case other.components[i] > v.components[i]: + return -1 + } + } + + // If components are common but one has more items and they are not zeros, it is bigger + switch { + case oLen < vLen && !onlyZeros(v.components[oLen:]): + return 1 + case oLen > vLen && !onlyZeros(other.components[vLen:]): + return -1 + } + + if !v.semver || !other.semver { + return 0 + } + + switch { + case v.preRelease == "" && other.preRelease != "": + return 1 + case v.preRelease != "" && other.preRelease == "": + return -1 + case v.preRelease == other.preRelease: // includes case where both are "" + return 0 + } + + vPR := strings.Split(v.preRelease, ".") + oPR := strings.Split(other.preRelease, ".") + for i := 0; i < len(vPR) && i < len(oPR); i++ { + vNum, err := strconv.ParseUint(vPR[i], 10, 0) + if err == nil { + oNum, err := strconv.ParseUint(oPR[i], 10, 0) + if err == nil { + switch { + case oNum < vNum: + return 1 + case oNum > vNum: + return -1 + default: + continue + } + } + } + if oPR[i] < vPR[i] { + return 1 + } else if oPR[i] > vPR[i] { + return -1 + } + } + + switch { + case len(oPR) < len(vPR): + return 1 + case len(oPR) > len(vPR): + return -1 + } + + return 0 +} + +// returns false if array contain any non-zero element +func onlyZeros(array []uint) bool { + for _, num := range array { + if num != 0 { + return false + } + } + return true +} + +// AtLeast tests if a version is at least equal to a given minimum version. 
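A usage sketch for the parsing and comparison functions added above, based on their documented behavior:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/version"
)

func main() {
	// ParseGeneric tolerates a "v" prefix and trailing extra data;
	// ParseSemantic would insist on strict three-component SemVer.
	v := version.MustParseGeneric("v1.15.0")
	min := version.MustParseGeneric("1.14")

	fmt.Println(v.AtLeast(min))  // true
	fmt.Println(v.LessThan(min)) // false
	if c, err := v.Compare("1.15"); err == nil {
		fmt.Println(c) // 0: missing components compare as zero, so 1.15 == 1.15.0
	}
}
```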
If both +// Versions are Semantic Versions, this will use the Semantic Version comparison +// algorithm. Otherwise, it will compare only the numeric components, with non-present +// components being considered "0" (ie, "1.4" is equal to "1.4.0"). +func (v *Version) AtLeast(min *Version) bool { + return v.compareInternal(min) != -1 +} + +// LessThan tests if a version is less than a given version. (It is exactly the opposite +// of AtLeast, for situations where asking "is v too old?" makes more sense than asking +// "is v new enough?".) +func (v *Version) LessThan(other *Version) bool { + return v.compareInternal(other) == -1 +} + +// Compare compares v against a version string (which will be parsed as either Semantic +// or non-Semantic depending on v). On success it returns -1 if v is less than other, 1 if +// it is greater than other, or 0 if they are equal. +func (v *Version) Compare(other string) (int, error) { + ov, err := parse(other, v.semver) + if err != nil { + return 0, err + } + return v.compareInternal(ov), nil +} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS b/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS index 62866d0b19..0c408a1aa9 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS +++ b/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS @@ -1,5 +1,3 @@ -# See the OWNERS docs at https://go.k8s.io/owners - approvers: - lavalamp - wojtek-t diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/README.md b/vendor/k8s.io/code-generator/cmd/client-gen/README.md index 092a61151c..d1d67abdf9 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/README.md +++ b/vendor/k8s.io/code-generator/cmd/client-gen/README.md @@ -1,4 +1,4 @@ -See [generating-clientset.md](https://git.k8s.io/community/contributors/devel/sig-api-machinery/generating-clientset.md) +See [generating-clientset.md](https://git.k8s.io/community/contributors/devel/generating-clientset.md) [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/staging/src/k8s.io/code-generator/client-gen/README.md?pixel)]() diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go index 18980744f0..ee6ebbcf09 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go @@ -27,7 +27,6 @@ import ( "k8s.io/code-generator/cmd/client-gen/generators/util" "k8s.io/code-generator/cmd/client-gen/path" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" - codegennamer "k8s.io/code-generator/pkg/namer" "k8s.io/gengo/args" "k8s.io/gengo/generator" "k8s.io/gengo/namer" @@ -102,7 +101,7 @@ func NameSystems() namer.NameSystems { "publicPlural": publicPluralNamer, "privatePlural": privatePluralNamer, "allLowercasePlural": lowercaseNamer, - "resource": codegennamer.NewTagOverrideNamer("resourceName", lowercaseNamer), + "resource": NewTagOverrideNamer("resourceName", lowercaseNamer), } } @@ -401,3 +400,27 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat return generator.Packages(packageList) } + +// tagOverrideNamer is a namer which pulls names from a given tag, if specified, +// and otherwise falls back to a different namer. 
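The `resource` name system above consults a `+resourceName=` comment tag before falling back to the lowercase-plural namer, so an API type can pin its REST resource name. What that looks like on a (hypothetical) type:

```go
package v1

// ExampleWidget is a hypothetical API type. Without the tag, client-gen's
// lowercase-plural namer would derive "examplewidgets"; the override below
// pins the resource name to "widgets".
// +resourceName=widgets
type ExampleWidget struct {
	Name string `json:"name"`
}
```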
+type tagOverrideNamer struct { + tagName string + fallback namer.Namer +} + +func (n *tagOverrideNamer) Name(t *types.Type) string { + if nameOverride := extractTag(n.tagName, append(t.SecondClosestCommentLines, t.CommentLines...)); nameOverride != "" { + return nameOverride + } + + return n.fallback.Name(t) +} + +// NewTagOverrideNamer creates a namer.Namer which uses the contents of the given tag as +// the name, or falls back to another Namer if the tag is not present. +func NewTagOverrideNamer(tagName string, fallback namer.Namer) namer.Namer { + return &tagOverrideNamer{ + tagName: tagName, + fallback: fallback, + } +} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go index 55709aab2e..61b3334f40 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go @@ -102,6 +102,10 @@ func (g *genClientset) GenerateType(c *generator.Context, t *types.Type, w io.Wr } sw.Do(clientsetInterfaceImplTemplate, m) + // don't generated the default method if generating internalversion clientset + if group.IsDefaultVersion && group.Version != "" { + sw.Do(clientsetInterfaceDefaultVersionImpl, m) + } } return sw.Error() @@ -121,7 +125,7 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { } } - cs := &Clientset{tracker: o} + cs := &Clientset{} cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} cs.AddReactor("*", "*", testing.ObjectReaction(o)) cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { @@ -143,16 +147,11 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { type Clientset struct { testing.Fake discovery *fakediscovery.FakeDiscovery - tracker testing.ObjectTracker } func (c *Clientset) Discovery() discovery.DiscoveryInterface { return c.discovery } - -func (c *Clientset) Tracker() testing.ObjectTracker { - return c.tracker -} ` var checkImpl = ` diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go index a1e67dcbdf..6fdb29a94a 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go @@ -88,6 +88,10 @@ func (g *genClientset) GenerateType(c *generator.Context, t *types.Type, w io.Wr sw.Do(clientsetTemplate, m) for _, g := range allGroups { sw.Do(clientsetInterfaceImplTemplate, g) + // don't generated the default method if generating internalversion clientset + if g.IsDefaultVersion && g.Version != "" { + sw.Do(clientsetInterfaceDefaultVersionImpl, g) + } } sw.Do(getDiscoveryTemplate, m) sw.Do(newClientsetForConfigTemplate, m) @@ -101,7 +105,9 @@ var clientsetInterface = ` type Interface interface { Discovery() $.DiscoveryInterface|raw$ $range .allGroups$$.GroupGoName$$.Version$() $.PackageAlias$.$.GroupGoName$$.Version$Interface - $end$ + $if .IsDefaultVersion$// Deprecated: please explicitly pick a version if possible. 
+ $.GroupGoName$() $.PackageAlias$.$.GroupGoName$$.Version$Interface + $end$$end$ } ` @@ -122,6 +128,14 @@ func (c *Clientset) $.GroupGoName$$.Version$() $.PackageAlias$.$.GroupGoName$$.V } ` +var clientsetInterfaceDefaultVersionImpl = ` +// Deprecated: $.GroupGoName$ retrieves the default version of $.GroupGoName$Client. +// Please explicitly pick a version. +func (c *Clientset) $.GroupGoName$() $.PackageAlias$.$.GroupGoName$$.Version$Interface { + return c.$.LowerCaseGroupGoName$$.Version$ +} +` + var getDiscoveryTemplate = ` // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() $.DiscoveryInterface|raw$ { diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go index 215a0171ca..fd59715c42 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go @@ -98,6 +98,7 @@ func (g *genGroup) GenerateType(c *generator.Context, t *types.Type, w io.Writer "apiPath": apiPath(g.group), "schemaGroupVersion": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersion"}), "runtimeAPIVersionInternal": c.Universe.Variable(types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "APIVersionInternal"}), + "serializerDirectCodecFactory": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/serializer", Name: "DirectCodecFactory"}), "restConfig": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Config"}), "restDefaultKubernetesUserAgent": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "DefaultKubernetesUserAgent"}), "restRESTClientInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Interface"}), @@ -235,7 +236,7 @@ func setConfigDefaults(config *$.restConfig|raw$) error { gv := $.SchemeGroupVersion|raw$ config.GroupVersion = &gv config.APIPath = $.apiPath$ - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = $.serializerDirectCodecFactory|raw${CodecFactory: scheme.Codecs} if config.UserAgent == "" { config.UserAgent = $.restDefaultKubernetesUserAgent|raw$() diff --git a/vendor/k8s.io/code-generator/pkg/namer/tag-override.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/tags.go similarity index 50% rename from vendor/k8s.io/code-generator/pkg/namer/tag-override.go rename to vendor/k8s.io/code-generator/cmd/client-gen/generators/tags.go index fd8c3a8553..b004081036 100644 --- a/vendor/k8s.io/code-generator/pkg/namer/tag-override.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/tags.go @@ -14,38 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -package namer +package generators import ( - "k8s.io/gengo/namer" "k8s.io/gengo/types" ) -// TagOverrideNamer is a namer which pulls names from a given tag, if specified, -// and otherwise falls back to a different namer. -type TagOverrideNamer struct { - tagName string - fallback namer.Namer -} - -// Name returns the tag value if it exists. 
It no tag was found the fallback namer will be used -func (n *TagOverrideNamer) Name(t *types.Type) string { - if nameOverride := extractTag(n.tagName, append(t.SecondClosestCommentLines, t.CommentLines...)); nameOverride != "" { - return nameOverride - } - - return n.fallback.Name(t) -} - -// NewTagOverrideNamer creates a namer.Namer which uses the contents of the given tag as -// the name, or falls back to another Namer if the tag is not present. -func NewTagOverrideNamer(tagName string, fallback namer.Namer) namer.Namer { - return &TagOverrideNamer{ - tagName: tagName, - fallback: fallback, - } -} - // extractTag gets the comment-tags for the key. If the tag did not exist, it // returns the empty string. func extractTag(key string, lines []string) string { diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go b/vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go index 59f2fd4449..33e6ac451b 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go @@ -73,7 +73,7 @@ func (a sortableSliceOfVersions) Less(i, j int) bool { } // Determine the default version among versions. If a user calls a group client -// without specifying the version (e.g., c.CoreV1(), instead of c.CoreV1()), the +// without specifying the version (e.g., c.Core(), instead of c.CoreV1()), the // default version will be returned. func defaultVersion(versions []PackageVersion) Version { var versionStrings []string @@ -88,12 +88,14 @@ func defaultVersion(versions []PackageVersion) Version { func ToGroupVersionInfo(groups []GroupVersions, groupGoNames map[GroupVersion]string) []GroupVersionInfo { var groupVersionPackages []GroupVersionInfo for _, group := range groups { + defaultVersion := defaultVersion(group.Versions) for _, version := range group.Versions { groupGoName := groupGoNames[GroupVersion{Group: group.Group, Version: version.Version}] groupVersionPackages = append(groupVersionPackages, GroupVersionInfo{ Group: Group(namer.IC(group.Group.NonEmpty())), Version: Version(namer.IC(version.Version.String())), PackageAlias: strings.ToLower(groupGoName + version.Version.NonEmpty()), + IsDefaultVersion: version.Version == defaultVersion && version.Version != "", GroupGoName: groupGoName, LowerCaseGroupGoName: namer.IL(groupGoName), }) diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/types/types.go b/vendor/k8s.io/code-generator/cmd/client-gen/types/types.go index 7d1606c508..17fd6e92a7 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/types/types.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/types/types.go @@ -62,8 +62,11 @@ type GroupVersions struct { // GroupVersionInfo contains all the info around a group version. type GroupVersionInfo struct { - Group Group - Version Version + Group Group + Version Version + // If a user calls a group client without specifying the version (e.g., + // c.Core(), instead of c.CoreV1()), the default version will be returned. + IsDefaultVersion bool PackageAlias string GroupGoName string LowerCaseGroupGoName string diff --git a/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go b/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go index 215b17bdee..698baa7db7 100644 --- a/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go @@ -14,59 +14,20 @@ See the License for the specific language governing permissions and limitations under the License. 
*/
-// conversion-gen is a tool for auto-generating functions that convert
-// between internal and external types. A general conversion code
-// generation task involves three sets of packages: (1) a set of
-// packages containing internal types, (2) a single package containing
-// the external types, and (3) a single destination package (i.e.,
-// where the generated conversion functions go, and where the
-// developer-authored conversion functions are). The packages
-// containing the internal types play the role known as "peer
-// packages" in the general code-generation framework of Kubernetes.
+// conversion-gen is a tool for auto-generating Conversion functions.
 //
-// For each conversion task, `conversion-gen` will generate functions
-// that efficiently convert between same-name types in the two
-// (internal, external) packages. The generated functions include
-// ones named
-//	autoConvert_<pkg1>_<type>_To_<pkg2>_<type>
-// for each such pair of types --- both with (pkg1,pkg2) =
-// (internal,external) and (pkg1,pkg2) = (external,internal).
-// Additionally: if the destination package does not contain one in a
-// non-generated file then a function named
-//	Convert_<pkg1>_<type>_To_<pkg2>_<type>
-// is also generated and it simply calls the `autoConvert...`
-// function. The generated conversion functions use standard value
-// assignment wherever possible. For compound types, the generated
-// conversion functions call the `Convert...` functions for the
-// subsidiary types. Thus developers can override the behavior for
-// selected types. For a top-level object type (i.e., the type of an
-// object that will be input to an apiserver), for such an override to
-// be used by the apiserver the developer-maintained conversion
-// functions must also be registered by invoking the
-// `AddConversionFuncs` method of the relevant `Scheme` object from
-// k8s.io/apimachinery/pkg/runtime.
+// Given a list of input directories, it will scan for "peer" packages and
+// generate functions that efficiently convert between same-name types in each
+// package. For any pair of types that has a
+//	`Convert_<pkg1>_<type>_To_<pkg2>_<type>()`
+// function (and its reciprocal), it will simply call that.
-// This introduces a conversion task, for which the destination
-// package is the one containing the file with the tag and the tag
-// identifies a package containing internal types. If there is also a
-// tag of the form
-//	// +k8s:conversion-gen-external-types=<pkg>
-// then it identifies the package containing the external types;
-// otherwise they are in the destination package.
-//
-// For each conversion code generation task, the full set of internal
-// packages (AKA peer packages) consists of the ones specified in the
-// `k8s:conversion-gen` tags PLUS any specified in the
-// `--base-peer-dirs` and `--extra-peer-dirs` flags on the command
-// line.
+// Generation is governed by comment tags in the source.
Any package may
+// request Conversion generation by including a comment in the file-comments of
+// one file, of the form:
+//	// +k8s:conversion-gen=<peer-pkg>
 //
 // When generating for a package, individual types or fields of structs may opt
 // out of Conversion generation by specifying a comment of the form:
diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go
index 6e5793109b..62ae109a4a 100644
--- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go
+++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go
@@ -152,7 +152,7 @@ func NewSharedInformerFactory(client {{.clientSetInterface|raw}}, defaultResync
 // as specified here.
 // Deprecated: Please use NewSharedInformerFactoryWithOptions instead
 func NewFilteredSharedInformerFactory(client {{.clientSetInterface|raw}}, defaultResync {{.timeDuration|raw}}, namespace string, tweakListOptions {{.interfacesTweakListOptionsFunc|raw}}) SharedInformerFactory {
-	return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
+	return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
 }
 
 // NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
@@ -165,7 +165,7 @@ func NewSharedInformerFactoryWithOptions(client {{.clientSetInterface|raw}}, def
 		startedInformers: make(map[{{.reflectType|raw}}]bool),
 		customResync:     make(map[{{.reflectType|raw}}]{{.timeDuration|raw}}),
 	}
-	
+
 	// Apply all options
 	for _, opt := range options {
 		factory = opt(factory)
diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go
index cad907990f..54632de053 100644
--- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go
+++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go
@@ -22,7 +22,6 @@ import (
 	"strings"
 
 	clientgentypes "k8s.io/code-generator/cmd/client-gen/types"
-	codegennamer "k8s.io/code-generator/pkg/namer"
 	"k8s.io/gengo/generator"
 	"k8s.io/gengo/namer"
 	"k8s.io/gengo/types"
@@ -57,7 +56,6 @@ func (g *genericGenerator) Namers(c *generator.Context) namer.NameSystems {
 		"raw":                namer.NewRawNamer(g.outputPackage, g.imports),
 		"allLowercasePlural": namer.NewAllLowercasePluralNamer(pluralExceptions),
 		"publicPlural":       namer.NewPublicPluralNamer(pluralExceptions),
-		"resource":           codegennamer.NewTagOverrideNamer("resourceName", namer.NewAllLowercasePluralNamer(pluralExceptions)),
 	}
 }
 
@@ -113,9 +111,7 @@ func (g *genericGenerator) GenerateType(c *generator.Context, t *types.Type, w i
 			GoName:    namer.IC(v.Version.NonEmpty()),
 			Resources: orderer.OrderTypes(g.typesForGroupVersion[gv]),
 		}
-		func() {
-			schemeGVs[version] = c.Universe.Variable(types.Name{Package: g.typesForGroupVersion[gv][0].Name.Package, Name: "SchemeGroupVersion"})
-		}()
+		schemeGVs[version] = c.Universe.Variable(types.Name{Package: g.typesForGroupVersion[gv][0].Name.Package, Name: "SchemeGroupVersion"})
 		group.Versions = append(group.Versions, version)
 	}
 	sort.Sort(versionSort(group.Versions))
@@ -172,7 +168,7 @@ func (f *sharedInformerFactory) ForResource(resource {{.schemaGroupVersionResour
 	{{range $version := .Versions -}}
 	// Group={{$group.Name}}, Version={{.Name}}
 	{{range .Resources -}}
-	case {{index $.schemeGVs $version|raw}}.WithResource("{{.|resource}}"):
+
case {{index $.schemeGVs $version|raw}}.WithResource("{{.|allLowercasePlural}}"): return &genericInformer{resource: resource.GroupResource(), informer: f.{{$GroupGoName}}().{{$version.GoName}}().{{.|publicPlural}}().Informer()}, nil {{end}} {{end}} diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/tags.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/tags.go new file mode 100644 index 0000000000..d25d5b6304 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/tags.go @@ -0,0 +1,33 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "k8s.io/gengo/types" + "k8s.io/klog" +) + +// extractBoolTagOrDie gets the comment-tags for the key and asserts that, if +// it exists, the value is boolean. If the tag did not exist, it returns +// false. +func extractBoolTagOrDie(key string, lines []string) bool { + val, err := types.ExtractSingleBoolCommentTag("+", key, false, lines) + if err != nil { + klog.Fatal(err) + } + return val +} diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/generators/tags.go b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/tags.go new file mode 100644 index 0000000000..d25d5b6304 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/tags.go @@ -0,0 +1,33 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "k8s.io/gengo/types" + "k8s.io/klog" +) + +// extractBoolTagOrDie gets the comment-tags for the key and asserts that, if +// it exists, the value is boolean. If the tag did not exist, it returns +// false. +func extractBoolTagOrDie(key string, lines []string) bool { + val, err := types.ExtractSingleBoolCommentTag("+", key, false, lines) + if err != nil { + klog.Fatal(err) + } + return val +} diff --git a/vendor/k8s.io/kubernetes/LICENSE b/vendor/k8s.io/kubernetes/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/k8s.io/kubernetes/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/BUILD b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/BUILD new file mode 100644 index 0000000000..70d51c2bfa --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/BUILD @@ -0,0 +1,65 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = [ + "bootstraptokenhelpers.go", + "bootstraptokenstring.go", + "doc.go", + "register.go", + "types.go", + "zz_generated.deepcopy.go", + ], + importpath = "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm", + deps = [ + "//pkg/kubelet/apis/config:go_default_library", + "//pkg/proxy/apis/config:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/cluster-bootstrap/token/api:go_default_library", + "//staging/src/k8s.io/cluster-bootstrap/token/util:go_default_library", + "//vendor/github.com/pkg/errors:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//cmd/kubeadm/app/apis/kubeadm/fuzzer:all-srcs", + "//cmd/kubeadm/app/apis/kubeadm/scheme:all-srcs", + "//cmd/kubeadm/app/apis/kubeadm/v1alpha3:all-srcs", + "//cmd/kubeadm/app/apis/kubeadm/v1beta1:all-srcs", + "//cmd/kubeadm/app/apis/kubeadm/validation:all-srcs", + ], + tags = ["automanaged"], +) + +go_test( + name = "go_default_test", + srcs = [ + "bootstraptokenhelpers_test.go", + "bootstraptokenstring_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/github.com/pkg/errors:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/bootstraptokenhelpers.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/bootstraptokenhelpers.go new file mode 100644 index 0000000000..cd2977ed64 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/bootstraptokenhelpers.go @@ -0,0 +1,169 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeadm + +import ( + "sort" + "strings" + "time" + + "github.com/pkg/errors" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + bootstrapapi "k8s.io/cluster-bootstrap/token/api" + bootstraputil "k8s.io/cluster-bootstrap/token/util" +) + +// ToSecret converts the given BootstrapToken object to its Secret representation that +// may be submitted to the API Server in order to be stored. 
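// A minimal usage sketch (editor's addition, not part of the original change;
// it assumes the metav1 and time imports above, and a 24h TTL chosen purely
// for illustration):
//
//	bts, _ := NewBootstrapTokenString("abcdef.0123456789abcdef")
//	bt := &BootstrapToken{Token: bts, TTL: &metav1.Duration{Duration: 24 * time.Hour}}
//	secret := bt.ToSecret() // a Secret named "bootstrap-token-abcdef" in kube-system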
+func (bt *BootstrapToken) ToSecret() *v1.Secret {
+	return &v1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      bootstraputil.BootstrapTokenSecretName(bt.Token.ID),
+			Namespace: metav1.NamespaceSystem,
+		},
+		Type: v1.SecretType(bootstrapapi.SecretTypeBootstrapToken),
+		Data: encodeTokenSecretData(bt, time.Now()),
+	}
+}
+
+// encodeTokenSecretData takes the token discovery object and an optional duration and returns the .Data for the Secret.
+// now is passed in order to be able to use it in unit testing.
+func encodeTokenSecretData(token *BootstrapToken, now time.Time) map[string][]byte {
+	data := map[string][]byte{
+		bootstrapapi.BootstrapTokenIDKey:     []byte(token.Token.ID),
+		bootstrapapi.BootstrapTokenSecretKey: []byte(token.Token.Secret),
+	}
+
+	if len(token.Description) > 0 {
+		data[bootstrapapi.BootstrapTokenDescriptionKey] = []byte(token.Description)
+	}
+
+	// If for some strange reason both token.TTL and token.Expires would be set
+	// (they are mutually exclusive in validation so this shouldn't be the case),
+	// token.Expires has higher priority, as can be seen in the logic here.
+	if token.Expires != nil {
+		// Format the expiration date accordingly
+		// TODO: This maybe should be a helper function in bootstraputil?
+		expirationString := token.Expires.Time.Format(time.RFC3339)
+		data[bootstrapapi.BootstrapTokenExpirationKey] = []byte(expirationString)
+
+	} else if token.TTL != nil && token.TTL.Duration > 0 {
+		// Only if .Expires is unset, TTL might have an effect
+		// Get the current time, add the specified duration, and format it accordingly
+		expirationString := now.Add(token.TTL.Duration).Format(time.RFC3339)
+		data[bootstrapapi.BootstrapTokenExpirationKey] = []byte(expirationString)
+	}
+
+	for _, usage := range token.Usages {
+		data[bootstrapapi.BootstrapTokenUsagePrefix+usage] = []byte("true")
+	}
+
+	if len(token.Groups) > 0 {
+		data[bootstrapapi.BootstrapTokenExtraGroupsKey] = []byte(strings.Join(token.Groups, ","))
+	}
+	return data
+}
+
+// BootstrapTokenFromSecret returns a BootstrapToken object from the given Secret
+func BootstrapTokenFromSecret(secret *v1.Secret) (*BootstrapToken, error) {
+	// Get the Token ID field from the Secret data
+	tokenID := getSecretString(secret, bootstrapapi.BootstrapTokenIDKey)
+	if len(tokenID) == 0 {
+		return nil, errors.Errorf("bootstrap Token Secret has no token-id data: %s", secret.Name)
+	}
+
+	// Enforce the right naming convention
+	if secret.Name != bootstraputil.BootstrapTokenSecretName(tokenID) {
+		return nil, errors.Errorf("bootstrap token name is not of the form '%s(token-id)'. Actual: %q. Expected: %q",
+			bootstrapapi.BootstrapTokenSecretPrefix, secret.Name, bootstraputil.BootstrapTokenSecretName(tokenID))
+	}
+
+	tokenSecret := getSecretString(secret, bootstrapapi.BootstrapTokenSecretKey)
+	if len(tokenSecret) == 0 {
+		return nil, errors.Errorf("bootstrap Token Secret has no token-secret data: %s", secret.Name)
+	}
+
+	// Create the BootstrapTokenString object based on the ID and Secret
+	bts, err := NewBootstrapTokenStringFromIDAndSecret(tokenID, tokenSecret)
+	if err != nil {
+		return nil, errors.Wrap(err, "bootstrap Token Secret is invalid and couldn't be parsed")
+	}
+
+	// Get the description (if any) from the Secret
+	description := getSecretString(secret, bootstrapapi.BootstrapTokenDescriptionKey)
+
+	// Expiration time is optional, if not specified this implies the token
+	// never expires.
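	// (Editor's note: a stored value such as "2019-04-01T10:00:00Z" is what the
	// time.RFC3339 parse below expects.)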
+	secretExpiration := getSecretString(secret, bootstrapapi.BootstrapTokenExpirationKey)
+	var expires *metav1.Time
+	if len(secretExpiration) > 0 {
+		expTime, err := time.Parse(time.RFC3339, secretExpiration)
+		if err != nil {
+			return nil, errors.Wrapf(err, "can't parse expiration time of bootstrap token %q", secret.Name)
+		}
+		expires = &metav1.Time{Time: expTime}
+	}
+
+	// Build a usages string slice from the Secret data
+	var usages []string
+	for k, v := range secret.Data {
+		// Skip all fields that don't include this prefix
+		if !strings.HasPrefix(k, bootstrapapi.BootstrapTokenUsagePrefix) {
+			continue
+		}
+		// Skip those that don't have this usage set to true
+		if string(v) != "true" {
+			continue
+		}
+		usages = append(usages, strings.TrimPrefix(k, bootstrapapi.BootstrapTokenUsagePrefix))
+	}
+	// Only sort the slice if defined
+	if usages != nil {
+		sort.Strings(usages)
+	}
+
+	// Get the extra groups information from the Secret
+	// It's done this way to make .Groups be nil in case there are no items, rather than an
+	// empty slice or an empty slice with a "" string only
+	var groups []string
+	groupsString := getSecretString(secret, bootstrapapi.BootstrapTokenExtraGroupsKey)
+	g := strings.Split(groupsString, ",")
+	if len(g) > 0 && len(g[0]) > 0 {
+		groups = g
+	}
+
+	return &BootstrapToken{
+		Token:       bts,
+		Description: description,
+		Expires:     expires,
+		Usages:      usages,
+		Groups:      groups,
+	}, nil
+}
+
+// getSecretString returns the string value for the given key in the specified Secret
+func getSecretString(secret *v1.Secret, key string) string {
+	if secret.Data == nil {
+		return ""
+	}
+	if val, ok := secret.Data[key]; ok {
+		return string(val)
+	}
+	return ""
+}
diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/bootstraptokenstring.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/bootstraptokenstring.go
new file mode 100644
index 0000000000..2c1175b9d8
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/bootstraptokenstring.go
@@ -0,0 +1,92 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package kubeadm holds the internal kubeadm API types
+// Note: This file should be kept in sync with the similar one for the external API
+// TODO: The BootstrapTokenString object should move out to either k8s.io/client-go or k8s.io/api in the future
+// (probably as part of Bootstrap Tokens going GA). It should not be staged under the kubeadm API as it is now.
+package kubeadm
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/pkg/errors"
+
+	bootstrapapi "k8s.io/cluster-bootstrap/token/api"
+	bootstraputil "k8s.io/cluster-bootstrap/token/util"
+)
+
+// BootstrapTokenString is a token of the format abcdef.abcdef0123456789 that is used
+// for both validation of the practical availability of the API server from a joining node's point
+// of view and as an authentication method for the node in the bootstrap phase of
+// "kubeadm join".
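// (Editor's note: the ID part is 6 characters and the Secret part 16, matching
// the bootstrap token pattern [a-z0-9]{6}\.[a-z0-9]{16} enforced by
// NewBootstrapTokenString below.)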
This token is and should be short-lived.
+type BootstrapTokenString struct {
+	ID     string
+	Secret string
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (bts BootstrapTokenString) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf(`"%s"`, bts.String())), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+func (bts *BootstrapTokenString) UnmarshalJSON(b []byte) error {
+	// If the token is represented as "", just return quickly without an error
+	if len(b) == 0 {
+		return nil
+	}
+
+	// Remove unnecessary " characters coming from the JSON parser
+	token := strings.Replace(string(b), `"`, ``, -1)
+	// Convert the string Token to a BootstrapTokenString object
+	newbts, err := NewBootstrapTokenString(token)
+	if err != nil {
+		return err
+	}
+	bts.ID = newbts.ID
+	bts.Secret = newbts.Secret
+	return nil
+}
+
+// String returns the string representation of the BootstrapTokenString
+func (bts BootstrapTokenString) String() string {
+	if len(bts.ID) > 0 && len(bts.Secret) > 0 {
+		return bootstraputil.TokenFromIDAndSecret(bts.ID, bts.Secret)
+	}
+	return ""
+}
+
+// NewBootstrapTokenString converts the given Bootstrap Token as a string
+// to the BootstrapTokenString object used for serialization/deserialization
+// and internal usage. It also automatically validates that the given token
+// is of the right format
+func NewBootstrapTokenString(token string) (*BootstrapTokenString, error) {
+	substrs := bootstraputil.BootstrapTokenRegexp.FindStringSubmatch(token)
+	// TODO: Add a constant for the 3 value here, and explain better why it's needed (other than because of how the regexp parsing works)
+	if len(substrs) != 3 {
+		return nil, errors.Errorf("the bootstrap token %q was not of the form %q", token, bootstrapapi.BootstrapTokenPattern)
+	}
+
+	return &BootstrapTokenString{ID: substrs[1], Secret: substrs[2]}, nil
+}
+
+// NewBootstrapTokenStringFromIDAndSecret is a wrapper around NewBootstrapTokenString
+// that allows the caller to specify the ID and Secret separately
+func NewBootstrapTokenStringFromIDAndSecret(id, secret string) (*BootstrapTokenString, error) {
+	return NewBootstrapTokenString(bootstraputil.TokenFromIDAndSecret(id, secret))
+}
diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/doc.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/doc.go
new file mode 100644
index 0000000000..8998f14fe7
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +groupName=kubeadm.k8s.io
+
+// Package kubeadm is the package that contains the libraries that drive the kubeadm binary.
+// kubeadm is responsible for handling a Kubernetes cluster's lifecycle.
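// Editor's sketch of the BootstrapTokenString JSON round-trip defined in the
// previous file (the token literal is illustrative only):
//
//	var bts BootstrapTokenString
//	_ = json.Unmarshal([]byte(`"abcdef.0123456789abcdef"`), &bts)
//	// bts.ID == "abcdef", bts.Secret == "0123456789abcdef"
//	out, _ := json.Marshal(bts) // yields `"abcdef.0123456789abcdef"` again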
+package kubeadm // import "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/register.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/register.go new file mode 100644 index 0000000000..3f09aed5bb --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeadm + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "kubeadm.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +var ( + // SchemeBuilder points to a list of functions added to Scheme. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme applies all the stored functions to the scheme. + AddToScheme = SchemeBuilder.AddToScheme +) + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &InitConfiguration{}, + &ClusterConfiguration{}, + &ClusterStatus{}, + &JoinConfiguration{}, + ) + return nil +} diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/types.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/types.go new file mode 100644 index 0000000000..ebbd20d025 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/types.go @@ -0,0 +1,463 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeadm + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" + kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// InitConfiguration contains a list of fields that are specifically "kubeadm init"-only runtime +// information. The cluster-wide config is stored in ClusterConfiguration. 
The InitConfiguration +// object IS NOT uploaded to the kubeadm-config ConfigMap in the cluster, only the +// ClusterConfiguration is. +type InitConfiguration struct { + metav1.TypeMeta + + // ClusterConfiguration holds the cluster-wide information, and embeds that struct (which can be (un)marshalled separately as well) + // When InitConfiguration is marshalled to bytes in the external version, this information IS NOT preserved (which can be seen from + // the `json:"-"` tag in the external variant of these API types. Here, in the internal version `json:",inline"` is used, which means + // that all of ClusterConfiguration's fields will appear as they would be InitConfiguration's fields. This is used in practice solely + // in kubeadm API roundtrip unit testing. Check out `cmd/kubeadm/app/util/config/*_test.go` for more information. Normally, the internal + // type is NEVER marshalled, but always converted to some external version first. + ClusterConfiguration `json:",inline"` + + // BootstrapTokens is respected at `kubeadm init` time and describes a set of Bootstrap Tokens to create. + BootstrapTokens []BootstrapToken + + // NodeRegistration holds fields that relate to registering the new master node to the cluster + NodeRegistration NodeRegistrationOptions + + // LocalAPIEndpoint represents the endpoint of the API server instance that's deployed on this control plane node + // In HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint in the sense that ControlPlaneEndpoint + // is the global endpoint for the cluster, which then loadbalances the requests to each individual API server. This + // configuration object lets you customize what IP/DNS name and port the local API server advertises it's accessible + // on. By default, kubeadm tries to auto-detect the IP of the default interface and use that, but in case that process + // fails you may set the desired value here. + LocalAPIEndpoint APIEndpoint +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterConfiguration contains cluster-wide configuration for a kubeadm cluster +type ClusterConfiguration struct { + metav1.TypeMeta + + // ComponentConfigs holds internal ComponentConfig struct types known to kubeadm, should long-term only exist in the internal kubeadm API + // +k8s:conversion-gen=false + ComponentConfigs ComponentConfigs + + // Etcd holds configuration for etcd. + Etcd Etcd + + // Networking holds configuration for the networking topology of the cluster. + Networking Networking + // KubernetesVersion is the target version of the control plane. + KubernetesVersion string + + // ControlPlaneEndpoint sets a stable IP address or DNS name for the control plane; it + // can be a valid IP address or a RFC-1123 DNS subdomain, both with optional TCP port. + // In case the ControlPlaneEndpoint is not specified, the AdvertiseAddress + BindPort + // are used; in case the ControlPlaneEndpoint is specified but without a TCP port, + // the BindPort is used. + // Possible usages are: + // e.g. In a cluster with more than one control plane instances, this field should be + // assigned the address of the external load balancer in front of the + // control plane instances. + // e.g. in environments with enforced node recycling, the ControlPlaneEndpoint + // could be used for assigning a stable DNS to the control plane. 
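// For instance (editor's sketch; the endpoint and version values are
// placeholders), an HA cluster fronted by a load balancer might set:
//
//	cfg := ClusterConfiguration{
//		KubernetesVersion:    "v1.13.0",
//		ControlPlaneEndpoint: "lb.example.com:6443",
//	}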
+ ControlPlaneEndpoint string + + // APIServer contains extra settings for the API server control plane component + APIServer APIServer + + // ControllerManager contains extra settings for the controller manager control plane component + ControllerManager ControlPlaneComponent + + // Scheduler contains extra settings for the scheduler control plane component + Scheduler ControlPlaneComponent + + // DNS defines the options for the DNS add-on installed in the cluster. + DNS DNS + + // CertificatesDir specifies where to store or look for all required certificates. + CertificatesDir string + + // ImageRepository sets the container registry to pull images from. + // If empty, `k8s.gcr.io` will be used by default; in case of kubernetes version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`) + // `gcr.io/kubernetes-ci-images` will be used as a default for control plane components and for kube-proxy, while `k8s.gcr.io` + // will be used for all the other images. + ImageRepository string + + // CIImageRepository is the container registry for core images generated by CI. + // Useful for running kubeadm with images from CI builds. + // +k8s:conversion-gen=false + CIImageRepository string + + // UseHyperKubeImage controls if hyperkube should be used for Kubernetes components instead of their respective separate images + UseHyperKubeImage bool + + // FeatureGates enabled by the user. + FeatureGates map[string]bool + + // The cluster name + ClusterName string +} + +// ControlPlaneComponent holds settings common to control plane component of the cluster +type ControlPlaneComponent struct { + // ExtraArgs is an extra set of flags to pass to the control plane component. + // TODO: This is temporary and ideally we would like to switch all components to + // use ComponentConfig + ConfigMaps. + ExtraArgs map[string]string + + // ExtraVolumes is an extra set of host volumes, mounted to the control plane component. + ExtraVolumes []HostPathMount +} + +// APIServer holds settings necessary for API server deployments in the cluster +type APIServer struct { + ControlPlaneComponent + + // CertSANs sets extra Subject Alternative Names for the API Server signing cert. + CertSANs []string + + // TimeoutForControlPlane controls the timeout that we use for API server to appear + TimeoutForControlPlane *metav1.Duration +} + +// DNSAddOnType defines string identifying DNS add-on types +type DNSAddOnType string + +const ( + // CoreDNS add-on type + CoreDNS DNSAddOnType = "CoreDNS" + + // KubeDNS add-on type + KubeDNS DNSAddOnType = "kube-dns" +) + +// DNS defines the DNS addon that should be used in the cluster +type DNS struct { + // Type defines the DNS add-on to be used + Type DNSAddOnType + + // ImageMeta allows to customize the image used for the DNS component + ImageMeta `json:",inline"` +} + +// ImageMeta allows to customize the image used for components that are not +// originated from the Kubernetes/Kubernetes release process +type ImageMeta struct { + // ImageRepository sets the container registry to pull images from. + // if not set, the ImageRepository defined in ClusterConfiguration will be used instead. + ImageRepository string + + // ImageTag allows to specify a tag for the image. + // In case this value is set, kubeadm does not change automatically the version of the above components during upgrades. 
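// e.g. (editor's illustration; the registry and tag values are placeholders)
// pinning the CoreDNS image independently of the cluster-wide repository:
//
//	dns := DNS{
//		Type:      CoreDNS,
//		ImageMeta: ImageMeta{ImageRepository: "registry.example.com", ImageTag: "1.2.6"},
//	}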
+	ImageTag string
+
+	// TODO: evaluate if we also need an ImageName based on user feedback
+}
+
+// ComponentConfigs holds known internal ComponentConfig types for other components
+type ComponentConfigs struct {
+	// Kubelet holds the ComponentConfiguration for the kubelet
+	Kubelet *kubeletconfig.KubeletConfiguration
+	// KubeProxy holds the ComponentConfiguration for the kube-proxy
+	KubeProxy *kubeproxyconfig.KubeProxyConfiguration
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterStatus contains the cluster status. The ClusterStatus will be stored in the kubeadm-config
+// ConfigMap in the cluster, and then updated by kubeadm when an additional control plane instance joins or leaves the cluster.
+type ClusterStatus struct {
+	metav1.TypeMeta
+
+	// APIEndpoints currently available in the cluster, one for each control plane/api server instance.
+	// The key of the map is the IP of the host's default interface
+	APIEndpoints map[string]APIEndpoint
+}
+
+// APIEndpoint struct contains elements of API server instance deployed on a node.
+type APIEndpoint struct {
+	// AdvertiseAddress sets the IP address for the API server to advertise.
+	AdvertiseAddress string
+
+	// BindPort sets the secure port for the API Server to bind to.
+	// Defaults to 6443.
+	BindPort int32
+}
+
+// NodeRegistrationOptions holds fields that relate to registering a new master or node to the cluster, either via "kubeadm init" or "kubeadm join"
+type NodeRegistrationOptions struct {
+
+	// Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation.
+	// This field is also used in the CommonName field of the kubelet's client certificate to the API server.
+	// Defaults to the hostname of the node if not provided.
+	Name string
+
+	// CRISocket is used to retrieve container runtime info. This information will be annotated to the Node API object, for later re-use
+	CRISocket string
+
+	// Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process
+	// it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your master node, set this field to an
+	// empty slice, i.e. `taints: []` in the YAML file. This field is solely used for Node registration.
+	Taints []v1.Taint
+
+	// KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file
+	// kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap
+	// Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on.
+	KubeletExtraArgs map[string]string
+}
+
+// Networking contains elements describing cluster's networking configuration.
+type Networking struct {
+	// ServiceSubnet is the subnet used by k8s services. Defaults to "10.96.0.0/12".
+	ServiceSubnet string
+	// PodSubnet is the subnet used by pods.
+	PodSubnet string
+	// DNSDomain is the dns domain used by k8s services. Defaults to "cluster.local".
+	DNSDomain string
+}
+
+// BootstrapToken describes one bootstrap token, stored as a Secret in the cluster
+// TODO: The BootstrapToken object should move out to either k8s.io/client-go or k8s.io/api in the future
+// (probably as part of Bootstrap Tokens going GA). It should not be staged under the kubeadm API as it is now.
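// A fully populated token might look like the following (editor's sketch; the
// usages and group shown are the kubeadm defaults):
//
//	&BootstrapToken{
//		Token:  &BootstrapTokenString{ID: "abcdef", Secret: "0123456789abcdef"},
//		TTL:    &metav1.Duration{Duration: 24 * time.Hour},
//		Usages: []string{"authentication", "signing"},
//		Groups: []string{"system:bootstrappers:kubeadm:default-node-token"},
//	}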
+type BootstrapToken struct {
+	// Token is used for establishing bidirectional trust between nodes and masters.
+	// Used for joining nodes in the cluster.
+	Token *BootstrapTokenString
+	// Description sets a human-friendly message why this token exists and what it's used
+	// for, so other administrators can know its purpose.
+	Description string
+	// TTL defines the time to live for this token. Defaults to 24h.
+	// Expires and TTL are mutually exclusive.
+	TTL *metav1.Duration
+	// Expires specifies the timestamp when this token expires. Defaults to being set
+	// dynamically at runtime based on the TTL. Expires and TTL are mutually exclusive.
+	Expires *metav1.Time
+	// Usages describes the ways in which this token can be used. Can by default be used
+	// for establishing bidirectional trust, but that can be changed here.
+	Usages []string
+	// Groups specifies the extra groups that this token will authenticate as when/if
+	// used for authentication
+	Groups []string
+}
+
+// Etcd contains elements describing Etcd configuration.
+type Etcd struct {
+
+	// Local provides configuration knobs for configuring the local etcd instance
+	// Local and External are mutually exclusive
+	Local *LocalEtcd
+
+	// External describes how to connect to an external etcd cluster
+	// Local and External are mutually exclusive
+	External *ExternalEtcd
+}
+
+// LocalEtcd describes that kubeadm should run an etcd cluster locally
+type LocalEtcd struct {
+	// ImageMeta allows to customize the container used for etcd
+	ImageMeta `json:",inline"`
+
+	// DataDir is the directory etcd will place its data.
+	// Defaults to "/var/lib/etcd".
+	DataDir string
+
+	// ExtraArgs are extra arguments provided to the etcd binary
+	// when run inside a static pod.
+	ExtraArgs map[string]string
+
+	// ServerCertSANs sets extra Subject Alternative Names for the etcd server signing cert.
+	ServerCertSANs []string
+	// PeerCertSANs sets extra Subject Alternative Names for the etcd peer signing cert.
+	PeerCertSANs []string
+}
+
+// ExternalEtcd describes an external etcd cluster
+type ExternalEtcd struct {
+
+	// Endpoints of etcd members. Useful for using external etcd.
+	// If not provided, kubeadm will run etcd in a static pod.
+	Endpoints []string
+	// CAFile is an SSL Certificate Authority file used to secure etcd communication.
+	CAFile string
+	// CertFile is an SSL certification file used to secure etcd communication.
+	CertFile string
+	// KeyFile is an SSL key file used to secure etcd communication.
+	KeyFile string
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// JoinConfiguration contains elements describing a particular node.
+type JoinConfiguration struct {
+	metav1.TypeMeta
+
+	// NodeRegistration holds fields that relate to registering the new master node to the cluster
+	NodeRegistration NodeRegistrationOptions
+
+	// CACertPath is the path to the SSL certificate authority used to
+	// secure communications between node and master.
+	// Defaults to "/etc/kubernetes/pki/ca.crt".
+	CACertPath string
+
+	// Discovery specifies the options for the kubelet to use during the TLS Bootstrap process
+	Discovery Discovery
+
+	// ControlPlane defines the additional control plane instance to be deployed on the joining node.
+	// If nil, no additional control plane instance will be deployed.
+	ControlPlane *JoinControlPlane
+}
+
+// JoinControlPlane contains elements describing an additional control plane instance to be deployed on the joining node.
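// e.g. (editor's sketch; addresses, token and hash are placeholders) a join
// configuration for a node that also hosts an additional control plane instance:
//
//	cfg := &JoinConfiguration{
//		Discovery: Discovery{
//			BootstrapToken: &BootstrapTokenDiscovery{
//				Token:             "abcdef.0123456789abcdef",
//				APIServerEndpoint: "10.0.0.10:6443",
//				CACertHashes:      []string{"sha256:<hex-encoded-spki-hash>"},
//			},
//		},
//		ControlPlane: &JoinControlPlane{
//			LocalAPIEndpoint: APIEndpoint{AdvertiseAddress: "10.0.0.11", BindPort: 6443},
//		},
//	}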
+type JoinControlPlane struct { + // LocalAPIEndpoint represents the endpoint of the API server instance to be deployed on this node. + LocalAPIEndpoint APIEndpoint +} + +// Discovery specifies the options for the kubelet to use during the TLS Bootstrap process +type Discovery struct { + // BootstrapToken is used to set the options for bootstrap token based discovery + // BootstrapToken and File are mutually exclusive + BootstrapToken *BootstrapTokenDiscovery + + // File is used to specify a file or URL to a kubeconfig file from which to load cluster information + // BootstrapToken and File are mutually exclusive + File *FileDiscovery + + // TLSBootstrapToken is a token used for TLS bootstrapping. + // If .BootstrapToken is set, this field is defaulted to .BootstrapToken.Token, but can be overridden. + // If .File is set, this field **must be set** in case the KubeConfigFile does not contain any other authentication information + TLSBootstrapToken string + + // Timeout modifies the discovery timeout + Timeout *metav1.Duration +} + +// BootstrapTokenDiscovery is used to set the options for bootstrap token based discovery +type BootstrapTokenDiscovery struct { + // Token is a token used to validate cluster information + // fetched from the master. + Token string + + // APIServerEndpoint is an IP or domain name to the API server from which info will be fetched. + APIServerEndpoint string + + // CACertHashes specifies a set of public key pins to verify + // when token-based discovery is used. The root CA found during discovery + // must match one of these values. Specifying an empty set disables root CA + // pinning, which can be unsafe. Each hash is specified as "<type>:<value>", + // where the only currently supported type is "sha256". This is a hex-encoded + // SHA-256 hash of the Subject Public Key Info (SPKI) object in DER-encoded + // ASN.1. These hashes can be calculated using, for example, OpenSSL: + // openssl x509 -pubkey -in ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex + CACertHashes []string + + // UnsafeSkipCAVerification allows token-based discovery + // without CA verification via CACertHashes. This can weaken + // the security of kubeadm since other nodes can impersonate the master. + UnsafeSkipCAVerification bool +} + +// FileDiscovery is used to specify a file or URL to a kubeconfig file from which to load cluster information +type FileDiscovery struct { + // KubeConfigPath is used to specify the actual file path or URL to the kubeconfig file from which to load cluster information + KubeConfigPath string +} + +// GetControlPlaneImageRepository returns the name of the image repository +// for control plane images (API, Controller Manager, Scheduler and Proxy). +// It will override the location with the CI registry name in case the user requests a special +// Kubernetes version from the CI build area. +// (See: kubeadmconstants.DefaultCIImageRepository) +func (cfg *ClusterConfiguration) GetControlPlaneImageRepository() string { + if cfg.CIImageRepository != "" { + return cfg.CIImageRepository + } + return cfg.ImageRepository +} + +// HostPathMount contains elements describing volumes that are mounted from the +// host. +type HostPathMount struct { + // Name of the volume inside the pod template. + Name string + // HostPath is the path in the host that will be mounted inside + // the pod. + HostPath string + // MountPath is the path inside the pod where hostPath will be mounted.
+ MountPath string + // ReadOnly controls write access to the volume + ReadOnly bool + // PathType is the type of the HostPath. + PathType v1.HostPathType +} + +// CommonConfiguration defines the list of common configuration elements and the getter +// methods that must exist for both the InitConfiguration and JoinConfiguration objects. +// This is used internally to deduplicate the kubeadm preflight checks. +type CommonConfiguration interface { + GetCRISocket() string + GetNodeName() string + GetKubernetesVersion() string +} + +// GetCRISocket will return the CRISocket that is defined for the InitConfiguration. +// This is used internally to deduplicate the kubeadm preflight checks. +func (cfg *InitConfiguration) GetCRISocket() string { + return cfg.NodeRegistration.CRISocket +} + +// GetNodeName will return the NodeName that is defined for the InitConfiguration. +// This is used internally to deduplicate the kubeadm preflight checks. +func (cfg *InitConfiguration) GetNodeName() string { + return cfg.NodeRegistration.Name +} + +// GetKubernetesVersion will return the KubernetesVersion that is defined for the InitConfiguration. +// This is used internally to deduplicate the kubeadm preflight checks. +func (cfg *InitConfiguration) GetKubernetesVersion() string { + return cfg.KubernetesVersion +} + +// GetCRISocket will return the CRISocket that is defined for the JoinConfiguration. +// This is used internally to deduplicate the kubeadm preflight checks. +func (cfg *JoinConfiguration) GetCRISocket() string { + return cfg.NodeRegistration.CRISocket +} + +// GetNodeName will return the NodeName that is defined for the JoinConfiguration. +// This is used internally to deduplicate the kubeadm preflight checks. +func (cfg *JoinConfiguration) GetNodeName() string { + return cfg.NodeRegistration.Name +} + +// GetKubernetesVersion will return an empty string since KubernetesVersion is not a +// defined property for JoinConfiguration. This will just cause the regex validation +// of the defined version to be skipped during the preflight checks. +// This is used internally to deduplicate the kubeadm preflight checks. 
+func (cfg *JoinConfiguration) GetKubernetesVersion() string { + return "" +} diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/BUILD b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/BUILD new file mode 100644 index 0000000000..667f9e4aba --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/BUILD @@ -0,0 +1,52 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "bootstraptokenstring.go", + "defaults.go", + "defaults_unix.go", + "defaults_windows.go", + "doc.go", + "register.go", + "types.go", + "zz_generated.conversion.go", + "zz_generated.deepcopy.go", + "zz_generated.defaults.go", + ], + importpath = "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1", + visibility = ["//visibility:public"], + deps = [ + "//cmd/kubeadm/app/apis/kubeadm:go_default_library", + "//cmd/kubeadm/app/constants:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/cluster-bootstrap/token/api:go_default_library", + "//staging/src/k8s.io/cluster-bootstrap/token/util:go_default_library", + "//vendor/github.com/pkg/errors:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["bootstraptokenstring_test.go"], + embed = [":go_default_library"], + deps = ["//vendor/github.com/pkg/errors:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/bootstraptokenstring.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/bootstraptokenstring.go new file mode 100644 index 0000000000..45817a8705 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/bootstraptokenstring.go @@ -0,0 +1,88 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" + + bootstrapapi "k8s.io/cluster-bootstrap/token/api" + bootstraputil "k8s.io/cluster-bootstrap/token/util" +) + +// BootstrapTokenString is a token of the format abcdef.abcdef0123456789 that is used +// for both validation of the practical availability of the API server from a joining node's point +// of view and as an authentication method for the node in the bootstrap phase of +// "kubeadm join".
This token is and should be short-lived. +type BootstrapTokenString struct { + ID string + Secret string +} + +// MarshalJSON implements the json.Marshaler interface. +func (bts BootstrapTokenString) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%s"`, bts.String())), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (bts *BootstrapTokenString) UnmarshalJSON(b []byte) error { + // If the token is represented as "", just return quickly without an error + if len(b) == 0 { + return nil + } + + // Remove unnecessary " characters coming from the JSON parser + token := strings.Replace(string(b), `"`, ``, -1) + // Convert the string Token to a BootstrapTokenString object + newbts, err := NewBootstrapTokenString(token) + if err != nil { + return err + } + bts.ID = newbts.ID + bts.Secret = newbts.Secret + return nil +} + +// String returns the string representation of the BootstrapTokenString +func (bts BootstrapTokenString) String() string { + if len(bts.ID) > 0 && len(bts.Secret) > 0 { + return bootstraputil.TokenFromIDAndSecret(bts.ID, bts.Secret) + } + return "" +} + +// NewBootstrapTokenString converts the given Bootstrap Token as a string +// to the BootstrapTokenString object used for serialization/deserialization +// and internal usage. It also automatically validates that the given token +// is of the right format. +func NewBootstrapTokenString(token string) (*BootstrapTokenString, error) { + substrs := bootstraputil.BootstrapTokenRegexp.FindStringSubmatch(token) + // TODO: Add a constant for the 3 value here, and explain better why it's needed (other than because of how the regexp parsing works) + if len(substrs) != 3 { + return nil, errors.Errorf("the bootstrap token %q was not of the form %q", token, bootstrapapi.BootstrapTokenPattern) + } + + return &BootstrapTokenString{ID: substrs[1], Secret: substrs[2]}, nil +} + +// NewBootstrapTokenStringFromIDAndSecret is a wrapper around NewBootstrapTokenString +// that allows the caller to specify the ID and Secret separately +func NewBootstrapTokenStringFromIDAndSecret(id, secret string) (*BootstrapTokenString, error) { + return NewBootstrapTokenString(bootstraputil.TokenFromIDAndSecret(id, secret)) +} diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/defaults.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/defaults.go new file mode 100644 index 0000000000..60c72a73d3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/defaults.go @@ -0,0 +1,223 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package v1beta1 + +import ( + "net/url" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/cmd/kubeadm/app/constants" +) + +const ( + // DefaultServiceDNSDomain defines default cluster-internal domain name for Services and Pods + DefaultServiceDNSDomain = "cluster.local" + // DefaultServicesSubnet defines default service subnet range + DefaultServicesSubnet = "10.96.0.0/12" + // DefaultClusterDNSIP defines default DNS IP + DefaultClusterDNSIP = "10.96.0.10" + // DefaultKubernetesVersion defines default kubernetes version + DefaultKubernetesVersion = "stable-1" + // DefaultAPIBindPort defines default API port + DefaultAPIBindPort = 6443 + // DefaultCertificatesDir defines default certificate directory + DefaultCertificatesDir = "/etc/kubernetes/pki" + // DefaultImageRepository defines default image registry + DefaultImageRepository = "k8s.gcr.io" + // DefaultManifestsDir defines default manifests directory + DefaultManifestsDir = "/etc/kubernetes/manifests" + // DefaultClusterName defines the default cluster name + DefaultClusterName = "kubernetes" + + // DefaultEtcdDataDir defines default location of etcd where static pods will save data to + DefaultEtcdDataDir = "/var/lib/etcd" + // DefaultProxyBindAddressv4 is the default bind address when the advertise address is v4 + DefaultProxyBindAddressv4 = "0.0.0.0" + // DefaultProxyBindAddressv6 is the default bind address when the advertise address is v6 + DefaultProxyBindAddressv6 = "::" + // DefaultDiscoveryTimeout specifies the default discovery timeout for kubeadm (used unless one is specified in the JoinConfiguration) + DefaultDiscoveryTimeout = 5 * time.Minute +) + +var ( + // DefaultAuditPolicyLogMaxAge is defined as a var so its address can be taken + // It is the number of days to store audit logs + DefaultAuditPolicyLogMaxAge = int32(2) +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +// SetDefaults_InitConfiguration assigns default values for the InitConfiguration +func SetDefaults_InitConfiguration(obj *InitConfiguration) { + SetDefaults_ClusterConfiguration(&obj.ClusterConfiguration) + SetDefaults_NodeRegistrationOptions(&obj.NodeRegistration) + SetDefaults_BootstrapTokens(obj) + SetDefaults_APIEndpoint(&obj.LocalAPIEndpoint) +} + +// SetDefaults_ClusterConfiguration assigns default values for the ClusterConfiguration +func SetDefaults_ClusterConfiguration(obj *ClusterConfiguration) { + if obj.KubernetesVersion == "" { + obj.KubernetesVersion = DefaultKubernetesVersion + } + + if obj.Networking.ServiceSubnet == "" { + obj.Networking.ServiceSubnet = DefaultServicesSubnet + } + + if obj.Networking.DNSDomain == "" { + obj.Networking.DNSDomain = DefaultServiceDNSDomain + } + + if obj.CertificatesDir == "" { + obj.CertificatesDir = DefaultCertificatesDir + } + + if obj.ImageRepository == "" { + obj.ImageRepository = DefaultImageRepository + } + + if obj.ClusterName == "" { + obj.ClusterName = DefaultClusterName + } + + SetDefaults_DNS(obj) + SetDefaults_Etcd(obj) + SetDefaults_APIServer(&obj.APIServer) +} + +// SetDefaults_APIServer assigns default values for the API Server +func SetDefaults_APIServer(obj *APIServer) { + if obj.TimeoutForControlPlane == nil { + obj.TimeoutForControlPlane = &metav1.Duration{ + Duration: constants.DefaultControlPlaneTimeout, + } + } +} + +// SetDefaults_DNS assigns default values for the DNS component +func SetDefaults_DNS(obj *ClusterConfiguration) { + if obj.DNS.Type == "" { + 
obj.DNS.Type = CoreDNS + } +} + +// SetDefaults_Etcd assigns default values for etcd +func SetDefaults_Etcd(obj *ClusterConfiguration) { + if obj.Etcd.External == nil && obj.Etcd.Local == nil { + obj.Etcd.Local = &LocalEtcd{} + } + if obj.Etcd.Local != nil { + if obj.Etcd.Local.DataDir == "" { + obj.Etcd.Local.DataDir = DefaultEtcdDataDir + } + } +} + +// SetDefaults_JoinConfiguration assigns default values to a regular node +func SetDefaults_JoinConfiguration(obj *JoinConfiguration) { + if obj.CACertPath == "" { + obj.CACertPath = DefaultCACertPath + } + + SetDefaults_NodeRegistrationOptions(&obj.NodeRegistration) + SetDefaults_JoinControlPlane(obj.ControlPlane) + SetDefaults_Discovery(&obj.Discovery) +} + +// SetDefaults_NodeRegistrationOptions assigns default values for the NodeRegistrationOptions +func SetDefaults_NodeRegistrationOptions(obj *NodeRegistrationOptions) { + if obj.CRISocket == "" { + obj.CRISocket = DefaultCRISocket + } +} + +// SetDefaults_JoinControlPlane assigns default values for the additional control plane instance on a joining node, if any +func SetDefaults_JoinControlPlane(obj *JoinControlPlane) { + if obj != nil { + SetDefaults_APIEndpoint(&obj.LocalAPIEndpoint) + } +} + +// SetDefaults_Discovery assigns default values for the discovery process +func SetDefaults_Discovery(obj *Discovery) { + if len(obj.TLSBootstrapToken) == 0 && obj.BootstrapToken != nil { + obj.TLSBootstrapToken = obj.BootstrapToken.Token + } + + if obj.Timeout == nil { + obj.Timeout = &metav1.Duration{ + Duration: DefaultDiscoveryTimeout, + } + } + + if obj.File != nil { + SetDefaults_FileDiscovery(obj.File) + } +} + +// SetDefaults_FileDiscovery assigns default values for file based discovery +func SetDefaults_FileDiscovery(obj *FileDiscovery) { + // Make sure file URL becomes path + if len(obj.KubeConfigPath) != 0 { + u, err := url.Parse(obj.KubeConfigPath) + if err == nil && u.Scheme == "file" { + obj.KubeConfigPath = u.Path + } + } +} + +// SetDefaults_BootstrapTokens sets the defaults for the .BootstrapTokens field. +// If the slice is empty, it's defaulted with one token. Otherwise it just loops +// through the slice and sets the defaults for the omitempty fields that are TTL, +// Usages and Groups. Token is NOT defaulted with a random one in the API defaulting +// layer, but set to a random value later at runtime if not set before. +func SetDefaults_BootstrapTokens(obj *InitConfiguration) { + + if len(obj.BootstrapTokens) == 0 { + obj.BootstrapTokens = []BootstrapToken{{}} + } + + for i := range obj.BootstrapTokens { + SetDefaults_BootstrapToken(&obj.BootstrapTokens[i]) + } +} + +// SetDefaults_BootstrapToken sets the defaults for an individual Bootstrap Token +func SetDefaults_BootstrapToken(bt *BootstrapToken) { + if bt.TTL == nil { + bt.TTL = &metav1.Duration{ + Duration: constants.DefaultTokenDuration, + } + } + if len(bt.Usages) == 0 { + bt.Usages = constants.DefaultTokenUsages + } + + if len(bt.Groups) == 0 { + bt.Groups = constants.DefaultTokenGroups + } +} + +// SetDefaults_APIEndpoint sets the defaults for the API server instance deployed on a node. +func SetDefaults_APIEndpoint(obj *APIEndpoint) { + if obj.BindPort == 0 { + obj.BindPort = DefaultAPIBindPort + } +} diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/defaults_unix.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/defaults_unix.go new file mode 100644 index 0000000000..f4a50f9972 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/defaults_unix.go @@ -0,0 +1,28 @@ +// +build !windows + +/* +Copyright 2018 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +const ( + // DefaultCACertPath defines default location of CA certificate on Linux + DefaultCACertPath = "/etc/kubernetes/pki/ca.crt" + // DefaultUrlScheme defines default socket url prefix + DefaultUrlScheme = "unix" + // DefaultCRISocket defines the default cri socket + DefaultCRISocket = "/var/run/dockershim.sock" +) diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/defaults_windows.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/defaults_windows.go new file mode 100644 index 0000000000..13b1d88ffc --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/defaults_windows.go @@ -0,0 +1,28 @@ +// +build windows + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +const ( + // DefaultCACertPath defines default location of CA certificate on Windows + DefaultCACertPath = "C:/etc/kubernetes/pki/ca.crt" + // DefaultUrlScheme defines default socket url prefix + DefaultUrlScheme = "tcp" + // DefaultCRISocket defines the default cri socket + DefaultCRISocket = "tcp://localhost:2375" +) diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/doc.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/doc.go new file mode 100644 index 0000000000..8b45d89be6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/doc.go @@ -0,0 +1,280 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:defaulter-gen=TypeMeta +// +groupName=kubeadm.k8s.io +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm + +// Package v1beta1 defines the v1beta1 version of the kubeadm configuration file format. +// This version graduates the configuration format to BETA and is a big step towards GA. 
+// +// A list of changes since v1alpha3: +// - "apiServerEndpoint" in InitConfiguration was renamed to "localAPIEndpoint" for better clarity of what the field +// represents. +// - Common fields in ClusterConfiguration such as "*extraArgs" and "*extraVolumes" for control plane components are now moved +// under component structs - i.e. "apiServer", "controllerManager", "scheduler". +// - "auditPolicy" was removed from ClusterConfiguration. Please use "extraArgs" in "apiServer" to configure this feature instead. +// - "unifiedControlPlaneImage" in ClusterConfiguration was changed to a boolean field called "useHyperKubeImage". +// - ClusterConfiguration now has a "dns" field which can be used to select and configure the cluster DNS addon. +// - "featureGates" still exists under ClusterConfiguration, but there are no supported feature gates in 1.13. +// See the Kubernetes 1.13 changelog for further details. +// - Both "localEtcd" and "dns" configurations now support custom image repositories. +// - The "controlPlane*"-related fields in JoinConfiguration were refactored into a sub-structure. +// - "clusterName" was removed from JoinConfiguration and the name is now fetched from the existing cluster. +// +// Migration from old kubeadm config versions +// +// Please convert your v1alpha3 configuration files to v1beta1 using the "kubeadm config migrate" command of kubeadm v1.13.x +// (conversion from older releases of kubeadm config files requires an older release of kubeadm as well, e.g. +// kubeadm v1.11 should be used to migrate v1alpha1 to v1alpha2; kubeadm v1.12 should be used to translate v1alpha2 to v1alpha3) +// +// Nevertheless, kubeadm v1.13.x will support reading from the v1alpha3 version of the kubeadm config file format, but this support +// will be dropped in the v1.14 release. +// +// Basics +// +// The preferred way to configure kubeadm is to pass a YAML configuration file with the --config option. Some of the +// configuration options defined in the kubeadm config file are also available as command line flags, but only +// the most common/simple use cases are supported with this approach. +// +// A kubeadm config file could contain multiple configuration types separated using three dashes ("---"). +// +// kubeadm supports the following configuration types: +// +// apiVersion: kubeadm.k8s.io/v1beta1 +// kind: InitConfiguration +// +// apiVersion: kubeadm.k8s.io/v1beta1 +// kind: ClusterConfiguration +// +// apiVersion: kubelet.config.k8s.io/v1beta1 +// kind: KubeletConfiguration +// +// apiVersion: kubeproxy.config.k8s.io/v1alpha1 +// kind: KubeProxyConfiguration +// +// apiVersion: kubeadm.k8s.io/v1beta1 +// kind: JoinConfiguration +// +// To print the defaults for "init" and "join" actions use the following commands: +// kubeadm config print init-defaults +// kubeadm config print join-defaults +// +// The list of configuration types that must be included in a configuration file depends on the action you are +// performing (init or join) and by the configuration options you are going to use (defaults or advanced customization). +// +// If some configuration types are not provided, or provided only partially, kubeadm will use default values; the defaults +// provided by kubeadm also include enforcing consistency of values across components when required (e.g. +// cluster-cidr flag on controller manager and clusterCIDR on kube-proxy). +// +// Users are always allowed to override default values, with the only exception of a small subset of settings with +// relevance for security (e.g.
enforcing authorization-mode Node and RBAC on the API server) +// +// If the user provides configuration types that are not expected for the action being performed, kubeadm will +// ignore those types and print a warning. +// +// Kubeadm init configuration types +// +// When executing kubeadm init with the --config option, the following configuration types could be used: +// InitConfiguration, ClusterConfiguration, KubeProxyConfiguration, KubeletConfiguration, but only one +// of InitConfiguration and ClusterConfiguration is mandatory. +// +// apiVersion: kubeadm.k8s.io/v1beta1 +// kind: InitConfiguration +// bootstrapTokens: +// ... +// nodeRegistration: +// ... +// +// The InitConfiguration type should be used to configure runtime settings, which in the case of kubeadm init +// are the configuration of the bootstrap token and all the settings which are specific to the node where kubeadm +// is executed, including: +// +// - NodeRegistration, that holds fields that relate to registering the new node to the cluster; +// use it to customize the node name, the CRI socket to use or any other settings that should apply to this +// node only (e.g. the node IP). +// +// - LocalAPIEndpoint, that represents the endpoint of the instance of the API server to be deployed on this node; +// use it e.g. to customize the API server advertise address. +// +// apiVersion: kubeadm.k8s.io/v1beta1 +// kind: ClusterConfiguration +// networking: +// ... +// etcd: +// ... +// apiServer: +// extraArgs: +// ... +// extraVolumes: +// ... +// ... +// +// The ClusterConfiguration type should be used to configure cluster-wide settings, +// including settings for: +// +// - Networking, that holds configuration for the networking topology of the cluster; use it e.g. to customize +// node subnet or services subnet. +// +// - Etcd configurations; use it e.g. to customize the local etcd or to configure the API server +// for using an external etcd cluster. +// +// - kube-apiserver, kube-scheduler, kube-controller-manager configurations; use it to customize control-plane +// components by adding customized settings or overriding kubeadm default settings. +// +// apiVersion: kubeproxy.config.k8s.io/v1alpha1 +// kind: KubeProxyConfiguration +// ... +// +// The KubeProxyConfiguration type should be used to change the configuration passed to kube-proxy instances deployed +// in the cluster. If this object is not provided or provided only partially, kubeadm applies defaults. +// +// See https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/ or https://godoc.org/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration +// for the kube-proxy official documentation. +// +// apiVersion: kubelet.config.k8s.io/v1beta1 +// kind: KubeletConfiguration +// ... +// +// The KubeletConfiguration type should be used to change the configurations that will be passed to all kubelet instances +// deployed in the cluster. If this object is not provided or provided only partially, kubeadm applies defaults. +// +// See https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/ or https://godoc.org/k8s.io/kubelet/config/v1beta1#KubeletConfiguration +// for the kubelet official documentation. +// +// Here is a fully populated example of a single YAML file containing multiple +// configuration types to be used during a `kubeadm init` run.
+// +// apiVersion: kubeadm.k8s.io/v1beta1 +// kind: InitConfiguration +// bootstrapTokens: +// - token: "9a08jv.c0izixklcxtmnze7" +// description: "kubeadm bootstrap token" +// ttl: "24h" +// - token: "783bde.3f89s0fje9f38fhf" +// description: "another bootstrap token" +// usages: +// - authentication +// - signing +// groups: +// - system:bootstrappers:kubeadm:default-node-token +// nodeRegistration: +// name: "ec2-10-100-0-1" +// criSocket: "/var/run/dockershim.sock" +// taints: +// - key: "kubeadmNode" +// value: "master" +// effect: "NoSchedule" +// kubeletExtraArgs: +// cgroupDriver: "cgroupfs" +// localAPIEndpoint: +// advertiseAddress: "10.100.0.1" +// bindPort: 6443 +// --- +// apiVersion: kubeadm.k8s.io/v1beta1 +// kind: ClusterConfiguration +// etcd: +// # one of local or external +// local: +// imageRepository: "k8s.gcr.io" +// imageTag: "3.2.24" +// dataDir: "/var/lib/etcd" +// extraArgs: +// listen-client-urls: "http://10.100.0.1:2379" +// serverCertSANs: +// - "ec2-10-100-0-1.compute-1.amazonaws.com" +// peerCertSANs: +// - "10.100.0.1" +// # external: +// # endpoints: +// # - "10.100.0.1:2379" +// # - "10.100.0.2:2379" +// # caFile: "/etcd/kubernetes/pki/etcd/etcd-ca.crt" +// # certFile: "/etcd/kubernetes/pki/etcd/etcd.crt" +// # keyFile: "/etcd/kubernetes/pki/etcd/etcd.key" +// networking: +// serviceSubnet: "10.96.0.0/12" +// podSubnet: "10.100.0.1/24" +// dnsDomain: "cluster.local" +// kubernetesVersion: "v1.12.0" +// controlPlaneEndpoint: "10.100.0.1:6443" +// apiServer: +// extraArgs: +// authorization-mode: "Node,RBAC" +// extraVolumes: +// - name: "some-volume" +// hostPath: "/etc/some-path" +// mountPath: "/etc/some-pod-path" +// readOnly: false +// pathType: File +// certSANs: +// - "10.100.1.1" +// - "ec2-10-100-0-1.compute-1.amazonaws.com" +// timeoutForControlPlane: 4m0s +// controllerManager: +// extraArgs: +// "node-cidr-mask-size": "20" +// extraVolumes: +// - name: "some-volume" +// hostPath: "/etc/some-path" +// mountPath: "/etc/some-pod-path" +// readOnly: false +// pathType: File +// scheduler: +// extraArgs: +// address: "10.100.0.1" +// extraVolumes: +// - name: "some-volume" +// hostPath: "/etc/some-path" +// mountPath: "/etc/some-pod-path" +// readOnly: false +// pathType: File +// certificatesDir: "/etc/kubernetes/pki" +// imageRepository: "k8s.gcr.io" +// useHyperKubeImage: false +// clusterName: "example-cluster" +// --- +// apiVersion: kubelet.config.k8s.io/v1beta1 +// kind: KubeletConfiguration +// # kubelet specific options here +// --- +// apiVersion: kubeproxy.config.k8s.io/v1alpha1 +// kind: KubeProxyConfiguration +// # kube-proxy specific options here +// +// Kubeadm join configuration types +// +// When executing kubeadm join with the --config option, the JoinConfiguration type should be provided. +// +// apiVersion: kubeadm.k8s.io/v1beta1 +// kind: JoinConfiguration +// ... +// +// The JoinConfiguration type should be used to configure runtime settings, which in the case of kubeadm join +// are the discovery method used for accessing the cluster info and all the settings which are specific +// to the node where kubeadm is executed, including: +// +// - NodeRegistration, that holds fields that relate to registering the new node to the cluster; +// use it to customize the node name, the CRI socket to use or any other settings that should apply to this +// node only (e.g. the node IP). +// +// - APIEndpoint, that represents the endpoint of the instance of the API server to be eventually deployed on this node.
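+// +// For illustration, a sketch of a bootstrap-token based join file; the node name, server +// address and CA certificate hash are placeholders to be replaced with real cluster values: +// +// apiVersion: kubeadm.k8s.io/v1beta1 +// kind: JoinConfiguration +// nodeRegistration: +// name: "ec2-10-100-0-2" +// criSocket: "/var/run/dockershim.sock" +// discovery: +// bootstrapToken: +// token: "9a08jv.c0izixklcxtmnze7" +// apiServerEndpoint: "10.100.0.1:6443" +// caCertHashes: +// - "sha256:..."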
+// +package v1beta1 // import "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" + +//TODO: The BootstrapTokenString object should move out to either k8s.io/client-go or k8s.io/api in the future +//(probably as part of Bootstrap Tokens going GA). It should not be staged under the kubeadm API as it is now. diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/register.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/register.go new file mode 100644 index 0000000000..4446e361ae --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/register.go @@ -0,0 +1,68 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "kubeadm.k8s.io" + +// SchemeGroupVersion is the group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + + // SchemeBuilder points to a list of functions added to Scheme. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + // AddToScheme applies all the stored functions to the scheme. + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs) +} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &InitConfiguration{}, + &ClusterConfiguration{}, + &ClusterStatus{}, + &JoinConfiguration{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/types.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/types.go new file mode 100644 index 0000000000..9daf711450 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/types.go @@ -0,0 +1,390 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// InitConfiguration contains a list of elements that is specific to "kubeadm init"-only runtime +// information. +type InitConfiguration struct { + metav1.TypeMeta `json:",inline"` + + // ClusterConfiguration holds the cluster-wide information, and embeds that struct (which can be (un)marshalled separately as well) + // When InitConfiguration is marshalled to bytes in the external version, this information IS NOT preserved (which can be seen from + // the `json:"-"` tag). This is because, when InitConfiguration is (un)marshalled, it turns into two YAML documents: one for the + // InitConfiguration and one for the ClusterConfiguration. Hence, the information must not be duplicated, and is therefore omitted here. + ClusterConfiguration `json:"-"` + + // `kubeadm init`-only information. These fields are solely used the first time `kubeadm init` runs. + // After that, the information in the fields IS NOT uploaded to the `kubeadm-config` ConfigMap + // that is used by `kubeadm upgrade` for instance. These fields must be omitempty. + + // BootstrapTokens is respected at `kubeadm init` time and describes a set of Bootstrap Tokens to create. + // This information IS NOT uploaded to the kubeadm cluster configmap, partly because of its sensitive nature + BootstrapTokens []BootstrapToken `json:"bootstrapTokens,omitempty"` + + // NodeRegistration holds fields that relate to registering the new master node to the cluster + NodeRegistration NodeRegistrationOptions `json:"nodeRegistration,omitempty"` + + // LocalAPIEndpoint represents the endpoint of the API server instance that's deployed on this control plane node + // In HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint in the sense that ControlPlaneEndpoint + // is the global endpoint for the cluster, which then load balances the requests to each individual API server. This + // configuration object lets you customize what IP/DNS name and port the local API server advertises it's accessible + // on. By default, kubeadm tries to auto-detect the IP of the default interface and use that, but in case that process + // fails you may set the desired value here. + LocalAPIEndpoint APIEndpoint `json:"localAPIEndpoint,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterConfiguration contains cluster-wide configuration for a kubeadm cluster +type ClusterConfiguration struct { + metav1.TypeMeta `json:",inline"` + + // Etcd holds configuration for etcd. + Etcd Etcd `json:"etcd"` + + // Networking holds configuration for the networking topology of the cluster. + Networking Networking `json:"networking"` + + // KubernetesVersion is the target version of the control plane. + KubernetesVersion string `json:"kubernetesVersion"` + + // ControlPlaneEndpoint sets a stable IP address or DNS name for the control plane; it + // can be a valid IP address or an RFC-1123 DNS subdomain, both with optional TCP port.
+	// In case the ControlPlaneEndpoint is not specified, the AdvertiseAddress + BindPort + // are used; in case the ControlPlaneEndpoint is specified but without a TCP port, + // the BindPort is used. + // Possible usages are: + // e.g. in a cluster with more than one control plane instance, this field should be + // assigned the address of the external load balancer in front of the + // control plane instances. + // e.g. in environments with enforced node recycling, the ControlPlaneEndpoint + // could be used for assigning a stable DNS name to the control plane. + ControlPlaneEndpoint string `json:"controlPlaneEndpoint"` + + // APIServer contains extra settings for the API server control plane component + APIServer APIServer `json:"apiServer,omitempty"` + + // ControllerManager contains extra settings for the controller manager control plane component + ControllerManager ControlPlaneComponent `json:"controllerManager,omitempty"` + + // Scheduler contains extra settings for the scheduler control plane component + Scheduler ControlPlaneComponent `json:"scheduler,omitempty"` + + // DNS defines the options for the DNS add-on installed in the cluster. + DNS DNS `json:"dns"` + + // CertificatesDir specifies where to store or look for all required certificates. + CertificatesDir string `json:"certificatesDir"` + + // ImageRepository sets the container registry to pull images from. + // If empty, `k8s.gcr.io` will be used by default; in case the kubernetes version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`) + // `gcr.io/kubernetes-ci-images` will be used as a default for control plane components and for kube-proxy, while `k8s.gcr.io` + // will be used for all the other images. + ImageRepository string `json:"imageRepository"` + + // UseHyperKubeImage controls if hyperkube should be used for Kubernetes components instead of their respective separate images + UseHyperKubeImage bool `json:"useHyperKubeImage,omitempty"` + + // FeatureGates enabled by the user. + FeatureGates map[string]bool `json:"featureGates,omitempty"` + + // The cluster name + ClusterName string `json:"clusterName,omitempty"` +} + +// ControlPlaneComponent holds settings common to control plane components of the cluster +type ControlPlaneComponent struct { + // ExtraArgs is an extra set of flags to pass to the control plane component. + // TODO: This is temporary and ideally we would like to switch all components to + // use ComponentConfig + ConfigMaps. + ExtraArgs map[string]string `json:"extraArgs,omitempty"` + + // ExtraVolumes is an extra set of host volumes, mounted to the control plane component. + ExtraVolumes []HostPathMount `json:"extraVolumes,omitempty"` +} + +// APIServer holds settings necessary for API server deployments in the cluster +type APIServer struct { + ControlPlaneComponent `json:",inline"` + + // CertSANs sets extra Subject Alternative Names for the API Server signing cert.
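+	// For illustration, such SANs might be listed in YAML as (placeholder values): + //   certSANs: + //   - "10.100.1.1" + //   - "lb.example.com"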
+	CertSANs []string `json:"certSANs,omitempty"` + + // TimeoutForControlPlane controls the timeout that we use for the API server to appear + TimeoutForControlPlane *metav1.Duration `json:"timeoutForControlPlane,omitempty"` +} + +// DNSAddOnType defines string identifying DNS add-on types +type DNSAddOnType string + +const ( + // CoreDNS add-on type + CoreDNS DNSAddOnType = "CoreDNS" + + // KubeDNS add-on type + KubeDNS DNSAddOnType = "kube-dns" +) + +// DNS defines the DNS addon that should be used in the cluster +type DNS struct { + // Type defines the DNS add-on to be used + Type DNSAddOnType `json:"type"` + + // ImageMeta allows customizing the image used for the DNS component + ImageMeta `json:",inline"` +} + +// ImageMeta allows customizing the image used for components that do not +// originate from the Kubernetes/Kubernetes release process +type ImageMeta struct { + // ImageRepository sets the container registry to pull images from. + // If not set, the ImageRepository defined in ClusterConfiguration will be used instead. + ImageRepository string `json:"imageRepository,omitempty"` + + // ImageTag allows specifying a tag for the image. + // In case this value is set, kubeadm does not automatically change the version of the above components during upgrades. + ImageTag string `json:"imageTag,omitempty"` + + //TODO: evaluate if we also need an ImageName based on user feedback +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterStatus contains the cluster status. The ClusterStatus will be stored in the kubeadm-config +// ConfigMap in the cluster, and then updated by kubeadm when additional control plane instances join or leave the cluster. +type ClusterStatus struct { + metav1.TypeMeta `json:",inline"` + + // APIEndpoints currently available in the cluster, one for each control plane/api server instance. + // The key of the map is the IP of the host's default interface + APIEndpoints map[string]APIEndpoint `json:"apiEndpoints"` +} + +// APIEndpoint struct contains elements of API server instance deployed on a node. +type APIEndpoint struct { + // AdvertiseAddress sets the IP address for the API server to advertise. + AdvertiseAddress string `json:"advertiseAddress"` + + // BindPort sets the secure port for the API Server to bind to. + // Defaults to 6443. + BindPort int32 `json:"bindPort"` +} + +// NodeRegistrationOptions holds fields that relate to registering a new master or node to the cluster, either via "kubeadm init" or "kubeadm join" +type NodeRegistrationOptions struct { + + // Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. + // This field is also used in the CommonName field of the kubelet's client certificate to the API server. + // Defaults to the hostname of the node if not provided. + Name string `json:"name,omitempty"` + + // CRISocket is used to retrieve container runtime info. This information will be annotated to the Node API object, for later re-use + CRISocket string `json:"criSocket,omitempty"` + + // Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process + // it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your master node, set this field to an + // empty slice, i.e. `taints: []` in the YAML file. This field is solely used for Node registration.
+	Taints []v1.Taint `json:"taints,omitempty"` + + // KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file + // kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap. + // Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. + KubeletExtraArgs map[string]string `json:"kubeletExtraArgs,omitempty"` +} + +// Networking contains elements describing cluster's networking configuration +type Networking struct { + // ServiceSubnet is the subnet used by k8s services. Defaults to "10.96.0.0/12". + ServiceSubnet string `json:"serviceSubnet"` + // PodSubnet is the subnet used by pods. + PodSubnet string `json:"podSubnet"` + // DNSDomain is the dns domain used by k8s services. Defaults to "cluster.local". + DNSDomain string `json:"dnsDomain"` +} + +// BootstrapToken describes one bootstrap token, stored as a Secret in the cluster +type BootstrapToken struct { + // Token is used for establishing bidirectional trust between nodes and masters. + // Used for joining nodes in the cluster. + Token *BootstrapTokenString `json:"token"` + // Description sets a human-friendly message explaining why this token exists and what it's used + // for, so other administrators can know its purpose. + Description string `json:"description,omitempty"` + // TTL defines the time to live for this token. Defaults to 24h. + // Expires and TTL are mutually exclusive. + TTL *metav1.Duration `json:"ttl,omitempty"` + // Expires specifies the timestamp when this token expires. Defaults to being set + // dynamically at runtime based on the TTL. Expires and TTL are mutually exclusive. + Expires *metav1.Time `json:"expires,omitempty"` + // Usages describes the ways in which this token can be used. By default, it can be used + // for establishing bidirectional trust, but that can be changed here. + Usages []string `json:"usages,omitempty"` + // Groups specifies the extra groups that this token will authenticate as when/if + // used for authentication. + Groups []string `json:"groups,omitempty"` +} + +// Etcd contains elements describing Etcd configuration. +type Etcd struct { + + // Local provides configuration knobs for configuring the local etcd instance + // Local and External are mutually exclusive + Local *LocalEtcd `json:"local,omitempty"` + + // External describes how to connect to an external etcd cluster + // Local and External are mutually exclusive + External *ExternalEtcd `json:"external,omitempty"` +} + +// LocalEtcd describes that kubeadm should run an etcd cluster locally +type LocalEtcd struct { + // ImageMeta allows customizing the container used for etcd + ImageMeta `json:",inline"` + + // DataDir is the directory etcd will place its data. + // Defaults to "/var/lib/etcd". + DataDir string `json:"dataDir"` + + // ExtraArgs are extra arguments provided to the etcd binary + // when run inside a static pod. + ExtraArgs map[string]string `json:"extraArgs,omitempty"` + + // ServerCertSANs sets extra Subject Alternative Names for the etcd server signing cert. + ServerCertSANs []string `json:"serverCertSANs,omitempty"` + // PeerCertSANs sets extra Subject Alternative Names for the etcd peer signing cert. + PeerCertSANs []string `json:"peerCertSANs,omitempty"` +} + +// ExternalEtcd describes an external etcd cluster. +// Kubeadm has no knowledge of where certificate files live and they must be supplied.
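+// For illustration, a minimal sketch of the corresponding YAML (endpoints and file paths +// are placeholders): +// +//     etcd: +//       external: +//         endpoints: +//         - "https://10.100.0.10:2379" +//         caFile: "/etc/kubernetes/pki/etcd/ca.crt" +//         certFile: "/etc/kubernetes/pki/etcd/client.crt" +//         keyFile: "/etc/kubernetes/pki/etcd/client.key"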
+type ExternalEtcd struct { + // Endpoints of etcd members. Required for ExternalEtcd. + Endpoints []string `json:"endpoints"` + + // CAFile is an SSL Certificate Authority file used to secure etcd communication. + // Required if using a TLS connection. + CAFile string `json:"caFile"` + + // CertFile is an SSL certification file used to secure etcd communication. + // Required if using a TLS connection. + CertFile string `json:"certFile"` + + // KeyFile is an SSL key file used to secure etcd communication. + // Required if using a TLS connection. + KeyFile string `json:"keyFile"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// JoinConfiguration contains elements describing a particular node. +type JoinConfiguration struct { + metav1.TypeMeta `json:",inline"` + + // NodeRegistration holds fields that relate to registering the new master node to the cluster + NodeRegistration NodeRegistrationOptions `json:"nodeRegistration"` + + // CACertPath is the path to the SSL certificate authority used to + // secure communications between node and master. + // Defaults to "/etc/kubernetes/pki/ca.crt". + CACertPath string `json:"caCertPath"` + + // Discovery specifies the options for the kubelet to use during the TLS Bootstrap process + Discovery Discovery `json:"discovery"` + + // ControlPlane defines the additional control plane instance to be deployed on the joining node. + // If nil, no additional control plane instance will be deployed. + ControlPlane *JoinControlPlane `json:"controlPlane,omitempty"` +} + +// JoinControlPlane contains elements describing an additional control plane instance to be deployed on the joining node. +type JoinControlPlane struct { + // LocalAPIEndpoint represents the endpoint of the API server instance to be deployed on this node. + LocalAPIEndpoint APIEndpoint `json:"localAPIEndpoint,omitempty"` +} + +// Discovery specifies the options for the kubelet to use during the TLS Bootstrap process +type Discovery struct { + // BootstrapToken is used to set the options for bootstrap token based discovery + // BootstrapToken and File are mutually exclusive + BootstrapToken *BootstrapTokenDiscovery `json:"bootstrapToken,omitempty"` + + // File is used to specify a file or URL to a kubeconfig file from which to load cluster information + // BootstrapToken and File are mutually exclusive + File *FileDiscovery `json:"file,omitempty"` + + // TLSBootstrapToken is a token used for TLS bootstrapping. + // If .BootstrapToken is set, this field is defaulted to .BootstrapToken.Token, but can be overridden. + // If .File is set, this field **must be set** in case the KubeConfigFile does not contain any other authentication information + TLSBootstrapToken string `json:"tlsBootstrapToken"` + + // Timeout modifies the discovery timeout + Timeout *metav1.Duration `json:"timeout,omitempty"` +} + +// BootstrapTokenDiscovery is used to set the options for bootstrap token based discovery +type BootstrapTokenDiscovery struct { + // Token is a token used to validate cluster information + // fetched from the master. + Token string `json:"token"` + + // APIServerEndpoint is an IP or domain name to the API server from which info will be fetched. + APIServerEndpoint string `json:"apiServerEndpoint,omitempty"` + + // CACertHashes specifies a set of public key pins to verify + // when token-based discovery is used. The root CA found during discovery + // must match one of these values. Specifying an empty set disables root CA + // pinning, which can be unsafe.
Each hash is specified as "<type>:<value>", + // where the only currently supported type is "sha256". This is a hex-encoded + // SHA-256 hash of the Subject Public Key Info (SPKI) object in DER-encoded + // ASN.1. These hashes can be calculated using, for example, OpenSSL: + // openssl x509 -pubkey -in ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex + CACertHashes []string `json:"caCertHashes,omitempty"` + + // UnsafeSkipCAVerification allows token-based discovery + // without CA verification via CACertHashes. This can weaken + // the security of kubeadm since other nodes can impersonate the master. + UnsafeSkipCAVerification bool `json:"unsafeSkipCAVerification"` +} + +// FileDiscovery is used to specify a file or URL to a kubeconfig file from which to load cluster information +type FileDiscovery struct { + // KubeConfigPath is used to specify the actual file path or URL to the kubeconfig file from which to load cluster information + KubeConfigPath string `json:"kubeConfigPath"` +} + +// HostPathMount contains elements describing volumes that are mounted from the +// host. +type HostPathMount struct { + // Name of the volume inside the pod template. + Name string `json:"name"` + // HostPath is the path in the host that will be mounted inside + // the pod. + HostPath string `json:"hostPath"` + // MountPath is the path inside the pod where hostPath will be mounted. + MountPath string `json:"mountPath"` + // ReadOnly controls write access to the volume + ReadOnly bool `json:"readOnly,omitempty"` + // PathType is the type of the HostPath. + PathType v1.HostPathType `json:"pathType,omitempty"` +} diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/zz_generated.conversion.go new file mode 100644 index 0000000000..4863c317fb --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/zz_generated.conversion.go @@ -0,0 +1,839 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1beta1 + +import ( + unsafe "unsafe" + + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*APIEndpoint)(nil), (*kubeadm.APIEndpoint)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_APIEndpoint_To_kubeadm_APIEndpoint(a.(*APIEndpoint), b.(*kubeadm.APIEndpoint), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kubeadm.APIEndpoint)(nil), (*APIEndpoint)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kubeadm_APIEndpoint_To_v1beta1_APIEndpoint(a.(*kubeadm.APIEndpoint), b.(*APIEndpoint), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*APIServer)(nil), (*kubeadm.APIServer)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_APIServer_To_kubeadm_APIServer(a.(*APIServer), b.(*kubeadm.APIServer), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kubeadm.APIServer)(nil), (*APIServer)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kubeadm_APIServer_To_v1beta1_APIServer(a.(*kubeadm.APIServer), b.(*APIServer), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*BootstrapToken)(nil), (*kubeadm.BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_BootstrapToken_To_kubeadm_BootstrapToken(a.(*BootstrapToken), b.(*kubeadm.BootstrapToken), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kubeadm.BootstrapToken)(nil), (*BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kubeadm_BootstrapToken_To_v1beta1_BootstrapToken(a.(*kubeadm.BootstrapToken), b.(*BootstrapToken), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*BootstrapTokenDiscovery)(nil), (*kubeadm.BootstrapTokenDiscovery)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_BootstrapTokenDiscovery_To_kubeadm_BootstrapTokenDiscovery(a.(*BootstrapTokenDiscovery), b.(*kubeadm.BootstrapTokenDiscovery), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kubeadm.BootstrapTokenDiscovery)(nil), (*BootstrapTokenDiscovery)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kubeadm_BootstrapTokenDiscovery_To_v1beta1_BootstrapTokenDiscovery(a.(*kubeadm.BootstrapTokenDiscovery), b.(*BootstrapTokenDiscovery), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*BootstrapTokenString)(nil), (*kubeadm.BootstrapTokenString)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_BootstrapTokenString_To_kubeadm_BootstrapTokenString(a.(*BootstrapTokenString), b.(*kubeadm.BootstrapTokenString), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kubeadm.BootstrapTokenString)(nil), (*BootstrapTokenString)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kubeadm_BootstrapTokenString_To_v1beta1_BootstrapTokenString(a.(*kubeadm.BootstrapTokenString), b.(*BootstrapTokenString), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClusterConfiguration)(nil), (*kubeadm.ClusterConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ClusterConfiguration_To_kubeadm_ClusterConfiguration(a.(*ClusterConfiguration), b.(*kubeadm.ClusterConfiguration), 
scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kubeadm.ClusterConfiguration)(nil), (*ClusterConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kubeadm_ClusterConfiguration_To_v1beta1_ClusterConfiguration(a.(*kubeadm.ClusterConfiguration), b.(*ClusterConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClusterStatus)(nil), (*kubeadm.ClusterStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ClusterStatus_To_kubeadm_ClusterStatus(a.(*ClusterStatus), b.(*kubeadm.ClusterStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kubeadm.ClusterStatus)(nil), (*ClusterStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kubeadm_ClusterStatus_To_v1beta1_ClusterStatus(a.(*kubeadm.ClusterStatus), b.(*ClusterStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ControlPlaneComponent)(nil), (*kubeadm.ControlPlaneComponent)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ControlPlaneComponent_To_kubeadm_ControlPlaneComponent(a.(*ControlPlaneComponent), b.(*kubeadm.ControlPlaneComponent), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kubeadm.ControlPlaneComponent)(nil), (*ControlPlaneComponent)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kubeadm_ControlPlaneComponent_To_v1beta1_ControlPlaneComponent(a.(*kubeadm.ControlPlaneComponent), b.(*ControlPlaneComponent), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*DNS)(nil), (*kubeadm.DNS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DNS_To_kubeadm_DNS(a.(*DNS), b.(*kubeadm.DNS), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kubeadm.DNS)(nil), (*DNS)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kubeadm_DNS_To_v1beta1_DNS(a.(*kubeadm.DNS), b.(*DNS), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Discovery)(nil), (*kubeadm.Discovery)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Discovery_To_kubeadm_Discovery(a.(*Discovery), b.(*kubeadm.Discovery), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kubeadm.Discovery)(nil), (*Discovery)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kubeadm_Discovery_To_v1beta1_Discovery(a.(*kubeadm.Discovery), b.(*Discovery), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Etcd)(nil), (*kubeadm.Etcd)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Etcd_To_kubeadm_Etcd(a.(*Etcd), b.(*kubeadm.Etcd), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kubeadm.Etcd)(nil), (*Etcd)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kubeadm_Etcd_To_v1beta1_Etcd(a.(*kubeadm.Etcd), b.(*Etcd), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ExternalEtcd)(nil), (*kubeadm.ExternalEtcd)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ExternalEtcd_To_kubeadm_ExternalEtcd(a.(*ExternalEtcd), b.(*kubeadm.ExternalEtcd), scope) + }); err != nil { + return err 
+ } + if err := s.AddGeneratedConversionFunc((*kubeadm.ExternalEtcd)(nil), (*ExternalEtcd)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kubeadm_ExternalEtcd_To_v1beta1_ExternalEtcd(a.(*kubeadm.ExternalEtcd), b.(*ExternalEtcd), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*FileDiscovery)(nil), (*kubeadm.FileDiscovery)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_FileDiscovery_To_kubeadm_FileDiscovery(a.(*FileDiscovery), b.(*kubeadm.FileDiscovery), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kubeadm.FileDiscovery)(nil), (*FileDiscovery)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kubeadm_FileDiscovery_To_v1beta1_FileDiscovery(a.(*kubeadm.FileDiscovery), b.(*FileDiscovery), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*HostPathMount)(nil), (*kubeadm.HostPathMount)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_HostPathMount_To_kubeadm_HostPathMount(a.(*HostPathMount), b.(*kubeadm.HostPathMount), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kubeadm.HostPathMount)(nil), (*HostPathMount)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kubeadm_HostPathMount_To_v1beta1_HostPathMount(a.(*kubeadm.HostPathMount), b.(*HostPathMount), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ImageMeta)(nil), (*kubeadm.ImageMeta)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ImageMeta_To_kubeadm_ImageMeta(a.(*ImageMeta), b.(*kubeadm.ImageMeta), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kubeadm.ImageMeta)(nil), (*ImageMeta)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kubeadm_ImageMeta_To_v1beta1_ImageMeta(a.(*kubeadm.ImageMeta), b.(*ImageMeta), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*InitConfiguration)(nil), (*kubeadm.InitConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_InitConfiguration_To_kubeadm_InitConfiguration(a.(*InitConfiguration), b.(*kubeadm.InitConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kubeadm.InitConfiguration)(nil), (*InitConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kubeadm_InitConfiguration_To_v1beta1_InitConfiguration(a.(*kubeadm.InitConfiguration), b.(*InitConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*JoinConfiguration)(nil), (*kubeadm.JoinConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_JoinConfiguration_To_kubeadm_JoinConfiguration(a.(*JoinConfiguration), b.(*kubeadm.JoinConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kubeadm.JoinConfiguration)(nil), (*JoinConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kubeadm_JoinConfiguration_To_v1beta1_JoinConfiguration(a.(*kubeadm.JoinConfiguration), b.(*JoinConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*JoinControlPlane)(nil), (*kubeadm.JoinControlPlane)(nil), func(a, b interface{}, scope conversion.Scope) error 
{ + return Convert_v1beta1_JoinControlPlane_To_kubeadm_JoinControlPlane(a.(*JoinControlPlane), b.(*kubeadm.JoinControlPlane), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kubeadm.JoinControlPlane)(nil), (*JoinControlPlane)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kubeadm_JoinControlPlane_To_v1beta1_JoinControlPlane(a.(*kubeadm.JoinControlPlane), b.(*JoinControlPlane), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*LocalEtcd)(nil), (*kubeadm.LocalEtcd)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_LocalEtcd_To_kubeadm_LocalEtcd(a.(*LocalEtcd), b.(*kubeadm.LocalEtcd), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kubeadm.LocalEtcd)(nil), (*LocalEtcd)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kubeadm_LocalEtcd_To_v1beta1_LocalEtcd(a.(*kubeadm.LocalEtcd), b.(*LocalEtcd), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Networking)(nil), (*kubeadm.Networking)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Networking_To_kubeadm_Networking(a.(*Networking), b.(*kubeadm.Networking), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kubeadm.Networking)(nil), (*Networking)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kubeadm_Networking_To_v1beta1_Networking(a.(*kubeadm.Networking), b.(*Networking), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NodeRegistrationOptions)(nil), (*kubeadm.NodeRegistrationOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions(a.(*NodeRegistrationOptions), b.(*kubeadm.NodeRegistrationOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*kubeadm.NodeRegistrationOptions)(nil), (*NodeRegistrationOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_kubeadm_NodeRegistrationOptions_To_v1beta1_NodeRegistrationOptions(a.(*kubeadm.NodeRegistrationOptions), b.(*NodeRegistrationOptions), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta1_APIEndpoint_To_kubeadm_APIEndpoint(in *APIEndpoint, out *kubeadm.APIEndpoint, s conversion.Scope) error { + out.AdvertiseAddress = in.AdvertiseAddress + out.BindPort = in.BindPort + return nil +} + +// Convert_v1beta1_APIEndpoint_To_kubeadm_APIEndpoint is an autogenerated conversion function. +func Convert_v1beta1_APIEndpoint_To_kubeadm_APIEndpoint(in *APIEndpoint, out *kubeadm.APIEndpoint, s conversion.Scope) error { + return autoConvert_v1beta1_APIEndpoint_To_kubeadm_APIEndpoint(in, out, s) +} + +func autoConvert_kubeadm_APIEndpoint_To_v1beta1_APIEndpoint(in *kubeadm.APIEndpoint, out *APIEndpoint, s conversion.Scope) error { + out.AdvertiseAddress = in.AdvertiseAddress + out.BindPort = in.BindPort + return nil +} + +// Convert_kubeadm_APIEndpoint_To_v1beta1_APIEndpoint is an autogenerated conversion function. 
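For orientation, here is how the registrations from RegisterConversions are typically exercised. This is an assumed usage sketch, not part of this PR: it builds a fresh scheme, registers the generated conversions, and converts a versioned object into the internal kubeadm types.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	kubeadmv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
)

func main() {
	s := runtime.NewScheme()
	if err := kubeadmv1beta1.RegisterConversions(s); err != nil {
		panic(err)
	}
	versioned := &kubeadmv1beta1.ClusterConfiguration{KubernetesVersion: "v1.15.0"}
	internal := &kubeadm.ClusterConfiguration{}
	// Convert dispatches to the generated
	// Convert_v1beta1_ClusterConfiguration_To_kubeadm_ClusterConfiguration
	// registered above.
	if err := s.Convert(versioned, internal, nil); err != nil {
		panic(err)
	}
	fmt.Println(internal.KubernetesVersion) // v1.15.0
}
```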
+func Convert_kubeadm_APIEndpoint_To_v1beta1_APIEndpoint(in *kubeadm.APIEndpoint, out *APIEndpoint, s conversion.Scope) error { + return autoConvert_kubeadm_APIEndpoint_To_v1beta1_APIEndpoint(in, out, s) +} + +func autoConvert_v1beta1_APIServer_To_kubeadm_APIServer(in *APIServer, out *kubeadm.APIServer, s conversion.Scope) error { + if err := Convert_v1beta1_ControlPlaneComponent_To_kubeadm_ControlPlaneComponent(&in.ControlPlaneComponent, &out.ControlPlaneComponent, s); err != nil { + return err + } + out.CertSANs = *(*[]string)(unsafe.Pointer(&in.CertSANs)) + out.TimeoutForControlPlane = (*v1.Duration)(unsafe.Pointer(in.TimeoutForControlPlane)) + return nil +} + +// Convert_v1beta1_APIServer_To_kubeadm_APIServer is an autogenerated conversion function. +func Convert_v1beta1_APIServer_To_kubeadm_APIServer(in *APIServer, out *kubeadm.APIServer, s conversion.Scope) error { + return autoConvert_v1beta1_APIServer_To_kubeadm_APIServer(in, out, s) +} + +func autoConvert_kubeadm_APIServer_To_v1beta1_APIServer(in *kubeadm.APIServer, out *APIServer, s conversion.Scope) error { + if err := Convert_kubeadm_ControlPlaneComponent_To_v1beta1_ControlPlaneComponent(&in.ControlPlaneComponent, &out.ControlPlaneComponent, s); err != nil { + return err + } + out.CertSANs = *(*[]string)(unsafe.Pointer(&in.CertSANs)) + out.TimeoutForControlPlane = (*v1.Duration)(unsafe.Pointer(in.TimeoutForControlPlane)) + return nil +} + +// Convert_kubeadm_APIServer_To_v1beta1_APIServer is an autogenerated conversion function. +func Convert_kubeadm_APIServer_To_v1beta1_APIServer(in *kubeadm.APIServer, out *APIServer, s conversion.Scope) error { + return autoConvert_kubeadm_APIServer_To_v1beta1_APIServer(in, out, s) +} + +func autoConvert_v1beta1_BootstrapToken_To_kubeadm_BootstrapToken(in *BootstrapToken, out *kubeadm.BootstrapToken, s conversion.Scope) error { + out.Token = (*kubeadm.BootstrapTokenString)(unsafe.Pointer(in.Token)) + out.Description = in.Description + out.TTL = (*v1.Duration)(unsafe.Pointer(in.TTL)) + out.Expires = (*v1.Time)(unsafe.Pointer(in.Expires)) + out.Usages = *(*[]string)(unsafe.Pointer(&in.Usages)) + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + return nil +} + +// Convert_v1beta1_BootstrapToken_To_kubeadm_BootstrapToken is an autogenerated conversion function. +func Convert_v1beta1_BootstrapToken_To_kubeadm_BootstrapToken(in *BootstrapToken, out *kubeadm.BootstrapToken, s conversion.Scope) error { + return autoConvert_v1beta1_BootstrapToken_To_kubeadm_BootstrapToken(in, out, s) +} + +func autoConvert_kubeadm_BootstrapToken_To_v1beta1_BootstrapToken(in *kubeadm.BootstrapToken, out *BootstrapToken, s conversion.Scope) error { + out.Token = (*BootstrapTokenString)(unsafe.Pointer(in.Token)) + out.Description = in.Description + out.TTL = (*v1.Duration)(unsafe.Pointer(in.TTL)) + out.Expires = (*v1.Time)(unsafe.Pointer(in.Expires)) + out.Usages = *(*[]string)(unsafe.Pointer(&in.Usages)) + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + return nil +} + +// Convert_kubeadm_BootstrapToken_To_v1beta1_BootstrapToken is an autogenerated conversion function. 
+func Convert_kubeadm_BootstrapToken_To_v1beta1_BootstrapToken(in *kubeadm.BootstrapToken, out *BootstrapToken, s conversion.Scope) error { + return autoConvert_kubeadm_BootstrapToken_To_v1beta1_BootstrapToken(in, out, s) +} + +func autoConvert_v1beta1_BootstrapTokenDiscovery_To_kubeadm_BootstrapTokenDiscovery(in *BootstrapTokenDiscovery, out *kubeadm.BootstrapTokenDiscovery, s conversion.Scope) error { + out.Token = in.Token + out.APIServerEndpoint = in.APIServerEndpoint + out.CACertHashes = *(*[]string)(unsafe.Pointer(&in.CACertHashes)) + out.UnsafeSkipCAVerification = in.UnsafeSkipCAVerification + return nil +} + +// Convert_v1beta1_BootstrapTokenDiscovery_To_kubeadm_BootstrapTokenDiscovery is an autogenerated conversion function. +func Convert_v1beta1_BootstrapTokenDiscovery_To_kubeadm_BootstrapTokenDiscovery(in *BootstrapTokenDiscovery, out *kubeadm.BootstrapTokenDiscovery, s conversion.Scope) error { + return autoConvert_v1beta1_BootstrapTokenDiscovery_To_kubeadm_BootstrapTokenDiscovery(in, out, s) +} + +func autoConvert_kubeadm_BootstrapTokenDiscovery_To_v1beta1_BootstrapTokenDiscovery(in *kubeadm.BootstrapTokenDiscovery, out *BootstrapTokenDiscovery, s conversion.Scope) error { + out.Token = in.Token + out.APIServerEndpoint = in.APIServerEndpoint + out.CACertHashes = *(*[]string)(unsafe.Pointer(&in.CACertHashes)) + out.UnsafeSkipCAVerification = in.UnsafeSkipCAVerification + return nil +} + +// Convert_kubeadm_BootstrapTokenDiscovery_To_v1beta1_BootstrapTokenDiscovery is an autogenerated conversion function. +func Convert_kubeadm_BootstrapTokenDiscovery_To_v1beta1_BootstrapTokenDiscovery(in *kubeadm.BootstrapTokenDiscovery, out *BootstrapTokenDiscovery, s conversion.Scope) error { + return autoConvert_kubeadm_BootstrapTokenDiscovery_To_v1beta1_BootstrapTokenDiscovery(in, out, s) +} + +func autoConvert_v1beta1_BootstrapTokenString_To_kubeadm_BootstrapTokenString(in *BootstrapTokenString, out *kubeadm.BootstrapTokenString, s conversion.Scope) error { + out.ID = in.ID + out.Secret = in.Secret + return nil +} + +// Convert_v1beta1_BootstrapTokenString_To_kubeadm_BootstrapTokenString is an autogenerated conversion function. +func Convert_v1beta1_BootstrapTokenString_To_kubeadm_BootstrapTokenString(in *BootstrapTokenString, out *kubeadm.BootstrapTokenString, s conversion.Scope) error { + return autoConvert_v1beta1_BootstrapTokenString_To_kubeadm_BootstrapTokenString(in, out, s) +} + +func autoConvert_kubeadm_BootstrapTokenString_To_v1beta1_BootstrapTokenString(in *kubeadm.BootstrapTokenString, out *BootstrapTokenString, s conversion.Scope) error { + out.ID = in.ID + out.Secret = in.Secret + return nil +} + +// Convert_kubeadm_BootstrapTokenString_To_v1beta1_BootstrapTokenString is an autogenerated conversion function. 
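The `*(*[]string)(unsafe.Pointer(&in.CACertHashes))` pattern that recurs in these functions is conversion-gen's zero-copy cast: when the source and destination field types have identical memory layouts, the backing array is reinterpreted in place rather than copied. A standalone sketch of the same trick, using hypothetical mount types in place of the generated ones:

```go
package main

import (
	"fmt"
	"unsafe"
)

// Two distinct types with identical field layout, standing in for a
// versioned type and its internal counterpart.
type externalMount struct{ Name, HostPath string }
type internalMount struct{ Name, HostPath string }

func main() {
	in := []externalMount{{Name: "cloud", HostPath: "/etc/kubernetes/cloud.conf"}}
	// Reinterpret the slice header in place; no elements are copied. This is
	// only safe because the layouts match exactly, which conversion-gen
	// verifies before emitting such a cast.
	out := *(*[]internalMount)(unsafe.Pointer(&in))
	fmt.Println(out[0].HostPath) // /etc/kubernetes/cloud.conf
}
```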
+func Convert_kubeadm_BootstrapTokenString_To_v1beta1_BootstrapTokenString(in *kubeadm.BootstrapTokenString, out *BootstrapTokenString, s conversion.Scope) error { + return autoConvert_kubeadm_BootstrapTokenString_To_v1beta1_BootstrapTokenString(in, out, s) +} + +func autoConvert_v1beta1_ClusterConfiguration_To_kubeadm_ClusterConfiguration(in *ClusterConfiguration, out *kubeadm.ClusterConfiguration, s conversion.Scope) error { + if err := Convert_v1beta1_Etcd_To_kubeadm_Etcd(&in.Etcd, &out.Etcd, s); err != nil { + return err + } + if err := Convert_v1beta1_Networking_To_kubeadm_Networking(&in.Networking, &out.Networking, s); err != nil { + return err + } + out.KubernetesVersion = in.KubernetesVersion + out.ControlPlaneEndpoint = in.ControlPlaneEndpoint + if err := Convert_v1beta1_APIServer_To_kubeadm_APIServer(&in.APIServer, &out.APIServer, s); err != nil { + return err + } + if err := Convert_v1beta1_ControlPlaneComponent_To_kubeadm_ControlPlaneComponent(&in.ControllerManager, &out.ControllerManager, s); err != nil { + return err + } + if err := Convert_v1beta1_ControlPlaneComponent_To_kubeadm_ControlPlaneComponent(&in.Scheduler, &out.Scheduler, s); err != nil { + return err + } + if err := Convert_v1beta1_DNS_To_kubeadm_DNS(&in.DNS, &out.DNS, s); err != nil { + return err + } + out.CertificatesDir = in.CertificatesDir + out.ImageRepository = in.ImageRepository + out.UseHyperKubeImage = in.UseHyperKubeImage + out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) + out.ClusterName = in.ClusterName + return nil +} + +// Convert_v1beta1_ClusterConfiguration_To_kubeadm_ClusterConfiguration is an autogenerated conversion function. +func Convert_v1beta1_ClusterConfiguration_To_kubeadm_ClusterConfiguration(in *ClusterConfiguration, out *kubeadm.ClusterConfiguration, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterConfiguration_To_kubeadm_ClusterConfiguration(in, out, s) +} + +func autoConvert_kubeadm_ClusterConfiguration_To_v1beta1_ClusterConfiguration(in *kubeadm.ClusterConfiguration, out *ClusterConfiguration, s conversion.Scope) error { + // INFO: in.ComponentConfigs opted out of conversion generation + if err := Convert_kubeadm_Etcd_To_v1beta1_Etcd(&in.Etcd, &out.Etcd, s); err != nil { + return err + } + if err := Convert_kubeadm_Networking_To_v1beta1_Networking(&in.Networking, &out.Networking, s); err != nil { + return err + } + out.KubernetesVersion = in.KubernetesVersion + out.ControlPlaneEndpoint = in.ControlPlaneEndpoint + if err := Convert_kubeadm_APIServer_To_v1beta1_APIServer(&in.APIServer, &out.APIServer, s); err != nil { + return err + } + if err := Convert_kubeadm_ControlPlaneComponent_To_v1beta1_ControlPlaneComponent(&in.ControllerManager, &out.ControllerManager, s); err != nil { + return err + } + if err := Convert_kubeadm_ControlPlaneComponent_To_v1beta1_ControlPlaneComponent(&in.Scheduler, &out.Scheduler, s); err != nil { + return err + } + if err := Convert_kubeadm_DNS_To_v1beta1_DNS(&in.DNS, &out.DNS, s); err != nil { + return err + } + out.CertificatesDir = in.CertificatesDir + out.ImageRepository = in.ImageRepository + // INFO: in.CIImageRepository opted out of conversion generation + out.UseHyperKubeImage = in.UseHyperKubeImage + out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) + out.ClusterName = in.ClusterName + return nil +} + +// Convert_kubeadm_ClusterConfiguration_To_v1beta1_ClusterConfiguration is an autogenerated conversion function. 
+func Convert_kubeadm_ClusterConfiguration_To_v1beta1_ClusterConfiguration(in *kubeadm.ClusterConfiguration, out *ClusterConfiguration, s conversion.Scope) error { + return autoConvert_kubeadm_ClusterConfiguration_To_v1beta1_ClusterConfiguration(in, out, s) +} + +func autoConvert_v1beta1_ClusterStatus_To_kubeadm_ClusterStatus(in *ClusterStatus, out *kubeadm.ClusterStatus, s conversion.Scope) error { + out.APIEndpoints = *(*map[string]kubeadm.APIEndpoint)(unsafe.Pointer(&in.APIEndpoints)) + return nil +} + +// Convert_v1beta1_ClusterStatus_To_kubeadm_ClusterStatus is an autogenerated conversion function. +func Convert_v1beta1_ClusterStatus_To_kubeadm_ClusterStatus(in *ClusterStatus, out *kubeadm.ClusterStatus, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterStatus_To_kubeadm_ClusterStatus(in, out, s) +} + +func autoConvert_kubeadm_ClusterStatus_To_v1beta1_ClusterStatus(in *kubeadm.ClusterStatus, out *ClusterStatus, s conversion.Scope) error { + out.APIEndpoints = *(*map[string]APIEndpoint)(unsafe.Pointer(&in.APIEndpoints)) + return nil +} + +// Convert_kubeadm_ClusterStatus_To_v1beta1_ClusterStatus is an autogenerated conversion function. +func Convert_kubeadm_ClusterStatus_To_v1beta1_ClusterStatus(in *kubeadm.ClusterStatus, out *ClusterStatus, s conversion.Scope) error { + return autoConvert_kubeadm_ClusterStatus_To_v1beta1_ClusterStatus(in, out, s) +} + +func autoConvert_v1beta1_ControlPlaneComponent_To_kubeadm_ControlPlaneComponent(in *ControlPlaneComponent, out *kubeadm.ControlPlaneComponent, s conversion.Scope) error { + out.ExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.ExtraArgs)) + out.ExtraVolumes = *(*[]kubeadm.HostPathMount)(unsafe.Pointer(&in.ExtraVolumes)) + return nil +} + +// Convert_v1beta1_ControlPlaneComponent_To_kubeadm_ControlPlaneComponent is an autogenerated conversion function. +func Convert_v1beta1_ControlPlaneComponent_To_kubeadm_ControlPlaneComponent(in *ControlPlaneComponent, out *kubeadm.ControlPlaneComponent, s conversion.Scope) error { + return autoConvert_v1beta1_ControlPlaneComponent_To_kubeadm_ControlPlaneComponent(in, out, s) +} + +func autoConvert_kubeadm_ControlPlaneComponent_To_v1beta1_ControlPlaneComponent(in *kubeadm.ControlPlaneComponent, out *ControlPlaneComponent, s conversion.Scope) error { + out.ExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.ExtraArgs)) + out.ExtraVolumes = *(*[]HostPathMount)(unsafe.Pointer(&in.ExtraVolumes)) + return nil +} + +// Convert_kubeadm_ControlPlaneComponent_To_v1beta1_ControlPlaneComponent is an autogenerated conversion function. +func Convert_kubeadm_ControlPlaneComponent_To_v1beta1_ControlPlaneComponent(in *kubeadm.ControlPlaneComponent, out *ControlPlaneComponent, s conversion.Scope) error { + return autoConvert_kubeadm_ControlPlaneComponent_To_v1beta1_ControlPlaneComponent(in, out, s) +} + +func autoConvert_v1beta1_DNS_To_kubeadm_DNS(in *DNS, out *kubeadm.DNS, s conversion.Scope) error { + out.Type = kubeadm.DNSAddOnType(in.Type) + if err := Convert_v1beta1_ImageMeta_To_kubeadm_ImageMeta(&in.ImageMeta, &out.ImageMeta, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_DNS_To_kubeadm_DNS is an autogenerated conversion function. 
+func Convert_v1beta1_DNS_To_kubeadm_DNS(in *DNS, out *kubeadm.DNS, s conversion.Scope) error { + return autoConvert_v1beta1_DNS_To_kubeadm_DNS(in, out, s) +} + +func autoConvert_kubeadm_DNS_To_v1beta1_DNS(in *kubeadm.DNS, out *DNS, s conversion.Scope) error { + out.Type = DNSAddOnType(in.Type) + if err := Convert_kubeadm_ImageMeta_To_v1beta1_ImageMeta(&in.ImageMeta, &out.ImageMeta, s); err != nil { + return err + } + return nil +} + +// Convert_kubeadm_DNS_To_v1beta1_DNS is an autogenerated conversion function. +func Convert_kubeadm_DNS_To_v1beta1_DNS(in *kubeadm.DNS, out *DNS, s conversion.Scope) error { + return autoConvert_kubeadm_DNS_To_v1beta1_DNS(in, out, s) +} + +func autoConvert_v1beta1_Discovery_To_kubeadm_Discovery(in *Discovery, out *kubeadm.Discovery, s conversion.Scope) error { + out.BootstrapToken = (*kubeadm.BootstrapTokenDiscovery)(unsafe.Pointer(in.BootstrapToken)) + out.File = (*kubeadm.FileDiscovery)(unsafe.Pointer(in.File)) + out.TLSBootstrapToken = in.TLSBootstrapToken + out.Timeout = (*v1.Duration)(unsafe.Pointer(in.Timeout)) + return nil +} + +// Convert_v1beta1_Discovery_To_kubeadm_Discovery is an autogenerated conversion function. +func Convert_v1beta1_Discovery_To_kubeadm_Discovery(in *Discovery, out *kubeadm.Discovery, s conversion.Scope) error { + return autoConvert_v1beta1_Discovery_To_kubeadm_Discovery(in, out, s) +} + +func autoConvert_kubeadm_Discovery_To_v1beta1_Discovery(in *kubeadm.Discovery, out *Discovery, s conversion.Scope) error { + out.BootstrapToken = (*BootstrapTokenDiscovery)(unsafe.Pointer(in.BootstrapToken)) + out.File = (*FileDiscovery)(unsafe.Pointer(in.File)) + out.TLSBootstrapToken = in.TLSBootstrapToken + out.Timeout = (*v1.Duration)(unsafe.Pointer(in.Timeout)) + return nil +} + +// Convert_kubeadm_Discovery_To_v1beta1_Discovery is an autogenerated conversion function. +func Convert_kubeadm_Discovery_To_v1beta1_Discovery(in *kubeadm.Discovery, out *Discovery, s conversion.Scope) error { + return autoConvert_kubeadm_Discovery_To_v1beta1_Discovery(in, out, s) +} + +func autoConvert_v1beta1_Etcd_To_kubeadm_Etcd(in *Etcd, out *kubeadm.Etcd, s conversion.Scope) error { + out.Local = (*kubeadm.LocalEtcd)(unsafe.Pointer(in.Local)) + out.External = (*kubeadm.ExternalEtcd)(unsafe.Pointer(in.External)) + return nil +} + +// Convert_v1beta1_Etcd_To_kubeadm_Etcd is an autogenerated conversion function. +func Convert_v1beta1_Etcd_To_kubeadm_Etcd(in *Etcd, out *kubeadm.Etcd, s conversion.Scope) error { + return autoConvert_v1beta1_Etcd_To_kubeadm_Etcd(in, out, s) +} + +func autoConvert_kubeadm_Etcd_To_v1beta1_Etcd(in *kubeadm.Etcd, out *Etcd, s conversion.Scope) error { + out.Local = (*LocalEtcd)(unsafe.Pointer(in.Local)) + out.External = (*ExternalEtcd)(unsafe.Pointer(in.External)) + return nil +} + +// Convert_kubeadm_Etcd_To_v1beta1_Etcd is an autogenerated conversion function. +func Convert_kubeadm_Etcd_To_v1beta1_Etcd(in *kubeadm.Etcd, out *Etcd, s conversion.Scope) error { + return autoConvert_kubeadm_Etcd_To_v1beta1_Etcd(in, out, s) +} + +func autoConvert_v1beta1_ExternalEtcd_To_kubeadm_ExternalEtcd(in *ExternalEtcd, out *kubeadm.ExternalEtcd, s conversion.Scope) error { + out.Endpoints = *(*[]string)(unsafe.Pointer(&in.Endpoints)) + out.CAFile = in.CAFile + out.CertFile = in.CertFile + out.KeyFile = in.KeyFile + return nil +} + +// Convert_v1beta1_ExternalEtcd_To_kubeadm_ExternalEtcd is an autogenerated conversion function. 
+func Convert_v1beta1_ExternalEtcd_To_kubeadm_ExternalEtcd(in *ExternalEtcd, out *kubeadm.ExternalEtcd, s conversion.Scope) error { + return autoConvert_v1beta1_ExternalEtcd_To_kubeadm_ExternalEtcd(in, out, s) +} + +func autoConvert_kubeadm_ExternalEtcd_To_v1beta1_ExternalEtcd(in *kubeadm.ExternalEtcd, out *ExternalEtcd, s conversion.Scope) error { + out.Endpoints = *(*[]string)(unsafe.Pointer(&in.Endpoints)) + out.CAFile = in.CAFile + out.CertFile = in.CertFile + out.KeyFile = in.KeyFile + return nil +} + +// Convert_kubeadm_ExternalEtcd_To_v1beta1_ExternalEtcd is an autogenerated conversion function. +func Convert_kubeadm_ExternalEtcd_To_v1beta1_ExternalEtcd(in *kubeadm.ExternalEtcd, out *ExternalEtcd, s conversion.Scope) error { + return autoConvert_kubeadm_ExternalEtcd_To_v1beta1_ExternalEtcd(in, out, s) +} + +func autoConvert_v1beta1_FileDiscovery_To_kubeadm_FileDiscovery(in *FileDiscovery, out *kubeadm.FileDiscovery, s conversion.Scope) error { + out.KubeConfigPath = in.KubeConfigPath + return nil +} + +// Convert_v1beta1_FileDiscovery_To_kubeadm_FileDiscovery is an autogenerated conversion function. +func Convert_v1beta1_FileDiscovery_To_kubeadm_FileDiscovery(in *FileDiscovery, out *kubeadm.FileDiscovery, s conversion.Scope) error { + return autoConvert_v1beta1_FileDiscovery_To_kubeadm_FileDiscovery(in, out, s) +} + +func autoConvert_kubeadm_FileDiscovery_To_v1beta1_FileDiscovery(in *kubeadm.FileDiscovery, out *FileDiscovery, s conversion.Scope) error { + out.KubeConfigPath = in.KubeConfigPath + return nil +} + +// Convert_kubeadm_FileDiscovery_To_v1beta1_FileDiscovery is an autogenerated conversion function. +func Convert_kubeadm_FileDiscovery_To_v1beta1_FileDiscovery(in *kubeadm.FileDiscovery, out *FileDiscovery, s conversion.Scope) error { + return autoConvert_kubeadm_FileDiscovery_To_v1beta1_FileDiscovery(in, out, s) +} + +func autoConvert_v1beta1_HostPathMount_To_kubeadm_HostPathMount(in *HostPathMount, out *kubeadm.HostPathMount, s conversion.Scope) error { + out.Name = in.Name + out.HostPath = in.HostPath + out.MountPath = in.MountPath + out.ReadOnly = in.ReadOnly + out.PathType = corev1.HostPathType(in.PathType) + return nil +} + +// Convert_v1beta1_HostPathMount_To_kubeadm_HostPathMount is an autogenerated conversion function. +func Convert_v1beta1_HostPathMount_To_kubeadm_HostPathMount(in *HostPathMount, out *kubeadm.HostPathMount, s conversion.Scope) error { + return autoConvert_v1beta1_HostPathMount_To_kubeadm_HostPathMount(in, out, s) +} + +func autoConvert_kubeadm_HostPathMount_To_v1beta1_HostPathMount(in *kubeadm.HostPathMount, out *HostPathMount, s conversion.Scope) error { + out.Name = in.Name + out.HostPath = in.HostPath + out.MountPath = in.MountPath + out.ReadOnly = in.ReadOnly + out.PathType = corev1.HostPathType(in.PathType) + return nil +} + +// Convert_kubeadm_HostPathMount_To_v1beta1_HostPathMount is an autogenerated conversion function. +func Convert_kubeadm_HostPathMount_To_v1beta1_HostPathMount(in *kubeadm.HostPathMount, out *HostPathMount, s conversion.Scope) error { + return autoConvert_kubeadm_HostPathMount_To_v1beta1_HostPathMount(in, out, s) +} + +func autoConvert_v1beta1_ImageMeta_To_kubeadm_ImageMeta(in *ImageMeta, out *kubeadm.ImageMeta, s conversion.Scope) error { + out.ImageRepository = in.ImageRepository + out.ImageTag = in.ImageTag + return nil +} + +// Convert_v1beta1_ImageMeta_To_kubeadm_ImageMeta is an autogenerated conversion function. 
+func Convert_v1beta1_ImageMeta_To_kubeadm_ImageMeta(in *ImageMeta, out *kubeadm.ImageMeta, s conversion.Scope) error { + return autoConvert_v1beta1_ImageMeta_To_kubeadm_ImageMeta(in, out, s) +} + +func autoConvert_kubeadm_ImageMeta_To_v1beta1_ImageMeta(in *kubeadm.ImageMeta, out *ImageMeta, s conversion.Scope) error { + out.ImageRepository = in.ImageRepository + out.ImageTag = in.ImageTag + return nil +} + +// Convert_kubeadm_ImageMeta_To_v1beta1_ImageMeta is an autogenerated conversion function. +func Convert_kubeadm_ImageMeta_To_v1beta1_ImageMeta(in *kubeadm.ImageMeta, out *ImageMeta, s conversion.Scope) error { + return autoConvert_kubeadm_ImageMeta_To_v1beta1_ImageMeta(in, out, s) +} + +func autoConvert_v1beta1_InitConfiguration_To_kubeadm_InitConfiguration(in *InitConfiguration, out *kubeadm.InitConfiguration, s conversion.Scope) error { + if err := Convert_v1beta1_ClusterConfiguration_To_kubeadm_ClusterConfiguration(&in.ClusterConfiguration, &out.ClusterConfiguration, s); err != nil { + return err + } + out.BootstrapTokens = *(*[]kubeadm.BootstrapToken)(unsafe.Pointer(&in.BootstrapTokens)) + if err := Convert_v1beta1_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil { + return err + } + if err := Convert_v1beta1_APIEndpoint_To_kubeadm_APIEndpoint(&in.LocalAPIEndpoint, &out.LocalAPIEndpoint, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_InitConfiguration_To_kubeadm_InitConfiguration is an autogenerated conversion function. +func Convert_v1beta1_InitConfiguration_To_kubeadm_InitConfiguration(in *InitConfiguration, out *kubeadm.InitConfiguration, s conversion.Scope) error { + return autoConvert_v1beta1_InitConfiguration_To_kubeadm_InitConfiguration(in, out, s) +} + +func autoConvert_kubeadm_InitConfiguration_To_v1beta1_InitConfiguration(in *kubeadm.InitConfiguration, out *InitConfiguration, s conversion.Scope) error { + if err := Convert_kubeadm_ClusterConfiguration_To_v1beta1_ClusterConfiguration(&in.ClusterConfiguration, &out.ClusterConfiguration, s); err != nil { + return err + } + out.BootstrapTokens = *(*[]BootstrapToken)(unsafe.Pointer(&in.BootstrapTokens)) + if err := Convert_kubeadm_NodeRegistrationOptions_To_v1beta1_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil { + return err + } + if err := Convert_kubeadm_APIEndpoint_To_v1beta1_APIEndpoint(&in.LocalAPIEndpoint, &out.LocalAPIEndpoint, s); err != nil { + return err + } + return nil +} + +// Convert_kubeadm_InitConfiguration_To_v1beta1_InitConfiguration is an autogenerated conversion function. 
+func Convert_kubeadm_InitConfiguration_To_v1beta1_InitConfiguration(in *kubeadm.InitConfiguration, out *InitConfiguration, s conversion.Scope) error { + return autoConvert_kubeadm_InitConfiguration_To_v1beta1_InitConfiguration(in, out, s) +} + +func autoConvert_v1beta1_JoinConfiguration_To_kubeadm_JoinConfiguration(in *JoinConfiguration, out *kubeadm.JoinConfiguration, s conversion.Scope) error { + if err := Convert_v1beta1_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil { + return err + } + out.CACertPath = in.CACertPath + if err := Convert_v1beta1_Discovery_To_kubeadm_Discovery(&in.Discovery, &out.Discovery, s); err != nil { + return err + } + out.ControlPlane = (*kubeadm.JoinControlPlane)(unsafe.Pointer(in.ControlPlane)) + return nil +} + +// Convert_v1beta1_JoinConfiguration_To_kubeadm_JoinConfiguration is an autogenerated conversion function. +func Convert_v1beta1_JoinConfiguration_To_kubeadm_JoinConfiguration(in *JoinConfiguration, out *kubeadm.JoinConfiguration, s conversion.Scope) error { + return autoConvert_v1beta1_JoinConfiguration_To_kubeadm_JoinConfiguration(in, out, s) +} + +func autoConvert_kubeadm_JoinConfiguration_To_v1beta1_JoinConfiguration(in *kubeadm.JoinConfiguration, out *JoinConfiguration, s conversion.Scope) error { + if err := Convert_kubeadm_NodeRegistrationOptions_To_v1beta1_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil { + return err + } + out.CACertPath = in.CACertPath + if err := Convert_kubeadm_Discovery_To_v1beta1_Discovery(&in.Discovery, &out.Discovery, s); err != nil { + return err + } + out.ControlPlane = (*JoinControlPlane)(unsafe.Pointer(in.ControlPlane)) + return nil +} + +// Convert_kubeadm_JoinConfiguration_To_v1beta1_JoinConfiguration is an autogenerated conversion function. +func Convert_kubeadm_JoinConfiguration_To_v1beta1_JoinConfiguration(in *kubeadm.JoinConfiguration, out *JoinConfiguration, s conversion.Scope) error { + return autoConvert_kubeadm_JoinConfiguration_To_v1beta1_JoinConfiguration(in, out, s) +} + +func autoConvert_v1beta1_JoinControlPlane_To_kubeadm_JoinControlPlane(in *JoinControlPlane, out *kubeadm.JoinControlPlane, s conversion.Scope) error { + if err := Convert_v1beta1_APIEndpoint_To_kubeadm_APIEndpoint(&in.LocalAPIEndpoint, &out.LocalAPIEndpoint, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_JoinControlPlane_To_kubeadm_JoinControlPlane is an autogenerated conversion function. +func Convert_v1beta1_JoinControlPlane_To_kubeadm_JoinControlPlane(in *JoinControlPlane, out *kubeadm.JoinControlPlane, s conversion.Scope) error { + return autoConvert_v1beta1_JoinControlPlane_To_kubeadm_JoinControlPlane(in, out, s) +} + +func autoConvert_kubeadm_JoinControlPlane_To_v1beta1_JoinControlPlane(in *kubeadm.JoinControlPlane, out *JoinControlPlane, s conversion.Scope) error { + if err := Convert_kubeadm_APIEndpoint_To_v1beta1_APIEndpoint(&in.LocalAPIEndpoint, &out.LocalAPIEndpoint, s); err != nil { + return err + } + return nil +} + +// Convert_kubeadm_JoinControlPlane_To_v1beta1_JoinControlPlane is an autogenerated conversion function. 
+func Convert_kubeadm_JoinControlPlane_To_v1beta1_JoinControlPlane(in *kubeadm.JoinControlPlane, out *JoinControlPlane, s conversion.Scope) error { + return autoConvert_kubeadm_JoinControlPlane_To_v1beta1_JoinControlPlane(in, out, s) +} + +func autoConvert_v1beta1_LocalEtcd_To_kubeadm_LocalEtcd(in *LocalEtcd, out *kubeadm.LocalEtcd, s conversion.Scope) error { + if err := Convert_v1beta1_ImageMeta_To_kubeadm_ImageMeta(&in.ImageMeta, &out.ImageMeta, s); err != nil { + return err + } + out.DataDir = in.DataDir + out.ExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.ExtraArgs)) + out.ServerCertSANs = *(*[]string)(unsafe.Pointer(&in.ServerCertSANs)) + out.PeerCertSANs = *(*[]string)(unsafe.Pointer(&in.PeerCertSANs)) + return nil +} + +// Convert_v1beta1_LocalEtcd_To_kubeadm_LocalEtcd is an autogenerated conversion function. +func Convert_v1beta1_LocalEtcd_To_kubeadm_LocalEtcd(in *LocalEtcd, out *kubeadm.LocalEtcd, s conversion.Scope) error { + return autoConvert_v1beta1_LocalEtcd_To_kubeadm_LocalEtcd(in, out, s) +} + +func autoConvert_kubeadm_LocalEtcd_To_v1beta1_LocalEtcd(in *kubeadm.LocalEtcd, out *LocalEtcd, s conversion.Scope) error { + if err := Convert_kubeadm_ImageMeta_To_v1beta1_ImageMeta(&in.ImageMeta, &out.ImageMeta, s); err != nil { + return err + } + out.DataDir = in.DataDir + out.ExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.ExtraArgs)) + out.ServerCertSANs = *(*[]string)(unsafe.Pointer(&in.ServerCertSANs)) + out.PeerCertSANs = *(*[]string)(unsafe.Pointer(&in.PeerCertSANs)) + return nil +} + +// Convert_kubeadm_LocalEtcd_To_v1beta1_LocalEtcd is an autogenerated conversion function. +func Convert_kubeadm_LocalEtcd_To_v1beta1_LocalEtcd(in *kubeadm.LocalEtcd, out *LocalEtcd, s conversion.Scope) error { + return autoConvert_kubeadm_LocalEtcd_To_v1beta1_LocalEtcd(in, out, s) +} + +func autoConvert_v1beta1_Networking_To_kubeadm_Networking(in *Networking, out *kubeadm.Networking, s conversion.Scope) error { + out.ServiceSubnet = in.ServiceSubnet + out.PodSubnet = in.PodSubnet + out.DNSDomain = in.DNSDomain + return nil +} + +// Convert_v1beta1_Networking_To_kubeadm_Networking is an autogenerated conversion function. +func Convert_v1beta1_Networking_To_kubeadm_Networking(in *Networking, out *kubeadm.Networking, s conversion.Scope) error { + return autoConvert_v1beta1_Networking_To_kubeadm_Networking(in, out, s) +} + +func autoConvert_kubeadm_Networking_To_v1beta1_Networking(in *kubeadm.Networking, out *Networking, s conversion.Scope) error { + out.ServiceSubnet = in.ServiceSubnet + out.PodSubnet = in.PodSubnet + out.DNSDomain = in.DNSDomain + return nil +} + +// Convert_kubeadm_Networking_To_v1beta1_Networking is an autogenerated conversion function. +func Convert_kubeadm_Networking_To_v1beta1_Networking(in *kubeadm.Networking, out *Networking, s conversion.Scope) error { + return autoConvert_kubeadm_Networking_To_v1beta1_Networking(in, out, s) +} + +func autoConvert_v1beta1_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions(in *NodeRegistrationOptions, out *kubeadm.NodeRegistrationOptions, s conversion.Scope) error { + out.Name = in.Name + out.CRISocket = in.CRISocket + out.Taints = *(*[]corev1.Taint)(unsafe.Pointer(&in.Taints)) + out.KubeletExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.KubeletExtraArgs)) + return nil +} + +// Convert_v1beta1_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions is an autogenerated conversion function. 
+func Convert_v1beta1_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions(in *NodeRegistrationOptions, out *kubeadm.NodeRegistrationOptions, s conversion.Scope) error { + return autoConvert_v1beta1_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions(in, out, s) +} + +func autoConvert_kubeadm_NodeRegistrationOptions_To_v1beta1_NodeRegistrationOptions(in *kubeadm.NodeRegistrationOptions, out *NodeRegistrationOptions, s conversion.Scope) error { + out.Name = in.Name + out.CRISocket = in.CRISocket + out.Taints = *(*[]corev1.Taint)(unsafe.Pointer(&in.Taints)) + out.KubeletExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.KubeletExtraArgs)) + return nil +} + +// Convert_kubeadm_NodeRegistrationOptions_To_v1beta1_NodeRegistrationOptions is an autogenerated conversion function. +func Convert_kubeadm_NodeRegistrationOptions_To_v1beta1_NodeRegistrationOptions(in *kubeadm.NodeRegistrationOptions, out *NodeRegistrationOptions, s conversion.Scope) error { + return autoConvert_kubeadm_NodeRegistrationOptions_To_v1beta1_NodeRegistrationOptions(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..d6cdfda335 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,552 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIEndpoint) DeepCopyInto(out *APIEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIEndpoint. +func (in *APIEndpoint) DeepCopy() *APIEndpoint { + if in == nil { + return nil + } + out := new(APIEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServer) DeepCopyInto(out *APIServer) { + *out = *in + in.ControlPlaneComponent.DeepCopyInto(&out.ControlPlaneComponent) + if in.CertSANs != nil { + in, out := &in.CertSANs, &out.CertSANs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.TimeoutForControlPlane != nil { + in, out := &in.TimeoutForControlPlane, &out.TimeoutForControlPlane + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServer. 
+func (in *APIServer) DeepCopy() *APIServer { + if in == nil { + return nil + } + out := new(APIServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BootstrapToken) DeepCopyInto(out *BootstrapToken) { + *out = *in + if in.Token != nil { + in, out := &in.Token, &out.Token + *out = new(BootstrapTokenString) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(v1.Duration) + **out = **in + } + if in.Expires != nil { + in, out := &in.Expires, &out.Expires + *out = (*in).DeepCopy() + } + if in.Usages != nil { + in, out := &in.Usages, &out.Usages + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootstrapToken. +func (in *BootstrapToken) DeepCopy() *BootstrapToken { + if in == nil { + return nil + } + out := new(BootstrapToken) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BootstrapTokenDiscovery) DeepCopyInto(out *BootstrapTokenDiscovery) { + *out = *in + if in.CACertHashes != nil { + in, out := &in.CACertHashes, &out.CACertHashes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootstrapTokenDiscovery. +func (in *BootstrapTokenDiscovery) DeepCopy() *BootstrapTokenDiscovery { + if in == nil { + return nil + } + out := new(BootstrapTokenDiscovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BootstrapTokenString) DeepCopyInto(out *BootstrapTokenString) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootstrapTokenString. +func (in *BootstrapTokenString) DeepCopy() *BootstrapTokenString { + if in == nil { + return nil + } + out := new(BootstrapTokenString) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterConfiguration) DeepCopyInto(out *ClusterConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Etcd.DeepCopyInto(&out.Etcd) + out.Networking = in.Networking + in.APIServer.DeepCopyInto(&out.APIServer) + in.ControllerManager.DeepCopyInto(&out.ControllerManager) + in.Scheduler.DeepCopyInto(&out.Scheduler) + out.DNS = in.DNS + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfiguration. +func (in *ClusterConfiguration) DeepCopy() *ClusterConfiguration { + if in == nil { + return nil + } + out := new(ClusterConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.APIEndpoints != nil { + in, out := &in.APIEndpoints, &out.APIEndpoints + *out = make(map[string]APIEndpoint, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. +func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneComponent) DeepCopyInto(out *ControlPlaneComponent) { + *out = *in + if in.ExtraArgs != nil { + in, out := &in.ExtraArgs, &out.ExtraArgs + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExtraVolumes != nil { + in, out := &in.ExtraVolumes, &out.ExtraVolumes + *out = make([]HostPathMount, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneComponent. +func (in *ControlPlaneComponent) DeepCopy() *ControlPlaneComponent { + if in == nil { + return nil + } + out := new(ControlPlaneComponent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNS) DeepCopyInto(out *DNS) { + *out = *in + out.ImageMeta = in.ImageMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS. +func (in *DNS) DeepCopy() *DNS { + if in == nil { + return nil + } + out := new(DNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Discovery) DeepCopyInto(out *Discovery) { + *out = *in + if in.BootstrapToken != nil { + in, out := &in.BootstrapToken, &out.BootstrapToken + *out = new(BootstrapTokenDiscovery) + (*in).DeepCopyInto(*out) + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(FileDiscovery) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Discovery. +func (in *Discovery) DeepCopy() *Discovery { + if in == nil { + return nil + } + out := new(Discovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Etcd) DeepCopyInto(out *Etcd) { + *out = *in + if in.Local != nil { + in, out := &in.Local, &out.Local + *out = new(LocalEtcd) + (*in).DeepCopyInto(*out) + } + if in.External != nil { + in, out := &in.External, &out.External + *out = new(ExternalEtcd) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Etcd. +func (in *Etcd) DeepCopy() *Etcd { + if in == nil { + return nil + } + out := new(Etcd) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalEtcd) DeepCopyInto(out *ExternalEtcd) { + *out = *in + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalEtcd. +func (in *ExternalEtcd) DeepCopy() *ExternalEtcd { + if in == nil { + return nil + } + out := new(ExternalEtcd) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileDiscovery) DeepCopyInto(out *FileDiscovery) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileDiscovery. +func (in *FileDiscovery) DeepCopy() *FileDiscovery { + if in == nil { + return nil + } + out := new(FileDiscovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostPathMount) DeepCopyInto(out *HostPathMount) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPathMount. +func (in *HostPathMount) DeepCopy() *HostPathMount { + if in == nil { + return nil + } + out := new(HostPathMount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageMeta) DeepCopyInto(out *ImageMeta) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageMeta. +func (in *ImageMeta) DeepCopy() *ImageMeta { + if in == nil { + return nil + } + out := new(ImageMeta) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitConfiguration) DeepCopyInto(out *InitConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ClusterConfiguration.DeepCopyInto(&out.ClusterConfiguration) + if in.BootstrapTokens != nil { + in, out := &in.BootstrapTokens, &out.BootstrapTokens + *out = make([]BootstrapToken, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.NodeRegistration.DeepCopyInto(&out.NodeRegistration) + out.LocalAPIEndpoint = in.LocalAPIEndpoint + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitConfiguration. +func (in *InitConfiguration) DeepCopy() *InitConfiguration { + if in == nil { + return nil + } + out := new(InitConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
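As a reminder of what these generated deep copies guarantee, a small usage sketch (assumed, not from this PR): nested pointers and slices are re-allocated rather than aliased, so mutating the copy never leaks back into the original.

```go
package main

import (
	"fmt"

	kubeadmv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
)

func main() {
	orig := &kubeadmv1beta1.Etcd{
		External: &kubeadmv1beta1.ExternalEtcd{
			Endpoints: []string{"https://10.0.0.1:2379"},
		},
	}
	cp := orig.DeepCopy()
	// The copy owns its own ExternalEtcd pointer and Endpoints slice.
	cp.External.Endpoints[0] = "https://10.0.0.2:2379"

	fmt.Println(orig.External.Endpoints[0]) // still https://10.0.0.1:2379
}
```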
+func (in *InitConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JoinConfiguration) DeepCopyInto(out *JoinConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.NodeRegistration.DeepCopyInto(&out.NodeRegistration) + in.Discovery.DeepCopyInto(&out.Discovery) + if in.ControlPlane != nil { + in, out := &in.ControlPlane, &out.ControlPlane + *out = new(JoinControlPlane) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JoinConfiguration. +func (in *JoinConfiguration) DeepCopy() *JoinConfiguration { + if in == nil { + return nil + } + out := new(JoinConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *JoinConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JoinControlPlane) DeepCopyInto(out *JoinControlPlane) { + *out = *in + out.LocalAPIEndpoint = in.LocalAPIEndpoint + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JoinControlPlane. +func (in *JoinControlPlane) DeepCopy() *JoinControlPlane { + if in == nil { + return nil + } + out := new(JoinControlPlane) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalEtcd) DeepCopyInto(out *LocalEtcd) { + *out = *in + out.ImageMeta = in.ImageMeta + if in.ExtraArgs != nil { + in, out := &in.ExtraArgs, &out.ExtraArgs + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ServerCertSANs != nil { + in, out := &in.ServerCertSANs, &out.ServerCertSANs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PeerCertSANs != nil { + in, out := &in.PeerCertSANs, &out.PeerCertSANs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalEtcd. +func (in *LocalEtcd) DeepCopy() *LocalEtcd { + if in == nil { + return nil + } + out := new(LocalEtcd) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Networking) DeepCopyInto(out *Networking) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Networking. +func (in *Networking) DeepCopy() *Networking { + if in == nil { + return nil + } + out := new(Networking) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeRegistrationOptions) DeepCopyInto(out *NodeRegistrationOptions) { + *out = *in + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]corev1.Taint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KubeletExtraArgs != nil { + in, out := &in.KubeletExtraArgs, &out.KubeletExtraArgs + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeRegistrationOptions. +func (in *NodeRegistrationOptions) DeepCopy() *NodeRegistrationOptions { + if in == nil { + return nil + } + out := new(NodeRegistrationOptions) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/zz_generated.defaults.go new file mode 100644 index 0000000000..4860aec6a2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/zz_generated.defaults.go @@ -0,0 +1,68 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&ClusterConfiguration{}, func(obj interface{}) { SetObjectDefaults_ClusterConfiguration(obj.(*ClusterConfiguration)) }) + scheme.AddTypeDefaultingFunc(&ClusterStatus{}, func(obj interface{}) { SetObjectDefaults_ClusterStatus(obj.(*ClusterStatus)) }) + scheme.AddTypeDefaultingFunc(&InitConfiguration{}, func(obj interface{}) { SetObjectDefaults_InitConfiguration(obj.(*InitConfiguration)) }) + scheme.AddTypeDefaultingFunc(&JoinConfiguration{}, func(obj interface{}) { SetObjectDefaults_JoinConfiguration(obj.(*JoinConfiguration)) }) + return nil +} + +func SetObjectDefaults_ClusterConfiguration(in *ClusterConfiguration) { + SetDefaults_ClusterConfiguration(in) + SetDefaults_APIServer(&in.APIServer) +} + +func SetObjectDefaults_ClusterStatus(in *ClusterStatus) { +} + +func SetObjectDefaults_InitConfiguration(in *InitConfiguration) { + SetDefaults_InitConfiguration(in) + SetObjectDefaults_ClusterConfiguration(&in.ClusterConfiguration) + for i := range in.BootstrapTokens { + a := &in.BootstrapTokens[i] + SetDefaults_BootstrapToken(a) + } + SetDefaults_NodeRegistrationOptions(&in.NodeRegistration) + SetDefaults_APIEndpoint(&in.LocalAPIEndpoint) +} + +func SetObjectDefaults_JoinConfiguration(in *JoinConfiguration) { + SetDefaults_JoinConfiguration(in) + SetDefaults_NodeRegistrationOptions(&in.NodeRegistration) + SetDefaults_Discovery(&in.Discovery) + if in.Discovery.File != nil { + SetDefaults_FileDiscovery(in.Discovery.File) + } + if in.ControlPlane != nil { + SetDefaults_JoinControlPlane(in.ControlPlane) + SetDefaults_APIEndpoint(&in.ControlPlane.LocalAPIEndpoint) + } +} diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go new file mode 100644 index 0000000000..ebdf10bb2e --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go @@ -0,0 +1,581 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package kubeadm + +import ( + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + config "k8s.io/kubernetes/pkg/kubelet/apis/config" + apisconfig "k8s.io/kubernetes/pkg/proxy/apis/config" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIEndpoint) DeepCopyInto(out *APIEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIEndpoint. +func (in *APIEndpoint) DeepCopy() *APIEndpoint { + if in == nil { + return nil + } + out := new(APIEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
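// A minimal sketch (hypothetical wiring, not part of this change) of how the
// generated defaulters registered by RegisterDefaults above get applied: the
// scheme dispatches on the object's type and runs the matching
// SetObjectDefaults_* function, which in turn calls all nested defaulters.
package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/runtime"
    kubeadmv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
)

func main() {
    scheme := runtime.NewScheme()
    if err := kubeadmv1beta1.RegisterDefaults(scheme); err != nil {
        panic(err)
    }

    cfg := &kubeadmv1beta1.InitConfiguration{}
    scheme.Default(cfg) // runs SetObjectDefaults_InitConfiguration

    fmt.Println(cfg.LocalAPIEndpoint.BindPort) // defaulted to a non-zero port
}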
+func (in *APIServer) DeepCopyInto(out *APIServer) { + *out = *in + in.ControlPlaneComponent.DeepCopyInto(&out.ControlPlaneComponent) + if in.CertSANs != nil { + in, out := &in.CertSANs, &out.CertSANs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.TimeoutForControlPlane != nil { + in, out := &in.TimeoutForControlPlane, &out.TimeoutForControlPlane + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServer. +func (in *APIServer) DeepCopy() *APIServer { + if in == nil { + return nil + } + out := new(APIServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BootstrapToken) DeepCopyInto(out *BootstrapToken) { + *out = *in + if in.Token != nil { + in, out := &in.Token, &out.Token + *out = new(BootstrapTokenString) + **out = **in + } + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(v1.Duration) + **out = **in + } + if in.Expires != nil { + in, out := &in.Expires, &out.Expires + *out = (*in).DeepCopy() + } + if in.Usages != nil { + in, out := &in.Usages, &out.Usages + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootstrapToken. +func (in *BootstrapToken) DeepCopy() *BootstrapToken { + if in == nil { + return nil + } + out := new(BootstrapToken) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BootstrapTokenDiscovery) DeepCopyInto(out *BootstrapTokenDiscovery) { + *out = *in + if in.CACertHashes != nil { + in, out := &in.CACertHashes, &out.CACertHashes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootstrapTokenDiscovery. +func (in *BootstrapTokenDiscovery) DeepCopy() *BootstrapTokenDiscovery { + if in == nil { + return nil + } + out := new(BootstrapTokenDiscovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BootstrapTokenString) DeepCopyInto(out *BootstrapTokenString) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootstrapTokenString. +func (in *BootstrapTokenString) DeepCopy() *BootstrapTokenString { + if in == nil { + return nil + } + out := new(BootstrapTokenString) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterConfiguration) DeepCopyInto(out *ClusterConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ComponentConfigs.DeepCopyInto(&out.ComponentConfigs) + in.Etcd.DeepCopyInto(&out.Etcd) + out.Networking = in.Networking + in.APIServer.DeepCopyInto(&out.APIServer) + in.ControllerManager.DeepCopyInto(&out.ControllerManager) + in.Scheduler.DeepCopyInto(&out.Scheduler) + out.DNS = in.DNS + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfiguration. +func (in *ClusterConfiguration) DeepCopy() *ClusterConfiguration { + if in == nil { + return nil + } + out := new(ClusterConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.APIEndpoints != nil { + in, out := &in.APIEndpoints, &out.APIEndpoints + *out = make(map[string]APIEndpoint, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. +func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentConfigs) DeepCopyInto(out *ComponentConfigs) { + *out = *in + if in.Kubelet != nil { + in, out := &in.Kubelet, &out.Kubelet + *out = new(config.KubeletConfiguration) + (*in).DeepCopyInto(*out) + } + if in.KubeProxy != nil { + in, out := &in.KubeProxy, &out.KubeProxy + *out = new(apisconfig.KubeProxyConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentConfigs. +func (in *ComponentConfigs) DeepCopy() *ComponentConfigs { + if in == nil { + return nil + } + out := new(ComponentConfigs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneComponent) DeepCopyInto(out *ControlPlaneComponent) { + *out = *in + if in.ExtraArgs != nil { + in, out := &in.ExtraArgs, &out.ExtraArgs + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExtraVolumes != nil { + in, out := &in.ExtraVolumes, &out.ExtraVolumes + *out = make([]HostPathMount, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneComponent. 
+func (in *ControlPlaneComponent) DeepCopy() *ControlPlaneComponent { + if in == nil { + return nil + } + out := new(ControlPlaneComponent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNS) DeepCopyInto(out *DNS) { + *out = *in + out.ImageMeta = in.ImageMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS. +func (in *DNS) DeepCopy() *DNS { + if in == nil { + return nil + } + out := new(DNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Discovery) DeepCopyInto(out *Discovery) { + *out = *in + if in.BootstrapToken != nil { + in, out := &in.BootstrapToken, &out.BootstrapToken + *out = new(BootstrapTokenDiscovery) + (*in).DeepCopyInto(*out) + } + if in.File != nil { + in, out := &in.File, &out.File + *out = new(FileDiscovery) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Discovery. +func (in *Discovery) DeepCopy() *Discovery { + if in == nil { + return nil + } + out := new(Discovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Etcd) DeepCopyInto(out *Etcd) { + *out = *in + if in.Local != nil { + in, out := &in.Local, &out.Local + *out = new(LocalEtcd) + (*in).DeepCopyInto(*out) + } + if in.External != nil { + in, out := &in.External, &out.External + *out = new(ExternalEtcd) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Etcd. +func (in *Etcd) DeepCopy() *Etcd { + if in == nil { + return nil + } + out := new(Etcd) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalEtcd) DeepCopyInto(out *ExternalEtcd) { + *out = *in + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalEtcd. +func (in *ExternalEtcd) DeepCopy() *ExternalEtcd { + if in == nil { + return nil + } + out := new(ExternalEtcd) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileDiscovery) DeepCopyInto(out *FileDiscovery) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileDiscovery. +func (in *FileDiscovery) DeepCopy() *FileDiscovery { + if in == nil { + return nil + } + out := new(FileDiscovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostPathMount) DeepCopyInto(out *HostPathMount) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPathMount. 
+func (in *HostPathMount) DeepCopy() *HostPathMount { + if in == nil { + return nil + } + out := new(HostPathMount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageMeta) DeepCopyInto(out *ImageMeta) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageMeta. +func (in *ImageMeta) DeepCopy() *ImageMeta { + if in == nil { + return nil + } + out := new(ImageMeta) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InitConfiguration) DeepCopyInto(out *InitConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ClusterConfiguration.DeepCopyInto(&out.ClusterConfiguration) + if in.BootstrapTokens != nil { + in, out := &in.BootstrapTokens, &out.BootstrapTokens + *out = make([]BootstrapToken, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.NodeRegistration.DeepCopyInto(&out.NodeRegistration) + out.LocalAPIEndpoint = in.LocalAPIEndpoint + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitConfiguration. +func (in *InitConfiguration) DeepCopy() *InitConfiguration { + if in == nil { + return nil + } + out := new(InitConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InitConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JoinConfiguration) DeepCopyInto(out *JoinConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.NodeRegistration.DeepCopyInto(&out.NodeRegistration) + in.Discovery.DeepCopyInto(&out.Discovery) + if in.ControlPlane != nil { + in, out := &in.ControlPlane, &out.ControlPlane + *out = new(JoinControlPlane) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JoinConfiguration. +func (in *JoinConfiguration) DeepCopy() *JoinConfiguration { + if in == nil { + return nil + } + out := new(JoinConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *JoinConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JoinControlPlane) DeepCopyInto(out *JoinControlPlane) { + *out = *in + out.LocalAPIEndpoint = in.LocalAPIEndpoint + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JoinControlPlane. +func (in *JoinControlPlane) DeepCopy() *JoinControlPlane { + if in == nil { + return nil + } + out := new(JoinControlPlane) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LocalEtcd) DeepCopyInto(out *LocalEtcd) { + *out = *in + out.ImageMeta = in.ImageMeta + if in.ExtraArgs != nil { + in, out := &in.ExtraArgs, &out.ExtraArgs + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ServerCertSANs != nil { + in, out := &in.ServerCertSANs, &out.ServerCertSANs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PeerCertSANs != nil { + in, out := &in.PeerCertSANs, &out.PeerCertSANs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalEtcd. +func (in *LocalEtcd) DeepCopy() *LocalEtcd { + if in == nil { + return nil + } + out := new(LocalEtcd) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Networking) DeepCopyInto(out *Networking) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Networking. +func (in *Networking) DeepCopy() *Networking { + if in == nil { + return nil + } + out := new(Networking) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeRegistrationOptions) DeepCopyInto(out *NodeRegistrationOptions) { + *out = *in + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]corev1.Taint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.KubeletExtraArgs != nil { + in, out := &in.KubeletExtraArgs, &out.KubeletExtraArgs + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeRegistrationOptions. 
+func (in *NodeRegistrationOptions) DeepCopy() *NodeRegistrationOptions { + if in == nil { + return nil + } + out := new(NodeRegistrationOptions) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/BUILD b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/BUILD new file mode 100644 index 0000000000..7c1d85a611 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/BUILD @@ -0,0 +1,45 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = ["constants.go"], + importpath = "k8s.io/kubernetes/cmd/kubeadm/app/constants", + deps = [ + "//cmd/kubeadm/app/apis/kubeadm:go_default_library", + "//pkg/registry/core/service/ipallocator:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", + "//staging/src/k8s.io/cluster-bootstrap/token/api:go_default_library", + "//vendor/github.com/pkg/errors:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) + +go_test( + name = "go_default_test", + srcs = ["constants_test.go"], + embed = [":go_default_library"], + deps = [ + "//cmd/kubeadm/app/apis/kubeadm:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", + "//vendor/github.com/pkg/errors:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/constants.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/constants.go new file mode 100644 index 0000000000..da728133f7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/constants.go @@ -0,0 +1,509 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package constants + +import ( + "fmt" + "io/ioutil" + "net" + "os" + "path" + "path/filepath" + "time" + + "github.com/pkg/errors" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/version" + bootstrapapi "k8s.io/cluster-bootstrap/token/api" + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" + "k8s.io/kubernetes/pkg/registry/core/service/ipallocator" +) + +// KubernetesDir is the directory Kubernetes owns for storing various configuration files +// This semi-constant MUST NOT be modified during runtime. It's a variable solely for use in unit testing. +var KubernetesDir = "/etc/kubernetes" + +const ( + // ManifestsSubDirName defines directory name to store manifests + ManifestsSubDirName = "manifests" + // TempDirForKubeadm defines temporary directory for kubeadm + // should be joined with KubernetesDir. 
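// KubernetesDir is left as a package-level variable precisely so tests can
// redirect file I/O; a hedged sketch of that pattern (hypothetical test, not
// part of this change):
package constants_test

import (
    "io/ioutil"
    "os"
    "testing"

    "k8s.io/kubernetes/cmd/kubeadm/app/constants"
)

func TestWithTempKubernetesDir(t *testing.T) {
    tmp, err := ioutil.TempDir("", "kubeadm-test")
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(tmp)

    // Point the package at a throwaway directory for the duration of the test,
    // restoring the real /etc/kubernetes afterwards.
    old := constants.KubernetesDir
    constants.KubernetesDir = tmp
    defer func() { constants.KubernetesDir = old }()

    // ... exercise code that reads or writes under KubernetesDir ...
}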
+ TempDirForKubeadm = "tmp" + + // CACertAndKeyBaseName defines certificate authority base name + CACertAndKeyBaseName = "ca" + // CACertName defines certificate name + CACertName = "ca.crt" + // CAKeyName defines certificate name + CAKeyName = "ca.key" + + // APIServerCertAndKeyBaseName defines API's server certificate and key base name + APIServerCertAndKeyBaseName = "apiserver" + // APIServerCertName defines API's server certificate name + APIServerCertName = "apiserver.crt" + // APIServerKeyName defines API's server key name + APIServerKeyName = "apiserver.key" + // APIServerCertCommonName defines API's server certificate common name (CN) + APIServerCertCommonName = "kube-apiserver" + + // APIServerKubeletClientCertAndKeyBaseName defines kubelet client certificate and key base name + APIServerKubeletClientCertAndKeyBaseName = "apiserver-kubelet-client" + // APIServerKubeletClientCertName defines kubelet client certificate name + APIServerKubeletClientCertName = "apiserver-kubelet-client.crt" + // APIServerKubeletClientKeyName defines kubelet client key name + APIServerKubeletClientKeyName = "apiserver-kubelet-client.key" + // APIServerKubeletClientCertCommonName defines kubelet client certificate common name (CN) + APIServerKubeletClientCertCommonName = "kube-apiserver-kubelet-client" + + // EtcdCACertAndKeyBaseName defines etcd's CA certificate and key base name + EtcdCACertAndKeyBaseName = "etcd/ca" + // EtcdCACertName defines etcd's CA certificate name + EtcdCACertName = "etcd/ca.crt" + // EtcdCAKeyName defines etcd's CA key name + EtcdCAKeyName = "etcd/ca.key" + + // EtcdServerCertAndKeyBaseName defines etcd's server certificate and key base name + EtcdServerCertAndKeyBaseName = "etcd/server" + // EtcdServerCertName defines etcd's server certificate name + EtcdServerCertName = "etcd/server.crt" + // EtcdServerKeyName defines etcd's server key name + EtcdServerKeyName = "etcd/server.key" + + // EtcdListenClientPort defines the port etcd listen on for client traffic + EtcdListenClientPort = 2379 + + // EtcdPeerCertAndKeyBaseName defines etcd's peer certificate and key base name + EtcdPeerCertAndKeyBaseName = "etcd/peer" + // EtcdPeerCertName defines etcd's peer certificate name + EtcdPeerCertName = "etcd/peer.crt" + // EtcdPeerKeyName defines etcd's peer key name + EtcdPeerKeyName = "etcd/peer.key" + + // EtcdListenPeerPort defines the port etcd listen on for peer traffic + EtcdListenPeerPort = 2380 + + // EtcdHealthcheckClientCertAndKeyBaseName defines etcd's healthcheck client certificate and key base name + EtcdHealthcheckClientCertAndKeyBaseName = "etcd/healthcheck-client" + // EtcdHealthcheckClientCertName defines etcd's healthcheck client certificate name + EtcdHealthcheckClientCertName = "etcd/healthcheck-client.crt" + // EtcdHealthcheckClientKeyName defines etcd's healthcheck client key name + EtcdHealthcheckClientKeyName = "etcd/healthcheck-client.key" + // EtcdHealthcheckClientCertCommonName defines etcd's healthcheck client certificate common name (CN) + EtcdHealthcheckClientCertCommonName = "kube-etcd-healthcheck-client" + + // APIServerEtcdClientCertAndKeyBaseName defines apiserver's etcd client certificate and key base name + APIServerEtcdClientCertAndKeyBaseName = "apiserver-etcd-client" + // APIServerEtcdClientCertName defines apiserver's etcd client certificate name + APIServerEtcdClientCertName = "apiserver-etcd-client.crt" + // APIServerEtcdClientKeyName defines apiserver's etcd client key name + APIServerEtcdClientKeyName = "apiserver-etcd-client.key" + // 
APIServerEtcdClientCertCommonName defines apiserver's etcd client certificate common name (CN) + APIServerEtcdClientCertCommonName = "kube-apiserver-etcd-client" + + // ServiceAccountKeyBaseName defines SA key base name + ServiceAccountKeyBaseName = "sa" + // ServiceAccountPublicKeyName defines SA public key base name + ServiceAccountPublicKeyName = "sa.pub" + // ServiceAccountPrivateKeyName defines SA private key base name + ServiceAccountPrivateKeyName = "sa.key" + + // FrontProxyCACertAndKeyBaseName defines front proxy CA certificate and key base name + FrontProxyCACertAndKeyBaseName = "front-proxy-ca" + // FrontProxyCACertName defines front proxy CA certificate name + FrontProxyCACertName = "front-proxy-ca.crt" + // FrontProxyCAKeyName defines front proxy CA key name + FrontProxyCAKeyName = "front-proxy-ca.key" + + // FrontProxyClientCertAndKeyBaseName defines front proxy certificate and key base name + FrontProxyClientCertAndKeyBaseName = "front-proxy-client" + // FrontProxyClientCertName defines front proxy certificate name + FrontProxyClientCertName = "front-proxy-client.crt" + // FrontProxyClientKeyName defines front proxy key name + FrontProxyClientKeyName = "front-proxy-client.key" + // FrontProxyClientCertCommonName defines front proxy certificate common name + FrontProxyClientCertCommonName = "front-proxy-client" //used as subject.commonname attribute (CN) + + // AdminKubeConfigFileName defines name for the kubeconfig aimed to be used by the superuser/admin of the cluster + AdminKubeConfigFileName = "admin.conf" + // KubeletBootstrapKubeConfigFileName defines the file name for the kubeconfig that the kubelet will use to do + // the TLS bootstrap to get itself an unique credential + KubeletBootstrapKubeConfigFileName = "bootstrap-kubelet.conf" + + // KubeletKubeConfigFileName defines the file name for the kubeconfig that the master kubelet will use for talking + // to the API server + KubeletKubeConfigFileName = "kubelet.conf" + // ControllerManagerKubeConfigFileName defines the file name for the controller manager's kubeconfig file + ControllerManagerKubeConfigFileName = "controller-manager.conf" + // SchedulerKubeConfigFileName defines the file name for the scheduler's kubeconfig file + SchedulerKubeConfigFileName = "scheduler.conf" + + // Some well-known users and groups in the core Kubernetes authorization system + + // ControllerManagerUser defines the well-known user the controller-manager should be authenticated as + ControllerManagerUser = "system:kube-controller-manager" + // SchedulerUser defines the well-known user the scheduler should be authenticated as + SchedulerUser = "system:kube-scheduler" + // MastersGroup defines the well-known group for the apiservers. This group is also superuser by default + // (i.e. bound to the cluster-admin ClusterRole) + MastersGroup = "system:masters" + // NodesGroup defines the well-known group for all nodes. + NodesGroup = "system:nodes" + // NodesUserPrefix defines the user name prefix as requested by the Node authorizer. + NodesUserPrefix = "system:node:" + // NodesClusterRoleBinding defines the well-known ClusterRoleBinding which binds the too permissive system:node + // ClusterRole to the system:nodes group. Since kubeadm is using the Node Authorizer, this ClusterRoleBinding's + // system:nodes group subject is removed if present. 
+ NodesClusterRoleBinding = "system:node" + + // APICallRetryInterval defines how long kubeadm should wait before retrying a failed API operation + APICallRetryInterval = 500 * time.Millisecond + // DiscoveryRetryInterval specifies how long kubeadm should wait before retrying to connect to the master when doing discovery + DiscoveryRetryInterval = 5 * time.Second + // PatchNodeTimeout specifies how long kubeadm should wait for applying the label and taint on the master before timing out + PatchNodeTimeout = 2 * time.Minute + // UpdateNodeTimeout specifies how long kubeadm should wait for updating node with the initial remote configuration of kubelet before timing out + UpdateNodeTimeout = 2 * time.Minute + // TLSBootstrapTimeout specifies how long kubeadm should wait for the kubelet to perform the TLS Bootstrap + TLSBootstrapTimeout = 2 * time.Minute + + // DefaultControlPlaneTimeout specifies the default control plane (actually API Server) timeout for use by kubeadm + DefaultControlPlaneTimeout = 4 * time.Minute + + // MinimumAddressesInServiceSubnet defines minimum amount of nodes the Service subnet should allow. + // We need at least ten, because the DNS service is always at the tenth cluster clusterIP + MinimumAddressesInServiceSubnet = 10 + + // DefaultTokenDuration specifies the default amount of time that a bootstrap token will be valid + // Default behaviour is 24 hours + DefaultTokenDuration = 24 * time.Hour + + // LabelNodeRoleMaster specifies that a node is a master + // This is a duplicate definition of the constant in pkg/controller/service/service_controller.go + LabelNodeRoleMaster = "node-role.kubernetes.io/master" + + // AnnotationKubeadmCRISocket specifies the annotation kubeadm uses to preserve the crisocket information given to kubeadm at + // init/join time for use later. kubeadm annotates the node object with this information + AnnotationKubeadmCRISocket = "kubeadm.alpha.kubernetes.io/cri-socket" + + // KubeadmConfigConfigMap specifies in what ConfigMap in the kube-system namespace the `kubeadm init` configuration should be stored + KubeadmConfigConfigMap = "kubeadm-config" + + // ClusterConfigurationConfigMapKey specifies in what ConfigMap key the cluster configuration should be stored + ClusterConfigurationConfigMapKey = "ClusterConfiguration" + + // ClusterStatusConfigMapKey specifies in what ConfigMap key the cluster status should be stored + ClusterStatusConfigMapKey = "ClusterStatus" + + // KubeProxyConfigMap specifies in what ConfigMap in the kube-system namespace the kube-proxy configuration should be stored + KubeProxyConfigMap = "kube-proxy" + + // KubeProxyConfigMapKey specifies in what ConfigMap key the component config of kube-proxy should be stored + KubeProxyConfigMapKey = "config.conf" + + // KubeletBaseConfigurationConfigMapPrefix specifies in what ConfigMap in the kube-system namespace the initial remote configuration of kubelet should be stored + KubeletBaseConfigurationConfigMapPrefix = "kubelet-config-" + + // KubeletBaseConfigurationConfigMapKey specifies in what ConfigMap key the initial remote configuration of kubelet should be stored + KubeletBaseConfigurationConfigMapKey = "kubelet" + + // KubeletBaseConfigMapRolePrefix defines the base kubelet configuration ConfigMap. + KubeletBaseConfigMapRolePrefix = "kubeadm:kubelet-config-" + + // KubeletRunDirectory specifies the directory where the kubelet runtime information is stored. 
+ KubeletRunDirectory = "/var/lib/kubelet" + + // KubeletConfigurationFileName specifies the file name on the node which stores initial remote configuration of kubelet + // This file should exist under KubeletRunDirectory + KubeletConfigurationFileName = "config.yaml" + + // DynamicKubeletConfigurationDirectoryName specifies the directory which stores the dynamic configuration checkpoints for the kubelet + // This directory should exist under KubeletRunDirectory + DynamicKubeletConfigurationDirectoryName = "dynamic-config" + + // KubeletEnvFileName is a file "kubeadm init" writes at runtime. Using that interface, kubeadm can customize certain + // kubelet flags conditionally based on the environment at runtime. Also, parameters given to the configuration file + // might be passed through this file. "kubeadm init" writes one variable, with the name ${KubeletEnvFileVariableName}. + // This file should exist under KubeletRunDirectory + KubeletEnvFileName = "kubeadm-flags.env" + + // KubeletEnvFileVariableName specifies the shell script variable name "kubeadm init" should write a value to in KubeletEnvFile + KubeletEnvFileVariableName = "KUBELET_KUBEADM_ARGS" + + // KubeletHealthzPort is the port of the kubelet healthz endpoint + KubeletHealthzPort = 10248 + + // MinExternalEtcdVersion indicates minimum external etcd version which kubeadm supports + MinExternalEtcdVersion = "3.2.18" + + // DefaultEtcdVersion indicates the default etcd version that kubeadm uses + DefaultEtcdVersion = "3.2.24" + + // PauseVersion indicates the default pause image version for kubeadm + PauseVersion = "3.1" + + // Etcd defines variable used internally when referring to etcd component + Etcd = "etcd" + // KubeAPIServer defines variable used internally when referring to kube-apiserver component + KubeAPIServer = "kube-apiserver" + // KubeControllerManager defines variable used internally when referring to kube-controller-manager component + KubeControllerManager = "kube-controller-manager" + // KubeScheduler defines variable used internally when referring to kube-scheduler component + KubeScheduler = "kube-scheduler" + // KubeProxy defines variable used internally when referring to kube-proxy component + KubeProxy = "kube-proxy" + // HyperKube defines variable used internally when referring to the hyperkube image + HyperKube = "hyperkube" + + // SelfHostingPrefix describes the prefix workloads that are self-hosted by kubeadm has + SelfHostingPrefix = "self-hosted-" + + // KubeCertificatesVolumeName specifies the name for the Volume that is used for injecting certificates to control plane components (can be both a hostPath volume or a projected, all-in-one volume) + KubeCertificatesVolumeName = "k8s-certs" + + // KubeConfigVolumeName specifies the name for the Volume that is used for injecting the kubeconfig to talk securely to the api server for a control plane component if applicable + KubeConfigVolumeName = "kubeconfig" + + // NodeBootstrapTokenAuthGroup specifies which group a Node Bootstrap Token should be authenticated in + NodeBootstrapTokenAuthGroup = "system:bootstrappers:kubeadm:default-node-token" + + // DefaultCIImageRepository points to image registry where CI uploads images from ci-cross build job + DefaultCIImageRepository = "gcr.io/kubernetes-ci-images" + + // CoreDNSConfigMap specifies in what ConfigMap in the kube-system namespace the CoreDNS config should be stored + CoreDNSConfigMap = "coredns" + + // CoreDNSDeploymentName specifies the name of the Deployment for CoreDNS add-on + 
CoreDNSDeploymentName = "coredns" + + // CoreDNSImageName specifies the name of the image for CoreDNS add-on + CoreDNSImageName = "coredns" + + // KubeDNSConfigMap specifies in what ConfigMap in the kube-system namespace the kube-dns config should be stored + KubeDNSConfigMap = "kube-dns" + + // KubeDNSDeploymentName specifies the name of the Deployment for kube-dns add-on + KubeDNSDeploymentName = "kube-dns" + + // KubeDNSKubeDNSImageName specifies the name of the image for the kubedns container in the kube-dns add-on + KubeDNSKubeDNSImageName = "k8s-dns-kube-dns" + + // KubeDNSSidecarImageName specifies the name of the image for the sidecar container in the kube-dns add-on + KubeDNSSidecarImageName = "k8s-dns-sidecar" + + // KubeDNSDnsMasqNannyImageName specifies the name of the image for the dnsmasq container in the kube-dns add-on + KubeDNSDnsMasqNannyImageName = "k8s-dns-dnsmasq-nanny" + + // CRICtlPackage defines the go package that installs crictl + CRICtlPackage = "github.com/kubernetes-incubator/cri-tools/cmd/crictl" + + // KubeAuditPolicyVolumeName is the name of the volume that will contain the audit policy + KubeAuditPolicyVolumeName = "audit" + // AuditPolicyDir is the directory that will contain the audit policy + AuditPolicyDir = "audit" + // AuditPolicyFile is the name of the audit policy file itself + AuditPolicyFile = "audit.yaml" + // AuditPolicyLogFile is the name of the file audit logs get written to + AuditPolicyLogFile = "audit.log" + // KubeAuditPolicyLogVolumeName is the name of the volume that will contain the audit logs + KubeAuditPolicyLogVolumeName = "audit-log" + // StaticPodAuditPolicyLogDir is the name of the directory in the static pod that will have the audit logs + StaticPodAuditPolicyLogDir = "/var/log/kubernetes/audit" + + // LeaseEndpointReconcilerType will select a storage based reconciler + // Copied from pkg/master/reconcilers to avoid pulling extra dependencies + // TODO: Import this constant from a consts only package, that does not pull any further dependencies. 
+ LeaseEndpointReconcilerType = "lease" + + // KubeDNSVersion is the version of kube-dns to be deployed if it is used + KubeDNSVersion = "1.14.13" + + // CoreDNSVersion is the version of CoreDNS to be deployed if it is used + CoreDNSVersion = "1.2.6" + + // ClusterConfigurationKind is the string kind value for the ClusterConfiguration struct + ClusterConfigurationKind = "ClusterConfiguration" + + // InitConfigurationKind is the string kind value for the InitConfiguration struct + InitConfigurationKind = "InitConfiguration" + + // JoinConfigurationKind is the string kind value for the JoinConfiguration struct + JoinConfigurationKind = "JoinConfiguration" + + // YAMLDocumentSeparator is the separator for YAML documents + // TODO: Find a better place for this constant + YAMLDocumentSeparator = "---\n" + + // DefaultAPIServerBindAddress is the default bind address for the API Server + DefaultAPIServerBindAddress = "0.0.0.0" + + // MasterNumCPU is the number of CPUs required on master + MasterNumCPU = 2 +) + +var ( + // MasterTaint is the taint to apply on the PodSpec for being able to run that Pod on the master + MasterTaint = v1.Taint{ + Key: LabelNodeRoleMaster, + Effect: v1.TaintEffectNoSchedule, + } + + // MasterToleration is the toleration to apply on the PodSpec for being able to run that Pod on the master + MasterToleration = v1.Toleration{ + Key: LabelNodeRoleMaster, + Effect: v1.TaintEffectNoSchedule, + } + + // DefaultTokenUsages specifies the default functions a token will get + DefaultTokenUsages = bootstrapapi.KnownTokenUsages + + // DefaultTokenGroups specifies the default groups that this token will authenticate as when used for authentication + DefaultTokenGroups = []string{NodeBootstrapTokenAuthGroup} + + // MasterComponents defines the master component names + MasterComponents = []string{KubeAPIServer, KubeControllerManager, KubeScheduler} + + // MinimumControlPlaneVersion specifies the minimum control plane version kubeadm can deploy + MinimumControlPlaneVersion = version.MustParseSemantic("v1.12.0") + + // MinimumKubeletVersion specifies the minimum version of kubelet which kubeadm supports + MinimumKubeletVersion = version.MustParseSemantic("v1.12.0") + + // SupportedEtcdVersion lists officially supported etcd versions with corresponding Kubernetes releases + SupportedEtcdVersion = map[uint8]string{ + 10: "3.1.12", + 11: "3.2.18", + 12: "3.2.24", + 13: "3.2.24", + } +) + +// EtcdSupportedVersion returns officially supported version of etcd for a specific Kubernetes release +// if passed version is not listed, the function returns nil and an error +func EtcdSupportedVersion(versionString string) (*version.Version, error) { + kubernetesVersion, err := version.ParseSemantic(versionString) + if err != nil { + return nil, err + } + + if etcdStringVersion, ok := SupportedEtcdVersion[uint8(kubernetesVersion.Minor())]; ok { + etcdVersion, err := version.ParseSemantic(etcdStringVersion) + if err != nil { + return nil, err + } + return etcdVersion, nil + } + return nil, errors.Errorf("Unsupported or unknown Kubernetes version(%v)", kubernetesVersion) +} + +// GetStaticPodDirectory returns the location on the disk where the Static Pod should be present +func GetStaticPodDirectory() string { + return filepath.Join(KubernetesDir, ManifestsSubDirName) +} + +// GetStaticPodFilepath returns the location on the disk where the Static Pod should be present +func GetStaticPodFilepath(componentName, manifestsDir string) string { + return filepath.Join(manifestsDir, componentName+".yaml") +} + 
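// A short usage sketch (illustrative values, not part of this change) for
// EtcdSupportedVersion above, which maps a Kubernetes minor version to the
// etcd version kubeadm will deploy and refuses unknown releases:
package main

import (
    "fmt"

    "k8s.io/kubernetes/cmd/kubeadm/app/constants"
)

func main() {
    etcdVersion, err := constants.EtcdSupportedVersion("v1.12.3")
    if err != nil {
        panic(err)
    }
    fmt.Println(etcdVersion) // 3.2.24, per the SupportedEtcdVersion table

    if _, err := constants.EtcdSupportedVersion("v1.99.0"); err != nil {
        fmt.Println(err) // unknown minor versions are rejected, not guessed
    }

    // GetDNSIP (defined later in this file) follows the tenth-IP rule noted at
    // MinimumAddressesInServiceSubnet: for "10.96.0.0/12" it returns 10.96.0.10.
}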
+// GetAdminKubeConfigPath returns the location on the disk where admin kubeconfig is located by default +func GetAdminKubeConfigPath() string { + return filepath.Join(KubernetesDir, AdminKubeConfigFileName) +} + +// GetBootstrapKubeletKubeConfigPath returns the location on the disk where bootstrap kubelet kubeconfig is located by default +func GetBootstrapKubeletKubeConfigPath() string { + return filepath.Join(KubernetesDir, KubeletBootstrapKubeConfigFileName) +} + +// GetKubeletKubeConfigPath returns the location on the disk where kubelet kubeconfig is located by default +func GetKubeletKubeConfigPath() string { + return filepath.Join(KubernetesDir, KubeletKubeConfigFileName) +} + +// AddSelfHostedPrefix adds the self-hosted- prefix to the component name +func AddSelfHostedPrefix(componentName string) string { + return fmt.Sprintf("%s%s", SelfHostingPrefix, componentName) +} + +// CreateTempDirForKubeadm is a function that creates a temporary directory under /etc/kubernetes/tmp (not using /tmp as that would potentially be dangerous) +func CreateTempDirForKubeadm(dirName string) (string, error) { + tempDir := path.Join(KubernetesDir, TempDirForKubeadm) + // creates target folder if not already exists + if err := os.MkdirAll(tempDir, 0700); err != nil { + return "", errors.Wrapf(err, "failed to create directory %q", tempDir) + } + + tempDir, err := ioutil.TempDir(tempDir, dirName) + if err != nil { + return "", errors.Wrap(err, "couldn't create a temporary directory") + } + return tempDir, nil +} + +// CreateTimestampDirForKubeadm is a function that creates a temporary directory under /etc/kubernetes/tmp formatted with the current date +func CreateTimestampDirForKubeadm(dirName string) (string, error) { + tempDir := path.Join(KubernetesDir, TempDirForKubeadm) + // creates target folder if not already exists + if err := os.MkdirAll(tempDir, 0700); err != nil { + return "", errors.Wrapf(err, "failed to create directory %q", tempDir) + } + + timestampDirName := fmt.Sprintf("%s-%s", dirName, time.Now().Format("2006-01-02-15-04-05")) + timestampDir := path.Join(tempDir, timestampDirName) + if err := os.Mkdir(timestampDir, 0700); err != nil { + return "", errors.Wrap(err, "could not create timestamp directory") + } + + return timestampDir, nil +} + +// GetDNSIP returns a dnsIP, which is 10th IP in svcSubnet CIDR range +func GetDNSIP(svcSubnet string) (net.IP, error) { + // Get the service subnet CIDR + _, svcSubnetCIDR, err := net.ParseCIDR(svcSubnet) + if err != nil { + return nil, errors.Wrapf(err, "couldn't parse service subnet CIDR %q", svcSubnet) + } + + // Selects the 10th IP in service subnet CIDR range as dnsIP + dnsIP, err := ipallocator.GetIndexedIP(svcSubnetCIDR, 10) + if err != nil { + return nil, errors.Wrapf(err, "unable to get tenth IP address from service subnet CIDR %s", svcSubnetCIDR.String()) + } + + return dnsIP, nil +} + +// GetStaticPodAuditPolicyFile returns the path to the audit policy file within a static pod +func GetStaticPodAuditPolicyFile() string { + return filepath.Join(KubernetesDir, AuditPolicyDir, AuditPolicyFile) +} + +// GetDNSVersion is a handy function that returns the DNS version by DNS type +func GetDNSVersion(dnsType kubeadmapi.DNSAddOnType) string { + switch dnsType { + case kubeadmapi.KubeDNS: + return KubeDNSVersion + default: + return CoreDNSVersion + } +} + +// GetKubeletConfigMapName returns the right ConfigMap name for the right branch of k8s +func GetKubeletConfigMapName(k8sVersion *version.Version) string { + return fmt.Sprintf("%s%d.%d", 
KubeletBaseConfigurationConfigMapPrefix, k8sVersion.Major(), k8sVersion.Minor()) +} diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/BUILD b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/BUILD new file mode 100644 index 0000000000..becafa093b --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/BUILD @@ -0,0 +1,94 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = [ + "arguments.go", + "cgroupdriver.go", + "chroot_unix.go", + "chroot_windows.go", + "copy.go", + "endpoint.go", + "error.go", + "marshal.go", + "template.go", + "version.go", + ], + importpath = "k8s.io/kubernetes/cmd/kubeadm/app/util", + deps = [ + "//cmd/kubeadm/app/apis/kubeadm:go_default_library", + "//cmd/kubeadm/app/constants:go_default_library", + "//pkg/version:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", + "//vendor/github.com/pkg/errors:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/sigs.k8s.io/yaml:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "arguments_test.go", + "cgroupdriver_test.go", + "chroot_test.go", + "endpoint_test.go", + "error_test.go", + "marshal_test.go", + "template_test.go", + "version_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//cmd/kubeadm/app/apis/kubeadm:go_default_library", + "//cmd/kubeadm/app/apis/kubeadm/v1beta1:go_default_library", + "//cmd/kubeadm/app/constants:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/github.com/pkg/errors:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//cmd/kubeadm/app/util/apiclient:all-srcs", + "//cmd/kubeadm/app/util/audit:all-srcs", + "//cmd/kubeadm/app/util/config:all-srcs", + "//cmd/kubeadm/app/util/dryrun:all-srcs", + "//cmd/kubeadm/app/util/etcd:all-srcs", + "//cmd/kubeadm/app/util/kubeconfig:all-srcs", + "//cmd/kubeadm/app/util/pkiutil:all-srcs", + "//cmd/kubeadm/app/util/pubkeypin:all-srcs", + "//cmd/kubeadm/app/util/runtime:all-srcs", + "//cmd/kubeadm/app/util/staticpod:all-srcs", + "//cmd/kubeadm/app/util/system:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/arguments.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/arguments.go new file mode 100644 
index 0000000000..619aba2eb4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/arguments.go @@ -0,0 +1,113 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + "sort" + "strings" + + "github.com/pkg/errors" +) + +// BuildArgumentListFromMap takes two string-string maps, one with the base arguments and one +// with optional override arguments. In the return list override arguments will precede base +// arguments +func BuildArgumentListFromMap(baseArguments map[string]string, overrideArguments map[string]string) []string { + var command []string + var keys []string + for k := range overrideArguments { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := overrideArguments[k] + // values of "" are allowed as well + command = append(command, fmt.Sprintf("--%s=%s", k, v)) + } + keys = []string{} + for k := range baseArguments { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := baseArguments[k] + if _, overrideExists := overrideArguments[k]; !overrideExists { + command = append(command, fmt.Sprintf("--%s=%s", k, v)) + } + } + return command +} + +// ParseArgumentListToMap parses a CLI argument list in the form "--foo=bar" to a string-string map +func ParseArgumentListToMap(arguments []string) map[string]string { + resultingMap := map[string]string{} + for i, arg := range arguments { + key, val, err := parseArgument(arg) + + // Ignore if the first argument doesn't satisfy the criteria, it's most often the binary name + // Warn in all other cases, but don't error out. This can happen only if the user has edited the argument list by hand, so they might know what they are doing + if err != nil { + if i != 0 { + fmt.Printf("[kubeadm] WARNING: The component argument %q could not be parsed correctly. The argument must be of the form %q. Skipping...", arg, "--") + } + continue + } + + resultingMap[key] = val + } + return resultingMap +} + +// ReplaceArgument gets a command list; converts it to a map for easier modification, runs the provided function that +// returns a new modified map, and then converts the map back to a command string slice +func ReplaceArgument(command []string, argMutateFunc func(map[string]string) map[string]string) []string { + argMap := ParseArgumentListToMap(command) + + // Save the first command (the executable) if we're sure it's not an argument (i.e. no --) + var newCommand []string + if len(command) > 0 && !strings.HasPrefix(command[0], "--") { + newCommand = append(newCommand, command[0]) + } + newArgMap := argMutateFunc(argMap) + newCommand = append(newCommand, BuildArgumentListFromMap(newArgMap, map[string]string{})...) 
+ return newCommand +} + +// parseArgument parses the argument "--foo=bar" to "foo" and "bar" +func parseArgument(arg string) (string, string, error) { + if !strings.HasPrefix(arg, "--") { + return "", "", errors.New("the argument should start with '--'") + } + if !strings.Contains(arg, "=") { + return "", "", errors.New("the argument should have a '=' between the flag and the value") + } + // Remove the starting -- + arg = strings.TrimPrefix(arg, "--") + // Split the string on =. Return only two substrings, since we want only key/value, but the value can include '=' as well + keyvalSlice := strings.SplitN(arg, "=", 2) + + // Make sure both a key and value is present + if len(keyvalSlice) != 2 { + return "", "", errors.New("the argument must have both a key and a value") + } + if len(keyvalSlice[0]) == 0 { + return "", "", errors.New("the argument must have a key") + } + + return keyvalSlice[0], keyvalSlice[1], nil +} diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/cgroupdriver.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/cgroupdriver.go new file mode 100644 index 0000000000..2d1e450689 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/cgroupdriver.go @@ -0,0 +1,76 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "strings" + + "github.com/pkg/errors" + + utilsexec "k8s.io/utils/exec" +) + +// TODO: add support for detecting the cgroup driver for CRI other than +// Docker. Currently only Docker driver detection is supported: +// Discussion: +// https://github.com/kubernetes/kubeadm/issues/844 + +// GetCgroupDriverDocker runs 'docker info' to obtain the docker cgroup driver +func GetCgroupDriverDocker(execer utilsexec.Interface) (string, error) { + info, err := callDockerInfo(execer) + if err != nil { + return "", err + } + return getCgroupDriverFromDockerInfo(info) +} + +func validateCgroupDriver(driver string) error { + if driver != "cgroupfs" && driver != "systemd" { + return errors.Errorf("unknown cgroup driver %q", driver) + } + return nil +} + +// TODO: Docker 1.13 has a new way to obtain the cgroup driver: +// docker info -f "{{.CgroupDriver}}" +// If the minimum supported Docker version in K8s becomes 1.13, move to +// this syntax.
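// A usage sketch for GetCgroupDriverDocker above (illustrative, not part of
// this change; requires a local Docker daemon). The parser that follows scans
// the `docker info` output for a line of the form "Cgroup Driver: systemd".
package main

import (
    "fmt"

    kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
    utilsexec "k8s.io/utils/exec"
)

func main() {
    driver, err := kubeadmutil.GetCgroupDriverDocker(utilsexec.New())
    if err != nil {
        panic(err)
    }
    fmt.Println(driver) // "cgroupfs" or "systemd"; anything else is rejected
}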
+func callDockerInfo(execer utilsexec.Interface) (string, error) { + out, err := execer.Command("docker", "info").Output() + if err != nil { + return "", errors.Wrap(err, "cannot execute 'docker info'") + } + return string(out), nil +} + +func getCgroupDriverFromDockerInfo(info string) (string, error) { + lineSeparator := ": " + prefix := "Cgroup Driver" + for _, line := range strings.Split(info, "\n") { + if !strings.Contains(line, prefix+lineSeparator) { + continue + } + lineSplit := strings.Split(line, lineSeparator) + // At this point len(lineSplit) is always >= 2 + driver := lineSplit[1] + if err := validateCgroupDriver(driver); err != nil { + return "", err + } + return driver, nil + } + return "", errors.New("cgroup driver is not defined in 'docker info'") +} diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/chroot_unix.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/chroot_unix.go new file mode 100644 index 0000000000..b8b83c373f --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/chroot_unix.go @@ -0,0 +1,40 @@ +// +build !windows + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "github.com/pkg/errors" + "os" + "path/filepath" + "syscall" +) + +// Chroot chroot()s to the new path. +// NB: All file paths after this call are effectively relative to +// `rootfs` +func Chroot(rootfs string) error { + if err := syscall.Chroot(rootfs); err != nil { + return errors.Wrapf(err, "unable to chroot to %s", rootfs) + } + root := filepath.FromSlash("/") + if err := os.Chdir(root); err != nil { + return errors.Wrapf(err, "unable to chdir to %s", root) + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/chroot_windows.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/chroot_windows.go new file mode 100644 index 0000000000..b7e434e082 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/chroot_windows.go @@ -0,0 +1,30 @@ +// +build windows + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "github.com/pkg/errors" +) + +// Chroot chroot()s to the new path. 
+// NB: All file paths after this call are effectively relative to
+// `rootfs`
+func Chroot(rootfs string) error {
+	return errors.New("chroot is not implemented on Windows")
+}
diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/copy.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/copy.go
new file mode 100644
index 0000000000..6465547d2c
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/copy.go
@@ -0,0 +1,31 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"os/exec"
+)
+
+// CopyDir copies the content of a folder
+func CopyDir(src string, dst string) error {
+	cmd := exec.Command("cp", "-r", src, dst)
+	err := cmd.Run()
+	if err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/endpoint.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/endpoint.go
new file mode 100644
index 0000000000..8760aec6b0
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/endpoint.go
@@ -0,0 +1,124 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"fmt"
+	"net"
+	"net/url"
+	"strconv"
+
+	"github.com/pkg/errors"
+
+	"k8s.io/apimachinery/pkg/util/validation"
+	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
+)
+
+// GetMasterEndpoint returns a properly formatted endpoint for the control plane, built according to the following rules:
+// - If the ControlPlaneEndpoint is defined, use it.
+// - If the ControlPlaneEndpoint is defined but without a port number, the ControlPlaneEndpoint + api.BindPort is used.
+// - Otherwise, when the ControlPlaneEndpoint is not defined, api.AdvertiseAddress + api.BindPort is used.
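+//
+// A few illustrative cases (example endpoint values, not from the upstream docs):
+//
+//	ControlPlaneEndpoint "cp.example.com:1234"            -> "https://cp.example.com:1234"
+//	ControlPlaneEndpoint "cp.example.com" + BindPort 6443 -> "https://cp.example.com:6443"
+//	AdvertiseAddress "10.0.0.1" + BindPort 6443           -> "https://10.0.0.1:6443"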
+func GetMasterEndpoint(cfg *kubeadmapi.InitConfiguration) (string, error) {
+	// parse the bind port
+	bindPortString := strconv.Itoa(int(cfg.LocalAPIEndpoint.BindPort))
+	if _, err := ParsePort(bindPortString); err != nil {
+		return "", errors.Wrapf(err, "invalid value %q given for api.bindPort", cfg.LocalAPIEndpoint.BindPort)
+	}
+
+	// parse the AdvertiseAddress
+	var ip = net.ParseIP(cfg.LocalAPIEndpoint.AdvertiseAddress)
+	if ip == nil {
+		return "", errors.Errorf("invalid value `%s` given for api.advertiseAddress", cfg.LocalAPIEndpoint.AdvertiseAddress)
+	}
+
+	// set the master url using cfg.API.AdvertiseAddress + the cfg.API.BindPort
+	masterURL := &url.URL{
+		Scheme: "https",
+		Host:   net.JoinHostPort(ip.String(), bindPortString),
+	}
+
+	// if the controlplane endpoint is defined
+	if len(cfg.ControlPlaneEndpoint) > 0 {
+		// parse the controlplane endpoint
+		var host, port string
+		var err error
+		if host, port, err = ParseHostPort(cfg.ControlPlaneEndpoint); err != nil {
+			return "", errors.Wrapf(err, "invalid value %q given for controlPlaneEndpoint", cfg.ControlPlaneEndpoint)
+		}
+
+		// if a port is provided within the controlPlaneAddress, warn the user that we are using it; else use the bindPort
+		if port != "" {
+			if port != bindPortString {
+				fmt.Println("[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address")
+			}
+		} else {
+			port = bindPortString
+		}
+
+		// override the master URL using the controlPlaneAddress (and eventually the bindPort)
+		masterURL = &url.URL{
+			Scheme: "https",
+			Host:   net.JoinHostPort(host, port),
+		}
+	}
+
+	return masterURL.String(), nil
+}
+
+// ParseHostPort parses a network address of the form "host:port", "ipv4:port", "[ipv6]:port" into host and port;
+// the ":port" part can be omitted.
+// If the string is not a valid representation of a network address, ParseHostPort returns an error.
+func ParseHostPort(hostport string) (string, string, error) {
+	var host, port string
+	var err error
+
+	// try to split host and port
+	if host, port, err = net.SplitHostPort(hostport); err != nil {
+		// if SplitHostPort returns an error, the entire hostport is considered as host
+		host = hostport
+	}
+
+	// if port is defined, parse and validate it
+	if port != "" {
+		if _, err := ParsePort(port); err != nil {
+			return "", "", errors.New("port must be a valid number between 1 and 65535, inclusive")
+		}
+	}
+
+	// if host is a valid IP, return it
+	if ip := net.ParseIP(host); ip != nil {
+		return host, port, nil
+	}
+
+	// if host is a valid RFC-1123 subdomain, return it
+	if errs := validation.IsDNS1123Subdomain(host); len(errs) == 0 {
+		return host, port, nil
+	}
+
+	return "", "", errors.New("host must be a valid IP address or a valid RFC-1123 DNS subdomain")
+}
+
+// ParsePort parses a string representing a TCP port.
+// If the string is not a valid representation of a TCP port, ParsePort returns an error.
+func ParsePort(port string) (int, error) {
+	portInt, err := strconv.Atoi(port)
+	if err == nil && (1 <= portInt && portInt <= 65535) {
+		return portInt, nil
+	}
+
+	return 0, errors.New("port must be a valid number between 1 and 65535, inclusive")
+}
diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/error.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/error.go
new file mode 100644
index 0000000000..4eaf984fd4
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/error.go
@@ -0,0 +1,87 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"fmt"
+	"os"
+	"strings"
+
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+)
+
+const (
+	// DefaultErrorExitCode defines the exit code for failed actions generally
+	DefaultErrorExitCode = 1
+	// PreFlightExitCode defines the exit code for preflight checks
+	PreFlightExitCode = 2
+	// ValidationExitCode defines the exit code for validation checks
+	ValidationExitCode = 3
+)
+
+// fatal prints the message if set and then exits.
+func fatal(msg string, code int) {
+	if len(msg) > 0 {
+		// add newline if needed
+		if !strings.HasSuffix(msg, "\n") {
+			msg += "\n"
+		}
+
+		fmt.Fprint(os.Stderr, msg)
+	}
+	os.Exit(code)
+}
+
+// CheckErr prints a user-friendly error to STDERR and exits with a non-zero
+// exit code. Unrecognized errors will be printed with an "error: " prefix.
+//
+// This method is generic to the command in use and may be used by non-Kubectl
+// commands.
+func CheckErr(err error) {
+	checkErr(err, fatal)
+}
+
+// preflightError allows us to know if the error is a preflight error or not;
+// defining the interface here avoids an import cycle of pulling in preflight into the util package
+type preflightError interface {
+	Preflight() bool
+}
+
+// checkErr formats a given error as a string and calls the passed handleErr
+// func with that string and an exit code.
+func checkErr(err error, handleErr func(string, int)) {
+	switch err.(type) {
+	case nil:
+		return
+	case preflightError:
+		handleErr(err.Error(), PreFlightExitCode)
+	case utilerrors.Aggregate:
+		handleErr(err.Error(), ValidationExitCode)
+
+	default:
+		handleErr(err.Error(), DefaultErrorExitCode)
+	}
+}
+
+// FormatErrMsg returns a human-readable string describing the slice of errors passed to the function
+func FormatErrMsg(errs []error) string {
+	var errMsg string
+	for _, err := range errs {
+		errMsg = fmt.Sprintf("%s\t- %s\n", errMsg, err.Error())
+	}
+	return errMsg
+}
diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/marshal.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/marshal.go
new file mode 100644
index 0000000000..d38dce75f2
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/marshal.go
@@ -0,0 +1,163 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package util + +import ( + "bufio" + "bytes" + "io" + + pkgerrors "github.com/pkg/errors" + "sigs.k8s.io/yaml" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/errors" + utilyaml "k8s.io/apimachinery/pkg/util/yaml" + clientsetscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/kubernetes/cmd/kubeadm/app/constants" +) + +// MarshalToYaml marshals an object into yaml. +func MarshalToYaml(obj runtime.Object, gv schema.GroupVersion) ([]byte, error) { + return MarshalToYamlForCodecs(obj, gv, clientsetscheme.Codecs) +} + +// MarshalToYamlForCodecs marshals an object into yaml using the specified codec +// TODO: Is specifying the gv really needed here? +// TODO: Can we support json out of the box easily here? +func MarshalToYamlForCodecs(obj runtime.Object, gv schema.GroupVersion, codecs serializer.CodecFactory) ([]byte, error) { + mediaType := "application/yaml" + info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), mediaType) + if !ok { + return []byte{}, pkgerrors.Errorf("unsupported media type %q", mediaType) + } + + encoder := codecs.EncoderForVersion(info.Serializer, gv) + return runtime.Encode(encoder, obj) +} + +// UnmarshalFromYaml unmarshals yaml into an object. +func UnmarshalFromYaml(buffer []byte, gv schema.GroupVersion) (runtime.Object, error) { + return UnmarshalFromYamlForCodecs(buffer, gv, clientsetscheme.Codecs) +} + +// UnmarshalFromYamlForCodecs unmarshals yaml into an object using the specified codec +// TODO: Is specifying the gv really needed here? +// TODO: Can we support json out of the box easily here? +func UnmarshalFromYamlForCodecs(buffer []byte, gv schema.GroupVersion, codecs serializer.CodecFactory) (runtime.Object, error) { + mediaType := "application/yaml" + info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), mediaType) + if !ok { + return nil, pkgerrors.Errorf("unsupported media type %q", mediaType) + } + + decoder := codecs.DecoderToVersion(info.Serializer, gv) + return runtime.Decode(decoder, buffer) +} + +// SplitYAMLDocuments reads the YAML bytes per-document, unmarshals the TypeMeta information from each document +// and returns a map between the GroupVersionKind of the document and the document bytes +func SplitYAMLDocuments(yamlBytes []byte) (map[schema.GroupVersionKind][]byte, error) { + gvkmap := map[schema.GroupVersionKind][]byte{} + knownKinds := map[string]bool{} + errs := []error{} + buf := bytes.NewBuffer(yamlBytes) + reader := utilyaml.NewYAMLReader(bufio.NewReader(buf)) + for { + typeMetaInfo := runtime.TypeMeta{} + // Read one YAML document at a time, until io.EOF is returned + b, err := reader.Read() + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + if len(b) == 0 { + break + } + // Deserialize the TypeMeta information of this byte slice + if err := yaml.Unmarshal(b, &typeMetaInfo); err != nil { + return nil, err + } + // Require TypeMeta information to be present + if len(typeMetaInfo.APIVersion) == 0 || len(typeMetaInfo.Kind) == 0 { + errs = append(errs, pkgerrors.New("invalid configuration: kind and apiVersion is mandatory information that needs to be specified in all YAML documents")) + continue + } + // Check whether the kind has been registered before. 
If it has, throw an error
+		if known := knownKinds[typeMetaInfo.Kind]; known {
+			errs = append(errs, pkgerrors.Errorf("invalid configuration: kind %q is specified twice in YAML file", typeMetaInfo.Kind))
+			continue
+		}
+		knownKinds[typeMetaInfo.Kind] = true
+
+		// Build a GroupVersionKind object from the deserialized TypeMeta object
+		gv, err := schema.ParseGroupVersion(typeMetaInfo.APIVersion)
+		if err != nil {
+			errs = append(errs, pkgerrors.Wrap(err, "unable to parse apiVersion"))
+			continue
+		}
+		gvk := gv.WithKind(typeMetaInfo.Kind)
+
+		// Save the mapping between the gvk and the bytes that object consists of
+		gvkmap[gvk] = b
+	}
+	if err := errors.NewAggregate(errs); err != nil {
+		return nil, err
+	}
+	return gvkmap, nil
+}
+
+// GroupVersionKindsFromBytes parses the bytes and returns a gvk slice
+func GroupVersionKindsFromBytes(b []byte) ([]schema.GroupVersionKind, error) {
+	gvkmap, err := SplitYAMLDocuments(b)
+	if err != nil {
+		return nil, err
+	}
+	gvks := []schema.GroupVersionKind{}
+	for gvk := range gvkmap {
+		gvks = append(gvks, gvk)
+	}
+	return gvks, nil
+}
+
+// GroupVersionKindsHasKind returns whether the given gvk slice contains the kind given as a parameter
+func GroupVersionKindsHasKind(gvks []schema.GroupVersionKind, kind string) bool {
+	for _, gvk := range gvks {
+		if gvk.Kind == kind {
+			return true
+		}
+	}
+	return false
+}
+
+// GroupVersionKindsHasClusterConfiguration returns whether the given gvk slice contains a ClusterConfiguration object
+func GroupVersionKindsHasClusterConfiguration(gvks ...schema.GroupVersionKind) bool {
+	return GroupVersionKindsHasKind(gvks, constants.ClusterConfigurationKind)
+}
+
+// GroupVersionKindsHasInitConfiguration returns whether the given gvk slice contains an InitConfiguration object
+func GroupVersionKindsHasInitConfiguration(gvks ...schema.GroupVersionKind) bool {
+	return GroupVersionKindsHasKind(gvks, constants.InitConfigurationKind)
+}
+
+// GroupVersionKindsHasJoinConfiguration returns whether the given gvk slice contains a JoinConfiguration object
+func GroupVersionKindsHasJoinConfiguration(gvks ...schema.GroupVersionKind) bool {
+	return GroupVersionKindsHasKind(gvks, constants.JoinConfigurationKind)
+}
diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/template.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/template.go
new file mode 100644
index 0000000000..5951f8c829
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/template.go
@@ -0,0 +1,38 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"bytes"
+	"text/template"
+
+	"github.com/pkg/errors"
+)
+
+// ParseTemplate validates and parses the template passed as an argument
+func ParseTemplate(strtmpl string, obj interface{}) ([]byte, error) {
+	var buf bytes.Buffer
+	tmpl, err := template.New("template").Parse(strtmpl)
+	if err != nil {
+		return nil, errors.Wrap(err, "error when parsing template")
+	}
+	err = tmpl.Execute(&buf, obj)
+	if err != nil {
+		return nil, errors.Wrap(err, "error when executing template")
+	}
+	return buf.Bytes(), nil
+}
diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/version.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/version.go
new file mode 100644
index 0000000000..4da6ce48ae
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/version.go
@@ -0,0 +1,241 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"regexp"
+	"strings"
+	"time"
+
+	pkgerrors "github.com/pkg/errors"
+	netutil "k8s.io/apimachinery/pkg/util/net"
+	versionutil "k8s.io/apimachinery/pkg/util/version"
+	"k8s.io/klog"
+	pkgversion "k8s.io/kubernetes/pkg/version"
+)
+
+const (
+	getReleaseVersionTimeout = time.Duration(10 * time.Second)
+)
+
+var (
+	kubeReleaseBucketURL  = "https://dl.k8s.io"
+	kubeReleaseRegex      = regexp.MustCompile(`^v?(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)([-0-9a-zA-Z_\.+]*)?$`)
+	kubeReleaseLabelRegex = regexp.MustCompile(`^[[:lower:]]+(-[-\w_\.]+)?$`)
+	kubeBucketPrefixes    = regexp.MustCompile(`^((release|ci|ci-cross)/)?([-\w_\.+]+)$`)
+)
+
+// KubernetesReleaseVersion is a helper function that can fetch
+// available version information from release servers based on
+// label names, like "stable" or "latest".
+//
+// If the argument is already a semantic version string, the
+// same string is returned.
+//
+// In the case of labels, it tries to fetch from release
+// servers and then returns the actual semantic version.
+//
+// Available names on release servers:
+//  stable      (latest stable release)
+//  stable-1    (latest stable release in 1.x)
+//  stable-1.0  (and similarly 1.1, 1.2, 1.3, ...)
+//  latest      (latest release, including alpha/beta)
+//  latest-1    (latest release in 1.x, including alpha/beta)
+//  latest-1.0  (and similarly 1.1, 1.2, 1.3, ...)
+func KubernetesReleaseVersion(version string) (string, error) {
+	ver := normalizedBuildVersion(version)
+	if len(ver) != 0 {
+		return ver, nil
+	}
+
+	bucketURL, versionLabel, err := splitVersion(version)
+	if err != nil {
+		return "", err
+	}
+
+	// revalidate, if an exact build from e.g. a CI bucket was requested.
+	ver = normalizedBuildVersion(versionLabel)
+	if len(ver) != 0 {
+		return ver, nil
+	}
+
+	// kubeReleaseLabelRegex matches labels such as: latest, latest-1, latest-1.10
+	if kubeReleaseLabelRegex.MatchString(versionLabel) {
+		var clientVersion string
+		// Try to obtain a client version.
+		clientVersion, _ = kubeadmVersion(pkgversion.Get().String())
+		// Fetch version from the internet.
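+		// For example, the label "stable-1.13" resolves to
+		// https://dl.k8s.io/release/stable-1.13.txt (illustrative version number).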
+		url := fmt.Sprintf("%s/%s.txt", bucketURL, versionLabel)
+		body, err := fetchFromURL(url, getReleaseVersionTimeout)
+		if err != nil {
+			// If the network operation was successful but the server did not reply with StatusOK
+			if body != "" {
+				return "", err
+			}
+			// Handle air-gapped environments by falling back to the client version.
+			klog.Infof("could not fetch a Kubernetes version from the internet: %v", err)
+			klog.Infof("falling back to the local client version: %s", clientVersion)
+			return KubernetesReleaseVersion(clientVersion)
+		}
+		// both the client and the remote version are obtained; validate them and pick a stable version
+		body, err = validateStableVersion(body, clientVersion)
+		if err != nil {
+			return "", err
+		}
+		// Re-validate received version and return.
+		return KubernetesReleaseVersion(body)
+	}
+	return "", pkgerrors.Errorf("version %q doesn't match patterns for a semantic version or a label (stable, latest, ...)", version)
+}
+
+// KubernetesVersionToImageTag is a helper function that replaces all
+// non-allowed symbols in tag strings with underscores.
+// An image tag can only contain lowercase and uppercase letters, digits,
+// underscores, periods and dashes.
+// The current usage is for CI images where all symbols except '+' are valid,
+// but the function is for generic usage where input can't always be pre-validated.
+func KubernetesVersionToImageTag(version string) string {
+	allowed := regexp.MustCompile(`[^-a-zA-Z0-9_\.]`)
+	return allowed.ReplaceAllString(version, "_")
+}
+
+// KubernetesIsCIVersion checks if the user requested a CI version
+func KubernetesIsCIVersion(version string) bool {
+	subs := kubeBucketPrefixes.FindAllStringSubmatch(version, 1)
+	if len(subs) == 1 && len(subs[0]) == 4 && strings.HasPrefix(subs[0][2], "ci") {
+		return true
+	}
+	return false
+}
+
+// Internal helper: returns the normalized build version (with "v" prefix if needed)
+// If the input doesn't match a known version pattern, returns an empty string.
+func normalizedBuildVersion(version string) string {
+	if kubeReleaseRegex.MatchString(version) {
+		if strings.HasPrefix(version, "v") {
+			return version
+		}
+		return "v" + version
+	}
+	return ""
+}
+
+// Internal helper: split version parts,
+// return the base URL and the cleaned-up version
+func splitVersion(version string) (string, string, error) {
+	var urlSuffix string
+	subs := kubeBucketPrefixes.FindAllStringSubmatch(version, 1)
+	if len(subs) != 1 || len(subs[0]) != 4 {
+		return "", "", pkgerrors.Errorf("invalid version %q", version)
+	}
+
+	switch {
+	case strings.HasPrefix(subs[0][2], "ci"):
+		// Just use whichever the user specified
+		urlSuffix = subs[0][2]
+	default:
+		urlSuffix = "release"
+	}
+	url := fmt.Sprintf("%s/%s", kubeReleaseBucketURL, urlSuffix)
+	return url, subs[0][3], nil
+}
+
+// Internal helper: return the content of a URL
+func fetchFromURL(url string, timeout time.Duration) (string, error) {
+	klog.V(2).Infof("fetching Kubernetes version from URL: %s", url)
+	client := &http.Client{Timeout: timeout, Transport: netutil.SetOldTransportDefaults(&http.Transport{})}
+	resp, err := client.Get(url)
+	if err != nil {
+		return "", pkgerrors.Errorf("unable to get URL %q: %s", url, err.Error())
+	}
+	defer resp.Body.Close()
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", pkgerrors.Errorf("unable to read content of URL %q: %s", url, err.Error())
+	}
+	bodyString := strings.TrimSpace(string(body))
+
+	if resp.StatusCode != http.StatusOK {
+		msg := fmt.Sprintf("unable to fetch file. URL: %q, status: %v", url, resp.Status)
+		return bodyString, errors.New(msg)
+	}
+	return bodyString, nil
+}
+
+// kubeadmVersion returns the version of the client without metadata.
+func kubeadmVersion(info string) (string, error) {
+	v, err := versionutil.ParseSemantic(info)
+	if err != nil {
+		return "", pkgerrors.Wrap(err, "kubeadm version error")
+	}
+	// There is no utility in versionutil to get the version without the metadata,
+	// so this needs some manual formatting.
+	// Discard offsets after a release label and keep the labels down to e.g. `alpha.0` instead of
+	// including the offset e.g. `alpha.0.206`. This is done to comply with GCR image tags.
+	pre := v.PreRelease()
+	patch := v.Patch()
+	if len(pre) > 0 {
+		if patch > 0 {
+			// If the patch version is more than zero, decrement it and remove the label.
+			// This is done to comply with the latest stable patch release.
+			patch = patch - 1
+			pre = ""
+		} else {
+			split := strings.Split(pre, ".")
+			if len(split) > 2 {
+				pre = split[0] + "." + split[1] // Exclude the third element
+			} else if len(split) < 2 {
+				pre = split[0] + ".0" // Append .0 to a partial label
+			}
+			pre = "-" + pre
+		}
+	}
+	vStr := fmt.Sprintf("v%d.%d.%d%s", v.Major(), v.Minor(), patch, pre)
+	return vStr, nil
+}
+
+// Validate whether the remote version is at most one Minor release newer than the client version.
+// This is done to conform with "stable-X" and only allow remote versions from
+// the same Patch level release.
+func validateStableVersion(remoteVersion, clientVersion string) (string, error) {
+	if clientVersion == "" {
+		klog.Infof("could not obtain client version; using remote version: %s", remoteVersion)
+		return remoteVersion, nil
+	}
+
+	verRemote, err := versionutil.ParseGeneric(remoteVersion)
+	if err != nil {
+		return "", pkgerrors.Wrap(err, "remote version error")
+	}
+	verClient, err := versionutil.ParseGeneric(clientVersion)
+	if err != nil {
+		return "", pkgerrors.Wrap(err, "client version error")
+	}
+	// If the remote Major version is bigger, or if the Major versions are the same
+	// but the remote Minor is bigger, use the client version release. This handles Major bumps too.
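+	// For example (illustrative versions): client v1.13.2 and remote v1.14.1
+	// yield "stable-1.13" below.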
+ if verClient.Major() < verRemote.Major() || + (verClient.Major() == verRemote.Major()) && verClient.Minor() < verRemote.Minor() { + estimatedRelease := fmt.Sprintf("stable-%d.%d", verClient.Major(), verClient.Minor()) + klog.Infof("remote version is much newer: %s; falling back to: %s", remoteVersion, estimatedRelease) + return estimatedRelease, nil + } + return remoteVersion, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/core/BUILD new file mode 100644 index 0000000000..6ca92930e7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/BUILD @@ -0,0 +1,60 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "annotation_key_constants.go", + "doc.go", + "field_constants.go", + "json.go", + "objectreference.go", + "register.go", + "resource.go", + "taint.go", + "toleration.go", + "types.go", + "zz_generated.deepcopy.go", + ], + importpath = "k8s.io/kubernetes/pkg/apis/core", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "taint_test.go", + "toleration_test.go", + ], + embed = [":go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/apis/core/fuzzer:all-srcs", + "//pkg/apis/core/helper:all-srcs", + "//pkg/apis/core/install:all-srcs", + "//pkg/apis/core/pods:all-srcs", + "//pkg/apis/core/v1:all-srcs", + "//pkg/apis/core/validation:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS new file mode 100644 index 0000000000..7e195eca38 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS @@ -0,0 +1,45 @@ +approvers: +- erictune +- lavalamp +- smarterclayton +- thockin +- liggitt +# - bgrant0607 # manual escalations only +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- yujuhong +- brendandburns +- derekwaynecarr +- caesarxuchao +- vishh +- mikedanese +- liggitt +- nikhiljindal +- gmarek +- erictune +- davidopp +- pmorie +- sttts +- dchen1107 +- saad-ali +- zmerlynn +- luxas +- janetkuo +- justinsb +- pwittrock +- roberthbailey +- ncdc +- tallclair +- yifan-gu +- eparis +- mwielgus +- soltysh +- piosz +- jsafrane +- jbeda +labels: +- sig/apps diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go b/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go new file mode 100644 index 0000000000..bef73c0db0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go @@ -0,0 +1,104 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file should be consistent with pkg/api/v1/annotation_key_constants.go.
+
+package core
+
+const (
+	// ImagePolicyFailedOpenKey is added to pods created by failing open when the image policy
+	// webhook backend fails.
+	ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open"
+
+	// PodPresetOptOutAnnotationKey represents the annotation key for a pod to exempt itself from pod preset manipulation
+	PodPresetOptOutAnnotationKey string = "podpreset.admission.kubernetes.io/exclude"
+
+	// MirrorPodAnnotationKey represents the annotation key set by kubelets when creating mirror pods
+	MirrorPodAnnotationKey string = "kubernetes.io/config.mirror"
+
+	// TolerationsAnnotationKey represents the key of tolerations data (json serialized)
+	// in the Annotations of a Pod.
+	TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations"
+
+	// TaintsAnnotationKey represents the key of taints data (json serialized)
+	// in the Annotations of a Node.
+	TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints"
+
+	// SeccompPodAnnotationKey represents the key of a seccomp profile applied
+	// to all containers of a pod.
+	SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod"
+
+	// SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied
+	// to one container of a pod.
+	SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/"
+
+	// SeccompProfileRuntimeDefault represents the default seccomp profile used by container runtime.
+	SeccompProfileRuntimeDefault string = "runtime/default"
+
+	// DeprecatedSeccompProfileDockerDefault represents the default seccomp profile used by docker.
+	// This is now deprecated and should be replaced by SeccompProfileRuntimeDefault.
+	DeprecatedSeccompProfileDockerDefault string = "docker/default"
+
+	// PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized)
+	// in the Annotations of a Node.
+	PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods"
+
+	// ObjectTTLAnnotationKey represents a suggestion for kubelet for how long it can cache
+	// an object (e.g. secret, config map) before fetching it again from apiserver.
+	// This annotation can be attached to a node.
+	ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl"
+
+	// BootstrapCheckpointAnnotationKey represents a Resource (Pod) that should be checkpointed by
+	// the kubelet prior to running
+	BootstrapCheckpointAnnotationKey string = "node.kubernetes.io/bootstrap-checkpoint"
+
+	// NonConvertibleAnnotationPrefix is the annotation key prefix used to identify non-convertible json paths.
+	NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io"
+
+	kubectlPrefix = "kubectl.kubernetes.io/"
+
+	// LastAppliedConfigAnnotation is the annotation used to store the previous
+	// configuration of a resource for use in a three way diff by UpdateApplyAnnotation.
+	LastAppliedConfigAnnotation = kubectlPrefix + "last-applied-configuration"
+
+	// AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers
+	//
+	// It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to
+	// allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow
+	// access only from the CIDRs currently allocated to MIT & the USPS.
+	//
+	// Not all cloud providers support this annotation, though AWS & GCE do.
+	AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges"
+
+	// EndpointsLastChangeTriggerTime is the annotation key, set for endpoints objects, that
+	// represents the timestamp (stored as an RFC 3339 date-time string, e.g. '2018-10-22T19:32:52.1Z')
+	// of the last change, of some Pod or Service object, that triggered the endpoints object change.
+	// In other words, if a Pod / Service changed at time T0, that change was observed by the endpoints
+	// controller at T1, and the Endpoints object was changed at T2, the
+	// EndpointsLastChangeTriggerTime would be set to T0.
+	//
+	// The "endpoints change trigger" here means any Pod or Service change that resulted in the
+	// Endpoints object change.
+	//
+	// Given the definition of the "endpoints change trigger", please note that this annotation will
+	// be set ONLY for endpoints object changes triggered by either a Pod or a Service change. If the
+	// Endpoints object changes due to other reasons, this annotation won't be set (or updated if it's
+	// already set).
+	//
+	// This annotation will be used to compute the in-cluster network programming latency SLI, see
+	// https://github.com/kubernetes/community/blob/master/sig-scalability/slos/network_programming_latency.md
+	EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time"
+)
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go
new file mode 100644
index 0000000000..6017bfdab1
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+
+// Package core contains the latest (or "internal") version of the
+// Kubernetes API objects. This is the API objects as represented in memory.
+// The contract presented to clients is located in the versioned packages,
+// which are sub-directories. The first one is "v1". Those packages
+// describe how a particular version is serialized to storage/network.
+package core // import "k8s.io/kubernetes/pkg/apis/core"
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go b/vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go
new file mode 100644
index 0000000000..a26f80568c
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go
@@ -0,0 +1,38 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package core + +// Field path constants that are specific to the internal API +// representation. +const ( + NodeUnschedulableField = "spec.unschedulable" + ObjectNameField = "metadata.name" + PodHostField = "spec.nodeName" + PodStatusField = "status.phase" + SecretTypeField = "type" + + EventReasonField = "action" + EventSourceField = "reportingComponent" + EventTypeField = "type" + EventInvolvedKindField = "involvedObject.kind" + EventInvolvedNamespaceField = "involvedObject.namespace" + EventInvolvedNameField = "involvedObject.name" + EventInvolvedUIDField = "involvedObject.uid" + EventInvolvedAPIVersionField = "involvedObject.apiVersion" + EventInvolvedResourceVersionField = "involvedObject.resourceVersion" + EventInvolvedFieldPathField = "involvedObject.fieldPath" +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/json.go b/vendor/k8s.io/kubernetes/pkg/apis/core/json.go new file mode 100644 index 0000000000..937cd056c0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/json.go @@ -0,0 +1,28 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package core + +import "encoding/json" + +// This file implements json marshaling/unmarshaling interfaces on objects that are currently marshaled into annotations +// to prevent anyone from marshaling these internal structs. + +var _ = json.Marshaler(&AvoidPods{}) +var _ = json.Unmarshaler(&AvoidPods{}) + +func (AvoidPods) MarshalJSON() ([]byte, error) { panic("do not marshal internal struct") } +func (*AvoidPods) UnmarshalJSON([]byte) error { panic("do not unmarshal to internal struct") } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go b/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go new file mode 100644 index 0000000000..55b27f30b6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go @@ -0,0 +1,34 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+//TODO: consider making these methods functions, because we don't want helper
+//functions in the k8s.io/api repo.
+
+package core
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) {
+	obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
+}
+
+func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind {
+	return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
+}
+
+func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj }
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/register.go b/vendor/k8s.io/kubernetes/pkg/apis/core/register.go
new file mode 100644
index 0000000000..c6cd8681d8
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/register.go
@@ -0,0 +1,98 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package core
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = ""
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	AddToScheme   = SchemeBuilder.AddToScheme
+)
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+	if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil {
+		return err
+	}
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Pod{},
+		&PodList{},
+		&PodStatusResult{},
+		&PodTemplate{},
+		&PodTemplateList{},
+		&ReplicationControllerList{},
+		&ReplicationController{},
+		&ServiceList{},
+		&Service{},
+		&ServiceProxyOptions{},
+		&NodeList{},
+		&Node{},
+		&NodeProxyOptions{},
+		&Endpoints{},
+		&EndpointsList{},
+		&Binding{},
+		&Event{},
+		&EventList{},
+		&List{},
+		&LimitRange{},
+		&LimitRangeList{},
+		&ResourceQuota{},
+		&ResourceQuotaList{},
+		&Namespace{},
+		&NamespaceList{},
+		&ServiceAccount{},
+		&ServiceAccountList{},
+		&Secret{},
+		&SecretList{},
+		&PersistentVolume{},
+		&PersistentVolumeList{},
+		&PersistentVolumeClaim{},
+		&PersistentVolumeClaimList{},
+		&PodAttachOptions{},
+		&PodLogOptions{},
+		&PodExecOptions{},
+		&PodPortForwardOptions{},
+		&PodProxyOptions{},
+		&ComponentStatus{},
+		&ComponentStatusList{},
+		&SerializedReference{},
+		&RangeAllocation{},
+		&ConfigMap{},
+		&ConfigMapList{},
+	)
+
+	return nil
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go b/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go new
file mode 100644 index 0000000000..1367e00e56 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go @@ -0,0 +1,55 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package core + +import ( + "k8s.io/apimachinery/pkg/api/resource" +) + +func (self ResourceName) String() string { + return string(self) +} + +// Returns the CPU limit if specified. +func (self *ResourceList) Cpu() *resource.Quantity { + if val, ok := (*self)[ResourceCPU]; ok { + return &val + } + return &resource.Quantity{Format: resource.DecimalSI} +} + +// Returns the Memory limit if specified. +func (self *ResourceList) Memory() *resource.Quantity { + if val, ok := (*self)[ResourceMemory]; ok { + return &val + } + return &resource.Quantity{Format: resource.BinarySI} +} + +func (self *ResourceList) Pods() *resource.Quantity { + if val, ok := (*self)[ResourcePods]; ok { + return &val + } + return &resource.Quantity{} +} + +func (self *ResourceList) StorageEphemeral() *resource.Quantity { + if val, ok := (*self)[ResourceEphemeralStorage]; ok { + return &val + } + return &resource.Quantity{} +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go b/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go new file mode 100644 index 0000000000..ae1feb74d7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go @@ -0,0 +1,36 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +//TODO: consider making these methods functions, because we don't want helper +//functions in the k8s.io/api repo. + +package core + +import "fmt" + +// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect, +// if the two taints have same key:effect, regard as they match. +func (t *Taint) MatchTaint(taintToMatch Taint) bool { + return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect +} + +// taint.ToString() converts taint struct to string in format key=value:effect or key:effect. +func (t *Taint) ToString() string { + if len(t.Value) == 0 { + return fmt.Sprintf("%v:%v", t.Key, t.Effect) + } + return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go b/vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go new file mode 100644 index 0000000000..1dfbc9f1bb --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go @@ -0,0 +1,30 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+//TODO: consider making these methods functions, because we don't want helper
+//functions in the k8s.io/api repo.
+
+package core
+
+// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by <key,effect,operator,value>;
+// if the two tolerations have the same <key,effect,operator,value> combination, they are regarded as matching.
+// TODO: uniqueness check for tolerations in api validations.
+func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool {
+	return t.Key == tolerationToMatch.Key &&
+		t.Effect == tolerationToMatch.Effect &&
+		t.Operator == tolerationToMatch.Operator &&
+		t.Value == tolerationToMatch.Value
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/types.go b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go
new file mode 100644
index 0000000000..251547f601
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go
@@ -0,0 +1,4725 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package core
+
+import (
+	"k8s.io/apimachinery/pkg/api/resource"
+	metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/intstr"
+)
+
+const (
+	// NamespaceDefault means the object is in the default namespace which is applied when not specified by clients
+	NamespaceDefault = "default"
+	// NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces
+	NamespaceAll = ""
+	// NamespaceNone is the argument for a context when there is no namespace.
+	NamespaceNone = ""
+	// NamespaceSystem is the system namespace where we place system components.
+	NamespaceSystem = "kube-system"
+	// NamespacePublic is the namespace where we place public info (ConfigMaps)
+	NamespacePublic = "kube-public"
+	// NamespaceNodeLease is the namespace where we place node lease objects (used for node heartbeats)
+	NamespaceNodeLease = "kube-node-lease"
+	// TerminationMessagePathDefault means the default path to capture the application termination message running in a container
+	TerminationMessagePathDefault = "/dev/termination-log"
+)
+
+// Volume represents a named volume in a pod that may be accessed by any containers in the pod.
+type Volume struct {
+	// Required: This must be a DNS_LABEL. Each volume in a pod must have
+	// a unique name.
+	Name string
+	// The VolumeSource represents the location and type of a volume to mount.
+	// This is optional for now.
If not specified, the Volume is implied to be an EmptyDir. + // This implied behavior is deprecated and will be removed in a future version. + // +optional + VolumeSource +} + +// VolumeSource represents the source location of a volume to mount. +// Only one of its members may be specified. +type VolumeSource struct { + // HostPath represents file or directory on the host machine that is + // directly exposed to the container. This is generally used for system + // agents or other privileged things that are allowed to see the host + // machine. Most containers will NOT need this. + // --- + // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + // mount host directories as read/write. + // +optional + HostPath *HostPathVolumeSource + // EmptyDir represents a temporary directory that shares a pod's lifetime. + // +optional + EmptyDir *EmptyDirVolumeSource + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource + // AWSElasticBlockStore represents an AWS EBS disk that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource + // GitRepo represents a git repository at a particular revision. + // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + // into the Pod's container. + // +optional + GitRepo *GitRepoVolumeSource + // Secret represents a secret that should populate this volume. + // +optional + Secret *SecretVolumeSource + // NFS represents an NFS mount on the host that shares a pod's lifetime + // +optional + NFS *NFSVolumeSource + // ISCSIVolumeSource represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + ISCSI *ISCSIVolumeSource + // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime + // +optional + Glusterfs *GlusterfsVolumeSource + // PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace + // +optional + PersistentVolumeClaim *PersistentVolumeClaimVolumeSource + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime + // +optional + RBD *RBDVolumeSource + + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *QuobyteVolumeSource + + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. + // +optional + FlexVolume *FlexVolumeSource + + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // +optional + Cinder *CinderVolumeSource + + // CephFS represents a Cephfs mount on the host that shares a pod's lifetime + // +optional + CephFS *CephFSVolumeSource + + // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + Flocker *FlockerVolumeSource + + // DownwardAPI represents metadata about the pod that should populate this volume + // +optional + DownwardAPI *DownwardAPIVolumeSource + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. 
+ // +optional + FC *FCVolumeSource + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + AzureFile *AzureFileVolumeSource + // ConfigMap represents a configMap that should populate this volume + // +optional + ConfigMap *ConfigMapVolumeSource + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + VsphereVolume *VsphereVirtualDiskVolumeSource + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // +optional + AzureDisk *AzureDiskVolumeSource + // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine + PhotonPersistentDisk *PhotonPersistentDiskVolumeSource + // Items for all in one resources secrets, configmaps, and downward API + Projected *ProjectedVolumeSource + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOVolumeSource + // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod + // +optional + StorageOS *StorageOSVolumeSource +} + +// Similar to VolumeSource but meant for the administrator who creates PVs. +// Exactly one of its members must be set. +type PersistentVolumeSource struct { + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource + // AWSElasticBlockStore represents an AWS EBS disk that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource + // HostPath represents a directory on the host. + // Provisioned by a developer or tester. + // This is useful for single-node development and testing only! + // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. + // +optional + HostPath *HostPathVolumeSource + // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod + // +optional + Glusterfs *GlusterfsPersistentVolumeSource + // NFS represents an NFS mount on the host that shares a pod's lifetime + // +optional + NFS *NFSVolumeSource + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime + // +optional + RBD *RBDPersistentVolumeSource + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *QuobyteVolumeSource + // ISCSIPersistentVolumeSource represents an ISCSI resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + ISCSI *ISCSIPersistentVolumeSource + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. + // +optional + FlexVolume *FlexPersistentVolumeSource + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // +optional + Cinder *CinderPersistentVolumeSource + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + CephFS *CephFSPersistentVolumeSource + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. 
+	// +optional
+	FC *FCVolumeSource
+	// Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running
+	// +optional
+	Flocker *FlockerVolumeSource
+	// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+	// +optional
+	AzureFile *AzureFilePersistentVolumeSource
+	// VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
+	// +optional
+	VsphereVolume *VsphereVirtualDiskVolumeSource
+	// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+	// +optional
+	AzureDisk *AzureDiskVolumeSource
+	// PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine
+	PhotonPersistentDisk *PhotonPersistentDiskVolumeSource
+	// PortworxVolume represents a portworx volume attached and mounted on kubelets host machine
+	// +optional
+	PortworxVolume *PortworxVolumeSource
+	// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+	// +optional
+	ScaleIO *ScaleIOPersistentVolumeSource
+	// Local represents directly-attached storage with node affinity
+	// +optional
+	Local *LocalVolumeSource
+	// StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md
+	// +optional
+	StorageOS *StorageOSPersistentVolumeSource
+	// CSI (Container Storage Interface) represents storage that is handled by an external CSI driver.
+	// +optional
+	CSI *CSIPersistentVolumeSource
+}
+
+type PersistentVolumeClaimVolumeSource struct {
+	// ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume
+	ClaimName string
+	// Optional: Defaults to false (read/write). ReadOnly here
+	// will force the ReadOnly setting in VolumeMounts
+	// +optional
+	ReadOnly bool
+}
+
+const (
+	// BetaStorageClassAnnotation represents the beta/previous StorageClass annotation.
+	// It's deprecated and will be removed in a future release. (#51440)
+	BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class"
+
+	// MountOptionAnnotation defines the mount option annotation used in PVs
+	MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type PersistentVolume struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ObjectMeta
+
+	// Spec defines a persistent volume owned by the cluster
+	// +optional
+	Spec PersistentVolumeSpec
+
+	// Status represents the current information about persistent volume.
+	// +optional
+	Status PersistentVolumeStatus
+}
+
+type PersistentVolumeSpec struct {
+	// Capacity represents the actual resources of the volume
+	Capacity ResourceList
+	// Source represents the location and type of a volume to mount.
+	PersistentVolumeSource
+	// AccessModes contains all ways the volume can be mounted
+	// +optional
+	AccessModes []PersistentVolumeAccessMode
+	// ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim.
+	// ClaimRef is expected to be non-nil when bound.
+	// claim.VolumeName is the authoritative bind between PV and PVC.
+	// When set to a non-nil value, PVC.Spec.Selector of the referenced PVC is
+	// ignored, i.e. labels of this PV do not need to match PVC selector.
+ // +optional
+ ClaimRef *ObjectReference
+ // Optional: what happens to a persistent volume when released from its claim.
+ // +optional
+ PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy
+ // Name of the StorageClass to which this persistent volume belongs. An empty value
+ // means that this volume does not belong to any StorageClass.
+ // +optional
+ StorageClassName string
+ // A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will
+ // simply fail if one is invalid.
+ // +optional
+ MountOptions []string
+ // volumeMode defines if a volume is intended to be used with a formatted filesystem
+ // or to remain in raw block state. Value of Filesystem is implied when not included in spec.
+ // This is a beta feature.
+ // +optional
+ VolumeMode *PersistentVolumeMode
+ // NodeAffinity defines constraints that limit what nodes this volume can be accessed from.
+ // This field influences the scheduling of pods that use this volume.
+ // +optional
+ NodeAffinity *VolumeNodeAffinity
+}
+
+// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.
+type VolumeNodeAffinity struct {
+ // Required specifies hard node constraints that must be met.
+ Required *NodeSelector
+}
+
+// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes
+type PersistentVolumeReclaimPolicy string
+
+const (
+ // PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim.
+ // The volume plugin must support Recycling.
+ // DEPRECATED: The Recycle reclaim policy is deprecated. See announcement here: https://groups.google.com/forum/#!topic/kubernetes-dev/uexugCza84I
+ PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle"
+ // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim.
+ // The volume plugin must support Deletion.
+ PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete"
+ // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator.
+ // The default policy is Retain.
+ PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain"
+)
+
+// PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem.
+type PersistentVolumeMode string
+
+const (
+ // PersistentVolumeBlock means the volume will not be formatted with a filesystem and will remain a raw block device.
+ PersistentVolumeBlock PersistentVolumeMode = "Block"
+ // PersistentVolumeFilesystem means the volume will be or is formatted with a filesystem.
+ PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem"
+)
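To make the shape of these types concrete, here is a minimal sketch (not part of the diff) built against the vendored internal package; the object name, NFS server, and export path are invented placeholders:

package example

import (
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	core "k8s.io/kubernetes/pkg/apis/core"
)

// filesystemMode selects Filesystem explicitly; it is also the implied default.
var filesystemMode = core.PersistentVolumeFilesystem

// examplePV is a 10Gi NFS-backed volume that is retained after its claim is released.
var examplePV = core.PersistentVolume{
	ObjectMeta: metav1.ObjectMeta{Name: "example-pv"},
	Spec: core.PersistentVolumeSpec{
		Capacity: core.ResourceList{core.ResourceStorage: resource.MustParse("10Gi")},
		PersistentVolumeSource: core.PersistentVolumeSource{
			NFS: &core.NFSVolumeSource{Server: "nfs.example.internal", Path: "/exports/data"},
		},
		AccessModes:                   []core.PersistentVolumeAccessMode{core.ReadWriteOnce},
		PersistentVolumeReclaimPolicy: core.PersistentVolumeReclaimRetain,
		VolumeMode:                    &filesystemMode,
	},
}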
+
+type PersistentVolumeStatus struct {
+ // Phase indicates if a volume is available, bound to a claim, or released by a claim
+ // +optional
+ Phase PersistentVolumePhase
+ // A human-readable message indicating details about why the volume is in this state.
+ // +optional
+ Message string
+ // Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI
+ // +optional
+ Reason string
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type PersistentVolumeList struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ListMeta
+ Items []PersistentVolume
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PersistentVolumeClaim is a user's request for and claim to a persistent volume
+type PersistentVolumeClaim struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ObjectMeta
+
+ // Spec defines the volume requested by a pod author
+ // +optional
+ Spec PersistentVolumeClaimSpec
+
+ // Status represents the current information about a claim
+ // +optional
+ Status PersistentVolumeClaimStatus
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type PersistentVolumeClaimList struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ListMeta
+ Items []PersistentVolumeClaim
+}
+
+// PersistentVolumeClaimSpec describes the common attributes of storage devices
+// and allows a Source for provider-specific attributes
+type PersistentVolumeClaimSpec struct {
+ // Contains the types of access modes required
+ // +optional
+ AccessModes []PersistentVolumeAccessMode
+ // A label query over volumes to consider for binding. This selector is
+ // ignored when VolumeName is set
+ // +optional
+ Selector *metav1.LabelSelector
+ // Resources represents the minimum resources required
+ // +optional
+ Resources ResourceRequirements
+ // VolumeName is the binding reference to the PersistentVolume backing this
+ // claim. When set to a non-empty value, Selector is not evaluated
+ // +optional
+ VolumeName string
+ // Name of the StorageClass required by the claim.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1
+ // +optional
+ StorageClassName *string
+ // volumeMode defines what type of volume is required by the claim.
+ // Value of Filesystem is implied when not included in claim spec.
+ // This is a beta feature.
+ // +optional
+ VolumeMode *PersistentVolumeMode
+ // This field requires the VolumeSnapshotDataSource alpha feature gate to be
+ // enabled and currently VolumeSnapshot is the only supported data source.
+ // If the provisioner can support VolumeSnapshot data source, it will create
+ // a new volume and data will be restored to the volume at the same time.
+ // If the provisioner does not support VolumeSnapshot data source, volume will
+ // not be created and the failure will be reported as an event.
+ // In the future, we plan to support more data source types and the behavior
+ // of the provisioner may change.
+ // +optional
+ DataSource *TypedLocalObjectReference
+}
+
+type PersistentVolumeClaimConditionType string
+
+// These are valid conditions of a PVC
+const (
+ // A user-triggered resize of the PVC has started
+ PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing"
+ // PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node
+ PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending"
+)
+
+type PersistentVolumeClaimCondition struct {
+ Type PersistentVolumeClaimConditionType
+ Status ConditionStatus
+ // +optional
+ LastProbeTime metav1.Time
+ // +optional
+ LastTransitionTime metav1.Time
+ // +optional
+ Reason string
+ // +optional
+ Message string
+}
+
+type PersistentVolumeClaimStatus struct {
+ // Phase represents the current phase of PersistentVolumeClaim
+ // +optional
+ Phase PersistentVolumeClaimPhase
+ // AccessModes contains all ways the volume backing the PVC can be mounted
+ // +optional
+ AccessModes []PersistentVolumeAccessMode
+ // Represents the actual resources of the underlying volume
+ // +optional
+ Capacity ResourceList
+ // +optional
+ Conditions []PersistentVolumeClaimCondition
+}
+
+type PersistentVolumeAccessMode string
+
+const (
+ // can be mounted in read/write mode to exactly 1 host
+ ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce"
+ // can be mounted in read-only mode to many hosts
+ ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany"
+ // can be mounted in read/write mode to many hosts
+ ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany"
+)
+
+type PersistentVolumePhase string
+
+const (
+ // used for PersistentVolumes that are not available
+ VolumePending PersistentVolumePhase = "Pending"
+ // used for PersistentVolumes that are not yet bound
+ // Available volumes are held by the binder and matched to PersistentVolumeClaims
+ VolumeAvailable PersistentVolumePhase = "Available"
+ // used for PersistentVolumes that are bound
+ VolumeBound PersistentVolumePhase = "Bound"
+ // used for PersistentVolumes where the bound PersistentVolumeClaim was deleted
+ // released volumes must be recycled before becoming available again
+ // this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource
+ VolumeReleased PersistentVolumePhase = "Released"
+ // used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim
+ VolumeFailed PersistentVolumePhase = "Failed"
+)
+
+type PersistentVolumeClaimPhase string
+
+const (
+ // used for PersistentVolumeClaims that are not yet bound
+ ClaimPending PersistentVolumeClaimPhase = "Pending"
+ // used for PersistentVolumeClaims that are bound
+ ClaimBound PersistentVolumeClaimPhase = "Bound"
+ // used for PersistentVolumeClaims that lost their underlying
+ // PersistentVolume. The claim was bound to a PersistentVolume and this
+ // volume does not exist any longer and all data on it was lost.
+ ClaimLost PersistentVolumeClaimPhase = "Lost"
+)
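A matching claim, again as a hedged sketch against the internal types (the names, namespace, and storage class are placeholders); once bound, Status reports the Bound phase and granted capacity:

package example

import (
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	core "k8s.io/kubernetes/pkg/apis/core"
)

// storageClass is a placeholder class name.
var storageClass = "standard"

// examplePVC requests 5Gi of ReadWriteOnce storage.
var examplePVC = core.PersistentVolumeClaim{
	ObjectMeta: metav1.ObjectMeta{Name: "data", Namespace: "default"},
	Spec: core.PersistentVolumeClaimSpec{
		AccessModes: []core.PersistentVolumeAccessMode{core.ReadWriteOnce},
		Resources: core.ResourceRequirements{
			Requests: core.ResourceList{core.ResourceStorage: resource.MustParse("5Gi")},
		},
		StorageClassName: &storageClass,
	},
}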
+
+type HostPathType string
+
+const (
+ // For backwards compatibility, leave it empty if unset
+ HostPathUnset HostPathType = ""
+ // If nothing exists at the given path, an empty directory will be created there
+ // as needed with file mode 0755, having the same group and ownership as the Kubelet.
+ HostPathDirectoryOrCreate HostPathType = "DirectoryOrCreate"
+ // A directory must exist at the given path
+ HostPathDirectory HostPathType = "Directory"
+ // If nothing exists at the given path, an empty file will be created there
+ // as needed with file mode 0644, having the same group and ownership as the Kubelet.
+ HostPathFileOrCreate HostPathType = "FileOrCreate"
+ // A file must exist at the given path
+ HostPathFile HostPathType = "File"
+ // A UNIX socket must exist at the given path
+ HostPathSocket HostPathType = "Socket"
+ // A character device must exist at the given path
+ HostPathCharDev HostPathType = "CharDevice"
+ // A block device must exist at the given path
+ HostPathBlockDev HostPathType = "BlockDevice"
+)
+
+// Represents a host path mapped into a pod.
+// Host path volumes do not support ownership management or SELinux relabeling.
+type HostPathVolumeSource struct {
+ // If the path is a symlink, it will follow the link to the real path.
+ Path string
+ // Defaults to ""
+ Type *HostPathType
+}
+
+// Represents an empty directory for a pod.
+// Empty directory volumes support ownership management and SELinux relabeling.
+type EmptyDirVolumeSource struct {
+ // TODO: Longer term we want to represent the selection of underlying
+ // media more like a scheduling problem - user says what traits they
+ // need, we give them a backing store that satisfies that. For now
+ // this will cover the most common needs.
+ // Optional: what type of storage medium should back this directory.
+ // The default is "" which means to use the node's default medium.
+ // +optional
+ Medium StorageMedium
+ // Total amount of local storage required for this EmptyDir volume.
+ // The size limit is also applicable for memory medium.
+ // The maximum usage on memory medium EmptyDir would be the minimum value between
+ // the SizeLimit specified here and the sum of memory limits of all containers in a pod.
+ // The default is nil which means that the limit is undefined.
+ // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir
+ // +optional
+ SizeLimit *resource.Quantity
+}
+
+// StorageMedium defines ways that storage can be allocated to a volume.
+type StorageMedium string
+
+const (
+ StorageMediumDefault StorageMedium = "" // use whatever the default is for the node
+ StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs)
+ StorageMediumHugePages StorageMedium = "HugePages" // use hugepages
+)
+
+// Protocol defines network protocols supported for things like container ports.
+type Protocol string
+
+const (
+ // ProtocolTCP is the TCP protocol.
+ ProtocolTCP Protocol = "TCP"
+ // ProtocolUDP is the UDP protocol.
+ ProtocolUDP Protocol = "UDP"
+ // ProtocolSCTP is the SCTP protocol.
+ ProtocolSCTP Protocol = "SCTP"
+)
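A short sketch of the EmptyDir fields above (the Volume wrapper type is defined earlier in this file; the volume name and size limit are placeholders):

package example

import (
	"k8s.io/apimachinery/pkg/api/resource"
	core "k8s.io/kubernetes/pkg/apis/core"
)

// scratchLimit caps the tmpfs-backed directory at 256Mi.
var scratchLimit = resource.MustParse("256Mi")

// scratchVolume is a memory-backed EmptyDir volume.
var scratchVolume = core.Volume{
	Name: "scratch",
	VolumeSource: core.VolumeSource{
		EmptyDir: &core.EmptyDirVolumeSource{
			Medium:    core.StorageMediumMemory,
			SizeLimit: &scratchLimit,
		},
	},
}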
+
+// Represents a Persistent Disk resource in Google Compute Engine.
+//
+// A GCE PD must exist before mounting to a container. The disk must
+// also be in the same GCE project and zone as the kubelet. A GCE PD
+// can only be mounted as read/write once or read-only many times. GCE
+// PDs support ownership management and SELinux relabeling.
+type GCEPersistentDiskVolumeSource struct {
+ // Unique name of the PD resource. Used to identify the disk in GCE
+ PDName string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ // +optional
+ FSType string
+ // Optional: Partition on the disk to mount.
+ // If omitted, kubelet will attempt to mount the device name.
+ // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty.
+ // +optional
+ Partition int32
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+// Represents an ISCSI disk.
+// ISCSI volumes can only be mounted as read/write once.
+// ISCSI volumes support ownership management and SELinux relabeling.
+type ISCSIVolumeSource struct {
+ // Required: iSCSI target portal
+ // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260)
+ // +optional
+ TargetPortal string
+ // Required: target iSCSI Qualified Name
+ // +optional
+ IQN string
+ // Required: iSCSI target lun number
+ // +optional
+ Lun int32
+ // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.
+ // +optional
+ ISCSIInterface string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ // +optional
+ FSType string
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+ // Optional: list of iSCSI target portal ips for high availability.
+ // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260)
+ // +optional
+ Portals []string
+ // Optional: whether to support iSCSI Discovery CHAP authentication
+ // +optional
+ DiscoveryCHAPAuth bool
+ // Optional: whether to support iSCSI Session CHAP authentication
+ // +optional
+ SessionCHAPAuth bool
+ // Optional: CHAP secret for iSCSI target and initiator authentication.
+ // The secret is used if either DiscoveryCHAPAuth or SessionCHAPAuth is true
+ // +optional
+ SecretRef *LocalObjectReference
+ // Optional: Custom initiator name per volume.
+ // If initiatorName is specified with iscsiInterface simultaneously, a new iSCSI interface
+ // <target portal>:<volume name> will be created for the connection.
+ // +optional
+ InitiatorName *string
+}
+
+// ISCSIPersistentVolumeSource represents an ISCSI disk.
+// ISCSI volumes can only be mounted as read/write once.
+// ISCSI volumes support ownership management and SELinux relabeling.
+type ISCSIPersistentVolumeSource struct {
+ // Required: iSCSI target portal
+ // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260)
+ // +optional
+ TargetPortal string
+ // Required: target iSCSI Qualified Name
+ // +optional
+ IQN string
+ // Required: iSCSI target lun number
+ // +optional
+ Lun int32
+ // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.
+ // +optional
+ ISCSIInterface string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ // +optional
+ FSType string
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+ // Optional: list of iSCSI target portal ips for high availability.
+ // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260)
+ // +optional
+ Portals []string
+ // Optional: whether to support iSCSI Discovery CHAP authentication
+ // +optional
+ DiscoveryCHAPAuth bool
+ // Optional: whether to support iSCSI Session CHAP authentication
+ // +optional
+ SessionCHAPAuth bool
+ // Optional: CHAP secret for iSCSI target and initiator authentication.
+ // The secret is used if either DiscoveryCHAPAuth or SessionCHAPAuth is true
+ // +optional
+ SecretRef *SecretReference
+ // Optional: Custom initiator name per volume.
+ // If initiatorName is specified with iscsiInterface simultaneously, a new iSCSI interface
+ // <target portal>:<volume name> will be created for the connection.
+ // +optional
+ InitiatorName *string
+}
+
+// Represents a Fibre Channel volume.
+// Fibre Channel volumes can only be mounted as read/write once.
+// Fibre Channel volumes support ownership management and SELinux relabeling.
+type FCVolumeSource struct {
+ // Optional: FC target worldwide names (WWNs)
+ // +optional
+ TargetWWNs []string
+ // Optional: FC target lun number
+ // +optional
+ Lun *int32
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ // +optional
+ FSType string
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+ // Optional: FC volume World Wide Identifiers (WWIDs)
+ // Either WWIDs or TargetWWNs and Lun must be set, but not both simultaneously.
+ // +optional
+ WWIDs []string
+}
+
+// FlexPersistentVolumeSource represents a generic persistent volume resource that is
+// provisioned/attached using an exec based plugin.
+type FlexPersistentVolumeSource struct {
+ // Driver is the name of the driver to use for this volume.
+ Driver string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on the FlexVolume script.
+ // +optional
+ FSType string
+ // Optional: SecretRef is a reference to the secret object containing
+ // sensitive information to pass to the plugin scripts. This may be
+ // empty if no secret object is specified. If the secret object
+ // contains more than one secret, all secrets are passed to the plugin
+ // scripts.
+ // +optional
+ SecretRef *SecretReference
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+ // Optional: Extra driver options if any.
+ // +optional
+ Options map[string]string
+}
+
+// FlexVolume represents a generic volume resource that is
+// provisioned/attached using an exec based plugin.
+type FlexVolumeSource struct {
+ // Driver is the name of the driver to use for this volume.
+ Driver string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on the FlexVolume script.
+ // +optional
+ FSType string
+ // Optional: SecretRef is a reference to the secret object containing
+ // sensitive information to pass to the plugin scripts. This may be
+ // empty if no secret object is specified. If the secret object
+ // contains more than one secret, all secrets are passed to the plugin
+ // scripts.
+ // +optional
+ SecretRef *LocalObjectReference
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+ // Optional: Extra driver options if any.
+ // +optional
+ Options map[string]string
+}
+
+// Represents a Persistent Disk resource in AWS.
+//
+// An AWS EBS disk must exist before mounting to a container. The disk
+// must also be in the same AWS zone as the kubelet. An AWS EBS disk
+// can only be mounted as read/write once. AWS EBS volumes support
+// ownership management and SELinux relabeling.
+type AWSElasticBlockStoreVolumeSource struct {
+ // Unique id of the persistent disk resource. Used to identify the disk in AWS
+ VolumeID string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ // +optional
+ FSType string
+ // Optional: Partition on the disk to mount.
+ // If omitted, kubelet will attempt to mount the device name.
+ // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty.
+ // +optional
+ Partition int32
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+// Represents a volume that is populated with the contents of a git repository.
+// Git repo volumes do not support ownership management.
+// Git repo volumes support SELinux relabeling.
+//
+// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+// into the Pod's container.
+type GitRepoVolumeSource struct {
+ // Repository URL
+ Repository string
+ // Commit hash, this is optional
+ // +optional
+ Revision string
+ // Clone target, this is optional
+ // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
+ // git repository. Otherwise, if specified, the volume will contain the git repository in
+ // the subdirectory with the given name.
+ // +optional
+ Directory string
+ // TODO: Consider credentials here.
+}
+
+// Adapts a Secret into a volume.
+//
+// The contents of the target Secret's Data field will be presented in a volume
+// as files using the keys in the Data field as the file names.
+// Secret volumes support ownership management and SELinux relabeling.
+type SecretVolumeSource struct {
+ // Name of the secret in the pod's namespace to use.
+ // +optional
+ SecretName string
+ // If unspecified, each key-value pair in the Data field of the referenced
+ // Secret will be projected into the volume as a file whose name is the
+ // key and content is the value. If specified, the listed keys will be
+ // projected into the specified paths, and unlisted keys will not be
+ // present. If a key is specified which is not present in the Secret,
+ // the volume setup will error unless it is marked optional. Paths must be
+ // relative and may not contain the '..' path or start with '..'.
+ // +optional
+ Items []KeyToPath
+ // Mode bits to use on created files by default. Must be a value between
+ // 0 and 0777.
+ // Directories within the path are not affected by this setting.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
+ // +optional
+ DefaultMode *int32
+ // Specify whether the Secret or its key must be defined
+ // +optional
+ Optional *bool
+}
+
+// Adapts a secret into a projected volume.
+//
+// The contents of the target Secret's Data field will be presented in a
+// projected volume as files using the keys in the Data field as the file names.
+// Note that this is identical to a secret volume source without the default
+// mode.
+type SecretProjection struct {
+ LocalObjectReference
+ // If unspecified, each key-value pair in the Data field of the referenced
+ // Secret will be projected into the volume as a file whose name is the
+ // key and content is the value. If specified, the listed keys will be
+ // projected into the specified paths, and unlisted keys will not be
+ // present. If a key is specified which is not present in the Secret,
+ // the volume setup will error unless it is marked optional. Paths must be
+ // relative and may not contain the '..' path or start with '..'.
+ // +optional
+ Items []KeyToPath
+ // Specify whether the Secret or its key must be defined
+ // +optional
+ Optional *bool
+}
+
+// Represents an NFS mount that lasts the lifetime of a pod.
+// NFS volumes do not support ownership management or SELinux relabeling.
+type NFSVolumeSource struct {
+ // Server is the hostname or IP address of the NFS server
+ Server string
+
+ // Path is the exported NFS share
+ Path string
+
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the NFS export to be mounted with read-only permissions
+ // +optional
+ ReadOnly bool
+}
+
+// Represents a Quobyte mount that lasts the lifetime of a pod.
+// Quobyte volumes do not support ownership management or SELinux relabeling.
+type QuobyteVolumeSource struct {
+ // Registry represents a single or multiple Quobyte Registry services
+ // specified as a string of host:port pairs (multiple entries are separated with commas)
+ // which acts as the central registry for volumes
+ Registry string
+
+ // Volume is a string that references an already created Quobyte volume by name.
+ Volume string
+
+ // Defaults to false (read/write). ReadOnly here will force
+ // the Quobyte to be mounted with read-only permissions
+ // +optional
+ ReadOnly bool
+
+ // User to map volume access to
+ // Defaults to the root user
+ // +optional
+ User string
+
+ // Group to map volume access to
+ // Default is no group
+ // +optional
+ Group string
+}
+
+// Represents a Glusterfs mount that lasts the lifetime of a pod.
+// Glusterfs volumes do not support ownership management or SELinux relabeling.
+type GlusterfsVolumeSource struct {
+ // Required: EndpointsName is the endpoint name that details Glusterfs topology
+ EndpointsName string
+
+ // Required: Path is the Glusterfs volume path
+ Path string
+
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the Glusterfs to be mounted with read-only permissions
+ // +optional
+ ReadOnly bool
+}
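A brief sketch of the SecretVolumeSource fields above (the secret name is a placeholder); DefaultMode restricts the projected files to owner-read:

package example

import core "k8s.io/kubernetes/pkg/apis/core"

// secretFileMode makes projected files readable by the owner only.
var secretFileMode = int32(0400)

// tlsVolume surfaces the keys of a Secret named "server-tls" as files.
var tlsVolume = core.Volume{
	Name: "tls",
	VolumeSource: core.VolumeSource{
		Secret: &core.SecretVolumeSource{
			SecretName:  "server-tls",
			DefaultMode: &secretFileMode,
		},
	},
}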
+
+// Represents a Glusterfs mount that lasts the lifetime of a pod.
+// Glusterfs volumes do not support ownership management or SELinux relabeling.
+type GlusterfsPersistentVolumeSource struct {
+ // EndpointsName is the endpoint name that details Glusterfs topology.
+ // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+ EndpointsName string
+
+ // Path is the Glusterfs volume path.
+ // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+ Path string
+
+ // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ // Defaults to false.
+ // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+ // +optional
+ ReadOnly bool
+
+ // EndpointsNamespace is the namespace that contains the Glusterfs endpoint.
+ // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC.
+ // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
+ // +optional
+ EndpointsNamespace *string
+}
+
+// Represents a Rados Block Device mount that lasts the lifetime of a pod.
+// RBD volumes support ownership management and SELinux relabeling.
+type RBDVolumeSource struct {
+ // Required: CephMonitors is a collection of Ceph monitors
+ CephMonitors []string
+ // Required: RBDImage is the rados image name
+ RBDImage string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ // +optional
+ FSType string
+ // Optional: RadosPool is the rados pool name, default is rbd
+ // +optional
+ RBDPool string
+ // Optional: RBDUser is the rados user name, default is admin
+ // +optional
+ RadosUser string
+ // Optional: Keyring is the path to the key ring for RBDUser, default is /etc/ceph/keyring
+ // +optional
+ Keyring string
+ // Optional: SecretRef is the name of the authentication secret for RBDUser, default is nil.
+ // +optional
+ SecretRef *LocalObjectReference
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+// Represents a Rados Block Device mount that lasts the lifetime of a pod.
+// RBD volumes support ownership management and SELinux relabeling.
+type RBDPersistentVolumeSource struct {
+ // Required: CephMonitors is a collection of Ceph monitors
+ CephMonitors []string
+ // Required: RBDImage is the rados image name
+ RBDImage string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ // +optional
+ FSType string
+ // Optional: RadosPool is the rados pool name, default is rbd
+ // +optional
+ RBDPool string
+ // Optional: RBDUser is the rados user name, default is admin
+ // +optional
+ RadosUser string
+ // Optional: Keyring is the path to the key ring for RBDUser, default is /etc/ceph/keyring
+ // +optional
+ Keyring string
+ // Optional: SecretRef is a reference to the authentication secret for User, default is empty.
+ // +optional
+ SecretRef *SecretReference
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+// Represents a cinder volume resource in OpenStack. A Cinder volume
+// must exist before mounting to a container. The volume must also be
+// in the same region as the kubelet. Cinder volumes support ownership
+// management and SELinux relabeling.
+type CinderVolumeSource struct {
+ // Unique id of the volume used to identify the cinder volume
+ VolumeID string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // +optional
+ FSType string
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+ // Optional: points to a secret object containing parameters used to connect
+ // to OpenStack.
+ // +optional
+ SecretRef *LocalObjectReference
+}
+
+// Represents a cinder volume resource in OpenStack. A Cinder volume
+// must exist before mounting to a container. The volume must also be
+// in the same region as the kubelet. Cinder volumes support ownership
+// management and SELinux relabeling.
+type CinderPersistentVolumeSource struct {
+ // Unique id of the volume used to identify the cinder volume
+ VolumeID string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // +optional
+ FSType string
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+ // Optional: points to a secret object containing parameters used to connect
+ // to OpenStack.
+ // +optional
+ SecretRef *SecretReference
+}
+
+// Represents a Ceph Filesystem mount that lasts the lifetime of a pod
+// Cephfs volumes do not support ownership management or SELinux relabeling.
+type CephFSVolumeSource struct {
+ // Required: Monitors is a collection of Ceph monitors
+ Monitors []string
+ // Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+ // +optional
+ Path string
+ // Optional: User is the rados user name, default is admin
+ // +optional
+ User string
+ // Optional: SecretFile is the path to the key ring for User, default is /etc/ceph/user.secret
+ // +optional
+ SecretFile string
+ // Optional: SecretRef is a reference to the authentication secret for User, default is empty.
+ // +optional
+ SecretRef *LocalObjectReference
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+// SecretReference represents a Secret Reference. It has enough information to retrieve a secret
+// in any namespace
+type SecretReference struct {
+ // Name is unique within a namespace to reference a secret resource.
+ // +optional
+ Name string
+ // Namespace defines the space within which the secret name must be unique.
+ // +optional
+ Namespace string
+}
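Given this provider's OpenStack focus, a hedged sketch of the Cinder persistent source above; the volume UUID and secret names are invented placeholders, and the SecretReference points at a Secret holding OpenStack connection parameters:

package example

import core "k8s.io/kubernetes/pkg/apis/core"

// cinderSource points a PersistentVolume at a pre-created Cinder volume.
var cinderSource = core.PersistentVolumeSource{
	Cinder: &core.CinderPersistentVolumeSource{
		VolumeID: "11111111-2222-3333-4444-555555555555",
		FSType:   "ext4",
		SecretRef: &core.SecretReference{
			Name:      "openstack-credentials",
			Namespace: "kube-system",
		},
	},
}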
+
+// Represents a Ceph Filesystem mount that lasts the lifetime of a pod
+// Cephfs volumes do not support ownership management or SELinux relabeling.
+type CephFSPersistentVolumeSource struct {
+ // Required: Monitors is a collection of Ceph monitors
+ Monitors []string
+ // Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+ // +optional
+ Path string
+ // Optional: User is the rados user name, default is admin
+ // +optional
+ User string
+ // Optional: SecretFile is the path to the key ring for User, default is /etc/ceph/user.secret
+ // +optional
+ SecretFile string
+ // Optional: SecretRef is a reference to the authentication secret for User, default is empty.
+ // +optional
+ SecretRef *SecretReference
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+// Represents a Flocker volume mounted by the Flocker agent.
+// One and only one of datasetName and datasetUUID should be set.
+// Flocker volumes do not support ownership management or SELinux relabeling.
+type FlockerVolumeSource struct {
+ // Name of the dataset, stored as metadata -> name on the dataset for Flocker.
+ // This field should be considered deprecated.
+ // +optional
+ DatasetName string
+ // UUID of the dataset. This is the unique identifier of a Flocker dataset
+ // +optional
+ DatasetUUID string
+}
+
+// Represents a volume containing downward API info.
+// Downward API volumes support ownership management and SELinux relabeling.
+type DownwardAPIVolumeSource struct {
+ // Items is a list of DownwardAPIVolume file
+ // +optional
+ Items []DownwardAPIVolumeFile
+ // Mode bits to use on created files by default. Must be a value between
+ // 0 and 0777.
+ // Directories within the path are not affected by this setting.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
+ // +optional
+ DefaultMode *int32
+}
+
+// Represents a single file containing information from the downward API
+type DownwardAPIVolumeFile struct {
+ // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'
+ Path string
+ // Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported.
+ // +optional
+ FieldRef *ObjectFieldSelector
+ // Selects a resource of the container: only resources limits and requests
+ // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ // +optional
+ ResourceFieldRef *ResourceFieldSelector
+ // Optional: mode bits to use on this file, must be a value between 0
+ // and 0777. If not specified, the volume defaultMode will be used.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
+ // +optional
+ Mode *int32
+}
+
+// Represents downward API info for projecting into a projected volume.
+// Note that this is identical to a downwardAPI volume source without the default
+// mode.
+type DownwardAPIProjection struct {
+ // Items is a list of DownwardAPIVolume file
+ // +optional
+ Items []DownwardAPIVolumeFile
+}
+
+// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+type AzureFileVolumeSource struct {
+ // the name of the secret that contains Azure Storage Account Name and Key
+ SecretName string
+ // Share Name
+ ShareName string
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+type AzureFilePersistentVolumeSource struct {
+ // the name of the secret that contains Azure Storage Account Name and Key
+ SecretName string
+ // Share Name
+ ShareName string
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+ // the namespace of the secret that contains Azure Storage Account Name and Key
+ // default is the same as the Pod
+ // +optional
+ SecretNamespace *string
+}
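A small sketch of the downward API types above; it projects the pod's labels into a file named "labels" (the ObjectFieldSelector type is defined further down in this file, and the names here are placeholders):

package example

import core "k8s.io/kubernetes/pkg/apis/core"

// podInfoVolume exposes pod metadata to the container as files.
var podInfoVolume = core.Volume{
	Name: "pod-info",
	VolumeSource: core.VolumeSource{
		DownwardAPI: &core.DownwardAPIVolumeSource{
			Items: []core.DownwardAPIVolumeFile{{
				Path:     "labels",
				FieldRef: &core.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.labels"},
			}},
		},
	},
}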
+
+// Represents a vSphere volume resource.
+type VsphereVirtualDiskVolumeSource struct {
+ // Path that identifies the vSphere volume vmdk
+ VolumePath string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // +optional
+ FSType string
+ // Storage Policy Based Management (SPBM) profile name.
+ // +optional
+ StoragePolicyName string
+ // Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
+ // +optional
+ StoragePolicyID string
+}
+
+// Represents a Photon Controller persistent disk resource.
+type PhotonPersistentDiskVolumeSource struct {
+ // ID that identifies the Photon Controller persistent disk
+ PdID string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ FSType string
+}
+
+// PortworxVolumeSource represents a Portworx volume resource.
+type PortworxVolumeSource struct {
+ // VolumeID uniquely identifies a Portworx volume
+ VolumeID string
+ // FSType represents the filesystem type to mount
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
+ // +optional
+ FSType string
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+type AzureDataDiskCachingMode string
+type AzureDataDiskKind string
+
+const (
+ AzureDataDiskCachingNone AzureDataDiskCachingMode = "None"
+ AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly"
+ AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite"
+
+ AzureSharedBlobDisk AzureDataDiskKind = "Shared"
+ AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated"
+ AzureManagedDisk AzureDataDiskKind = "Managed"
+)
+
+// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+type AzureDiskVolumeSource struct {
+ // The Name of the data disk in the blob storage
+ DiskName string
+ // The URI of the data disk in the blob storage
+ DataDiskURI string
+ // Host Caching mode: None, Read Only, Read Write.
+ // +optional
+ CachingMode *AzureDataDiskCachingMode
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // +optional
+ FSType *string
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly *bool
+ // Expected values: Shared (multiple blob disks per storage account),
+ // Dedicated (single blob disk per storage account), Managed (Azure managed
+ // data disk; only in managed availability set). Defaults to Shared.
+ Kind *AzureDataDiskKind
+}
+
+// ScaleIOVolumeSource represents a persistent ScaleIO volume
+type ScaleIOVolumeSource struct {
+ // The host address of the ScaleIO API Gateway.
+ Gateway string
+ // The name of the storage system as configured in ScaleIO.
+ System string
+ // SecretRef references the secret for ScaleIO user and other
+ // sensitive information. If this is not provided, the Login operation will fail.
+ SecretRef *LocalObjectReference
+ // Flag to enable/disable SSL communication with Gateway, default false
+ // +optional
+ SSLEnabled bool
+ // The name of the ScaleIO Protection Domain for the configured storage.
+ // +optional
+ ProtectionDomain string
+ // The ScaleIO Storage Pool associated with the protection domain.
+ // +optional
+ StoragePool string
+ // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+ // Default is ThinProvisioned.
+ // +optional
+ StorageMode string
+ // The name of a volume already created in the ScaleIO system
+ // that is associated with this volume source.
+ VolumeName string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs".
+ // Default is "xfs".
+ // +optional
+ FSType string
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume that can be defined
+// by an admin via a storage class, for instance.
+type ScaleIOPersistentVolumeSource struct {
+ // The host address of the ScaleIO API Gateway.
+ Gateway string
+ // The name of the storage system as configured in ScaleIO.
+ System string
+ // SecretRef references the secret for ScaleIO user and other
+ // sensitive information. If this is not provided, the Login operation will fail.
+ SecretRef *SecretReference
+ // Flag to enable/disable SSL communication with Gateway, default false
+ // +optional
+ SSLEnabled bool
+ // The name of the ScaleIO Protection Domain for the configured storage.
+ // +optional
+ ProtectionDomain string
+ // The ScaleIO Storage Pool associated with the protection domain.
+ // +optional
+ StoragePool string
+ // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+ // Default is ThinProvisioned.
+ // +optional
+ StorageMode string
+ // The name of a volume created in the ScaleIO system
+ // that is associated with this volume source.
+ VolumeName string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs".
+ // Default is "xfs".
+ // +optional
+ FSType string
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+// Represents a StorageOS persistent volume resource.
+type StorageOSVolumeSource struct {
+ // VolumeName is the human-readable name of the StorageOS volume. Volume
+ // names are only unique within a namespace.
+ VolumeName string
+ // VolumeNamespace specifies the scope of the volume within StorageOS. If no
+ // namespace is specified then the Pod's namespace will be used. This allows the
+ // Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+ // Set VolumeName to any name to override the default behaviour.
+ // Set to "default" if you are not using namespaces within StorageOS.
+ // Namespaces that do not pre-exist within StorageOS will be created.
+ // +optional
+ VolumeNamespace string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // +optional
+ FSType string
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+ // SecretRef specifies the secret to use for obtaining the StorageOS API
+ // credentials. If not specified, default values will be attempted.
+ // +optional
+ SecretRef *LocalObjectReference
+}
+
+// Represents a StorageOS persistent volume resource.
+type StorageOSPersistentVolumeSource struct {
+ // VolumeName is the human-readable name of the StorageOS volume. Volume
+ // names are only unique within a namespace.
+ VolumeName string
+ // VolumeNamespace specifies the scope of the volume within StorageOS. If no
+ // namespace is specified then the Pod's namespace will be used. This allows the
+ // Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+ // Set VolumeName to any name to override the default behaviour.
+ // Set to "default" if you are not using namespaces within StorageOS.
+ // Namespaces that do not pre-exist within StorageOS will be created.
+ // +optional
+ VolumeNamespace string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // +optional
+ FSType string
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+ // SecretRef specifies the secret to use for obtaining the StorageOS API
+ // credentials. If not specified, default values will be attempted.
+ // +optional
+ SecretRef *ObjectReference
+}
+
+// Adapts a ConfigMap into a volume.
+//
+// The contents of the target ConfigMap's Data field will be presented in a
+// volume as files using the keys in the Data field as the file names, unless
+// the items element is populated with specific mappings of keys to paths.
+// ConfigMap volumes support ownership management and SELinux relabeling.
+type ConfigMapVolumeSource struct {
+ LocalObjectReference
+ // If unspecified, each key-value pair in the Data field of the referenced
+ // ConfigMap will be projected into the volume as a file whose name is the
+ // key and content is the value. If specified, the listed keys will be
+ // projected into the specified paths, and unlisted keys will not be
+ // present. If a key is specified which is not present in the ConfigMap,
+ // the volume setup will error unless it is marked optional. Paths must be
+ // relative and may not contain the '..' path or start with '..'.
+ // +optional
+ Items []KeyToPath
+ // Mode bits to use on created files by default. Must be a value between
+ // 0 and 0777.
+ // Directories within the path are not affected by this setting.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
+ // +optional
+ DefaultMode *int32
+ // Specify whether the ConfigMap or its keys must be defined
+ // +optional
+ Optional *bool
+}
+
+// Adapts a ConfigMap into a projected volume.
+//
+// The contents of the target ConfigMap's Data field will be presented in a
+// projected volume as files using the keys in the Data field as the file names,
+// unless the items element is populated with specific mappings of keys to paths.
+// Note that this is identical to a configmap volume source without the default
+// mode.
+type ConfigMapProjection struct {
+ LocalObjectReference
+ // If unspecified, each key-value pair in the Data field of the referenced
+ // ConfigMap will be projected into the volume as a file whose name is the
+ // key and content is the value. If specified, the listed keys will be
+ // projected into the specified paths, and unlisted keys will not be
+ // present. If a key is specified which is not present in the ConfigMap,
+ // the volume setup will error unless it is marked optional. Paths must be
+ // relative and may not contain the '..' path or start with '..'.
+ // +optional
+ Items []KeyToPath
+ // Specify whether the ConfigMap or its keys must be defined
+ // +optional
+ Optional *bool
+}
+
+// ServiceAccountTokenProjection represents a projected service account token
+// volume. This projection can be used to insert a service account token into
+// the pod's runtime filesystem for use against APIs (Kubernetes API Server or
+// otherwise).
+type ServiceAccountTokenProjection struct {
+ // Audience is the intended audience of the token. A recipient of a token
+ // must identify itself with an identifier specified in the audience of the
+ // token, and otherwise should reject the token. The audience defaults to the
+ // identifier of the apiserver.
+ Audience string
+ // ExpirationSeconds is the requested duration of validity of the service
+ // account token. As the token approaches expiration, the kubelet volume
+ // plugin will proactively rotate the service account token. The kubelet will
+ // start trying to rotate the token if the token is older than 80 percent of
+ // its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ // and must be at least 10 minutes.
+ ExpirationSeconds int64
+ // Path is the path relative to the mount point of the file to project the
+ // token into.
+ Path string
+}
+
+// Represents a projected volume source
+type ProjectedVolumeSource struct {
+ // list of volume projections
+ Sources []VolumeProjection
+ // Mode bits to use on created files by default. Must be a value between
+ // 0 and 0777.
+ // Directories within the path are not affected by this setting.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
+ // +optional
+ DefaultMode *int32
+}
+
+// Projection that may be projected along with other supported volume types
+type VolumeProjection struct {
+ // all types below are the supported types for projection into the same volume
+
+ // information about the secret data to project
+ Secret *SecretProjection
+ // information about the downwardAPI data to project
+ DownwardAPI *DownwardAPIProjection
+ // information about the configMap data to project
+ ConfigMap *ConfigMapProjection
+ // information about the serviceAccountToken data to project
+ ServiceAccountToken *ServiceAccountTokenProjection
+}
+
+// Maps a string key to a path within a volume.
+type KeyToPath struct {
+ // The key to project.
+ Key string
+
+ // The relative path of the file to map the key to.
+ // May not be an absolute path.
+ // May not contain the path element '..'.
+ // May not start with the string '..'.
+ Path string
+ // Optional: mode bits to use on this file, should be a value between 0
+ // and 0777. If not specified, the volume defaultMode will be used.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
+ // +optional
+ Mode *int32
+}
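Two hedged sketches of the projection machinery above; all names, the audience, and the paths are invented placeholders. The first maps one ConfigMap key to a chosen relative path, the second combines a Secret and a bound service account token in a single projected volume:

package example

import core "k8s.io/kubernetes/pkg/apis/core"

// appConfigVolume mounts one key of a ConfigMap at conf/app.yaml.
var appConfigVolume = core.Volume{
	Name: "app-config",
	VolumeSource: core.VolumeSource{
		ConfigMap: &core.ConfigMapVolumeSource{
			LocalObjectReference: core.LocalObjectReference{Name: "app-config"},
			Items:                []core.KeyToPath{{Key: "app.yaml", Path: "conf/app.yaml"}},
		},
	},
}

// combinedVolume projects a Secret and a service account token together.
var combinedVolume = core.Volume{
	Name: "combined",
	VolumeSource: core.VolumeSource{
		Projected: &core.ProjectedVolumeSource{
			Sources: []core.VolumeProjection{
				{Secret: &core.SecretProjection{
					LocalObjectReference: core.LocalObjectReference{Name: "app-secret"},
				}},
				{ServiceAccountToken: &core.ServiceAccountTokenProjection{
					Audience:          "https://kubernetes.default.svc",
					ExpirationSeconds: 3600,
					Path:              "token",
				}},
			},
		},
	},
}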
+
+// Local represents directly-attached storage with node affinity (Beta feature)
+type LocalVolumeSource struct {
+ // The full path to the volume on the node.
+ // It can be either a directory or block device (disk, partition, ...).
+ Path string
+
+ // Filesystem type to mount.
+ // It applies only when the Path is a block device.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a filesystem if unspecified.
+ // +optional
+ FSType *string
+}
+
+// Represents storage that is managed by an external CSI volume driver.
+type CSIPersistentVolumeSource struct {
+ // Driver is the name of the driver to use for this volume.
+ // Required.
+ Driver string
+
+ // VolumeHandle is the unique volume name returned by the CSI volume
+ // plugin's CreateVolume to refer to the volume on all subsequent calls.
+ // Required.
+ VolumeHandle string
+
+ // Optional: The value to pass to ControllerPublishVolumeRequest.
+ // Defaults to false (read/write).
+ // +optional
+ ReadOnly bool
+
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs".
+ // +optional
+ FSType string
+
+ // Attributes of the volume to publish.
+ // +optional
+ VolumeAttributes map[string]string
+
+ // ControllerPublishSecretRef is a reference to the secret object containing
+ // sensitive information to pass to the CSI driver to complete the CSI
+ // ControllerPublishVolume and ControllerUnpublishVolume calls.
+ // This field is optional, and may be empty if no secret is required. If the
+ // secret object contains more than one secret, all secrets are passed.
+ // +optional
+ ControllerPublishSecretRef *SecretReference
+
+ // NodeStageSecretRef is a reference to the secret object containing sensitive
+ // information to pass to the CSI driver to complete the CSI NodeStageVolume
+ // and NodeUnstageVolume calls.
+ // This field is optional, and may be empty if no secret is required. If the
+ // secret object contains more than one secret, all secrets are passed.
+ // +optional
+ NodeStageSecretRef *SecretReference
+
+ // NodePublishSecretRef is a reference to the secret object containing
+ // sensitive information to pass to the CSI driver to complete the CSI
+ // NodePublishVolume and NodeUnpublishVolume calls.
+ // This field is optional, and may be empty if no secret is required. If the
+ // secret object contains more than one secret, all secrets are passed.
+ // +optional
+ NodePublishSecretRef *SecretReference
+}
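A hedged sketch of the CSI source above; the driver name, volume handle, attribute, and secret are all invented placeholders chosen for illustration:

package example

import core "k8s.io/kubernetes/pkg/apis/core"

// csiSource hands the volume to an out-of-tree CSI driver.
var csiSource = core.PersistentVolumeSource{
	CSI: &core.CSIPersistentVolumeSource{
		Driver:           "csi.example.com",
		VolumeHandle:     "vol-0123456789",
		FSType:           "ext4",
		VolumeAttributes: map[string]string{"tier": "standard"},
		NodePublishSecretRef: &core.SecretReference{
			Name:      "csi-publish-secret",
			Namespace: "kube-system",
		},
	},
}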
+
+// ContainerPort represents a network port in a single container
+type ContainerPort struct {
+ // Optional: If specified, this must be an IANA_SVC_NAME. Each named port
+ // in a pod must have a unique name.
+ // +optional
+ Name string
+ // Optional: If specified, this must be a valid port number, 0 < x < 65536.
+ // If HostNetwork is specified, this must match ContainerPort.
+ // +optional
+ HostPort int32
+ // Required: This must be a valid port number, 0 < x < 65536.
+ ContainerPort int32
+ // Required: Supports "TCP", "UDP" and "SCTP"
+ // +optional
+ Protocol Protocol
+ // Optional: What host IP to bind the external port to.
+ // +optional
+ HostIP string
+}
+
+// VolumeMount describes a mounting of a Volume within a container.
+type VolumeMount struct {
+ // Required: This must match the Name of a Volume [above].
+ Name string
+ // Optional: Defaults to false (read-write).
+ // +optional
+ ReadOnly bool
+ // Required. If the path is not an absolute path (e.g. some/path) it
+ // will be prepended with the appropriate root prefix for the operating
+ // system. On Linux this is '/', on Windows this is 'C:\'.
+ MountPath string
+ // Path within the volume from which the container's volume should be mounted.
+ // Defaults to "" (volume's root).
+ // +optional
+ SubPath string
+ // mountPropagation determines how mounts are propagated from the host
+ // to the container and the other way around.
+ // When not set, MountPropagationNone is used.
+ // This field is beta in 1.10.
+ // +optional
+ MountPropagation *MountPropagationMode
+}
+
+// MountPropagationMode describes mount propagation.
+type MountPropagationMode string
+
+const (
+ // MountPropagationNone means that the volume in a container will
+ // not receive new mounts from the host or other containers, and filesystems
+ // mounted inside the container won't be propagated to the host or other
+ // containers.
+ // Note that this mode corresponds to "private" in Linux terminology.
+ MountPropagationNone MountPropagationMode = "None"
+ // MountPropagationHostToContainer means that the volume in a container will
+ // receive new mounts from the host or other containers, but filesystems
+ // mounted inside the container won't be propagated to the host or other
+ // containers.
+ // Note that this mode is recursively applied to all mounts in the volume
+ // ("rslave" in Linux terminology).
+ MountPropagationHostToContainer MountPropagationMode = "HostToContainer"
+ // MountPropagationBidirectional means that the volume in a container will
+ // receive new mounts from the host or other containers, and its own mounts
+ // will be propagated from the container to the host or other containers.
+ // Note that this mode is recursively applied to all mounts in the volume
+ // ("rshared" in Linux terminology).
+ MountPropagationBidirectional MountPropagationMode = "Bidirectional"
+)
+
+// VolumeDevice describes a mapping of a raw block device within a container.
+type VolumeDevice struct {
+ // name must match the name of a persistentVolumeClaim in the pod
+ Name string
+ // devicePath is the path inside of the container that the device will be mapped to.
+ DevicePath string
+}
+
+// EnvVar represents an environment variable present in a Container.
+type EnvVar struct {
+ // Required: This must be a C_IDENTIFIER.
+ Name string
+ // Optional: no more than one of the following may be specified.
+ // Optional: Defaults to ""; variable references $(VAR_NAME) are expanded
+ // using the previously defined environment variables in the container and
+ // any service environment variables. If a variable cannot be resolved,
+ // the reference in the input string will be unchanged. The $(VAR_NAME)
+ // syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped
+ // references will never be expanded, regardless of whether the variable
+ // exists or not.
+ // +optional
+ Value string
+ // Optional: Specifies a source the value of this var should come from.
+ // +optional
+ ValueFrom *EnvVarSource
+}
+
+// EnvVarSource represents a source for the value of an EnvVar.
+// Only one of its fields may be set.
+type EnvVarSource struct {
+ // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
+ // metadata.uid, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
+ // +optional
+ FieldRef *ObjectFieldSelector
+ // Selects a resource of the container: only resources limits and requests
+ // (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ // +optional
+ ResourceFieldRef *ResourceFieldSelector
+ // Selects a key of a ConfigMap.
+ // +optional
+ ConfigMapKeyRef *ConfigMapKeySelector
+ // Selects a key of a secret in the pod's namespace.
+ // +optional
+ SecretKeyRef *SecretKeySelector
+}
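A short sketch of the downward API via environment variables, using the EnvVar and EnvVarSource types above (the variable name is a placeholder; only one member of EnvVarSource may be set):

package example

import core "k8s.io/kubernetes/pkg/apis/core"

// podNameVar resolves at runtime to the pod's own name.
var podNameVar = core.EnvVar{
	Name: "POD_NAME",
	ValueFrom: &core.EnvVarSource{
		FieldRef: &core.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.name"},
	},
}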
+ // +optional
+ SecretKeyRef *SecretKeySelector
+}
+
+// ObjectFieldSelector selects an APIVersioned field of an object.
+type ObjectFieldSelector struct {
+ // Required: Version of the schema the FieldPath is written in terms of.
+ // If no value is specified, it will be defaulted to the APIVersion of the
+ // enclosing object.
+ APIVersion string
+ // Required: Path of the field to select in the specified API version.
+ FieldPath string
+}
+
+// ResourceFieldSelector represents container resources (cpu, memory) and their output format.
+type ResourceFieldSelector struct {
+ // Container name: required for volumes, optional for env vars
+ // +optional
+ ContainerName string
+ // Required: resource to select
+ Resource string
+ // Specifies the output format of the exposed resources, defaults to "1"
+ // +optional
+ Divisor resource.Quantity
+}
+
+// Selects a key from a ConfigMap.
+type ConfigMapKeySelector struct {
+ // The ConfigMap to select from.
+ LocalObjectReference
+ // The key to select.
+ Key string
+ // Specify whether the ConfigMap or its key must be defined
+ // +optional
+ Optional *bool
+}
+
+// SecretKeySelector selects a key of a Secret.
+type SecretKeySelector struct {
+ // The name of the secret in the pod's namespace to select from.
+ LocalObjectReference
+ // The key of the secret to select from. Must be a valid secret key.
+ Key string
+ // Specify whether the Secret or its key must be defined
+ // +optional
+ Optional *bool
+}
+
+// EnvFromSource represents the source of a set of ConfigMaps or Secrets.
+type EnvFromSource struct {
+ // An optional identifier to prepend to each key in the ConfigMap.
+ // +optional
+ Prefix string
+ // The ConfigMap to select from.
+ // +optional
+ ConfigMapRef *ConfigMapEnvSource
+ // The Secret to select from.
+ // +optional
+ SecretRef *SecretEnvSource
+}
+
+// ConfigMapEnvSource selects a ConfigMap to populate the environment
+// variables with.
+//
+// The contents of the target ConfigMap's Data field will represent the
+// key-value pairs as environment variables.
+type ConfigMapEnvSource struct {
+ // The ConfigMap to select from.
+ LocalObjectReference
+ // Specify whether the ConfigMap must be defined
+ // +optional
+ Optional *bool
+}
+
+// SecretEnvSource selects a Secret to populate the environment
+// variables with.
+//
+// The contents of the target Secret's Data field will represent the
+// key-value pairs as environment variables.
+type SecretEnvSource struct {
+ // The Secret to select from.
+ LocalObjectReference
+ // Specify whether the Secret must be defined
+ // +optional
+ Optional *bool
+}
+
+// HTTPHeader describes a custom header to be used in HTTP probes.
+type HTTPHeader struct {
+ // The header field name
+ Name string
+ // The header field value
+ Value string
+}
+
+// HTTPGetAction describes an action based on HTTP Get requests.
+type HTTPGetAction struct {
+ // Optional: Path to access on the HTTP server.
+ // +optional
+ Path string
+ // Required: Name or number of the port to access on the container.
+ // +optional
+ Port intstr.IntOrString
+ // Optional: Host name to connect to, defaults to the pod IP. You
+ // probably want to set "Host" in httpHeaders instead.
+ // +optional
+ Host string
+ // Optional: Scheme to use for connecting to the host, defaults to HTTP.
+ // +optional
+ Scheme URIScheme
+ // Optional: Custom headers to set in the request. HTTP allows repeated headers.
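+ // An illustrative sketch (not part of this API): a GET probe against
+ // /healthz on port 8080 with a custom header; intstr.FromInt comes from
+ // k8s.io/apimachinery/pkg/util/intstr:
+ //
+ //  HTTPGetAction{
+ //      Path:        "/healthz",
+ //      Port:        intstr.FromInt(8080),
+ //      HTTPHeaders: []HTTPHeader{{Name: "X-Probe", Value: "1"}},
+ //  }
+ //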
+ // +optional
+ HTTPHeaders []HTTPHeader
+}
+
+// URIScheme identifies the scheme used for connection to a host for Get actions
+type URIScheme string
+
+const (
+ // URISchemeHTTP means that the scheme used will be http://
+ URISchemeHTTP URIScheme = "HTTP"
+ // URISchemeHTTPS means that the scheme used will be https://
+ URISchemeHTTPS URIScheme = "HTTPS"
+)
+
+// TCPSocketAction describes an action based on opening a socket
+type TCPSocketAction struct {
+ // Required: Port to connect to.
+ // +optional
+ Port intstr.IntOrString
+ // Optional: Host name to connect to, defaults to the pod IP.
+ // +optional
+ Host string
+}
+
+// ExecAction describes a "run in container" action.
+type ExecAction struct {
+ // Command is the command line to execute inside the container, the working directory for the
+ // command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ // not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use
+ // a shell, you need to explicitly call out to that shell.
+ // +optional
+ Command []string
+}
+
+// Probe describes a health check to be performed against a container to determine whether it is
+// alive or ready to receive traffic.
+type Probe struct {
+ // The action taken to determine the health of a container
+ Handler
+ // Length of time before health checking is activated. In seconds.
+ // +optional
+ InitialDelaySeconds int32
+ // Length of time before health checking times out. In seconds.
+ // +optional
+ TimeoutSeconds int32
+ // How often (in seconds) to perform the probe.
+ // +optional
+ PeriodSeconds int32
+ // Minimum consecutive successes for the probe to be considered successful after having failed.
+ // Must be 1 for liveness.
+ // +optional
+ SuccessThreshold int32
+ // Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ // +optional
+ FailureThreshold int32
+}
+
+// PullPolicy describes a policy for if/when to pull a container image
+type PullPolicy string
+
+const (
+ // PullAlways means that kubelet always attempts to pull the latest image. Container will fail if the pull fails.
+ PullAlways PullPolicy = "Always"
+ // PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present.
+ PullNever PullPolicy = "Never"
+ // PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails.
+ PullIfNotPresent PullPolicy = "IfNotPresent"
+)
+
+// TerminationMessagePolicy describes how termination messages are retrieved from a container.
+type TerminationMessagePolicy string
+
+const (
+ // TerminationMessageReadFile is the default behavior and will set the container status message to
+ // the contents of the container's terminationMessagePath when the container exits.
+ TerminationMessageReadFile TerminationMessagePolicy = "File"
+ // TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs
+ // for the container status message when the container exits with an error and the
+ // terminationMessagePath has no contents.
+ TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError"
+)
+
+// Capability represents a POSIX capability type.
+type Capability string
+
+// Capabilities represent POSIX capabilities that can be added to or removed from a running container.
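+//
+// An illustrative sketch (not part of this API): granting NET_ADMIN while
+// dropping every other capability:
+//
+//  Capabilities{Add: []Capability{"NET_ADMIN"}, Drop: []Capability{"ALL"}}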
+type Capabilities struct {
+ // Added capabilities
+ // +optional
+ Add []Capability
+ // Removed capabilities
+ // +optional
+ Drop []Capability
+}
+
+// ResourceRequirements describes the compute resource requirements.
+type ResourceRequirements struct {
+ // Limits describes the maximum amount of compute resources allowed.
+ // +optional
+ Limits ResourceList
+ // Requests describes the minimum amount of compute resources required.
+ // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ // otherwise to an implementation-defined value.
+ // +optional
+ Requests ResourceList
+}
+
+// Container represents a single container that is expected to be run on the host.
+type Container struct {
+ // Required: This must be a DNS_LABEL. Each container in a pod must
+ // have a unique name.
+ Name string
+ // Required.
+ Image string
+ // Optional: The docker image's entrypoint is used if this is not provided; cannot be updated.
+ // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
+ // can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded,
+ // regardless of whether the variable exists or not.
+ // +optional
+ Command []string
+ // Optional: The docker image's cmd is used if this is not provided; cannot be updated.
+ // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
+ // can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded,
+ // regardless of whether the variable exists or not.
+ // +optional
+ Args []string
+ // Optional: Defaults to Docker's default.
+ // +optional
+ WorkingDir string
+ // +optional
+ Ports []ContainerPort
+ // List of sources to populate environment variables in the container.
+ // The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+ // will be reported as an event when the container is starting. When a key exists in multiple
+ // sources, the value associated with the last source will take precedence.
+ // Values defined by an Env with a duplicate key will take precedence.
+ // Cannot be updated.
+ // +optional
+ EnvFrom []EnvFromSource
+ // +optional
+ Env []EnvVar
+ // Compute resource requirements.
+ // +optional
+ Resources ResourceRequirements
+ // +optional
+ VolumeMounts []VolumeMount
+ // volumeDevices is the list of block devices to be used by the container.
+ // This is a beta feature.
+ // +optional
+ VolumeDevices []VolumeDevice
+ // +optional
+ LivenessProbe *Probe
+ // +optional
+ ReadinessProbe *Probe
+ // +optional
+ Lifecycle *Lifecycle
+ // Required.
+ // +optional
+ TerminationMessagePath string
+ // +optional
+ TerminationMessagePolicy TerminationMessagePolicy
+ // Required: Policy for pulling images for this container
+ ImagePullPolicy PullPolicy
+ // Optional: SecurityContext defines the security options the container should be run with.
+ // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+ // +optional
+ SecurityContext *SecurityContext
+
+ // Variables for interactive containers; these have very specialized use-cases (e.g. debugging)
+ // and shouldn't be used for general-purpose containers.
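+ // An illustrative sketch (not part of this API): a debug container that
+ // keeps stdin open and allocates a TTY would set, inside its Container
+ // literal,
+ //
+ //  Stdin: true, StdinOnce: true, TTY: true
+ //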
+ // +optional
+ Stdin bool
+ // +optional
+ StdinOnce bool
+ // +optional
+ TTY bool
+}
+
+// Handler defines a specific action that should be taken.
+// TODO: pass structured data to these actions, and document that data here.
+type Handler struct {
+ // One and only one of the following should be specified.
+ // Exec specifies the action to take.
+ // +optional
+ Exec *ExecAction
+ // HTTPGet specifies the http request to perform.
+ // +optional
+ HTTPGet *HTTPGetAction
+ // TCPSocket specifies an action involving a TCP port.
+ // TODO: implement a realistic TCP lifecycle hook
+ // +optional
+ TCPSocket *TCPSocketAction
+}
+
+// Lifecycle describes actions that the management system should take in response to container lifecycle
+// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
+// until the action is complete, unless the container process fails, in which case the handler is aborted.
+type Lifecycle struct {
+ // PostStart is called immediately after a container is created. If the handler fails, the container
+ // is terminated and restarted.
+ // +optional
+ PostStart *Handler
+ // PreStop is called immediately before a container is terminated. The reason for termination is
+ // passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated.
+ // +optional
+ PreStop *Handler
+}
+
+// The below types are used by kube_client and api_server.
+
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition;
+// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+ ConditionTrue ConditionStatus = "True"
+ ConditionFalse ConditionStatus = "False"
+ ConditionUnknown ConditionStatus = "Unknown"
+)
+
+type ContainerStateWaiting struct {
+ // A brief CamelCase string indicating details about why the container is in the waiting state.
+ // +optional
+ Reason string
+ // A human-readable message indicating details about why the container is in the waiting state.
+ // +optional
+ Message string
+}
+
+type ContainerStateRunning struct {
+ // +optional
+ StartedAt metav1.Time
+}
+
+type ContainerStateTerminated struct {
+ ExitCode int32
+ // +optional
+ Signal int32
+ // +optional
+ Reason string
+ // +optional
+ Message string
+ // +optional
+ StartedAt metav1.Time
+ // +optional
+ FinishedAt metav1.Time
+ // +optional
+ ContainerID string
+}
+
+// ContainerState holds a possible state of a container.
+// Only one of its members may be specified.
+// If none of them is specified, the default one is ContainerStateWaiting.
+type ContainerState struct {
+ // +optional
+ Waiting *ContainerStateWaiting
+ // +optional
+ Running *ContainerStateRunning
+ // +optional
+ Terminated *ContainerStateTerminated
+}
+
+type ContainerStatus struct {
+ // Each container in a pod must have a unique name.
+ Name string
+ // +optional
+ State ContainerState
+ // +optional
+ LastTerminationState ContainerState
+ // Ready specifies whether the container has passed its readiness check.
+ Ready bool
+ // Note that this is calculated from dead containers. But those containers are subject to
+ // garbage collection. This value will get capped at 5 by GC.
+ RestartCount int32
+ Image string
+ ImageID string
+ // +optional
+ ContainerID string
+}
+
+// PodPhase is a label for the condition of a pod at the current time.
+type PodPhase string
+
+// These are the valid statuses of pods.
+const (
+ // PodPending means the pod has been accepted by the system, but one or more of the containers
+ // has not been started. This includes time before being bound to a node, as well as time spent
+ // pulling images onto the host.
+ PodPending PodPhase = "Pending"
+ // PodRunning means the pod has been bound to a node and all of the containers have been started.
+ // At least one container is still running or is in the process of being restarted.
+ PodRunning PodPhase = "Running"
+ // PodSucceeded means that all containers in the pod have voluntarily terminated
+ // with a container exit code of 0, and the system is not going to restart any of these containers.
+ PodSucceeded PodPhase = "Succeeded"
+ // PodFailed means that all containers in the pod have terminated, and at least one container has
+ // terminated in a failure (exited with a non-zero exit code or was stopped by the system).
+ PodFailed PodPhase = "Failed"
+ // PodUnknown means that for some reason the state of the pod could not be obtained, typically due
+ // to an error in communicating with the host of the pod.
+ PodUnknown PodPhase = "Unknown"
+)
+
+type PodConditionType string
+
+// These are valid conditions of a pod.
+const (
+ // PodScheduled represents the status of the scheduling process for this pod.
+ PodScheduled PodConditionType = "PodScheduled"
+ // PodReady means the pod is able to service requests and should be added to the
+ // load balancing pools of all matching services.
+ PodReady PodConditionType = "Ready"
+ // PodInitialized means that all init containers in the pod have started successfully.
+ PodInitialized PodConditionType = "Initialized"
+ // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
+ // can't schedule the pod right now, for example due to insufficient resources in the cluster.
+ PodReasonUnschedulable = "Unschedulable"
+ // ContainersReady indicates whether all containers in the pod are ready.
+ ContainersReady PodConditionType = "ContainersReady"
+)
+
+type PodCondition struct {
+ Type PodConditionType
+ Status ConditionStatus
+ // +optional
+ LastProbeTime metav1.Time
+ // +optional
+ LastTransitionTime metav1.Time
+ // +optional
+ Reason string
+ // +optional
+ Message string
+}
+
+// RestartPolicy describes how the container should be restarted.
+// Only one of the following restart policies may be specified.
+// If none of the following policies is specified, the default one
+// is RestartPolicyAlways.
+type RestartPolicy string
+
+const (
+ RestartPolicyAlways RestartPolicy = "Always"
+ RestartPolicyOnFailure RestartPolicy = "OnFailure"
+ RestartPolicyNever RestartPolicy = "Never"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodList is a list of Pods.
+type PodList struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ListMeta
+
+ Items []Pod
+}
+
+// DNSPolicy defines how a pod's DNS will be configured.
+type DNSPolicy string
+
+const (
+ // DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS
+ // first, if it is available, then fall back on the default
+ // (as determined by kubelet) DNS settings.
+ DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet"
+
+ // DNSClusterFirst indicates that the pod should use cluster DNS
+ // first unless hostNetwork is true, if it is available, then
+ // fall back on the default (as determined by kubelet) DNS settings.
+ DNSClusterFirst DNSPolicy = "ClusterFirst"
+
+ // DNSDefault indicates that the pod should use the default (as
+ // determined by kubelet) DNS settings.
+ DNSDefault DNSPolicy = "Default"
+
+ // DNSNone indicates that the pod should use empty DNS settings. DNS
+ // parameters such as nameservers and search paths should be defined via
+ // DNSConfig.
+ DNSNone DNSPolicy = "None"
+)
+
+// A node selector represents the union of the results of one or more label queries
+// over a set of nodes; that is, it represents the OR of the selectors represented
+// by the node selector terms.
+type NodeSelector struct {
+ // Required. A list of node selector terms. The terms are ORed.
+ NodeSelectorTerms []NodeSelectorTerm
+}
+
+// A null or empty node selector term matches no objects. Its requirements are ANDed.
+// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+type NodeSelectorTerm struct {
+ // A list of node selector requirements by node's labels.
+ MatchExpressions []NodeSelectorRequirement
+ // A list of node selector requirements by node's fields.
+ MatchFields []NodeSelectorRequirement
+}
+
+// A node selector requirement is a selector that contains values, a key, and an operator
+// that relates the key and values.
+type NodeSelectorRequirement struct {
+ // The label key that the selector applies to.
+ Key string
+ // Represents a key's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ Operator NodeSelectorOperator
+ // An array of string values. If the operator is In or NotIn,
+ // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ // the values array must be empty. If the operator is Gt or Lt, the values
+ // array must have a single element, which will be interpreted as an integer.
+ // This array is replaced during a strategic merge patch.
+ // +optional
+ Values []string
+}
+
+// A node selector operator is the set of operators that can be used in
+// a node selector requirement.
+type NodeSelectorOperator string
+
+const (
+ NodeSelectorOpIn NodeSelectorOperator = "In"
+ NodeSelectorOpNotIn NodeSelectorOperator = "NotIn"
+ NodeSelectorOpExists NodeSelectorOperator = "Exists"
+ NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist"
+ NodeSelectorOpGt NodeSelectorOperator = "Gt"
+ NodeSelectorOpLt NodeSelectorOperator = "Lt"
+)
+
+// A topology selector term represents the result of label queries.
+// A null or empty topology selector term matches no objects.
+// Its requirements are ANDed.
+// It provides a subset of the functionality of NodeSelectorTerm.
+// This is an alpha feature and may change in the future.
+type TopologySelectorTerm struct {
+ // A list of topology selector requirements by labels.
+ // +optional
+ MatchLabelExpressions []TopologySelectorLabelRequirement
+}
+
+// A topology selector requirement is a selector that matches a given label.
+// This is an alpha feature and may change in the future.
+type TopologySelectorLabelRequirement struct {
+ // The label key that the selector applies to.
+ Key string
+ // An array of string values. One value must match the label to be selected.
+ // Each entry in Values is ORed.
+ Values []string
+}
+
+// Affinity is a group of affinity scheduling rules.
+type Affinity struct {
+ // Describes node affinity scheduling rules for the pod.
+ // +optional
+ NodeAffinity *NodeAffinity
+ // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+ // +optional
+ PodAffinity *PodAffinity
+ // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+ // +optional
+ PodAntiAffinity *PodAntiAffinity
+}
+
+// Pod affinity is a group of inter-pod affinity scheduling rules.
+type PodAffinity struct {
+ // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+ // If the affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to a pod label update), the
+ // system will try to eventually evict the pod from its node.
+ // When there are multiple elements, the lists of nodes corresponding to each
+ // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ // +optional
+ // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm
+
+ // If the affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to a pod label update), the
+ // system may or may not try to eventually evict the pod from its node.
+ // When there are multiple elements, the lists of nodes corresponding to each
+ // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ // +optional
+ RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm
+ // The scheduler will prefer to schedule pods to nodes that satisfy
+ // the affinity expressions specified by this field, but it may choose
+ // a node that violates one or more of the expressions. The node that is
+ // most preferred is the one with the greatest sum of weights, i.e.
+ // for each node that meets all of the scheduling requirements (resource
+ // request, requiredDuringScheduling affinity expressions, etc.),
+ // compute a sum by iterating through the elements of this field and adding
+ // "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+ // node(s) with the highest sum are the most preferred.
+ // +optional
+ PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm
+}
+
+// Pod anti-affinity is a group of inter-pod anti-affinity scheduling rules.
+type PodAntiAffinity struct {
+ // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+ // If the anti-affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the anti-affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to a pod label update), the
+ // system will try to eventually evict the pod from its node.
+ // When there are multiple elements, the lists of nodes corresponding to each
+ // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ // +optional
+ // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm
+
+ // If the anti-affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the anti-affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to a pod label update), the
+ // system may or may not try to eventually evict the pod from its node.
+ // When there are multiple elements, the lists of nodes corresponding to each
+ // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ // +optional
+ RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm
+ // The scheduler will prefer to schedule pods to nodes that satisfy
+ // the anti-affinity expressions specified by this field, but it may choose
+ // a node that violates one or more of the expressions. The node that is
+ // most preferred is the one with the greatest sum of weights, i.e.
+ // for each node that meets all of the scheduling requirements (resource
+ // request, requiredDuringScheduling anti-affinity expressions, etc.),
+ // compute a sum by iterating through the elements of this field and adding
+ // "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+ // node(s) with the highest sum are the most preferred.
+ // +optional
+ PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm
+}
+
+// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).
+type WeightedPodAffinityTerm struct {
+ // weight associated with matching the corresponding podAffinityTerm,
+ // in the range 1-100.
+ Weight int32
+ // Required. A pod affinity term, associated with the corresponding weight.
+ PodAffinityTerm PodAffinityTerm
+}
+
+// Defines a set of pods (namely those matching the labelSelector
+// relative to the given namespace(s)) that this pod should be
+// co-located (affinity) or not co-located (anti-affinity) with,
+// where co-located is defined as running on a node whose value of
+// the label with key <topologyKey> matches that of any node on which
+// a pod of the set of pods is running.
+type PodAffinityTerm struct {
+ // A label query over a set of resources, in this case pods.
+ // +optional
+ LabelSelector *metav1.LabelSelector
+ // namespaces specifies which namespaces the labelSelector applies to (matches against);
+ // null or empty list means "this pod's namespace"
+ // +optional
+ Namespaces []string
+ // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ // the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ // whose value of the label with key topologyKey matches that of any node on which any of the
+ // selected pods is running.
+ // Empty topologyKey is not allowed.
+ TopologyKey string
+}
+
+// Node affinity is a group of node affinity scheduling rules.
+type NodeAffinity struct {
+ // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+ // If the affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to an update), the system
+ // will try to eventually evict the pod from its node.
+ // +optional
+ // RequiredDuringSchedulingRequiredDuringExecution *NodeSelector
+
+ // If the affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to an update), the system
+ // may or may not try to eventually evict the pod from its node.
+ // +optional
+ RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector
+ // The scheduler will prefer to schedule pods to nodes that satisfy
+ // the affinity expressions specified by this field, but it may choose
+ // a node that violates one or more of the expressions. The node that is
+ // most preferred is the one with the greatest sum of weights, i.e.
+ // for each node that meets all of the scheduling requirements (resource
+ // request, requiredDuringScheduling affinity expressions, etc.),
+ // compute a sum by iterating through the elements of this field and adding
+ // "weight" to the sum if the node matches the corresponding matchExpressions; the
+ // node(s) with the highest sum are the most preferred.
+ // +optional
+ PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm
+}
+
+// An empty preferred scheduling term matches all objects with implicit weight 0
+// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+type PreferredSchedulingTerm struct {
+ // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ Weight int32
+ // A node selector term, associated with the corresponding weight.
+ Preference NodeSelectorTerm
+}
+
+// The node this Taint is attached to has the "effect" on
+// any pod that does not tolerate the Taint.
+type Taint struct {
+ // Required. The taint key to be applied to a node.
+ Key string
+ // Required. The taint value corresponding to the taint key.
+ // +optional
+ Value string
+ // Required. The effect of the taint on pods
+ // that do not tolerate the taint.
+ // Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
+ Effect TaintEffect
+ // TimeAdded represents the time at which the taint was added.
+ // It is only written for NoExecute taints.
+ // +optional
+ TimeAdded *metav1.Time
+}
+
+type TaintEffect string
+
+const (
+ // Do not allow new pods to schedule onto the node unless they tolerate the taint,
+ // but allow all pods submitted to Kubelet without going through the scheduler
+ // to start, and allow all already-running pods to continue running.
+ // Enforced by the scheduler.
+ TaintEffectNoSchedule TaintEffect = "NoSchedule"
+ // Like TaintEffectNoSchedule, but the scheduler tries not to schedule
+ // new pods onto the node, rather than prohibiting new pods from scheduling
+ // onto the node entirely. Enforced by the scheduler.
+ TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule"
+ // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+ // Like TaintEffectNoSchedule, but additionally do not allow pods submitted to
+ // Kubelet without going through the scheduler to start.
+ // Enforced by Kubelet and the scheduler.
+ // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit"
+
+ // Evict any already-running pods that do not tolerate the taint.
+ // Currently enforced by NodeController.
+ TaintEffectNoExecute TaintEffect = "NoExecute"
+)
+
+// The pod this Toleration is attached to tolerates any taint that matches
+// the triple <key,value,effect> using the matching operator <operator>.
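+//
+// An illustrative sketch (not part of this API): a toleration that matches
+// every NoExecute taint and tolerates it for five minutes before eviction:
+//
+//  seconds := int64(300)
+//  t := Toleration{Operator: TolerationOpExists, Effect: TaintEffectNoExecute, TolerationSeconds: &seconds}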
+type Toleration struct {
+ // Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ // If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ // +optional
+ Key string
+ // Operator represents a key's relationship to the value.
+ // Valid operators are Exists and Equal. Defaults to Equal.
+ // Exists is equivalent to wildcard for value, so that a pod can
+ // tolerate all taints of a particular category.
+ // +optional
+ Operator TolerationOperator
+ // Value is the taint value the toleration matches to.
+ // If the operator is Exists, the value should be empty, otherwise just a regular string.
+ // +optional
+ Value string
+ // Effect indicates the taint effect to match. Empty means match all taint effects.
+ // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ // +optional
+ Effect TaintEffect
+ // TolerationSeconds represents the period of time the toleration (which must be
+ // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ // it is not set, which means tolerate the taint forever (do not evict). Zero and
+ // negative values will be treated as 0 (evict immediately) by the system.
+ // +optional
+ TolerationSeconds *int64
+}
+
+// A toleration operator is the set of operators that can be used in a toleration.
+type TolerationOperator string
+
+const (
+ TolerationOpExists TolerationOperator = "Exists"
+ TolerationOpEqual TolerationOperator = "Equal"
+)
+
+// PodReadinessGate contains the reference to a pod condition
+type PodReadinessGate struct {
+ // ConditionType refers to a condition in the pod's condition list with matching type.
+ ConditionType PodConditionType
+}
+
+// PodSpec is a description of a pod.
+type PodSpec struct {
+ Volumes []Volume
+ // List of initialization containers belonging to the pod.
+ InitContainers []Container
+ // List of containers belonging to the pod.
+ Containers []Container
+ // +optional
+ RestartPolicy RestartPolicy
+ // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in a delete request.
+ // Value must be a non-negative integer. The value zero indicates delete immediately.
+ // If this value is nil, the default grace period will be used instead.
+ // The grace period is the duration in seconds between when the processes running in the pod are sent
+ // a termination signal and when the processes are forcibly halted with a kill signal.
+ // Set this value longer than the expected cleanup time for your process.
+ // +optional
+ TerminationGracePeriodSeconds *int64
+ // Optional duration in seconds relative to the StartTime that the pod may be active on a node
+ // before the system actively tries to terminate the pod; value must be a positive integer.
+ // +optional
+ ActiveDeadlineSeconds *int64
+ // Set DNS policy for the pod.
+ // Defaults to "ClusterFirst".
+ // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
+ // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
+ // To have DNS options set along with hostNetwork, you have to specify DNS policy
+ // explicitly to 'ClusterFirstWithHostNet'; see the sketch below.
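+ //
+ // An illustrative sketch (not part of this API): a hostNetwork pod that
+ // still wants cluster DNS would set, inside its PodSpec literal,
+ //
+ //  SecurityContext: &PodSecurityContext{HostNetwork: true},
+ //  DNSPolicy:       DNSClusterFirstWithHostNet,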
+ // +optional
+ DNSPolicy DNSPolicy
+ // NodeSelector is a selector which must be true for the pod to fit on a node.
+ // +optional
+ NodeSelector map[string]string
+
+ // ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+ // The pod will be allowed to use secrets referenced by the ServiceAccount.
+ ServiceAccountName string
+ // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
+ // +optional
+ AutomountServiceAccountToken *bool
+
+ // NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
+ // the scheduler simply schedules this pod onto that node, assuming that it fits resource
+ // requirements.
+ // +optional
+ NodeName string
+ // SecurityContext holds pod-level security attributes and common container settings.
+ // Optional: Defaults to empty. See type description for default values of each field.
+ // +optional
+ SecurityContext *PodSecurityContext
+ // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+ // If specified, these secrets will be passed to individual puller implementations for them to use. For example,
+ // in the case of docker, only DockerConfig type secrets are honored.
+ // +optional
+ ImagePullSecrets []LocalObjectReference
+ // Specifies the hostname of the Pod.
+ // If not specified, the pod's hostname will be set to a system-defined value.
+ // +optional
+ Hostname string
+ // If specified, the fully qualified Pod hostname will be
+ // "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+ // If not specified, the pod will not have a domain name at all.
+ // +optional
+ Subdomain string
+ // If specified, the pod's scheduling constraints.
+ // +optional
+ Affinity *Affinity
+ // If specified, the pod will be dispatched by the specified scheduler.
+ // If not specified, the pod will be dispatched by the default scheduler.
+ // +optional
+ SchedulerName string
+ // If specified, the pod's tolerations.
+ // +optional
+ Tolerations []Toleration
+ // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
+ // file if specified. This is only valid for non-hostNetwork pods.
+ // +optional
+ HostAliases []HostAlias
+ // If specified, indicates the pod's priority. "system-node-critical" and
+ // "system-cluster-critical" are two special keywords which indicate the
+ // highest priorities, with the former being the highest priority. Any other
+ // name must be defined by creating a PriorityClass object with that name.
+ // If not specified, the pod priority will be the default, or zero if there is no
+ // default.
+ // +optional
+ PriorityClassName string
+ // The priority value. Various system components use this field to find the
+ // priority of the pod. When the Priority Admission Controller is enabled, it
+ // prevents users from setting this field. The admission controller populates
+ // this field from PriorityClassName.
+ // The higher the value, the higher the priority.
+ // +optional
+ Priority *int32
+ // Specifies the DNS parameters of a pod.
+ // Parameters specified here will be merged to the generated DNS
+ // configuration based on DNSPolicy.
+ // +optional
+ DNSConfig *PodDNSConfig
+ // If specified, all readiness gates will be evaluated for pod readiness.
+ // A pod is ready when all its containers are ready AND
+ // all conditions specified in the readiness gates have status equal to "True".
+ // More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md
+ // +optional
+ ReadinessGates []PodReadinessGate
+ // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used
+ // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.
+ // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an
+ // empty definition that uses the default runtime handler.
+ // More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md
+ // This is an alpha feature and may change in the future.
+ // +optional
+ RuntimeClassName *string
+ // EnableServiceLinks indicates whether information about services should be injected into the pod's
+ // environment variables, matching the syntax of Docker links.
+ // If not specified, the default is true.
+ // +optional
+ EnableServiceLinks *bool
+}
+
+// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
+// pod's hosts file.
+type HostAlias struct {
+ IP string
+ Hostnames []string
+}
+
+// Sysctl defines a kernel parameter to be set.
+type Sysctl struct {
+ // Name of a property to set
+ Name string
+ // Value of a property to set
+ Value string
+}
+
+// PodSecurityContext holds pod-level security attributes and common container settings.
+// Some fields are also present in container.securityContext. Field values of
+// container.securityContext take precedence over field values of PodSecurityContext.
+type PodSecurityContext struct {
+ // Use the host's network namespace. If this option is set, the ports that will be
+ // used must be specified.
+ // Optional: Defaults to false.
+ // +k8s:conversion-gen=false
+ // +optional
+ HostNetwork bool
+ // Use the host's PID namespace.
+ // Optional: Defaults to false.
+ // +k8s:conversion-gen=false
+ // +optional
+ HostPID bool
+ // Use the host's IPC namespace.
+ // Optional: Defaults to false.
+ // +k8s:conversion-gen=false
+ // +optional
+ HostIPC bool
+ // Share a single process namespace between all of the containers in a pod.
+ // When this is set, containers will be able to view and signal processes from other containers
+ // in the same pod, and the first process in each container will not be assigned PID 1.
+ // HostPID and ShareProcessNamespace cannot both be set.
+ // Optional: Defaults to false.
+ // This field is beta-level and may be disabled with the PodShareProcessNamespace feature.
+ // +k8s:conversion-gen=false
+ // +optional
+ ShareProcessNamespace *bool
+ // The SELinux context to be applied to all containers.
+ // If unspecified, the container runtime will allocate a random SELinux context for each
+ // container. May also be set in SecurityContext. If set in
+ // both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+ // takes precedence for that container.
+ // +optional
+ SELinuxOptions *SELinuxOptions
+ // The UID to run the entrypoint of the container process.
+ // Defaults to user specified in image metadata if unspecified.
+ // May also be set in SecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence
+ // for that container.
+ // +optional
+ RunAsUser *int64
+ // The GID to run the entrypoint of the container process.
+ // Uses runtime default if unset.
+ // May also be set in SecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence
+ // for that container.
+ // +optional
+ RunAsGroup *int64
+ // Indicates that the container must run as a non-root user.
+ // If true, the Kubelet will validate the image at runtime to ensure that it
+ // does not run as UID 0 (root) and fail to start the container if it does.
+ // If unset or false, no such validation will be performed.
+ // May also be set in SecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence
+ // for that container.
+ // +optional
+ RunAsNonRoot *bool
+ // A list of groups applied to the first process run in each container, in addition
+ // to the container's primary GID. If unspecified, no groups will be added to
+ // any container.
+ // +optional
+ SupplementalGroups []int64
+ // A special supplemental group that applies to all containers in a pod.
+ // Some volume types allow the Kubelet to change the ownership of that volume
+ // to be owned by the pod:
+ //
+ // 1. The owning GID will be the FSGroup
+ // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
+ // 3. The permission bits are OR'd with rw-rw----
+ //
+ // If unset, the Kubelet will not modify the ownership and permissions of any volume.
+ // +optional
+ FSGroup *int64
+ // Sysctls holds a list of namespaced sysctls used for the pod. Pods with unsupported
+ // sysctls (by the container runtime) might fail to launch.
+ // +optional
+ Sysctls []Sysctl
+}
+
+// PodQOSClass defines the supported QoS classes of Pods.
+type PodQOSClass string
+
+const (
+ // PodQOSGuaranteed is the Guaranteed QoS class.
+ PodQOSGuaranteed PodQOSClass = "Guaranteed"
+ // PodQOSBurstable is the Burstable QoS class.
+ PodQOSBurstable PodQOSClass = "Burstable"
+ // PodQOSBestEffort is the BestEffort QoS class.
+ PodQOSBestEffort PodQOSClass = "BestEffort"
+)
+
+// PodDNSConfig defines the DNS parameters of a pod in addition to
+// those generated from DNSPolicy.
+type PodDNSConfig struct {
+ // A list of DNS name server IP addresses.
+ // This will be appended to the base nameservers generated from DNSPolicy.
+ // Duplicated nameservers will be removed.
+ // +optional
+ Nameservers []string
+ // A list of DNS search domains for host-name lookup.
+ // This will be appended to the base search paths generated from DNSPolicy.
+ // Duplicated search paths will be removed.
+ // +optional
+ Searches []string
+ // A list of DNS resolver options.
+ // This will be merged with the base options generated from DNSPolicy.
+ // Duplicated entries will be removed. Resolution options given in Options
+ // will override those that appear in the base DNSPolicy.
+ // +optional
+ Options []PodDNSConfigOption
+}
+
+// PodDNSConfigOption defines DNS resolver options of a pod.
+type PodDNSConfigOption struct {
+ // Required.
+ Name string
+ // +optional
+ Value *string
+}
+
+// PodStatus represents information about the status of a pod. Status may trail the actual
+// state of a system.
+type PodStatus struct {
+ // +optional
+ Phase PodPhase
+ // +optional
+ Conditions []PodCondition
+ // A human-readable message indicating details about why the pod is in this state.
+ // +optional
+ Message string
+ // A brief CamelCase message indicating details about why the pod is in this state, e.g. 'Evicted'.
+ // +optional
+ Reason string
+ // nominatedNodeName is set when this pod preempts other pods on the node, but it cannot be
+ // scheduled right away as preemption victims receive their graceful termination periods.
+ // This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide
+ // to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to
+ // give the resources on this node to a higher priority pod that is created after preemption.
+ // +optional
+ NominatedNodeName string
+
+ // +optional
+ HostIP string
+ // +optional
+ PodIP string
+
+ // Date and time at which the object was acknowledged by the Kubelet.
+ // This is before the Kubelet pulled the container image(s) for the pod.
+ // +optional
+ StartTime *metav1.Time
+ // +optional
+ QOSClass PodQOSClass
+
+ // The list has one entry per init container in the manifest. The most recent successful
+ // init container will have ready = true; the most recently started container will have
+ // startTime set.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status
+ InitContainerStatuses []ContainerStatus
+ // The list has one entry per container in the manifest. Each entry is
+ // currently the output of `docker inspect`. This output format is *not*
+ // final and should not be relied upon.
+ // TODO: Make real decisions about what our info should look like. Re-enable fuzz test
+ // when we have done this.
+ // +optional
+ ContainerStatuses []ContainerStatus
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encoded and decoded.
+type PodStatusResult struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ObjectMeta
+ // Status represents the current information about a pod. This data may not be up
+ // to date.
+ // +optional
+ Status PodStatus
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Pod is a collection of containers, used as either input (create, update) or as output (list, get).
+type Pod struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ObjectMeta
+
+ // Spec defines the behavior of a pod.
+ // +optional
+ Spec PodSpec
+
+ // Status represents the current information about a pod. This data may not be up
+ // to date.
+ // +optional
+ Status PodStatus
+}
+
+// PodTemplateSpec describes the data a pod should have when created from a template.
+type PodTemplateSpec struct {
+ // Metadata of the pods created from this template.
+ // +optional
+ metav1.ObjectMeta
+
+ // Spec defines the behavior of a pod.
+ // +optional
+ Spec PodSpec
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodTemplate describes a template for creating copies of a predefined pod.
+type PodTemplate struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ObjectMeta
+
+ // Template defines the pods that will be created from this pod template.
+ // +optional
+ Template PodTemplateSpec
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodTemplateList is a list of PodTemplates.
+type PodTemplateList struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ListMeta
+
+ Items []PodTemplate
+}
+
+// ReplicationControllerSpec is the specification of a replication controller.
+// As the internal representation of a replication controller, it may have either
+// a TemplateRef or a Template set.
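+//
+// An illustrative sketch (not part of this API): a spec asking for three
+// replicas of pods labeled app=web; the label value is hypothetical:
+//
+//  ReplicationControllerSpec{
+//      Replicas: 3,
+//      Selector: map[string]string{"app": "web"},
+//      Template: &PodTemplateSpec{ /* pod definition elided */ },
+//  }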
+type ReplicationControllerSpec struct {
+ // Replicas is the number of desired replicas.
+ Replicas int32
+
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its containers crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ // +optional
+ MinReadySeconds int32
+
+ // Selector is a label query over pods that should match the Replicas count.
+ Selector map[string]string
+
+ // TemplateRef is a reference to an object that describes the pod that will be created if
+ // insufficient replicas are detected. This reference is ignored if a Template is set.
+ // Must be set before converting to a versioned API object
+ // +optional
+ //TemplateRef *ObjectReference
+
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected. Internally, this takes precedence over a
+ // TemplateRef.
+ // +optional
+ Template *PodTemplateSpec
+}
+
+// ReplicationControllerStatus represents the current status of a replication
+// controller.
+type ReplicationControllerStatus struct {
+ // Replicas is the number of actual replicas.
+ Replicas int32
+
+ // The number of pods that have labels matching the labels of the pod template of the replication controller.
+ // +optional
+ FullyLabeledReplicas int32
+
+ // The number of ready replicas for this replication controller.
+ // +optional
+ ReadyReplicas int32
+
+ // The number of available replicas (ready for at least minReadySeconds) for this replication controller.
+ // +optional
+ AvailableReplicas int32
+
+ // ObservedGeneration is the most recent generation observed by the controller.
+ // +optional
+ ObservedGeneration int64
+
+ // Represents the latest available observations of a replication controller's current state.
+ // +optional
+ Conditions []ReplicationControllerCondition
+}
+
+type ReplicationControllerConditionType string
+
+// These are valid conditions of a replication controller.
+const (
+ // ReplicationControllerReplicaFailure is added in a replication controller when one of its pods
+ // fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors,
+ // etc., or is deleted due to the kubelet being down or finalizers failing.
+ ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure"
+)
+
+// ReplicationControllerCondition describes the state of a replication controller at a certain point.
+type ReplicationControllerCondition struct {
+ // Type of replication controller condition.
+ Type ReplicationControllerConditionType
+ // Status of the condition, one of True, False, Unknown.
+ Status ConditionStatus
+ // The last time the condition transitioned from one status to another.
+ // +optional
+ LastTransitionTime metav1.Time
+ // The reason for the condition's last transition.
+ // +optional
+ Reason string
+ // A human-readable message indicating details about the transition.
+ // +optional
+ Message string
+}
+
+// +genclient
+// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale
+// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ReplicationController represents the configuration of a replication controller.
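+//
+// An illustrative sketch (not part of this API): a controller has converged
+// when observed status matches the desired spec, e.g.
+//
+//  if rc.Status.Replicas != rc.Spec.Replicas {
+//      // not yet converged; pods are still being created or deleted
+//  }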
+type ReplicationController struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ObjectMeta
+
+ // Spec defines the desired behavior of this replication controller.
+ // +optional
+ Spec ReplicationControllerSpec
+
+ // Status is the current status of this replication controller. This data may be
+ // out of date by some window of time.
+ // +optional
+ Status ReplicationControllerStatus
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ReplicationControllerList is a collection of replication controllers.
+type ReplicationControllerList struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ListMeta
+
+ Items []ReplicationController
+}
+
+const (
+ // ClusterIPNone - do not assign a cluster IP;
+ // no proxying is required and no environment variables should be created for pods.
+ ClusterIPNone = "None"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceList holds a list of services.
+type ServiceList struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ListMeta
+
+ Items []Service
+}
+
+// ServiceAffinity is the session affinity type string.
+type ServiceAffinity string
+
+const (
+ // ServiceAffinityClientIP is Client IP based session affinity.
+ ServiceAffinityClientIP ServiceAffinity = "ClientIP"
+
+ // ServiceAffinityNone - no session affinity.
+ ServiceAffinityNone ServiceAffinity = "None"
+)
+
+const (
+ // DefaultClientIPServiceAffinitySeconds is the default timeout seconds
+ // of Client IP based session affinity - 3 hours.
+ DefaultClientIPServiceAffinitySeconds int32 = 10800
+ // MaxClientIPServiceAffinitySeconds is the max timeout seconds
+ // of Client IP based session affinity - 1 day.
+ MaxClientIPServiceAffinitySeconds int32 = 86400
+)
+
+// SessionAffinityConfig represents the configurations of session affinity.
+type SessionAffinityConfig struct {
+ // clientIP contains the configurations of Client IP based session affinity.
+ // +optional
+ ClientIP *ClientIPConfig
+}
+
+// ClientIPConfig represents the configurations of Client IP based session affinity.
+type ClientIPConfig struct {
+ // timeoutSeconds specifies the seconds of ClientIP type session sticky time.
+ // The value must be > 0 && <= 86400 (1 day) if ServiceAffinity == "ClientIP".
+ // Default value is 10800 (3 hours).
+ // +optional
+ TimeoutSeconds *int32
+}
+
+// ServiceType describes the ingress methods for a service.
+type ServiceType string
+
+const (
+ // ServiceTypeClusterIP means a service will only be accessible inside the
+ // cluster, via the ClusterIP.
+ ServiceTypeClusterIP ServiceType = "ClusterIP"
+
+ // ServiceTypeNodePort means a service will be exposed on one port of
+ // every node, in addition to 'ClusterIP' type.
+ ServiceTypeNodePort ServiceType = "NodePort"
+
+ // ServiceTypeLoadBalancer means a service will be exposed via an
+ // external load balancer (if the cloud provider supports it), in addition
+ // to 'NodePort' type.
+ ServiceTypeLoadBalancer ServiceType = "LoadBalancer"
+
+ // ServiceTypeExternalName means a service consists of only a reference to
+ // an external name that kubedns or equivalent will return as a CNAME
+ // record, with no exposing or proxying of any pods involved.
+ ServiceTypeExternalName ServiceType = "ExternalName"
+)
+
+// ServiceExternalTrafficPolicyType is the external traffic policy type string.
+type ServiceExternalTrafficPolicyType string
+
+const (
+ // ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior.
+ ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local"
+ // ServiceExternalTrafficPolicyTypeCluster specifies cluster-wide (legacy) behavior.
+ ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster"
+)
+
+// ServiceStatus represents the current status of a service.
+type ServiceStatus struct {
+ // LoadBalancer contains the current status of the load-balancer,
+ // if one is present.
+ // +optional
+ LoadBalancer LoadBalancerStatus
+}
+
+// LoadBalancerStatus represents the status of a load-balancer.
+type LoadBalancerStatus struct {
+ // Ingress is a list containing ingress points for the load-balancer;
+ // traffic intended for the service should be sent to these ingress points.
+ // +optional
+ Ingress []LoadBalancerIngress
+}
+
+// LoadBalancerIngress represents the status of a load-balancer ingress point:
+// traffic intended for the service should be sent to an ingress point.
+type LoadBalancerIngress struct {
+ // IP is set for load-balancer ingress points that are IP based
+ // (typically GCE or OpenStack load-balancers)
+ // +optional
+ IP string
+
+ // Hostname is set for load-balancer ingress points that are DNS based
+ // (typically AWS load-balancers)
+ // +optional
+ Hostname string
+}
+
+// ServiceSpec describes the attributes that a user creates on a service.
+type ServiceSpec struct {
+ // Type determines how the Service is exposed. Defaults to ClusterIP. Valid
+ // options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
+ // "ExternalName" maps to the specified externalName.
+ // "ClusterIP" allocates a cluster-internal IP address for load-balancing to
+ // endpoints. Endpoints are determined by the selector or, if that is not
+ // specified, by manual construction of an Endpoints object. If clusterIP is
+ // "None", no virtual IP is allocated and the endpoints are published as a
+ // set of endpoints rather than a stable IP.
+ // "NodePort" builds on ClusterIP and allocates a port on every node which
+ // routes to the clusterIP.
+ // "LoadBalancer" builds on NodePort and creates an
+ // external load-balancer (if supported in the current cloud) which routes
+ // to the clusterIP.
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ // +optional
+ Type ServiceType
+
+ // Required: The list of ports that are exposed by this service.
+ Ports []ServicePort
+
+ // Route service traffic to pods with label keys and values matching this
+ // selector. If empty or not present, the service is assumed to have an
+ // external process managing its endpoints, which Kubernetes will not
+ // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
+ // Ignored if type is ExternalName.
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ Selector map[string]string
+
+ // ClusterIP is the IP address of the service and is usually assigned
+ // randomly by the master. If an address is specified manually and is not in
+ // use by others, it will be allocated to the service; otherwise, creation
+ // of the service will fail. This field cannot be changed through updates.
+ // Valid values are "None", empty string (""), or a valid IP address. "None"
+ // can be specified for headless services when proxying is not required.
+ // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if
+ // type is ExternalName.
+	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+	// +optional
+	ClusterIP string
+
+	// ExternalName is the external reference that kubedns or equivalent will
+	// return as a CNAME record for this service. No proxying will be involved.
+	// Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123)
+	// and requires Type to be ExternalName.
+	ExternalName string
+
+	// ExternalIPs are used by external load balancers, or can be set by
+	// users to handle external traffic that arrives at a node.
+	// +optional
+	ExternalIPs []string
+
+	// Only applies to Service Type: LoadBalancer.
+	// The load balancer will be created with the IP specified in this field.
+	// This feature depends on whether the underlying cloud-provider supports specifying
+	// the loadBalancerIP when a load balancer is created.
+	// This field will be ignored if the cloud-provider does not support the feature.
+	// +optional
+	LoadBalancerIP string
+
+	// Optional: Supports "ClientIP" and "None". Used to maintain session affinity.
+	// +optional
+	SessionAffinity ServiceAffinity
+
+	// sessionAffinityConfig contains the configurations of session affinity.
+	// +optional
+	SessionAffinityConfig *SessionAffinityConfig
+
+	// Optional: If specified and supported by the platform, traffic through the
+	// cloud-provider load-balancer will be restricted to the specified client IPs.
+	// This field will be ignored if the cloud-provider does not support the feature.
+	// +optional
+	LoadBalancerSourceRanges []string
+
+	// externalTrafficPolicy denotes if this Service desires to route external
+	// traffic to node-local or cluster-wide endpoints. "Local" preserves the
+	// client source IP and avoids a second hop for LoadBalancer and NodePort
+	// type services, but risks potentially imbalanced traffic spreading.
+	// "Cluster" obscures the client source IP and may cause a second hop to
+	// another node, but should have good overall load-spreading.
+	// +optional
+	ExternalTrafficPolicy ServiceExternalTrafficPolicyType
+
+	// healthCheckNodePort specifies the healthcheck nodePort for the service.
+	// If not specified, HealthCheckNodePort is created by the service api
+	// backend with the allocated nodePort. Will use user-specified nodePort value
+	// if specified by the client. Only takes effect when Type is set to LoadBalancer
+	// and ExternalTrafficPolicy is set to Local.
+	// +optional
+	HealthCheckNodePort int32
+
+	// publishNotReadyAddresses, when set to true, indicates that DNS implementations
+	// must publish the notReadyAddresses of subsets for the Endpoints associated with
+	// the Service. The default value is false.
+	// The primary use case for setting this field is to use a StatefulSet's Headless Service
+	// to propagate SRV records for its Pods without respect to their readiness, for the
+	// purpose of peer discovery.
+	// +optional
+	PublishNotReadyAddresses bool
+}
+
+// ServicePort describes a port exposed by a Service.
+type ServicePort struct {
+	// Optional if only one ServicePort is defined on this service: The
+	// name of this port within the service. This must be a DNS_LABEL.
+	// All ports within a ServiceSpec must have unique names. This maps to
+	// the 'Name' field in EndpointPort objects.
+	Name string
+
+	// The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
+	Protocol Protocol
+
+	// The port that will be exposed on the service.
+	Port int32
+
+	// Optional: The target port on pods selected by this service.
If this + // is a string, it will be looked up as a named port in the target + // Pod's container ports. If this is not specified, the value + // of the 'port' field is used (an identity map). + // This field is ignored for services with clusterIP=None, and should be + // omitted or set equal to the 'port' field. + TargetPort intstr.IntOrString + + // The port on each node on which this service is exposed. + // Default is to auto-allocate a port if the ServiceType of this Service requires one. + NodePort int32 +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Service is a named abstraction of software service (for example, mysql) consisting of local port +// (for example 3306) that the proxy listens on, and the selector that determines which pods +// will answer requests sent through the proxy. +type Service struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of a service. + // +optional + Spec ServiceSpec + + // Status represents the current status of a service. + // +optional + Status ServiceStatus +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceAccount binds together: +// * a name, understood by users, and perhaps by peripheral systems, for an identity +// * a principal that can be authenticated and authorized +// * a set of secrets +type ServiceAccount struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount + Secrets []ObjectReference + + // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images + // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets + // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. + // +optional + ImagePullSecrets []LocalObjectReference + + // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. + // Can be overridden at the pod level. + // +optional + AutomountServiceAccountToken *bool +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceAccountList is a list of ServiceAccount objects +type ServiceAccountList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ServiceAccount +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Endpoints is a collection of endpoints that implement the actual service. Example: +// Name: "mysvc", +// Subsets: [ +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// }, +// { +// Addresses: [{"ip": "10.10.3.3"}], +// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] +// }, +// ] +type Endpoints struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // The set of all endpoints is the union of all subsets. + Subsets []EndpointSubset +} + +// EndpointSubset is a group of addresses with a common set of ports. The +// expanded set of endpoints is the Cartesian product of Addresses x Ports. 
+// For example, given:
+//	{
+//		Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+//		Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+//	}
+// The resulting set of endpoints can be viewed as:
+//	a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
+//	b: [ 10.10.1.1:309, 10.10.2.2:309 ]
+type EndpointSubset struct {
+	Addresses []EndpointAddress
+	NotReadyAddresses []EndpointAddress
+	Ports []EndpointPort
+}
+
+// EndpointAddress is a tuple that describes a single IP address.
+type EndpointAddress struct {
+	// The IP of this endpoint.
+	// IPv6 is also accepted but not fully supported on all platforms. Also, certain
+	// kubernetes components, like kube-proxy, are not IPv6 ready.
+	// TODO: This should allow hostname or IP, see #4447.
+	IP string
+	// Optional: Hostname of this endpoint.
+	// Meant to be used by DNS servers etc.
+	// +optional
+	Hostname string
+	// Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.
+	// +optional
+	NodeName *string
+	// Optional: The kubernetes object related to the entry point.
+	TargetRef *ObjectReference
+}
+
+// EndpointPort is a tuple that describes a single port.
+type EndpointPort struct {
+	// The name of this port (corresponds to ServicePort.Name). Optional
+	// if only one port is defined. Must be a DNS_LABEL.
+	Name string
+
+	// The port number.
+	Port int32
+
+	// The IP protocol for this port.
+	Protocol Protocol
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EndpointsList is a list of endpoints.
+type EndpointsList struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ListMeta
+
+	Items []Endpoints
+}
+
+// NodeSpec describes the attributes that a node is created with.
+type NodeSpec struct {
+	// PodCIDR represents the pod IP range assigned to the node.
+	// Note: assigning IP ranges to nodes might need to be revisited when we support migratable IPs.
+	// +optional
+	PodCIDR string
+
+	// ID of the node assigned by the cloud provider.
+	// Note: format is "<ProviderName>://<ProviderSpecificNodeID>"
+	// +optional
+	ProviderID string
+
+	// Unschedulable controls node schedulability of new pods. By default node is schedulable.
+	// +optional
+	Unschedulable bool
+
+	// If specified, the node's taints.
+	// +optional
+	Taints []Taint
+
+	// If specified, the source to get node configuration from.
+	// The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field.
+	// +optional
+	ConfigSource *NodeConfigSource
+
+	// Deprecated. Not all kubelets will set this field. Remove field after 1.13.
+	// see: https://issues.k8s.io/61966
+	// +optional
+	DoNotUse_ExternalID string
+}
+
+// NodeConfigSource specifies a source of node configuration. Exactly one subfield must be non-nil.
+type NodeConfigSource struct {
+	ConfigMap *ConfigMapNodeConfigSource
+}
+
+// ConfigMapNodeConfigSource identifies a ConfigMap as a source of node configuration.
+type ConfigMapNodeConfigSource struct {
+	// Namespace is the metadata.namespace of the referenced ConfigMap.
+	// This field is required in all cases.
+	Namespace string
+
+	// Name is the metadata.name of the referenced ConfigMap.
+	// This field is required in all cases.
+	Name string
+
+	// UID is the metadata.UID of the referenced ConfigMap.
+	// This field is forbidden in Node.Spec, and required in Node.Status.
+	// +optional
+	UID types.UID
+
+	// ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap.
+	// This field is forbidden in Node.Spec, and required in Node.Status.
+ // +optional + ResourceVersion string + + // KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure + // This field is required in all cases. + KubeletConfigKey string +} + +// DaemonEndpoint contains information about a single Daemon endpoint. +type DaemonEndpoint struct { + /* + The port tag was not properly in quotes in earlier releases, so it must be + uppercase for backwards compatibility (since it was falling back to var name of + 'Port'). + */ + + // Port number of the given endpoint. + Port int32 +} + +// NodeDaemonEndpoints lists ports opened by daemons running on the Node. +type NodeDaemonEndpoints struct { + // Endpoint on which Kubelet is listening. + // +optional + KubeletEndpoint DaemonEndpoint +} + +// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. +type NodeSystemInfo struct { + // MachineID reported by the node. For unique machine identification + // in the cluster this field is preferred. Learn more from man(5) + // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html + MachineID string + // SystemUUID reported by the node. For unique machine identification + // MachineID is preferred. This field is specific to Red Hat hosts + // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html + SystemUUID string + // Boot ID reported by the node. + BootID string + // Kernel Version reported by the node. + KernelVersion string + // OS Image reported by the node. + OSImage string + // ContainerRuntime Version reported by the node. + ContainerRuntimeVersion string + // Kubelet Version reported by the node. + KubeletVersion string + // KubeProxy Version reported by the node. + KubeProxyVersion string + // The Operating System reported by the node + OperatingSystem string + // The Architecture reported by the node + Architecture string +} + +// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource. +type NodeConfigStatus struct { + // Assigned reports the checkpointed config the node will try to use. + // When Node.Spec.ConfigSource is updated, the node checkpoints the associated + // config payload to local disk, along with a record indicating intended + // config. The node refers to this record to choose its config checkpoint, and + // reports this record in Assigned. Assigned only updates in the status after + // the record has been checkpointed to disk. When the Kubelet is restarted, + // it tries to make the Assigned config the Active config by loading and + // validating the checkpointed payload identified by Assigned. + // +optional + Assigned *NodeConfigSource + // Active reports the checkpointed config the node is actively using. + // Active will represent either the current version of the Assigned config, + // or the current LastKnownGood config, depending on whether attempting to use the + // Assigned config results in an error. + // +optional + Active *NodeConfigSource + // LastKnownGood reports the checkpointed config the node will fall back to + // when it encounters an error attempting to use the Assigned config. + // The Assigned config becomes the LastKnownGood config when the node determines + // that the Assigned config is stable and correct. + // This is currently implemented as a 10-minute soak period starting when the local + // record of Assigned config is updated. If the Assigned config is Active at the end + // of this period, it becomes the LastKnownGood. 
Note that if Spec.ConfigSource is + // reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, + // because the local default config is always assumed good. + // You should not make assumptions about the node's method of determining config stability + // and correctness, as this may change or become configurable in the future. + // +optional + LastKnownGood *NodeConfigSource + // Error describes any problems reconciling the Spec.ConfigSource to the Active config. + // Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned + // record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting + // to load or validate the Assigned config, etc. + // Errors may occur at different points while syncing config. Earlier errors (e.g. download or + // checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across + // Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in + // a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error + // by fixing the config assigned in Spec.ConfigSource. + // You can find additional information for debugging by searching the error message in the Kubelet log. + // Error is a human-readable description of the error state; machines can check whether or not Error + // is empty, but should not rely on the stability of the Error text across Kubelet versions. + // +optional + Error string +} + +// NodeStatus is information about the current status of a node. +type NodeStatus struct { + // Capacity represents the total resources of a node. + // +optional + Capacity ResourceList + // Allocatable represents the resources of a node that are available for scheduling. + // +optional + Allocatable ResourceList + // NodePhase is the current lifecycle phase of the node. + // +optional + Phase NodePhase + // Conditions is an array of current node conditions. + // +optional + Conditions []NodeCondition + // Queried from cloud provider, if available. + // +optional + Addresses []NodeAddress + // Endpoints of daemons running on the Node. + // +optional + DaemonEndpoints NodeDaemonEndpoints + // Set of ids/uuids to uniquely identify the node. + // +optional + NodeInfo NodeSystemInfo + // List of container images on this node + // +optional + Images []ContainerImage + // List of attachable volumes in use (mounted) by the node. + // +optional + VolumesInUse []UniqueVolumeName + // List of volumes that are attached to the node. + // +optional + VolumesAttached []AttachedVolume + // Status of the config assigned to the node via the dynamic Kubelet config feature. + // +optional + Config *NodeConfigStatus +} + +type UniqueVolumeName string + +// AttachedVolume describes a volume attached to a node +type AttachedVolume struct { + // Name of the attached volume + Name UniqueVolumeName + + // DevicePath represents the device path where the volume should be available + DevicePath string +} + +// AvoidPods describes pods that should avoid this node. This is the value for a +// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and +// will eventually become a field of NodeStatus. +type AvoidPods struct { + // Bounded-sized list of signatures of pods that should avoid this node, sorted + // in timestamp order from oldest to newest. Size of the slice is unspecified. 
+ // +optional + PreferAvoidPods []PreferAvoidPodsEntry +} + +// Describes a class of pods that should avoid this node. +type PreferAvoidPodsEntry struct { + // The class of pods. + PodSignature PodSignature + // Time at which this entry was added to the list. + // +optional + EvictionTime metav1.Time + // (brief) reason why this entry was added to the list. + // +optional + Reason string + // Human readable message indicating why this entry was added to the list. + // +optional + Message string +} + +// Describes the class of pods that should avoid this node. +// Exactly one field should be set. +type PodSignature struct { + // Reference to controller whose pods should avoid this node. + // +optional + PodController *metav1.OwnerReference +} + +// Describe a container image +type ContainerImage struct { + // Names by which this image is known. + Names []string + // The size of the image in bytes. + // +optional + SizeBytes int64 +} + +type NodePhase string + +// These are the valid phases of node. +const ( + // NodePending means the node has been created/added by the system, but not configured. + NodePending NodePhase = "Pending" + // NodeRunning means the node has been configured and has Kubernetes components running. + NodeRunning NodePhase = "Running" + // NodeTerminated means the node has been removed from the cluster. + NodeTerminated NodePhase = "Terminated" +) + +type NodeConditionType string + +// These are valid conditions of node. Currently, we don't have enough information to decide +// node condition. In the future, we will add more. The proposed set of conditions are: +// NodeReady, NodeReachable +const ( + // NodeReady means kubelet is healthy and ready to accept pods. + NodeReady NodeConditionType = "Ready" + // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk + // space on the node. + NodeOutOfDisk NodeConditionType = "OutOfDisk" + // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. + NodeMemoryPressure NodeConditionType = "MemoryPressure" + // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. + NodeDiskPressure NodeConditionType = "DiskPressure" + // NodeNetworkUnavailable means that network for the node is not correctly configured. + NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" +) + +type NodeCondition struct { + Type NodeConditionType + Status ConditionStatus + // +optional + LastHeartbeatTime metav1.Time + // +optional + LastTransitionTime metav1.Time + // +optional + Reason string + // +optional + Message string +} + +type NodeAddressType string + +const ( + NodeHostName NodeAddressType = "Hostname" + NodeExternalIP NodeAddressType = "ExternalIP" + NodeInternalIP NodeAddressType = "InternalIP" + NodeExternalDNS NodeAddressType = "ExternalDNS" + NodeInternalDNS NodeAddressType = "InternalDNS" +) + +type NodeAddress struct { + Type NodeAddressType + Address string +} + +// NodeResources is an object for conveying resource information about a node. +// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. +type NodeResources struct { + // Capacity represents the available resources of a node + // +optional + Capacity ResourceList +} + +// ResourceName is the name identifying various resources in a ResourceList. +type ResourceName string + +// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters, +// with the -, _, and . 
characters allowed anywhere, except the first or last character.
+// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than
+// camel case, separating compound words.
+// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name.
+const (
+	// CPU, in cores. (500m = .5 cores)
+	ResourceCPU ResourceName = "cpu"
+	// Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	ResourceMemory ResourceName = "memory"
+	// Volume size, in bytes (e.g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024)
+	ResourceStorage ResourceName = "storage"
+	// Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	// The resource name for ResourceEphemeralStorage is alpha and it can change across releases.
+	ResourceEphemeralStorage ResourceName = "ephemeral-storage"
+)
+
+const (
+	// Default namespace prefix.
+	ResourceDefaultNamespacePrefix = "kubernetes.io/"
+	// Name prefix for huge page resources (alpha).
+	ResourceHugePagesPrefix = "hugepages-"
+	// Name prefix for attachable-volumes resource limits.
+	ResourceAttachableVolumesPrefix = "attachable-volumes-"
+)
+
+// ResourceList is a set of (resource name, quantity) pairs.
+type ResourceList map[ResourceName]resource.Quantity
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Node is a worker node in Kubernetes.
+// The name of the node according to etcd is in ObjectMeta.Name.
+type Node struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ObjectMeta
+
+	// Spec defines the behavior of a node.
+	// +optional
+	Spec NodeSpec
+
+	// Status describes the current status of a Node.
+	// +optional
+	Status NodeStatus
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NodeList is a list of nodes.
+type NodeList struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ListMeta
+
+	Items []Node
+}
+
+// NamespaceSpec describes the attributes on a Namespace.
+type NamespaceSpec struct {
+	// Finalizers is an opaque list of values that must be empty to permanently remove the object from storage.
+	Finalizers []FinalizerName
+}
+
+// FinalizerName is the name identifying a finalizer during namespace lifecycle.
+type FinalizerName string
+
+// These are internal finalizer values to Kubernetes; they must be qualified names unless defined here or
+// in metav1.
+const (
+	FinalizerKubernetes FinalizerName = "kubernetes"
+)
+
+// NamespaceStatus is information about the current status of a Namespace.
+type NamespaceStatus struct {
+	// Phase is the current lifecycle phase of the namespace.
+	// +optional
+	Phase NamespacePhase
+}
+
+type NamespacePhase string
+
+// These are the valid phases of a namespace.
+const (
+	// NamespaceActive means the namespace is available for use in the system.
+	NamespaceActive NamespacePhase = "Active"
+	// NamespaceTerminating means the namespace is undergoing graceful termination.
+	NamespaceTerminating NamespacePhase = "Terminating"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// A namespace provides a scope for Names.
+// Use of multiple namespaces is optional.
+type Namespace struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ObjectMeta
+
+	// Spec defines the behavior of the Namespace.
+ // +optional + Spec NamespaceSpec + + // Status describes the current status of a Namespace + // +optional + Status NamespaceStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NamespaceList is a list of Namespaces. +type NamespaceList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Namespace +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Binding ties one object to another; for example, a pod is bound to a node by a scheduler. +// Deprecated in 1.7, please use the bindings subresource of pods instead. +type Binding struct { + metav1.TypeMeta + // ObjectMeta describes the object that is being bound. + // +optional + metav1.ObjectMeta + + // Target is the object to bind to. + Target ObjectReference +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +type Preconditions struct { + // Specifies the target UID. + // +optional + UID *types.UID +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodLogOptions is the query options for a Pod's logs REST call +type PodLogOptions struct { + metav1.TypeMeta + + // Container for which to return logs + Container string + // If true, follow the logs for the pod + Follow bool + // If true, return previous terminated container logs + Previous bool + // A relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + SinceSeconds *int64 + // An RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + SinceTime *metav1.Time + // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. + Timestamps bool + // If set, the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + TailLines *int64 + // If set, the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + LimitBytes *int64 +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodAttachOptions is the query options to a Pod's remote attach call +// TODO: merge w/ PodExecOptions below for stdin, stdout, etc +type PodAttachOptions struct { + metav1.TypeMeta + + // Stdin if true indicates that stdin is to be redirected for the attach call + // +optional + Stdin bool + + // Stdout if true indicates that stdout is to be redirected for the attach call + // +optional + Stdout bool + + // Stderr if true indicates that stderr is to be redirected for the attach call + // +optional + Stderr bool + + // TTY if true indicates that a tty will be allocated for the attach call + // +optional + TTY bool + + // Container to attach to. 
+ // +optional + Container string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodExecOptions is the query options to a Pod's remote exec call +type PodExecOptions struct { + metav1.TypeMeta + + // Stdin if true indicates that stdin is to be redirected for the exec call + Stdin bool + + // Stdout if true indicates that stdout is to be redirected for the exec call + Stdout bool + + // Stderr if true indicates that stderr is to be redirected for the exec call + Stderr bool + + // TTY if true indicates that a tty will be allocated for the exec call + TTY bool + + // Container in which to execute the command. + Container string + + // Command is the remote command to execute; argv array; not executed within a shell. + Command []string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodPortForwardOptions is the query options to a Pod's port forward call +type PodPortForwardOptions struct { + metav1.TypeMeta + + // The list of ports to forward + // +optional + Ports []int32 +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodProxyOptions is the query options to a Pod's proxy call +type PodProxyOptions struct { + metav1.TypeMeta + + // Path is the URL path to use for the current proxy request + Path string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeProxyOptions is the query options to a Node's proxy call +type NodeProxyOptions struct { + metav1.TypeMeta + + // Path is the URL path to use for the current proxy request + Path string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceProxyOptions is the query options to a Service's proxy call. +type ServiceProxyOptions struct { + metav1.TypeMeta + + // Path is the part of URLs that include service endpoints, suffixes, + // and parameters to use for the current proxy request to service. + // For example, the whole request URL is + // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. + // Path is _search?q=user:kimchy. + Path string +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ObjectReference struct { + // +optional + Kind string + // +optional + Namespace string + // +optional + Name string + // +optional + UID types.UID + // +optional + APIVersion string + // +optional + ResourceVersion string + + // Optional. If referring to a piece of an object instead of an entire object, this string + // should contain information to identify the sub-object. For example, if the object + // reference is to a container within a pod, this would take on a value like: + // "spec.containers{name}" (where "name" refers to the name of the container that triggered + // the event) or if no container name is specified "spec.containers[2]" (container with + // index 2 in this pod). This syntax is chosen only to have some well-defined way of + // referencing a part of an object. + // TODO: this design is not final and this field is subject to change in the future. + // +optional + FieldPath string +} + +// LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. +type LocalObjectReference struct { + //TODO: Add other useful fields. apiVersion, kind, uid? 
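+	// For example, a Pod's imagePullSecrets entry that points at a Secret
+	// named "registry-creds" is simply (illustrative sketch):
+	//
+	//	LocalObjectReference{Name: "registry-creds"}
+	//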
+ Name string +} + +// TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace. +type TypedLocalObjectReference struct { + // APIGroup is the group for the resource being referenced. + // If APIGroup is not specified, the specified Kind must be in the core API group. + // For any other third-party types, APIGroup is required. + // +optional + APIGroup *string + // Kind is the type of resource being referenced + Kind string + // Name is the name of resource being referenced + Name string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type SerializedReference struct { + metav1.TypeMeta + // +optional + Reference ObjectReference +} + +type EventSource struct { + // Component from which the event is generated. + // +optional + Component string + // Node name on which the event is generated. + // +optional + Host string +} + +// Valid values for event types (new types could be added in future) +const ( + // Information only and will not cause any problems + EventTypeNormal string = "Normal" + // These events are to warn that something might go wrong + EventTypeWarning string = "Warning" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Event is a report of an event somewhere in the cluster. +// TODO: Decide whether to store these separately or with the object they apply to. +type Event struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Required. The object that this event is about. Mapped to events.Event.regarding + // +optional + InvolvedObject ObjectReference + + // Optional; this should be a short, machine understandable string that gives the reason + // for this event being generated. For example, if the event is reporting that a container + // can't start, the Reason might be "ImageNotFound". + // TODO: provide exact specification for format. + // +optional + Reason string + + // Optional. A human-readable description of the status of this operation. + // TODO: decide on maximum length. Mapped to events.Event.note + // +optional + Message string + + // Optional. The component reporting this event. Should be a short machine understandable string. + // +optional + Source EventSource + + // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) + // +optional + FirstTimestamp metav1.Time + + // The time at which the most recent occurrence of this event was recorded. + // +optional + LastTimestamp metav1.Time + + // The number of times this event has occurred. + // +optional + Count int32 + + // Type of this event (Normal, Warning), new types could be added in the future. + // +optional + Type string + + // Time when this Event was first observed. + // +optional + EventTime metav1.MicroTime + + // Data about the Event series this event represents or nil if it's a singleton Event. + // +optional + Series *EventSeries + + // What action was taken/failed regarding to the Regarding object. + // +optional + Action string + + // Optional secondary object for more complex actions. + // +optional + Related *ObjectReference + + // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. + // +optional + ReportingController string + + // ID of the controller instance, e.g. `kubelet-xyzf`. 
+ // +optional + ReportingInstance string +} + +type EventSeries struct { + // Number of occurrences in this series up to the last heartbeat time + Count int32 + // Time of the last occurrence observed + LastObservedTime metav1.MicroTime + // State of this Series: Ongoing or Finished + State EventSeriesState +} + +type EventSeriesState string + +const ( + EventSeriesStateOngoing EventSeriesState = "Ongoing" + EventSeriesStateFinished EventSeriesState = "Finished" + EventSeriesStateUnknown EventSeriesState = "Unknown" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EventList is a list of events. +type EventList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Event +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// List holds a list of objects, which may not be known by the server. +type List metainternalversion.List + +// A type of object that is limited +type LimitType string + +const ( + // Limit that applies to all pods in a namespace + LimitTypePod LimitType = "Pod" + // Limit that applies to all containers in a namespace + LimitTypeContainer LimitType = "Container" + // Limit that applies to all persistent volume claims in a namespace + LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim" +) + +// LimitRangeItem defines a min/max usage limit for any resource that matches on kind +type LimitRangeItem struct { + // Type of resource that this limit applies to + // +optional + Type LimitType + // Max usage constraints on this kind by resource name + // +optional + Max ResourceList + // Min usage constraints on this kind by resource name + // +optional + Min ResourceList + // Default resource requirement limit value by resource name. + // +optional + Default ResourceList + // DefaultRequest resource requirement request value by resource name. + // +optional + DefaultRequest ResourceList + // MaxLimitRequestRatio represents the max burst value for the named resource + // +optional + MaxLimitRequestRatio ResourceList +} + +// LimitRangeSpec defines a min/max usage limit for resources that match on kind +type LimitRangeSpec struct { + // Limits is the list of LimitRangeItem objects that are enforced + Limits []LimitRangeItem +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LimitRange sets resource usage limits for each kind of resource in a Namespace +type LimitRange struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the limits enforced + // +optional + Spec LimitRangeSpec +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LimitRangeList is a list of LimitRange items. 
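+// For example, a LimitRange in such a list might default container memory for
+// a namespace via its Spec.Limits (illustrative sketch; resource.MustParse is
+// from k8s.io/apimachinery/pkg/api/resource):
+//
+//	LimitRangeItem{
+//		Type:           LimitTypeContainer,
+//		Default:        ResourceList{ResourceMemory: resource.MustParse("512Mi")},
+//		DefaultRequest: ResourceList{ResourceMemory: resource.MustParse("256Mi")},
+//	}
+//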
+type LimitRangeList struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ListMeta
+
+	// Items is a list of LimitRange objects.
+	Items []LimitRange
+}
+
+// The following identify resource constants for Kubernetes object types.
+const (
+	// Pods, number
+	ResourcePods ResourceName = "pods"
+	// Services, number
+	ResourceServices ResourceName = "services"
+	// ReplicationControllers, number
+	ResourceReplicationControllers ResourceName = "replicationcontrollers"
+	// ResourceQuotas, number
+	ResourceQuotas ResourceName = "resourcequotas"
+	// ResourceSecrets, number
+	ResourceSecrets ResourceName = "secrets"
+	// ResourceConfigMaps, number
+	ResourceConfigMaps ResourceName = "configmaps"
+	// ResourcePersistentVolumeClaims, number
+	ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims"
+	// ResourceServicesNodePorts, number
+	ResourceServicesNodePorts ResourceName = "services.nodeports"
+	// ResourceServicesLoadBalancers, number
+	ResourceServicesLoadBalancers ResourceName = "services.loadbalancers"
+	// CPU request, in cores. (500m = .5 cores)
+	ResourceRequestsCPU ResourceName = "requests.cpu"
+	// Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	ResourceRequestsMemory ResourceName = "requests.memory"
+	// Storage request, in bytes
+	ResourceRequestsStorage ResourceName = "requests.storage"
+	// Local ephemeral storage request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	ResourceRequestsEphemeralStorage ResourceName = "requests.ephemeral-storage"
+	// CPU limit, in cores. (500m = .5 cores)
+	ResourceLimitsCPU ResourceName = "limits.cpu"
+	// Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	ResourceLimitsMemory ResourceName = "limits.memory"
+	// Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
+)
+
+// The following identify resource prefixes for Kubernetes object types.
+const (
+	// HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	// As burst is not supported for HugePages, we would only quota its request, and ignore the limit.
+	ResourceRequestsHugePagesPrefix = "requests.hugepages-"
+	// Default resource requests prefix
+	DefaultResourceRequestsPrefix = "requests."
+)
+
+// A ResourceQuotaScope defines a filter that must match each object tracked by a quota.
+type ResourceQuotaScope string
+
+const (
+	// Match all pod objects where spec.activeDeadlineSeconds >= 0
+	ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating"
+	// Match all pod objects where spec.activeDeadlineSeconds is nil
+	ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating"
+	// Match all pod objects that have best effort quality of service
+	ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort"
+	// Match all pod objects that do not have best effort quality of service
+	ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort"
+	// Match all pod objects that have a priority class specified
+	ResourceQuotaScopePriorityClass ResourceQuotaScope = "PriorityClass"
+)
+
+// ResourceQuotaSpec defines the desired hard limits to enforce for Quota.
+type ResourceQuotaSpec struct {
+	// Hard is the set of desired hard limits for each named resource.
+	// +optional
+	Hard ResourceList
+	// A collection of filters that must match each object tracked by a quota.
+	// If not specified, the quota matches all objects.
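+	// For example, a quota that only counts long-running pods might be
+	// declared as (illustrative sketch; resource.MustParse is from
+	// k8s.io/apimachinery/pkg/api/resource):
+	//
+	//	spec := ResourceQuotaSpec{
+	//		Hard:   ResourceList{ResourcePods: resource.MustParse("10")},
+	//		Scopes: []ResourceQuotaScope{ResourceQuotaScopeNotTerminating},
+	//	}
+	//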
+ // +optional + Scopes []ResourceQuotaScope + // ScopeSelector is also a collection of filters like Scopes that must match each object tracked by a quota + // but expressed using ScopeSelectorOperator in combination with possible values. + // +optional + ScopeSelector *ScopeSelector +} + +// A scope selector represents the AND of the selectors represented +// by the scoped-resource selector terms. +type ScopeSelector struct { + // A list of scope selector requirements by scope of the resources. + // +optional + MatchExpressions []ScopedResourceSelectorRequirement +} + +// A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator +// that relates the scope name and values. +type ScopedResourceSelectorRequirement struct { + // The name of the scope that the selector applies to. + ScopeName ResourceQuotaScope + // Represents a scope's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist. + Operator ScopeSelectorOperator + // An array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. + // This array is replaced during a strategic merge patch. + // +optional + Values []string +} + +// A scope selector operator is the set of operators that can be used in +// a scope selector requirement. +type ScopeSelectorOperator string + +const ( + ScopeSelectorOpIn ScopeSelectorOperator = "In" + ScopeSelectorOpNotIn ScopeSelectorOperator = "NotIn" + ScopeSelectorOpExists ScopeSelectorOperator = "Exists" + ScopeSelectorOpDoesNotExist ScopeSelectorOperator = "DoesNotExist" +) + +// ResourceQuotaStatus defines the enforced hard limits and observed use +type ResourceQuotaStatus struct { + // Hard is the set of enforced hard limits for each named resource + // +optional + Hard ResourceList + // Used is the current observed total usage of the resource in the namespace + // +optional + Used ResourceList +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceQuota sets aggregate quota restrictions enforced per namespace +type ResourceQuota struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the desired quota + // +optional + Spec ResourceQuotaSpec + + // Status defines the actual enforced quota and its current usage + // +optional + Status ResourceQuotaStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceQuotaList is a list of ResourceQuota items +type ResourceQuotaList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is a list of ResourceQuota objects + Items []ResourceQuota +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Secret holds secret data of a certain type. The total bytes of the values in +// the Data field must be less than MaxSecretSize bytes. +type Secret struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Data contains the secret data. Each key must consist of alphanumeric + // characters, '-', '_' or '.'. The serialized form of the secret data is a + // base64 encoded string, representing the arbitrary (possibly non-string) + // data value here. + // +optional + Data map[string][]byte + + // Used to facilitate programmatic handling of secret data. 
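+	// For example, an opaque secret might be constructed as (illustrative
+	// sketch; SecretTypeOpaque is defined below):
+	//
+	//	s := Secret{
+	//		Type: SecretTypeOpaque,
+	//		Data: map[string][]byte{"password": []byte("s3cr3t")},
+	//	}
+	//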
+ // +optional + Type SecretType +} + +const MaxSecretSize = 1 * 1024 * 1024 + +type SecretType string + +const ( + // SecretTypeOpaque is the default; arbitrary user-defined data + SecretTypeOpaque SecretType = "Opaque" + + // SecretTypeServiceAccountToken contains a token that identifies a service account to the API + // + // Required fields: + // - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies + // - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies + // - Secret.Data["token"] - a token that identifies the service account to the API + SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token" + + // ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets + ServiceAccountNameKey = "kubernetes.io/service-account.name" + // ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets + ServiceAccountUIDKey = "kubernetes.io/service-account.uid" + // ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets + ServiceAccountTokenKey = "token" + // ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets + ServiceAccountKubeconfigKey = "kubernetes.kubeconfig" + // ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets + ServiceAccountRootCAKey = "ca.crt" + // ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls + ServiceAccountNamespaceKey = "namespace" + + // SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg + // + // Required fields: + // - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file + SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg" + + // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets + DockerConfigKey = ".dockercfg" + + // SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json + // + // Required fields: + // - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file + SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson" + + // DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets + DockerConfigJsonKey = ".dockerconfigjson" + + // SecretTypeBasicAuth contains data needed for basic authentication. + // + // Required at least one of fields: + // - Secret.Data["username"] - username used for authentication + // - Secret.Data["password"] - password or token needed for authentication + SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth" + + // BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets + BasicAuthUsernameKey = "username" + // BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets + BasicAuthPasswordKey = "password" + + // SecretTypeSSHAuth contains data needed for SSH authentication. 
+	//
+	// Required field:
+	// - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication
+	SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth"
+
+	// SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets
+	SSHAuthPrivateKey = "ssh-privatekey"
+
+	// SecretTypeTLS contains information about a TLS client or server secret. It
+	// is primarily used with TLS termination of the Ingress resource, but may be
+	// used in other types.
+	//
+	// Required fields:
+	// - Secret.Data["tls.key"] - TLS private key.
+	// - Secret.Data["tls.crt"] - TLS certificate.
+	// TODO: Consider supporting different formats, specifying CA/destinationCA.
+	SecretTypeTLS SecretType = "kubernetes.io/tls"
+
+	// TLSCertKey is the key for tls certificates in a TLS secret.
+	TLSCertKey = "tls.crt"
+	// TLSPrivateKeyKey is the key for the private key field in a TLS secret.
+	TLSPrivateKeyKey = "tls.key"
+	// SecretTypeBootstrapToken is used during the automated bootstrap process (first
+	// implemented by kubeadm). It stores tokens that are used to sign well-known
+	// ConfigMaps. They are used for authn.
+	SecretTypeBootstrapToken SecretType = "bootstrap.kubernetes.io/token"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SecretList is a list of Secret objects.
+type SecretList struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ListMeta
+
+	Items []Secret
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ConfigMap holds configuration data for components or applications to consume.
+type ConfigMap struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ObjectMeta
+
+	// Data contains the configuration data.
+	// Each key must consist of alphanumeric characters, '-', '_' or '.'.
+	// Values with non-UTF-8 byte sequences must use the BinaryData field.
+	// The keys stored in Data must not overlap with the keys in
+	// the BinaryData field; this is enforced during validation.
+	// +optional
+	Data map[string]string
+
+	// BinaryData contains the binary data.
+	// Each key must consist of alphanumeric characters, '-', '_' or '.'.
+	// BinaryData can contain byte sequences that are not in the UTF-8 range.
+	// The keys stored in BinaryData must not overlap with the ones in
+	// the Data field; this is enforced during validation.
+	// Using this field requires a 1.10+ apiserver and kubelet.
+	// +optional
+	BinaryData map[string][]byte
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ConfigMapList is a resource containing a list of ConfigMap objects.
+type ConfigMapList struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ListMeta
+
+	// Items is the list of ConfigMaps.
+	Items []ConfigMap
+}
+
+// These constants are for remote command execution and port forwarding and are
+// used by both the client side and server side components.
+//
+// This is probably not the ideal place for them, but it didn't seem worth it
+// to create pkg/exec and pkg/portforward just to contain a single file with
+// constants in it. Suggestions for more appropriate alternatives are
+// definitely welcome!
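+// For example (illustrative sketch, not a documented endpoint), a streaming
+// exec request built from these parameter names might carry a query string
+// like:
+//
+//	...?command=ls&command=-l&input=1&output=1&error=1&tty=0
+//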
+const ( + // Enable stdin for remote command execution + ExecStdinParam = "input" + // Enable stdout for remote command execution + ExecStdoutParam = "output" + // Enable stderr for remote command execution + ExecStderrParam = "error" + // Enable TTY for remote command execution + ExecTTYParam = "tty" + // Command to run for remote command execution + ExecCommandParam = "command" + + // Name of header that specifies stream type + StreamType = "streamType" + // Value for streamType header for stdin stream + StreamTypeStdin = "stdin" + // Value for streamType header for stdout stream + StreamTypeStdout = "stdout" + // Value for streamType header for stderr stream + StreamTypeStderr = "stderr" + // Value for streamType header for data stream + StreamTypeData = "data" + // Value for streamType header for error stream + StreamTypeError = "error" + // Value for streamType header for terminal resize stream + StreamTypeResize = "resize" + + // Name of header that specifies the port being forwarded + PortHeader = "port" + // Name of header that specifies a request ID used to associate the error + // and data streams for a single forwarded connection + PortForwardRequestIDHeader = "requestID" +) + +// Type and constants for component health validation. +type ComponentConditionType string + +// These are the valid conditions for the component. +const ( + ComponentHealthy ComponentConditionType = "Healthy" +) + +type ComponentCondition struct { + Type ComponentConditionType + Status ConditionStatus + // +optional + Message string + // +optional + Error string +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ComponentStatus (and ComponentStatusList) holds the cluster validation info. +type ComponentStatus struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // +optional + Conditions []ComponentCondition +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type ComponentStatusList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ComponentStatus +} + +// SecurityContext holds security configuration that will be applied to a container. +// Some fields are present in both SecurityContext and PodSecurityContext. When both +// are set, the values in SecurityContext take precedence. +type SecurityContext struct { + // The capabilities to add/drop when running containers. + // Defaults to the default set of capabilities granted by the container runtime. + // +optional + Capabilities *Capabilities + // Run container in privileged mode. + // Processes in privileged containers are essentially equivalent to root on the host. + // Defaults to false. + // +optional + Privileged *bool + // The SELinux context to be applied to the container. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + SELinuxOptions *SELinuxOptions + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsUser *int64 + // The GID to run the entrypoint of the container process. + // Uses runtime default if unset. 
+	// May also be set in PodSecurityContext. If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// +optional
+	RunAsGroup *int64
+	// Indicates that the container must run as a non-root user.
+	// If true, the Kubelet will validate the image at runtime to ensure that it
+	// does not run as UID 0 (root) and fail to start the container if it does.
+	// If unset or false, no such validation will be performed.
+	// May also be set in PodSecurityContext. If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// +optional
+	RunAsNonRoot *bool
+	// A read-only root filesystem lets you restrict the locations that an application can write
+	// files to, ensuring that persistent data can only be written to mounts.
+	// +optional
+	ReadOnlyRootFilesystem *bool
+	// AllowPrivilegeEscalation controls whether a process can gain more
+	// privileges than its parent process. This bool directly controls if
+	// the no_new_privs flag will be set on the container process.
+	// +optional
+	AllowPrivilegeEscalation *bool
+	// ProcMount denotes the type of proc mount to use for the containers.
+	// The default is DefaultProcMount, which uses the container runtime defaults for
+	// readonly paths and masked paths.
+	// +optional
+	ProcMount *ProcMountType
+}
+
+type ProcMountType string
+
+const (
+	// DefaultProcMount uses the container runtime defaults for readonly and masked
+	// paths for /proc. Most container runtimes mask certain paths in /proc to avoid
+	// accidental security exposure of special devices or information.
+	DefaultProcMount ProcMountType = "Default"
+
+	// UnmaskedProcMount bypasses the default masking behavior of the container
+	// runtime and ensures that the newly created /proc for the container stays intact
+	// with no modifications.
+	UnmaskedProcMount ProcMountType = "Unmasked"
+)
+
+// SELinuxOptions are the labels to be applied to the container.
+type SELinuxOptions struct {
+	// SELinux user label
+	// +optional
+	User string
+	// SELinux role label
+	// +optional
+	Role string
+	// SELinux type label
+	// +optional
+	Type string
+	// SELinux level label.
+	// +optional
+	Level string
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RangeAllocation is an opaque API object (not exposed to end users) that can be persisted to record
+// the global allocation state of the cluster. The schema of Range and Data is generic, in that Range
+// should be a string representation of the inputs to a range (for instance, for IP allocation it
+// might be a CIDR) and Data is an opaque blob understood by an allocator which is typically a
+// binary range. Consumers should use annotations to record additional information (schema version,
+// data encoding hints). A range allocation should *ALWAYS* be recreatable at any time by observation
+// of the cluster, thus the object is less strongly typed than most.
+type RangeAllocation struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ObjectMeta
+	// A string representing a unique label for a range of resources, such as a CIDR "10.0.0.0/8" or
+	// port range "10000-30000". Range is not strongly schema'd here. The Range is expected to define
+	// a start and end unless there is an implicit end.
+	Range string
+	// A byte array representing the serialized state of a range allocation. Additional clarifiers on
+	// the type or format of data should be represented with annotations.
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RangeAllocation is an opaque API object (not exposed to end users) that can be persisted to record
+// the global allocation state of the cluster. The schema of Range and Data is generic, in that Range
+// should be a string representation of the inputs to a range (for instance, for IP allocation it
+// might be a CIDR) and Data is an opaque blob understood by an allocator which is typically a
+// binary range. Consumers should use annotations to record additional information (schema version,
+// data encoding hints). A range allocation should *ALWAYS* be recreatable at any time by observation
+// of the cluster, thus the object is less strongly typed than most.
+type RangeAllocation struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ObjectMeta
+	// A string representing a unique label for a range of resources, such as a CIDR "10.0.0.0/8" or
+	// port range "10000-30000". Range is not strongly schema'd here. The Range is expected to define
+	// a start and end unless there is an implicit end.
+	Range string
+	// A byte array representing the serialized state of a range allocation. Additional clarifiers on
+	// the type or format of data should be represented with annotations. For IP allocations, this is
+	// represented as a bit array starting at the base IP of the CIDR in Range, with each bit representing
+	// a single allocated address (the fifth bit on CIDR 10.0.0.0/8 is 10.0.0.4).
+	Data []byte
+}
+
+const (
+	// "default-scheduler" is the name of the default scheduler.
+	DefaultSchedulerName = "default-scheduler"
+
+	// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
+	// corresponding to every RequiredDuringScheduling affinity rule.
+	// When the --hard-pod-affinity-weight scheduler flag is not specified,
+	// DefaultHardPodAffinitySymmetricWeight defines the weight of the implicit PreferredDuringScheduling affinity rule.
+	DefaultHardPodAffinitySymmetricWeight int32 = 1
+)
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..a4801c2e31
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go
@@ -0,0 +1,5415 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package core
+
+import (
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	types "k8s.io/apimachinery/pkg/types"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSElasticBlockStoreVolumeSource) DeepCopyInto(out *AWSElasticBlockStoreVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSElasticBlockStoreVolumeSource.
+func (in *AWSElasticBlockStoreVolumeSource) DeepCopy() *AWSElasticBlockStoreVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(AWSElasticBlockStoreVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Affinity) DeepCopyInto(out *Affinity) {
+	*out = *in
+	if in.NodeAffinity != nil {
+		in, out := &in.NodeAffinity, &out.NodeAffinity
+		*out = new(NodeAffinity)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PodAffinity != nil {
+		in, out := &in.PodAffinity, &out.PodAffinity
+		*out = new(PodAffinity)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PodAntiAffinity != nil {
+		in, out := &in.PodAntiAffinity, &out.PodAntiAffinity
+		*out = new(PodAntiAffinity)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Affinity.
+func (in *Affinity) DeepCopy() *Affinity {
+	if in == nil {
+		return nil
+	}
+	out := new(Affinity)
+	in.DeepCopyInto(out)
+	return out
+}
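The bit-array encoding described on RangeAllocation.Data can be decoded mechanically. A sketch that assumes LSB-first bit order within each byte (the allocator's actual serialization may differ; allocatedIPs is a hypothetical helper):

package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

// allocatedIPs lists base+i for every set bit i in data, following the
// "bit array starting at the base IP" description above.
func allocatedIPs(base net.IP, data []byte) []net.IP {
	baseNum := binary.BigEndian.Uint32(base.To4())
	var ips []net.IP
	for i := 0; i < len(data)*8; i++ {
		if data[i/8]&(1<<uint(i%8)) != 0 {
			ip := make(net.IP, 4)
			binary.BigEndian.PutUint32(ip, baseNum+uint32(i))
			ips = append(ips, ip)
		}
	}
	return ips
}

func main() {
	// 0x10 sets only bit 4 (the fifth bit): 10.0.0.0 + 4 = 10.0.0.4
	fmt.Println(allocatedIPs(net.ParseIP("10.0.0.0"), []byte{0x10}))
}

Every generated function in the file that begins here follows one contract: DeepCopyInto re-allocates each reference-typed field so the copy shares no memory with the source, and DeepCopy is a nil-safe allocating wrapper around it. A self-contained sketch of the same pattern on a hypothetical widget type:

package main

import "fmt"

type widget struct {
	Labels map[string]string
}

// DeepCopyInto mirrors the generated pattern: copy scalars with *out = *in,
// then re-allocate every reference-typed field.
func (in *widget) DeepCopyInto(out *widget) {
	*out = *in
	if in.Labels != nil {
		out.Labels = make(map[string]string, len(in.Labels))
		for k, v := range in.Labels {
			out.Labels[k] = v
		}
	}
}

// DeepCopy mirrors the generated wrapper: nil-check, allocate, delegate.
func (in *widget) DeepCopy() *widget {
	if in == nil {
		return nil
	}
	out := new(widget)
	in.DeepCopyInto(out)
	return out
}

func main() {
	a := &widget{Labels: map[string]string{"app": "demo"}}
	b := a.DeepCopy()
	b.Labels["app"] = "changed"
	fmt.Println(a.Labels["app"]) // "demo": the copy aliases nothing
}

+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.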
+func (in *AttachedVolume) DeepCopyInto(out *AttachedVolume) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedVolume. +func (in *AttachedVolume) DeepCopy() *AttachedVolume { + if in == nil { + return nil + } + out := new(AttachedVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AvoidPods) DeepCopyInto(out *AvoidPods) { + *out = *in + if in.PreferAvoidPods != nil { + in, out := &in.PreferAvoidPods, &out.PreferAvoidPods + *out = make([]PreferAvoidPodsEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvoidPods. +func (in *AvoidPods) DeepCopy() *AvoidPods { + if in == nil { + return nil + } + out := new(AvoidPods) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureDiskVolumeSource) DeepCopyInto(out *AzureDiskVolumeSource) { + *out = *in + if in.CachingMode != nil { + in, out := &in.CachingMode, &out.CachingMode + *out = new(AzureDataDiskCachingMode) + **out = **in + } + if in.FSType != nil { + in, out := &in.FSType, &out.FSType + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(AzureDataDiskKind) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDiskVolumeSource. +func (in *AzureDiskVolumeSource) DeepCopy() *AzureDiskVolumeSource { + if in == nil { + return nil + } + out := new(AzureDiskVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureFilePersistentVolumeSource) DeepCopyInto(out *AzureFilePersistentVolumeSource) { + *out = *in + if in.SecretNamespace != nil { + in, out := &in.SecretNamespace, &out.SecretNamespace + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFilePersistentVolumeSource. +func (in *AzureFilePersistentVolumeSource) DeepCopy() *AzureFilePersistentVolumeSource { + if in == nil { + return nil + } + out := new(AzureFilePersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureFileVolumeSource) DeepCopyInto(out *AzureFileVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFileVolumeSource. +func (in *AzureFileVolumeSource) DeepCopy() *AzureFileVolumeSource { + if in == nil { + return nil + } + out := new(AzureFileVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Binding) DeepCopyInto(out *Binding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Target = in.Target + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Binding. +func (in *Binding) DeepCopy() *Binding { + if in == nil { + return nil + } + out := new(Binding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Binding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource) { + *out = *in + if in.VolumeAttributes != nil { + in, out := &in.VolumeAttributes, &out.VolumeAttributes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ControllerPublishSecretRef != nil { + in, out := &in.ControllerPublishSecretRef, &out.ControllerPublishSecretRef + *out = new(SecretReference) + **out = **in + } + if in.NodeStageSecretRef != nil { + in, out := &in.NodeStageSecretRef, &out.NodeStageSecretRef + *out = new(SecretReference) + **out = **in + } + if in.NodePublishSecretRef != nil { + in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef + *out = new(SecretReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIPersistentVolumeSource. +func (in *CSIPersistentVolumeSource) DeepCopy() *CSIPersistentVolumeSource { + if in == nil { + return nil + } + out := new(CSIPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Capabilities) DeepCopyInto(out *Capabilities) { + *out = *in + if in.Add != nil { + in, out := &in.Add, &out.Add + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + if in.Drop != nil { + in, out := &in.Drop, &out.Drop + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Capabilities. +func (in *Capabilities) DeepCopy() *Capabilities { + if in == nil { + return nil + } + out := new(Capabilities) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephFSPersistentVolumeSource) DeepCopyInto(out *CephFSPersistentVolumeSource) { + *out = *in + if in.Monitors != nil { + in, out := &in.Monitors, &out.Monitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSPersistentVolumeSource. +func (in *CephFSPersistentVolumeSource) DeepCopy() *CephFSPersistentVolumeSource { + if in == nil { + return nil + } + out := new(CephFSPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CephFSVolumeSource) DeepCopyInto(out *CephFSVolumeSource) { + *out = *in + if in.Monitors != nil { + in, out := &in.Monitors, &out.Monitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSVolumeSource. +func (in *CephFSVolumeSource) DeepCopy() *CephFSVolumeSource { + if in == nil { + return nil + } + out := new(CephFSVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CinderPersistentVolumeSource) DeepCopyInto(out *CinderPersistentVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderPersistentVolumeSource. +func (in *CinderPersistentVolumeSource) DeepCopy() *CinderPersistentVolumeSource { + if in == nil { + return nil + } + out := new(CinderPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CinderVolumeSource) DeepCopyInto(out *CinderVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderVolumeSource. +func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource { + if in == nil { + return nil + } + out := new(CinderVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) { + *out = *in + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientIPConfig. +func (in *ClientIPConfig) DeepCopy() *ClientIPConfig { + if in == nil { + return nil + } + out := new(ClientIPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentCondition) DeepCopyInto(out *ComponentCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentCondition. +func (in *ComponentCondition) DeepCopy() *ComponentCondition { + if in == nil { + return nil + } + out := new(ComponentCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentStatus) DeepCopyInto(out *ComponentStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ComponentCondition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatus. 
+func (in *ComponentStatus) DeepCopy() *ComponentStatus { + if in == nil { + return nil + } + out := new(ComponentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ComponentStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentStatusList) DeepCopyInto(out *ComponentStatusList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ComponentStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatusList. +func (in *ComponentStatusList) DeepCopy() *ComponentStatusList { + if in == nil { + return nil + } + out := new(ComponentStatusList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ComponentStatusList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMap) DeepCopyInto(out *ConfigMap) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.BinaryData != nil { + in, out := &in.BinaryData, &out.BinaryData + *out = make(map[string][]byte, len(*in)) + for key, val := range *in { + var outVal []byte + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]byte, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMap. +func (in *ConfigMap) DeepCopy() *ConfigMap { + if in == nil { + return nil + } + out := new(ConfigMap) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigMap) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapEnvSource) DeepCopyInto(out *ConfigMapEnvSource) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapEnvSource. +func (in *ConfigMapEnvSource) DeepCopy() *ConfigMapEnvSource { + if in == nil { + return nil + } + out := new(ConfigMapEnvSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigMapKeySelector) DeepCopyInto(out *ConfigMapKeySelector) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapKeySelector. +func (in *ConfigMapKeySelector) DeepCopy() *ConfigMapKeySelector { + if in == nil { + return nil + } + out := new(ConfigMapKeySelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapList) DeepCopyInto(out *ConfigMapList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConfigMap, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapList. +func (in *ConfigMapList) DeepCopy() *ConfigMapList { + if in == nil { + return nil + } + out := new(ConfigMapList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigMapList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapNodeConfigSource) DeepCopyInto(out *ConfigMapNodeConfigSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapNodeConfigSource. +func (in *ConfigMapNodeConfigSource) DeepCopy() *ConfigMapNodeConfigSource { + if in == nil { + return nil + } + out := new(ConfigMapNodeConfigSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapProjection) DeepCopyInto(out *ConfigMapProjection) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapProjection. +func (in *ConfigMapProjection) DeepCopy() *ConfigMapProjection { + if in == nil { + return nil + } + out := new(ConfigMapProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigMapVolumeSource) DeepCopyInto(out *ConfigMapVolumeSource) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapVolumeSource. +func (in *ConfigMapVolumeSource) DeepCopy() *ConfigMapVolumeSource { + if in == nil { + return nil + } + out := new(ConfigMapVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Container) DeepCopyInto(out *Container) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]VolumeDevice, len(*in)) + copy(*out, *in) + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(Lifecycle) + (*in).DeepCopyInto(*out) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(SecurityContext) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Container. +func (in *Container) DeepCopy() *Container { + if in == nil { + return nil + } + out := new(Container) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerImage) DeepCopyInto(out *ContainerImage) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerImage. +func (in *ContainerImage) DeepCopy() *ContainerImage { + if in == nil { + return nil + } + out := new(ContainerImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ContainerPort) DeepCopyInto(out *ContainerPort) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerPort. +func (in *ContainerPort) DeepCopy() *ContainerPort { + if in == nil { + return nil + } + out := new(ContainerPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerState) DeepCopyInto(out *ContainerState) { + *out = *in + if in.Waiting != nil { + in, out := &in.Waiting, &out.Waiting + *out = new(ContainerStateWaiting) + **out = **in + } + if in.Running != nil { + in, out := &in.Running, &out.Running + *out = new(ContainerStateRunning) + (*in).DeepCopyInto(*out) + } + if in.Terminated != nil { + in, out := &in.Terminated, &out.Terminated + *out = new(ContainerStateTerminated) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerState. +func (in *ContainerState) DeepCopy() *ContainerState { + if in == nil { + return nil + } + out := new(ContainerState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerStateRunning) DeepCopyInto(out *ContainerStateRunning) { + *out = *in + in.StartedAt.DeepCopyInto(&out.StartedAt) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateRunning. +func (in *ContainerStateRunning) DeepCopy() *ContainerStateRunning { + if in == nil { + return nil + } + out := new(ContainerStateRunning) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerStateTerminated) DeepCopyInto(out *ContainerStateTerminated) { + *out = *in + in.StartedAt.DeepCopyInto(&out.StartedAt) + in.FinishedAt.DeepCopyInto(&out.FinishedAt) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateTerminated. +func (in *ContainerStateTerminated) DeepCopy() *ContainerStateTerminated { + if in == nil { + return nil + } + out := new(ContainerStateTerminated) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerStateWaiting) DeepCopyInto(out *ContainerStateWaiting) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateWaiting. +func (in *ContainerStateWaiting) DeepCopy() *ContainerStateWaiting { + if in == nil { + return nil + } + out := new(ContainerStateWaiting) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { + *out = *in + in.State.DeepCopyInto(&out.State) + in.LastTerminationState.DeepCopyInto(&out.LastTerminationState) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStatus. 
+func (in *ContainerStatus) DeepCopy() *ContainerStatus { + if in == nil { + return nil + } + out := new(ContainerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonEndpoint) DeepCopyInto(out *DaemonEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonEndpoint. +func (in *DaemonEndpoint) DeepCopy() *DaemonEndpoint { + if in == nil { + return nil + } + out := new(DaemonEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownwardAPIProjection) DeepCopyInto(out *DownwardAPIProjection) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIProjection. +func (in *DownwardAPIProjection) DeepCopy() *DownwardAPIProjection { + if in == nil { + return nil + } + out := new(DownwardAPIProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownwardAPIVolumeFile) DeepCopyInto(out *DownwardAPIVolumeFile) { + *out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(ObjectFieldSelector) + **out = **in + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + (*in).DeepCopyInto(*out) + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeFile. +func (in *DownwardAPIVolumeFile) DeepCopy() *DownwardAPIVolumeFile { + if in == nil { + return nil + } + out := new(DownwardAPIVolumeFile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownwardAPIVolumeSource) DeepCopyInto(out *DownwardAPIVolumeSource) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeSource. +func (in *DownwardAPIVolumeSource) DeepCopy() *DownwardAPIVolumeSource { + if in == nil { + return nil + } + out := new(DownwardAPIVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmptyDirVolumeSource) DeepCopyInto(out *EmptyDirVolumeSource) { + *out = *in + if in.SizeLimit != nil { + in, out := &in.SizeLimit, &out.SizeLimit + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyDirVolumeSource. 
+func (in *EmptyDirVolumeSource) DeepCopy() *EmptyDirVolumeSource { + if in == nil { + return nil + } + out := new(EmptyDirVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointAddress) DeepCopyInto(out *EndpointAddress) { + *out = *in + if in.NodeName != nil { + in, out := &in.NodeName, &out.NodeName + *out = new(string) + **out = **in + } + if in.TargetRef != nil { + in, out := &in.TargetRef, &out.TargetRef + *out = new(ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointAddress. +func (in *EndpointAddress) DeepCopy() *EndpointAddress { + if in == nil { + return nil + } + out := new(EndpointAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointPort) DeepCopyInto(out *EndpointPort) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort. +func (in *EndpointPort) DeepCopy() *EndpointPort { + if in == nil { + return nil + } + out := new(EndpointPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointSubset) DeepCopyInto(out *EndpointSubset) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NotReadyAddresses != nil { + in, out := &in.NotReadyAddresses, &out.NotReadyAddresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]EndpointPort, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSubset. +func (in *EndpointSubset) DeepCopy() *EndpointSubset { + if in == nil { + return nil + } + out := new(EndpointSubset) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Endpoints) DeepCopyInto(out *Endpoints) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Subsets != nil { + in, out := &in.Subsets, &out.Subsets + *out = make([]EndpointSubset, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoints. +func (in *Endpoints) DeepCopy() *Endpoints { + if in == nil { + return nil + } + out := new(Endpoints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Endpoints) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointsList) DeepCopyInto(out *EndpointsList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Endpoints, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsList. +func (in *EndpointsList) DeepCopy() *EndpointsList { + if in == nil { + return nil + } + out := new(EndpointsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EndpointsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvFromSource) DeepCopyInto(out *EnvFromSource) { + *out = *in + if in.ConfigMapRef != nil { + in, out := &in.ConfigMapRef, &out.ConfigMapRef + *out = new(ConfigMapEnvSource) + (*in).DeepCopyInto(*out) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretEnvSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvFromSource. +func (in *EnvFromSource) DeepCopy() *EnvFromSource { + if in == nil { + return nil + } + out := new(EnvFromSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvVar) DeepCopyInto(out *EnvVar) { + *out = *in + if in.ValueFrom != nil { + in, out := &in.ValueFrom, &out.ValueFrom + *out = new(EnvVarSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVar. +func (in *EnvVar) DeepCopy() *EnvVar { + if in == nil { + return nil + } + out := new(EnvVar) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvVarSource) DeepCopyInto(out *EnvVarSource) { + *out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(ObjectFieldSelector) + **out = **in + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + (*in).DeepCopyInto(*out) + } + if in.ConfigMapKeyRef != nil { + in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef + *out = new(ConfigMapKeySelector) + (*in).DeepCopyInto(*out) + } + if in.SecretKeyRef != nil { + in, out := &in.SecretKeyRef, &out.SecretKeyRef + *out = new(SecretKeySelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarSource. +func (in *EnvVarSource) DeepCopy() *EnvVarSource { + if in == nil { + return nil + } + out := new(EnvVarSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Event) DeepCopyInto(out *Event) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.InvolvedObject = in.InvolvedObject + out.Source = in.Source + in.FirstTimestamp.DeepCopyInto(&out.FirstTimestamp) + in.LastTimestamp.DeepCopyInto(&out.LastTimestamp) + in.EventTime.DeepCopyInto(&out.EventTime) + if in.Series != nil { + in, out := &in.Series, &out.Series + *out = new(EventSeries) + (*in).DeepCopyInto(*out) + } + if in.Related != nil { + in, out := &in.Related, &out.Related + *out = new(ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. +func (in *Event) DeepCopy() *Event { + if in == nil { + return nil + } + out := new(Event) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Event) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventList) DeepCopyInto(out *EventList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList. +func (in *EventList) DeepCopy() *EventList { + if in == nil { + return nil + } + out := new(EventList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EventList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSeries) DeepCopyInto(out *EventSeries) { + *out = *in + in.LastObservedTime.DeepCopyInto(&out.LastObservedTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSeries. +func (in *EventSeries) DeepCopy() *EventSeries { + if in == nil { + return nil + } + out := new(EventSeries) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSource) DeepCopyInto(out *EventSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSource. +func (in *EventSource) DeepCopy() *EventSource { + if in == nil { + return nil + } + out := new(EventSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecAction) DeepCopyInto(out *ExecAction) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecAction. 
+func (in *ExecAction) DeepCopy() *ExecAction { + if in == nil { + return nil + } + out := new(ExecAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FCVolumeSource) DeepCopyInto(out *FCVolumeSource) { + *out = *in + if in.TargetWWNs != nil { + in, out := &in.TargetWWNs, &out.TargetWWNs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Lun != nil { + in, out := &in.Lun, &out.Lun + *out = new(int32) + **out = **in + } + if in.WWIDs != nil { + in, out := &in.WWIDs, &out.WWIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FCVolumeSource. +func (in *FCVolumeSource) DeepCopy() *FCVolumeSource { + if in == nil { + return nil + } + out := new(FCVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlexPersistentVolumeSource) DeepCopyInto(out *FlexPersistentVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretReference) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexPersistentVolumeSource. +func (in *FlexPersistentVolumeSource) DeepCopy() *FlexPersistentVolumeSource { + if in == nil { + return nil + } + out := new(FlexPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlexVolumeSource) DeepCopyInto(out *FlexVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexVolumeSource. +func (in *FlexVolumeSource) DeepCopy() *FlexVolumeSource { + if in == nil { + return nil + } + out := new(FlexVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlockerVolumeSource) DeepCopyInto(out *FlockerVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlockerVolumeSource. +func (in *FlockerVolumeSource) DeepCopy() *FlockerVolumeSource { + if in == nil { + return nil + } + out := new(FlockerVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCEPersistentDiskVolumeSource) DeepCopyInto(out *GCEPersistentDiskVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCEPersistentDiskVolumeSource. 
+func (in *GCEPersistentDiskVolumeSource) DeepCopy() *GCEPersistentDiskVolumeSource { + if in == nil { + return nil + } + out := new(GCEPersistentDiskVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepoVolumeSource) DeepCopyInto(out *GitRepoVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepoVolumeSource. +func (in *GitRepoVolumeSource) DeepCopy() *GitRepoVolumeSource { + if in == nil { + return nil + } + out := new(GitRepoVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlusterfsPersistentVolumeSource) DeepCopyInto(out *GlusterfsPersistentVolumeSource) { + *out = *in + if in.EndpointsNamespace != nil { + in, out := &in.EndpointsNamespace, &out.EndpointsNamespace + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsPersistentVolumeSource. +func (in *GlusterfsPersistentVolumeSource) DeepCopy() *GlusterfsPersistentVolumeSource { + if in == nil { + return nil + } + out := new(GlusterfsPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlusterfsVolumeSource) DeepCopyInto(out *GlusterfsVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsVolumeSource. +func (in *GlusterfsVolumeSource) DeepCopy() *GlusterfsVolumeSource { + if in == nil { + return nil + } + out := new(GlusterfsVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPGetAction) DeepCopyInto(out *HTTPGetAction) { + *out = *in + out.Port = in.Port + if in.HTTPHeaders != nil { + in, out := &in.HTTPHeaders, &out.HTTPHeaders + *out = make([]HTTPHeader, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPGetAction. +func (in *HTTPGetAction) DeepCopy() *HTTPGetAction { + if in == nil { + return nil + } + out := new(HTTPGetAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPHeader) DeepCopyInto(out *HTTPHeader) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeader. +func (in *HTTPHeader) DeepCopy() *HTTPHeader { + if in == nil { + return nil + } + out := new(HTTPHeader) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Handler) DeepCopyInto(out *Handler) { + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecAction) + (*in).DeepCopyInto(*out) + } + if in.HTTPGet != nil { + in, out := &in.HTTPGet, &out.HTTPGet + *out = new(HTTPGetAction) + (*in).DeepCopyInto(*out) + } + if in.TCPSocket != nil { + in, out := &in.TCPSocket, &out.TCPSocket + *out = new(TCPSocketAction) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Handler. +func (in *Handler) DeepCopy() *Handler { + if in == nil { + return nil + } + out := new(Handler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostAlias) DeepCopyInto(out *HostAlias) { + *out = *in + if in.Hostnames != nil { + in, out := &in.Hostnames, &out.Hostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostAlias. +func (in *HostAlias) DeepCopy() *HostAlias { + if in == nil { + return nil + } + out := new(HostAlias) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostPathVolumeSource) DeepCopyInto(out *HostPathVolumeSource) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(HostPathType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPathVolumeSource. +func (in *HostPathVolumeSource) DeepCopy() *HostPathVolumeSource { + if in == nil { + return nil + } + out := new(HostPathVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ISCSIPersistentVolumeSource) DeepCopyInto(out *ISCSIPersistentVolumeSource) { + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretReference) + **out = **in + } + if in.InitiatorName != nil { + in, out := &in.InitiatorName, &out.InitiatorName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIPersistentVolumeSource. +func (in *ISCSIPersistentVolumeSource) DeepCopy() *ISCSIPersistentVolumeSource { + if in == nil { + return nil + } + out := new(ISCSIPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ISCSIVolumeSource) DeepCopyInto(out *ISCSIVolumeSource) { + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + if in.InitiatorName != nil { + in, out := &in.InitiatorName, &out.InitiatorName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIVolumeSource. 
+func (in *ISCSIVolumeSource) DeepCopy() *ISCSIVolumeSource { + if in == nil { + return nil + } + out := new(ISCSIVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyToPath) DeepCopyInto(out *KeyToPath) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyToPath. +func (in *KeyToPath) DeepCopy() *KeyToPath { + if in == nil { + return nil + } + out := new(KeyToPath) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Lifecycle) DeepCopyInto(out *Lifecycle) { + *out = *in + if in.PostStart != nil { + in, out := &in.PostStart, &out.PostStart + *out = new(Handler) + (*in).DeepCopyInto(*out) + } + if in.PreStop != nil { + in, out := &in.PreStop, &out.PreStop + *out = new(Handler) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lifecycle. +func (in *Lifecycle) DeepCopy() *Lifecycle { + if in == nil { + return nil + } + out := new(Lifecycle) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitRange) DeepCopyInto(out *LimitRange) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRange. +func (in *LimitRange) DeepCopy() *LimitRange { + if in == nil { + return nil + } + out := new(LimitRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LimitRange) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitRangeItem) DeepCopyInto(out *LimitRangeItem) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.DefaultRequest != nil { + in, out := &in.DefaultRequest, &out.DefaultRequest + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.MaxLimitRequestRatio != nil { + in, out := &in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeItem. 
+func (in *LimitRangeItem) DeepCopy() *LimitRangeItem { + if in == nil { + return nil + } + out := new(LimitRangeItem) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitRangeList) DeepCopyInto(out *LimitRangeList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LimitRange, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeList. +func (in *LimitRangeList) DeepCopy() *LimitRangeList { + if in == nil { + return nil + } + out := new(LimitRangeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LimitRangeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitRangeSpec) DeepCopyInto(out *LimitRangeSpec) { + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make([]LimitRangeItem, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeSpec. +func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec { + if in == nil { + return nil + } + out := new(LimitRangeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *List) DeepCopyInto(out *List) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if (*in)[i] != nil { + (*out)[i] = (*in)[i].DeepCopyObject() + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List. +func (in *List) DeepCopy() *List { + if in == nil { + return nil + } + out := new(List) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *List) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerIngress. +func (in *LoadBalancerIngress) DeepCopy() *LoadBalancerIngress { + if in == nil { + return nil + } + out := new(LoadBalancerIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoadBalancerStatus) DeepCopyInto(out *LoadBalancerStatus) {
+	*out = *in
+	if in.Ingress != nil {
+		in, out := &in.Ingress, &out.Ingress
+		*out = make([]LoadBalancerIngress, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerStatus.
+func (in *LoadBalancerStatus) DeepCopy() *LoadBalancerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(LoadBalancerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference.
+func (in *LocalObjectReference) DeepCopy() *LocalObjectReference {
+	if in == nil {
+		return nil
+	}
+	out := new(LocalObjectReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LocalVolumeSource) DeepCopyInto(out *LocalVolumeSource) {
+	*out = *in
+	if in.FSType != nil {
+		in, out := &in.FSType, &out.FSType
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalVolumeSource.
+func (in *LocalVolumeSource) DeepCopy() *LocalVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(LocalVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NFSVolumeSource) DeepCopyInto(out *NFSVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSVolumeSource.
+func (in *NFSVolumeSource) DeepCopy() *NFSVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(NFSVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Namespace) DeepCopyInto(out *Namespace) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	out.Status = in.Status
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespace.
+func (in *Namespace) DeepCopy() *Namespace {
+	if in == nil {
+		return nil
+	}
+	out := new(Namespace)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Namespace) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamespaceList) DeepCopyInto(out *NamespaceList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Namespace, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceList.
+func (in *NamespaceList) DeepCopy() *NamespaceList {
+	if in == nil {
+		return nil
+	}
+	out := new(NamespaceList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NamespaceList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamespaceSpec) DeepCopyInto(out *NamespaceSpec) {
+	*out = *in
+	if in.Finalizers != nil {
+		in, out := &in.Finalizers, &out.Finalizers
+		*out = make([]FinalizerName, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceSpec.
+func (in *NamespaceSpec) DeepCopy() *NamespaceSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(NamespaceSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamespaceStatus) DeepCopyInto(out *NamespaceStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceStatus.
+func (in *NamespaceStatus) DeepCopy() *NamespaceStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(NamespaceStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Node) DeepCopyInto(out *Node) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node.
+func (in *Node) DeepCopy() *Node {
+	if in == nil {
+		return nil
+	}
+	out := new(Node)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Node) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeAddress) DeepCopyInto(out *NodeAddress) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAddress.
+func (in *NodeAddress) DeepCopy() *NodeAddress {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeAddress)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeAffinity) DeepCopyInto(out *NodeAffinity) {
+	*out = *in
+	if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+		*out = new(NodeSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+		*out = make([]PreferredSchedulingTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAffinity.
+func (in *NodeAffinity) DeepCopy() *NodeAffinity {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeAffinity)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeCondition) DeepCopyInto(out *NodeCondition) {
+	*out = *in
+	in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime)
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeCondition.
+func (in *NodeCondition) DeepCopy() *NodeCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeConfigSource) DeepCopyInto(out *NodeConfigSource) {
+	*out = *in
+	if in.ConfigMap != nil {
+		in, out := &in.ConfigMap, &out.ConfigMap
+		*out = new(ConfigMapNodeConfigSource)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigSource.
+func (in *NodeConfigSource) DeepCopy() *NodeConfigSource {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeConfigSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeConfigStatus) DeepCopyInto(out *NodeConfigStatus) {
+	*out = *in
+	if in.Assigned != nil {
+		in, out := &in.Assigned, &out.Assigned
+		*out = new(NodeConfigSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Active != nil {
+		in, out := &in.Active, &out.Active
+		*out = new(NodeConfigSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.LastKnownGood != nil {
+		in, out := &in.LastKnownGood, &out.LastKnownGood
+		*out = new(NodeConfigSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigStatus.
+func (in *NodeConfigStatus) DeepCopy() *NodeConfigStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeConfigStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeDaemonEndpoints) DeepCopyInto(out *NodeDaemonEndpoints) {
+	*out = *in
+	out.KubeletEndpoint = in.KubeletEndpoint
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDaemonEndpoints.
+func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeDaemonEndpoints)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeList) DeepCopyInto(out *NodeList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Node, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeList.
+func (in *NodeList) DeepCopy() *NodeList {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NodeList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeProxyOptions) DeepCopyInto(out *NodeProxyOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeProxyOptions.
+func (in *NodeProxyOptions) DeepCopy() *NodeProxyOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeProxyOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NodeProxyOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeResources) DeepCopyInto(out *NodeResources) {
+	*out = *in
+	if in.Capacity != nil {
+		in, out := &in.Capacity, &out.Capacity
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResources.
+func (in *NodeResources) DeepCopy() *NodeResources {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeResources)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSelector) DeepCopyInto(out *NodeSelector) {
+	*out = *in
+	if in.NodeSelectorTerms != nil {
+		in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms
+		*out = make([]NodeSelectorTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelector.
+func (in *NodeSelector) DeepCopy() *NodeSelector {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSelectorRequirement) DeepCopyInto(out *NodeSelectorRequirement) {
+	*out = *in
+	if in.Values != nil {
+		in, out := &in.Values, &out.Values
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorRequirement.
+func (in *NodeSelectorRequirement) DeepCopy() *NodeSelectorRequirement {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSelectorRequirement)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSelectorTerm) DeepCopyInto(out *NodeSelectorTerm) {
+	*out = *in
+	if in.MatchExpressions != nil {
+		in, out := &in.MatchExpressions, &out.MatchExpressions
+		*out = make([]NodeSelectorRequirement, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MatchFields != nil {
+		in, out := &in.MatchFields, &out.MatchFields
+		*out = make([]NodeSelectorRequirement, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorTerm.
+func (in *NodeSelectorTerm) DeepCopy() *NodeSelectorTerm {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSelectorTerm)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSpec) DeepCopyInto(out *NodeSpec) {
+	*out = *in
+	if in.Taints != nil {
+		in, out := &in.Taints, &out.Taints
+		*out = make([]Taint, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ConfigSource != nil {
+		in, out := &in.ConfigSource, &out.ConfigSource
+		*out = new(NodeConfigSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec.
+func (in *NodeSpec) DeepCopy() *NodeSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
+	*out = *in
+	if in.Capacity != nil {
+		in, out := &in.Capacity, &out.Capacity
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Allocatable != nil {
+		in, out := &in.Allocatable, &out.Allocatable
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]NodeCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Addresses != nil {
+		in, out := &in.Addresses, &out.Addresses
+		*out = make([]NodeAddress, len(*in))
+		copy(*out, *in)
+	}
+	out.DaemonEndpoints = in.DaemonEndpoints
+	out.NodeInfo = in.NodeInfo
+	if in.Images != nil {
+		in, out := &in.Images, &out.Images
+		*out = make([]ContainerImage, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.VolumesInUse != nil {
+		in, out := &in.VolumesInUse, &out.VolumesInUse
+		*out = make([]UniqueVolumeName, len(*in))
+		copy(*out, *in)
+	}
+	if in.VolumesAttached != nil {
+		in, out := &in.VolumesAttached, &out.VolumesAttached
+		*out = make([]AttachedVolume, len(*in))
+		copy(*out, *in)
+	}
+	if in.Config != nil {
+		in, out := &in.Config, &out.Config
+		*out = new(NodeConfigStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus.
+func (in *NodeStatus) DeepCopy() *NodeStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSystemInfo) DeepCopyInto(out *NodeSystemInfo) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSystemInfo.
+func (in *NodeSystemInfo) DeepCopy() *NodeSystemInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSystemInfo)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectFieldSelector) DeepCopyInto(out *ObjectFieldSelector) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectFieldSelector.
+func (in *ObjectFieldSelector) DeepCopy() *ObjectFieldSelector {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectFieldSelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectReference) DeepCopyInto(out *ObjectReference) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference.
+func (in *ObjectReference) DeepCopy() *ObjectReference {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ObjectReference) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolume) DeepCopyInto(out *PersistentVolume) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	out.Status = in.Status
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolume.
+func (in *PersistentVolume) DeepCopy() *PersistentVolume {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolume)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PersistentVolume) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaim) DeepCopyInto(out *PersistentVolumeClaim) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaim.
+func (in *PersistentVolumeClaim) DeepCopy() *PersistentVolumeClaim {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaim)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PersistentVolumeClaim) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimCondition) DeepCopyInto(out *PersistentVolumeClaimCondition) {
+	*out = *in
+	in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimCondition.
+func (in *PersistentVolumeClaimCondition) DeepCopy() *PersistentVolumeClaimCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimList) DeepCopyInto(out *PersistentVolumeClaimList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]PersistentVolumeClaim, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimList.
+func (in *PersistentVolumeClaimList) DeepCopy() *PersistentVolumeClaimList {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PersistentVolumeClaimList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec) {
+	*out = *in
+	if in.AccessModes != nil {
+		in, out := &in.AccessModes, &out.AccessModes
+		*out = make([]PersistentVolumeAccessMode, len(*in))
+		copy(*out, *in)
+	}
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	in.Resources.DeepCopyInto(&out.Resources)
+	if in.StorageClassName != nil {
+		in, out := &in.StorageClassName, &out.StorageClassName
+		*out = new(string)
+		**out = **in
+	}
+	if in.VolumeMode != nil {
+		in, out := &in.VolumeMode, &out.VolumeMode
+		*out = new(PersistentVolumeMode)
+		**out = **in
+	}
+	if in.DataSource != nil {
+		in, out := &in.DataSource, &out.DataSource
+		*out = new(TypedLocalObjectReference)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimSpec.
+func (in *PersistentVolumeClaimSpec) DeepCopy() *PersistentVolumeClaimSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimStatus) {
+	*out = *in
+	if in.AccessModes != nil {
+		in, out := &in.AccessModes, &out.AccessModes
+		*out = make([]PersistentVolumeAccessMode, len(*in))
+		copy(*out, *in)
+	}
+	if in.Capacity != nil {
+		in, out := &in.Capacity, &out.Capacity
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]PersistentVolumeClaimCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimStatus.
+func (in *PersistentVolumeClaimStatus) DeepCopy() *PersistentVolumeClaimStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimVolumeSource) DeepCopyInto(out *PersistentVolumeClaimVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimVolumeSource.
+func (in *PersistentVolumeClaimVolumeSource) DeepCopy() *PersistentVolumeClaimVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeList) DeepCopyInto(out *PersistentVolumeList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]PersistentVolume, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeList.
+func (in *PersistentVolumeList) DeepCopy() *PersistentVolumeList {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PersistentVolumeList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) {
+	*out = *in
+	if in.GCEPersistentDisk != nil {
+		in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk
+		*out = new(GCEPersistentDiskVolumeSource)
+		**out = **in
+	}
+	if in.AWSElasticBlockStore != nil {
+		in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore
+		*out = new(AWSElasticBlockStoreVolumeSource)
+		**out = **in
+	}
+	if in.HostPath != nil {
+		in, out := &in.HostPath, &out.HostPath
+		*out = new(HostPathVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Glusterfs != nil {
+		in, out := &in.Glusterfs, &out.Glusterfs
+		*out = new(GlusterfsPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NFS != nil {
+		in, out := &in.NFS, &out.NFS
+		*out = new(NFSVolumeSource)
+		**out = **in
+	}
+	if in.RBD != nil {
+		in, out := &in.RBD, &out.RBD
+		*out = new(RBDPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Quobyte != nil {
+		in, out := &in.Quobyte, &out.Quobyte
+		*out = new(QuobyteVolumeSource)
+		**out = **in
+	}
+	if in.ISCSI != nil {
+		in, out := &in.ISCSI, &out.ISCSI
+		*out = new(ISCSIPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FlexVolume != nil {
+		in, out := &in.FlexVolume, &out.FlexVolume
+		*out = new(FlexPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Cinder != nil {
+		in, out := &in.Cinder, &out.Cinder
+		*out = new(CinderPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.CephFS != nil {
+		in, out := &in.CephFS, &out.CephFS
+		*out = new(CephFSPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FC != nil {
+		in, out := &in.FC, &out.FC
+		*out = new(FCVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Flocker != nil {
+		in, out := &in.Flocker, &out.Flocker
+		*out = new(FlockerVolumeSource)
+		**out = **in
+	}
+	if in.AzureFile != nil {
+		in, out := &in.AzureFile, &out.AzureFile
+		*out = new(AzureFilePersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.VsphereVolume != nil {
+		in, out := &in.VsphereVolume, &out.VsphereVolume
+		*out = new(VsphereVirtualDiskVolumeSource)
+		**out = **in
+	}
+	if in.AzureDisk != nil {
+		in, out := &in.AzureDisk, &out.AzureDisk
+		*out = new(AzureDiskVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PhotonPersistentDisk != nil {
+		in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk
+		*out = new(PhotonPersistentDiskVolumeSource)
+		**out = **in
+	}
+	if in.PortworxVolume != nil {
+		in, out := &in.PortworxVolume, &out.PortworxVolume
+		*out = new(PortworxVolumeSource)
+		**out = **in
+	}
+	if in.ScaleIO != nil {
+		in, out := &in.ScaleIO, &out.ScaleIO
+		*out = new(ScaleIOPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Local != nil {
+		in, out := &in.Local, &out.Local
+		*out = new(LocalVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.StorageOS != nil {
+		in, out := &in.StorageOS, &out.StorageOS
+		*out = new(StorageOSPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.CSI != nil {
+		in, out := &in.CSI, &out.CSI
+		*out = new(CSIPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSource.
+func (in *PersistentVolumeSource) DeepCopy() *PersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) {
+	*out = *in
+	if in.Capacity != nil {
+		in, out := &in.Capacity, &out.Capacity
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	in.PersistentVolumeSource.DeepCopyInto(&out.PersistentVolumeSource)
+	if in.AccessModes != nil {
+		in, out := &in.AccessModes, &out.AccessModes
+		*out = make([]PersistentVolumeAccessMode, len(*in))
+		copy(*out, *in)
+	}
+	if in.ClaimRef != nil {
+		in, out := &in.ClaimRef, &out.ClaimRef
+		*out = new(ObjectReference)
+		**out = **in
+	}
+	if in.MountOptions != nil {
+		in, out := &in.MountOptions, &out.MountOptions
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.VolumeMode != nil {
+		in, out := &in.VolumeMode, &out.VolumeMode
+		*out = new(PersistentVolumeMode)
+		**out = **in
+	}
+	if in.NodeAffinity != nil {
+		in, out := &in.NodeAffinity, &out.NodeAffinity
+		*out = new(VolumeNodeAffinity)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSpec.
+func (in *PersistentVolumeSpec) DeepCopy() *PersistentVolumeSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeStatus) DeepCopyInto(out *PersistentVolumeStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeStatus.
+func (in *PersistentVolumeStatus) DeepCopy() *PersistentVolumeStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PhotonPersistentDiskVolumeSource) DeepCopyInto(out *PhotonPersistentDiskVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhotonPersistentDiskVolumeSource.
+func (in *PhotonPersistentDiskVolumeSource) DeepCopy() *PhotonPersistentDiskVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(PhotonPersistentDiskVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Pod) DeepCopyInto(out *Pod) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pod.
+func (in *Pod) DeepCopy() *Pod {
+	if in == nil {
+		return nil
+	}
+	out := new(Pod)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Pod) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodAffinity) DeepCopyInto(out *PodAffinity) {
+	*out = *in
+	if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+		*out = make([]PodAffinityTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+		*out = make([]WeightedPodAffinityTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinity.
+func (in *PodAffinity) DeepCopy() *PodAffinity {
+	if in == nil {
+		return nil
+	}
+	out := new(PodAffinity)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodAffinityTerm) DeepCopyInto(out *PodAffinityTerm) {
+	*out = *in
+	if in.LabelSelector != nil {
+		in, out := &in.LabelSelector, &out.LabelSelector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Namespaces != nil {
+		in, out := &in.Namespaces, &out.Namespaces
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinityTerm.
+func (in *PodAffinityTerm) DeepCopy() *PodAffinityTerm {
+	if in == nil {
+		return nil
+	}
+	out := new(PodAffinityTerm)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodAntiAffinity) DeepCopyInto(out *PodAntiAffinity) {
+	*out = *in
+	if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+		*out = make([]PodAffinityTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+		*out = make([]WeightedPodAffinityTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAntiAffinity.
+func (in *PodAntiAffinity) DeepCopy() *PodAntiAffinity {
+	if in == nil {
+		return nil
+	}
+	out := new(PodAntiAffinity)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodAttachOptions) DeepCopyInto(out *PodAttachOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAttachOptions.
+func (in *PodAttachOptions) DeepCopy() *PodAttachOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PodAttachOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodAttachOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodCondition) DeepCopyInto(out *PodCondition) {
+	*out = *in
+	in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCondition.
+func (in *PodCondition) DeepCopy() *PodCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(PodCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodDNSConfig) DeepCopyInto(out *PodDNSConfig) {
+	*out = *in
+	if in.Nameservers != nil {
+		in, out := &in.Nameservers, &out.Nameservers
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Searches != nil {
+		in, out := &in.Searches, &out.Searches
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Options != nil {
+		in, out := &in.Options, &out.Options
+		*out = make([]PodDNSConfigOption, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfig.
+func (in *PodDNSConfig) DeepCopy() *PodDNSConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(PodDNSConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodDNSConfigOption) DeepCopyInto(out *PodDNSConfigOption) {
+	*out = *in
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfigOption.
+func (in *PodDNSConfigOption) DeepCopy() *PodDNSConfigOption {
+	if in == nil {
+		return nil
+	}
+	out := new(PodDNSConfigOption)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodExecOptions) DeepCopyInto(out *PodExecOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Command != nil {
+		in, out := &in.Command, &out.Command
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodExecOptions.
+func (in *PodExecOptions) DeepCopy() *PodExecOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PodExecOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodExecOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodList) DeepCopyInto(out *PodList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Pod, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodList.
+func (in *PodList) DeepCopy() *PodList {
+	if in == nil {
+		return nil
+	}
+	out := new(PodList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodLogOptions) DeepCopyInto(out *PodLogOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.SinceSeconds != nil {
+		in, out := &in.SinceSeconds, &out.SinceSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.SinceTime != nil {
+		in, out := &in.SinceTime, &out.SinceTime
+		*out = (*in).DeepCopy()
+	}
+	if in.TailLines != nil {
+		in, out := &in.TailLines, &out.TailLines
+		*out = new(int64)
+		**out = **in
+	}
+	if in.LimitBytes != nil {
+		in, out := &in.LimitBytes, &out.LimitBytes
+		*out = new(int64)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLogOptions.
+func (in *PodLogOptions) DeepCopy() *PodLogOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PodLogOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodLogOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodPortForwardOptions) DeepCopyInto(out *PodPortForwardOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Ports != nil {
+		in, out := &in.Ports, &out.Ports
+		*out = make([]int32, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPortForwardOptions.
+func (in *PodPortForwardOptions) DeepCopy() *PodPortForwardOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PodPortForwardOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodPortForwardOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodProxyOptions) DeepCopyInto(out *PodProxyOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodProxyOptions.
+func (in *PodProxyOptions) DeepCopy() *PodProxyOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PodProxyOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodProxyOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodReadinessGate) DeepCopyInto(out *PodReadinessGate) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodReadinessGate.
+func (in *PodReadinessGate) DeepCopy() *PodReadinessGate {
+	if in == nil {
+		return nil
+	}
+	out := new(PodReadinessGate)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) {
+	*out = *in
+	if in.ShareProcessNamespace != nil {
+		in, out := &in.ShareProcessNamespace, &out.ShareProcessNamespace
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SELinuxOptions != nil {
+		in, out := &in.SELinuxOptions, &out.SELinuxOptions
+		*out = new(SELinuxOptions)
+		**out = **in
+	}
+	if in.RunAsUser != nil {
+		in, out := &in.RunAsUser, &out.RunAsUser
+		*out = new(int64)
+		**out = **in
+	}
+	if in.RunAsGroup != nil {
+		in, out := &in.RunAsGroup, &out.RunAsGroup
+		*out = new(int64)
+		**out = **in
+	}
+	if in.RunAsNonRoot != nil {
+		in, out := &in.RunAsNonRoot, &out.RunAsNonRoot
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SupplementalGroups != nil {
+		in, out := &in.SupplementalGroups, &out.SupplementalGroups
+		*out = make([]int64, len(*in))
+		copy(*out, *in)
+	}
+	if in.FSGroup != nil {
+		in, out := &in.FSGroup, &out.FSGroup
+		*out = new(int64)
+		**out = **in
+	}
+	if in.Sysctls != nil {
+		in, out := &in.Sysctls, &out.Sysctls
+		*out = make([]Sysctl, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityContext.
+func (in *PodSecurityContext) DeepCopy() *PodSecurityContext {
+	if in == nil {
+		return nil
+	}
+	out := new(PodSecurityContext)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodSignature) DeepCopyInto(out *PodSignature) {
+	*out = *in
+	if in.PodController != nil {
+		in, out := &in.PodController, &out.PodController
+		*out = new(v1.OwnerReference)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSignature.
+func (in *PodSignature) DeepCopy() *PodSignature {
+	if in == nil {
+		return nil
+	}
+	out := new(PodSignature)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodSpec) DeepCopyInto(out *PodSpec) {
+	*out = *in
+	if in.Volumes != nil {
+		in, out := &in.Volumes, &out.Volumes
+		*out = make([]Volume, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.InitContainers != nil {
+		in, out := &in.InitContainers, &out.InitContainers
+		*out = make([]Container, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Containers != nil {
+		in, out := &in.Containers, &out.Containers
+		*out = make([]Container, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TerminationGracePeriodSeconds != nil {
+		in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.ActiveDeadlineSeconds != nil {
+		in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.AutomountServiceAccountToken != nil {
+		in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SecurityContext != nil {
+		in, out := &in.SecurityContext, &out.SecurityContext
+		*out = new(PodSecurityContext)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ImagePullSecrets != nil {
+		in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
+		*out = make([]LocalObjectReference, len(*in))
+		copy(*out, *in)
+	}
+	if in.Affinity != nil {
+		in, out := &in.Affinity, &out.Affinity
+		*out = new(Affinity)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Tolerations != nil {
+		in, out := &in.Tolerations, &out.Tolerations
+		*out = make([]Toleration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.HostAliases != nil {
+		in, out := &in.HostAliases, &out.HostAliases
+		*out = make([]HostAlias, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Priority != nil {
+		in, out := &in.Priority, &out.Priority
+		*out = new(int32)
+		**out = **in
+	}
+	if in.DNSConfig != nil {
+		in, out := &in.DNSConfig, &out.DNSConfig
+		*out = new(PodDNSConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ReadinessGates != nil {
+		in, out := &in.ReadinessGates, &out.ReadinessGates
+		*out = make([]PodReadinessGate, len(*in))
+		copy(*out, *in)
+	}
+	if in.RuntimeClassName != nil {
+		in, out := &in.RuntimeClassName, &out.RuntimeClassName
+		*out = new(string)
+		**out = **in
+	}
+	if in.EnableServiceLinks != nil {
+		in, out := &in.EnableServiceLinks, &out.EnableServiceLinks
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSpec.
+func (in *PodSpec) DeepCopy() *PodSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PodSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodStatus) DeepCopyInto(out *PodStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]PodCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.StartTime != nil {
+		in, out := &in.StartTime, &out.StartTime
+		*out = (*in).DeepCopy()
+	}
+	if in.InitContainerStatuses != nil {
+		in, out := &in.InitContainerStatuses, &out.InitContainerStatuses
+		*out = make([]ContainerStatus, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ContainerStatuses != nil {
+		in, out := &in.ContainerStatuses, &out.ContainerStatuses
+		*out = make([]ContainerStatus, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatus.
+func (in *PodStatus) DeepCopy() *PodStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PodStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodStatusResult) DeepCopyInto(out *PodStatusResult) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatusResult.
+func (in *PodStatusResult) DeepCopy() *PodStatusResult {
+	if in == nil {
+		return nil
+	}
+	out := new(PodStatusResult)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodStatusResult) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodTemplate) DeepCopyInto(out *PodTemplate) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Template.DeepCopyInto(&out.Template)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplate.
+func (in *PodTemplate) DeepCopy() *PodTemplate {
+	if in == nil {
+		return nil
+	}
+	out := new(PodTemplate)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodTemplate) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodTemplateList) DeepCopyInto(out *PodTemplateList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]PodTemplate, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateList.
+func (in *PodTemplateList) DeepCopy() *PodTemplateList {
+	if in == nil {
+		return nil
+	}
+	out := new(PodTemplateList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodTemplateList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodTemplateSpec) DeepCopyInto(out *PodTemplateSpec) {
+	*out = *in
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateSpec.
+func (in *PodTemplateSpec) DeepCopy() *PodTemplateSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PodTemplateSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PortworxVolumeSource) DeepCopyInto(out *PortworxVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortworxVolumeSource.
+func (in *PortworxVolumeSource) DeepCopy() *PortworxVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(PortworxVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Preconditions) DeepCopyInto(out *Preconditions) {
+	*out = *in
+	if in.UID != nil {
+		in, out := &in.UID, &out.UID
+		*out = new(types.UID)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preconditions.
+func (in *Preconditions) DeepCopy() *Preconditions {
+	if in == nil {
+		return nil
+	}
+	out := new(Preconditions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PreferAvoidPodsEntry) DeepCopyInto(out *PreferAvoidPodsEntry) {
+	*out = *in
+	in.PodSignature.DeepCopyInto(&out.PodSignature)
+	in.EvictionTime.DeepCopyInto(&out.EvictionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferAvoidPodsEntry.
+func (in *PreferAvoidPodsEntry) DeepCopy() *PreferAvoidPodsEntry {
+	if in == nil {
+		return nil
+	}
+	out := new(PreferAvoidPodsEntry)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PreferredSchedulingTerm) DeepCopyInto(out *PreferredSchedulingTerm) {
+	*out = *in
+	in.Preference.DeepCopyInto(&out.Preference)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferredSchedulingTerm.
+func (in *PreferredSchedulingTerm) DeepCopy() *PreferredSchedulingTerm {
+	if in == nil {
+		return nil
+	}
+	out := new(PreferredSchedulingTerm)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Probe) DeepCopyInto(out *Probe) {
+	*out = *in
+	in.Handler.DeepCopyInto(&out.Handler)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Probe.
+func (in *Probe) DeepCopy() *Probe {
+	if in == nil {
+		return nil
+	}
+	out := new(Probe)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectedVolumeSource) DeepCopyInto(out *ProjectedVolumeSource) {
+	*out = *in
+	if in.Sources != nil {
+		in, out := &in.Sources, &out.Sources
+		*out = make([]VolumeProjection, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DefaultMode != nil {
+		in, out := &in.DefaultMode, &out.DefaultMode
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectedVolumeSource.
+func (in *ProjectedVolumeSource) DeepCopy() *ProjectedVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ProjectedVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QuobyteVolumeSource) DeepCopyInto(out *QuobyteVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuobyteVolumeSource.
+func (in *QuobyteVolumeSource) DeepCopy() *QuobyteVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(QuobyteVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RBDPersistentVolumeSource) DeepCopyInto(out *RBDPersistentVolumeSource) {
+	*out = *in
+	if in.CephMonitors != nil {
+		in, out := &in.CephMonitors, &out.CephMonitors
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDPersistentVolumeSource.
+func (in *RBDPersistentVolumeSource) DeepCopy() *RBDPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(RBDPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RBDVolumeSource) DeepCopyInto(out *RBDVolumeSource) {
+	*out = *in
+	if in.CephMonitors != nil {
+		in, out := &in.CephMonitors, &out.CephMonitors
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(LocalObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDVolumeSource.
+func (in *RBDVolumeSource) DeepCopy() *RBDVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(RBDVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RangeAllocation) DeepCopyInto(out *RangeAllocation) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Data != nil {
+		in, out := &in.Data, &out.Data
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeAllocation.
+func (in *RangeAllocation) DeepCopy() *RangeAllocation {
+	if in == nil {
+		return nil
+	}
+	out := new(RangeAllocation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RangeAllocation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationController) DeepCopyInto(out *ReplicationController) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationController. +func (in *ReplicationController) DeepCopy() *ReplicationController { + if in == nil { + return nil + } + out := new(ReplicationController) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicationController) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationControllerCondition) DeepCopyInto(out *ReplicationControllerCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerCondition. +func (in *ReplicationControllerCondition) DeepCopy() *ReplicationControllerCondition { + if in == nil { + return nil + } + out := new(ReplicationControllerCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationControllerList) DeepCopyInto(out *ReplicationControllerList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicationController, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerList. +func (in *ReplicationControllerList) DeepCopy() *ReplicationControllerList { + if in == nil { + return nil + } + out := new(ReplicationControllerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicationControllerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationControllerSpec) DeepCopyInto(out *ReplicationControllerSpec) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(PodTemplateSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerSpec. 
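For composite fields the generated code allocates fresh maps and recurses into pointer fields, as ReplicationControllerSpec.DeepCopyInto above does for Selector and Template. A short sketch of the resulting independence, under the same assumed import path:

package main

import (
	"fmt"

	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	spec := core.ReplicationControllerSpec{
		Selector: map[string]string{"app": "web"},
		Template: &core.PodTemplateSpec{},
	}

	c := spec.DeepCopy()
	spec.Selector["app"] = "db" // mutate the original map

	// The copy owns its own map and its own PodTemplateSpec allocation.
	fmt.Println(c.Selector["app"])           // prints "web"
	fmt.Println(c.Template == spec.Template) // prints "false"
}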
+func (in *ReplicationControllerSpec) DeepCopy() *ReplicationControllerSpec { + if in == nil { + return nil + } + out := new(ReplicationControllerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationControllerStatus) DeepCopyInto(out *ReplicationControllerStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicationControllerCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerStatus. +func (in *ReplicationControllerStatus) DeepCopy() *ReplicationControllerStatus { + if in == nil { + return nil + } + out := new(ReplicationControllerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceFieldSelector) DeepCopyInto(out *ResourceFieldSelector) { + *out = *in + out.Divisor = in.Divisor.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFieldSelector. +func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector { + if in == nil { + return nil + } + out := new(ResourceFieldSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ResourceList) DeepCopyInto(out *ResourceList) { + { + in := &in + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceList. +func (in ResourceList) DeepCopy() ResourceList { + if in == nil { + return nil + } + out := new(ResourceList) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuota) DeepCopyInto(out *ResourceQuota) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuota. +func (in *ResourceQuota) DeepCopy() *ResourceQuota { + if in == nil { + return nil + } + out := new(ResourceQuota) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceQuota) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuotaList) DeepCopyInto(out *ResourceQuotaList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceQuota, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaList. 
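ResourceList above is the one alias type in this stretch with value (non-pointer) receivers, since a map is already a reference type; its DeepCopy nevertheless duplicates every Quantity value. A sketch under the same import assumptions:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	rl := core.ResourceList{
		core.ResourceCPU: resource.MustParse("500m"),
	}

	c := rl.DeepCopy() // value receiver: value in, value out
	rl[core.ResourceCPU] = resource.MustParse("2")

	q := c[core.ResourceCPU]
	fmt.Println(q.String()) // prints "500m"
}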
+func (in *ResourceQuotaList) DeepCopy() *ResourceQuotaList { + if in == nil { + return nil + } + out := new(ResourceQuotaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceQuotaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuotaSpec) DeepCopyInto(out *ResourceQuotaSpec) { + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]ResourceQuotaScope, len(*in)) + copy(*out, *in) + } + if in.ScopeSelector != nil { + in, out := &in.ScopeSelector, &out.ScopeSelector + *out = new(ScopeSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaSpec. +func (in *ResourceQuotaSpec) DeepCopy() *ResourceQuotaSpec { + if in == nil { + return nil + } + out := new(ResourceQuotaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuotaStatus) DeepCopyInto(out *ResourceQuotaStatus) { + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Used != nil { + in, out := &in.Used, &out.Used + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaStatus. +func (in *ResourceQuotaStatus) DeepCopy() *ResourceQuotaStatus { + if in == nil { + return nil + } + out := new(ResourceQuotaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) { + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements. +func (in *ResourceRequirements) DeepCopy() *ResourceRequirements { + if in == nil { + return nil + } + out := new(ResourceRequirements) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SELinuxOptions) DeepCopyInto(out *SELinuxOptions) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxOptions. 
+func (in *SELinuxOptions) DeepCopy() *SELinuxOptions { + if in == nil { + return nil + } + out := new(SELinuxOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleIOPersistentVolumeSource) DeepCopyInto(out *ScaleIOPersistentVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOPersistentVolumeSource. +func (in *ScaleIOPersistentVolumeSource) DeepCopy() *ScaleIOPersistentVolumeSource { + if in == nil { + return nil + } + out := new(ScaleIOPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleIOVolumeSource) DeepCopyInto(out *ScaleIOVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOVolumeSource. +func (in *ScaleIOVolumeSource) DeepCopy() *ScaleIOVolumeSource { + if in == nil { + return nil + } + out := new(ScaleIOVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopeSelector) DeepCopyInto(out *ScopeSelector) { + *out = *in + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]ScopedResourceSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeSelector. +func (in *ScopeSelector) DeepCopy() *ScopeSelector { + if in == nil { + return nil + } + out := new(ScopeSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopedResourceSelectorRequirement) DeepCopyInto(out *ScopedResourceSelectorRequirement) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopedResourceSelectorRequirement. +func (in *ScopedResourceSelectorRequirement) DeepCopy() *ScopedResourceSelectorRequirement { + if in == nil { + return nil + } + out := new(ScopedResourceSelectorRequirement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Secret) DeepCopyInto(out *Secret) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string][]byte, len(*in)) + for key, val := range *in { + var outVal []byte + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]byte, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Secret. 
+func (in *Secret) DeepCopy() *Secret { + if in == nil { + return nil + } + out := new(Secret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Secret) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretEnvSource) DeepCopyInto(out *SecretEnvSource) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretEnvSource. +func (in *SecretEnvSource) DeepCopy() *SecretEnvSource { + if in == nil { + return nil + } + out := new(SecretEnvSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeySelector. +func (in *SecretKeySelector) DeepCopy() *SecretKeySelector { + if in == nil { + return nil + } + out := new(SecretKeySelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretList) DeepCopyInto(out *SecretList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Secret, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretList. +func (in *SecretList) DeepCopy() *SecretList { + if in == nil { + return nil + } + out := new(SecretList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecretList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretProjection) DeepCopyInto(out *SecretProjection) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretProjection. +func (in *SecretProjection) DeepCopy() *SecretProjection { + if in == nil { + return nil + } + out := new(SecretProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecretReference) DeepCopyInto(out *SecretReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference. +func (in *SecretReference) DeepCopy() *SecretReference { + if in == nil { + return nil + } + out := new(SecretReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretVolumeSource) DeepCopyInto(out *SecretVolumeSource) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVolumeSource. +func (in *SecretVolumeSource) DeepCopy() *SecretVolumeSource { + if in == nil { + return nil + } + out := new(SecretVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityContext) DeepCopyInto(out *SecurityContext) { + *out = *in + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = new(Capabilities) + (*in).DeepCopyInto(*out) + } + if in.Privileged != nil { + in, out := &in.Privileged, &out.Privileged + *out = new(bool) + **out = **in + } + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(SELinuxOptions) + **out = **in + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(int64) + **out = **in + } + if in.RunAsGroup != nil { + in, out := &in.RunAsGroup, &out.RunAsGroup + *out = new(int64) + **out = **in + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + *out = new(bool) + **out = **in + } + if in.ReadOnlyRootFilesystem != nil { + in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem + *out = new(bool) + **out = **in + } + if in.AllowPrivilegeEscalation != nil { + in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation + *out = new(bool) + **out = **in + } + if in.ProcMount != nil { + in, out := &in.ProcMount, &out.ProcMount + *out = new(ProcMountType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContext. +func (in *SecurityContext) DeepCopy() *SecurityContext { + if in == nil { + return nil + } + out := new(SecurityContext) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SerializedReference) DeepCopyInto(out *SerializedReference) { + *out = *in + out.TypeMeta = in.TypeMeta + out.Reference = in.Reference + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializedReference. +func (in *SerializedReference) DeepCopy() *SerializedReference { + if in == nil { + return nil + } + out := new(SerializedReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *SerializedReference) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Service) DeepCopyInto(out *Service) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service. +func (in *Service) DeepCopy() *Service { + if in == nil { + return nil + } + out := new(Service) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Service) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccount) DeepCopyInto(out *ServiceAccount) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccount. +func (in *ServiceAccount) DeepCopy() *ServiceAccount { + if in == nil { + return nil + } + out := new(ServiceAccount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceAccount) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountList) DeepCopyInto(out *ServiceAccountList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceAccount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountList. +func (in *ServiceAccountList) DeepCopy() *ServiceAccountList { + if in == nil { + return nil + } + out := new(ServiceAccountList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceAccountList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountTokenProjection) DeepCopyInto(out *ServiceAccountTokenProjection) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountTokenProjection. 
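The DeepCopyObject wrappers exist so the top-level kinds (Secret, Service, ServiceAccount, and so on) satisfy runtime.Object and can be copied behind the interface without knowing the concrete type. A sketch, same assumed import path:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	core "k8s.io/kubernetes/pkg/apis/core"
)

// copyAny works for any kind in this file that implements DeepCopyObject.
func copyAny(obj runtime.Object) runtime.Object {
	return obj.DeepCopyObject()
}

func main() {
	var obj runtime.Object = &core.Service{}
	c := copyAny(obj)
	fmt.Println(c != obj) // prints "true": a fresh allocation
}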
+func (in *ServiceAccountTokenProjection) DeepCopy() *ServiceAccountTokenProjection { + if in == nil { + return nil + } + out := new(ServiceAccountTokenProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceList) DeepCopyInto(out *ServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Service, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceList. +func (in *ServiceList) DeepCopy() *ServiceList { + if in == nil { + return nil + } + out := new(ServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicePort) DeepCopyInto(out *ServicePort) { + *out = *in + out.TargetPort = in.TargetPort + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePort. +func (in *ServicePort) DeepCopy() *ServicePort { + if in == nil { + return nil + } + out := new(ServicePort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceProxyOptions) DeepCopyInto(out *ServiceProxyOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceProxyOptions. +func (in *ServiceProxyOptions) DeepCopy() *ServiceProxyOptions { + if in == nil { + return nil + } + out := new(ServiceProxyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceProxyOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ServicePort, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExternalIPs != nil { + in, out := &in.ExternalIPs, &out.ExternalIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SessionAffinityConfig != nil { + in, out := &in.SessionAffinityConfig, &out.SessionAffinityConfig + *out = new(SessionAffinityConfig) + (*in).DeepCopyInto(*out) + } + if in.LoadBalancerSourceRanges != nil { + in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec. 
+func (in *ServiceSpec) DeepCopy() *ServiceSpec { + if in == nil { + return nil + } + out := new(ServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) { + *out = *in + in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceStatus. +func (in *ServiceStatus) DeepCopy() *ServiceStatus { + if in == nil { + return nil + } + out := new(ServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SessionAffinityConfig) DeepCopyInto(out *SessionAffinityConfig) { + *out = *in + if in.ClientIP != nil { + in, out := &in.ClientIP, &out.ClientIP + *out = new(ClientIPConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionAffinityConfig. +func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig { + if in == nil { + return nil + } + out := new(SessionAffinityConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistentVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSPersistentVolumeSource. +func (in *StorageOSPersistentVolumeSource) DeepCopy() *StorageOSPersistentVolumeSource { + if in == nil { + return nil + } + out := new(StorageOSPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageOSVolumeSource) DeepCopyInto(out *StorageOSVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSVolumeSource. +func (in *StorageOSVolumeSource) DeepCopy() *StorageOSVolumeSource { + if in == nil { + return nil + } + out := new(StorageOSVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Sysctl) DeepCopyInto(out *Sysctl) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sysctl. +func (in *Sysctl) DeepCopy() *Sysctl { + if in == nil { + return nil + } + out := new(Sysctl) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPSocketAction) DeepCopyInto(out *TCPSocketAction) { + *out = *in + out.Port = in.Port + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPSocketAction. 
+func (in *TCPSocketAction) DeepCopy() *TCPSocketAction { + if in == nil { + return nil + } + out := new(TCPSocketAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Taint) DeepCopyInto(out *Taint) { + *out = *in + if in.TimeAdded != nil { + in, out := &in.TimeAdded, &out.TimeAdded + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taint. +func (in *Taint) DeepCopy() *Taint { + if in == nil { + return nil + } + out := new(Taint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Toleration) DeepCopyInto(out *Toleration) { + *out = *in + if in.TolerationSeconds != nil { + in, out := &in.TolerationSeconds, &out.TolerationSeconds + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Toleration. +func (in *Toleration) DeepCopy() *Toleration { + if in == nil { + return nil + } + out := new(Toleration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopologySelectorLabelRequirement) DeepCopyInto(out *TopologySelectorLabelRequirement) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySelectorLabelRequirement. +func (in *TopologySelectorLabelRequirement) DeepCopy() *TopologySelectorLabelRequirement { + if in == nil { + return nil + } + out := new(TopologySelectorLabelRequirement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopologySelectorTerm) DeepCopyInto(out *TopologySelectorTerm) { + *out = *in + if in.MatchLabelExpressions != nil { + in, out := &in.MatchLabelExpressions, &out.MatchLabelExpressions + *out = make([]TopologySelectorLabelRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySelectorTerm. +func (in *TopologySelectorTerm) DeepCopy() *TopologySelectorTerm { + if in == nil { + return nil + } + out := new(TopologySelectorTerm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) { + *out = *in + if in.APIGroup != nil { + in, out := &in.APIGroup, &out.APIGroup + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedLocalObjectReference. +func (in *TypedLocalObjectReference) DeepCopy() *TypedLocalObjectReference { + if in == nil { + return nil + } + out := new(TypedLocalObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Volume) DeepCopyInto(out *Volume) { + *out = *in + in.VolumeSource.DeepCopyInto(&out.VolumeSource) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. +func (in *Volume) DeepCopy() *Volume { + if in == nil { + return nil + } + out := new(Volume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeDevice) DeepCopyInto(out *VolumeDevice) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeDevice. +func (in *VolumeDevice) DeepCopy() *VolumeDevice { + if in == nil { + return nil + } + out := new(VolumeDevice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeMount) DeepCopyInto(out *VolumeMount) { + *out = *in + if in.MountPropagation != nil { + in, out := &in.MountPropagation, &out.MountPropagation + *out = new(MountPropagationMode) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMount. +func (in *VolumeMount) DeepCopy() *VolumeMount { + if in == nil { + return nil + } + out := new(VolumeMount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeNodeAffinity) DeepCopyInto(out *VolumeNodeAffinity) { + *out = *in + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(NodeSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeAffinity. +func (in *VolumeNodeAffinity) DeepCopy() *VolumeNodeAffinity { + if in == nil { + return nil + } + out := new(VolumeNodeAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) { + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretProjection) + (*in).DeepCopyInto(*out) + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIProjection) + (*in).DeepCopyInto(*out) + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapProjection) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountToken != nil { + in, out := &in.ServiceAccountToken, &out.ServiceAccountToken + *out = new(ServiceAccountTokenProjection) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeProjection. +func (in *VolumeProjection) DeepCopy() *VolumeProjection { + if in == nil { + return nil + } + out := new(VolumeProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { + *out = *in + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(HostPathVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(EmptyDirVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + *out = new(GCEPersistentDiskVolumeSource) + **out = **in + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + *out = new(AWSElasticBlockStoreVolumeSource) + **out = **in + } + if in.GitRepo != nil { + in, out := &in.GitRepo, &out.GitRepo + *out = new(GitRepoVolumeSource) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(NFSVolumeSource) + **out = **in + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + *out = new(ISCSIVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + *out = new(GlusterfsVolumeSource) + **out = **in + } + if in.PersistentVolumeClaim != nil { + in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim + *out = new(PersistentVolumeClaimVolumeSource) + **out = **in + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + *out = new(RBDVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.Quobyte != nil { + in, out := &in.Quobyte, &out.Quobyte + *out = new(QuobyteVolumeSource) + **out = **in + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + *out = new(FlexVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + *out = new(CinderVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + *out = new(CephFSVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + *out = new(FlockerVolumeSource) + **out = **in + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.FC != nil { + in, out := &in.FC, &out.FC + *out = new(FCVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + *out = new(AzureFileVolumeSource) + **out = **in + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + *out = new(VsphereVirtualDiskVolumeSource) + **out = **in + } + if in.AzureDisk != nil { + in, out := &in.AzureDisk, &out.AzureDisk + *out = new(AzureDiskVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.PhotonPersistentDisk != nil { + in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk + *out = new(PhotonPersistentDiskVolumeSource) + **out = **in + } + if in.Projected != nil { + in, out := &in.Projected, &out.Projected + *out = new(ProjectedVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + *out = new(PortworxVolumeSource) + **out = **in + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + *out = new(ScaleIOVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.StorageOS != 
nil { + in, out := &in.StorageOS, &out.StorageOS + *out = new(StorageOSVolumeSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSource. +func (in *VolumeSource) DeepCopy() *VolumeSource { + if in == nil { + return nil + } + out := new(VolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VsphereVirtualDiskVolumeSource) DeepCopyInto(out *VsphereVirtualDiskVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VsphereVirtualDiskVolumeSource. +func (in *VsphereVirtualDiskVolumeSource) DeepCopy() *VsphereVirtualDiskVolumeSource { + if in == nil { + return nil + } + out := new(VsphereVirtualDiskVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WeightedPodAffinityTerm) DeepCopyInto(out *WeightedPodAffinityTerm) { + *out = *in + in.PodAffinityTerm.DeepCopyInto(&out.PodAffinityTerm) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedPodAffinityTerm. +func (in *WeightedPodAffinityTerm) DeepCopy() *WeightedPodAffinityTerm { + if in == nil { + return nil + } + out := new(WeightedPodAffinityTerm) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/BUILD new file mode 100644 index 0000000000..eab836ff81 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/BUILD @@ -0,0 +1,54 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "helpers.go", + "register.go", + "types.go", + "zz_generated.deepcopy.go", + ], + importpath = "k8s.io/kubernetes/pkg/kubelet/apis/config", + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/kubelet/apis/config/fuzzer:all-srcs", + "//pkg/kubelet/apis/config/scheme:all-srcs", + "//pkg/kubelet/apis/config/v1beta1:all-srcs", + "//pkg/kubelet/apis/config/validation:all-srcs", + ], + tags = ["automanaged"], +) + +go_test( + name = "go_default_test", + srcs = ["helpers_test.go"], + embed = [":go_default_library"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/OWNERS b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/OWNERS new file mode 100644 index 0000000000..7eed1121ed --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/OWNERS @@ -0,0 +1,4 @@ +approvers: +- mtaufen +reviewers: +- sig-node-reviewers diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/doc.go 
b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/doc.go new file mode 100644 index 0000000000..ad40e68340 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +groupName=kubelet.config.k8s.io + +package config // import "k8s.io/kubernetes/pkg/kubelet/apis/config" diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/helpers.go b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/helpers.go new file mode 100644 index 0000000000..0217fadb61 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/helpers.go @@ -0,0 +1,30 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +// KubeletConfigurationPathRefs returns pointers to all of the KubeletConfiguration fields that contain filepaths. +// You might use this, for example, to resolve all relative paths against some common root before +// passing the configuration to the application. This method must be kept up to date as new fields are added. +func KubeletConfigurationPathRefs(kc *KubeletConfiguration) []*string { + paths := []*string{} + paths = append(paths, &kc.StaticPodPath) + paths = append(paths, &kc.Authentication.X509.ClientCAFile) + paths = append(paths, &kc.TLSCertFile) + paths = append(paths, &kc.TLSPrivateKeyFile) + paths = append(paths, &kc.ResolverConfig) + return paths +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/register.go b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/register.go new file mode 100644 index 0000000000..cbebd990ab --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/register.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
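KubeletConfigurationPathRefs above returns pointers precisely so a caller can rewrite the path fields in place. A minimal sketch of the resolve-against-a-common-root usage its comment describes (vendored import path assumed, error handling elided):

package main

import (
	"fmt"
	"path/filepath"

	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
)

// resolvePaths rewrites every non-empty relative path field against root.
func resolvePaths(kc *kubeletconfig.KubeletConfiguration, root string) {
	for _, p := range kubeletconfig.KubeletConfigurationPathRefs(kc) {
		if *p != "" && !filepath.IsAbs(*p) {
			*p = filepath.Join(root, *p)
		}
	}
}

func main() {
	kc := &kubeletconfig.KubeletConfiguration{StaticPodPath: "manifests"}
	resolvePaths(kc, "/etc/kubernetes")
	fmt.Println(kc.StaticPodPath) // prints "/etc/kubernetes/manifests"
}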
+*/ + +package config + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "kubelet.config.k8s.io" + +// SchemeGroupVersion is the group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// addKnownTypes registers known types to the given scheme +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &KubeletConfiguration{}, + &SerializedNodeConfigSource{}, + ) + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go new file mode 100644 index 0000000000..941d837486 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go @@ -0,0 +1,382 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// HairpinMode denotes how the kubelet should configure networking to handle +// hairpin packets. +type HairpinMode string + +// Enum settings for different ways to handle hairpin packets. +const ( + // Set the hairpin flag on the veth of containers in the respective + // container runtime. + HairpinVeth = "hairpin-veth" + // Make the container bridge promiscuous. This will force it to accept + // hairpin packets, even if the flag isn't set on ports of the bridge. + PromiscuousBridge = "promiscuous-bridge" + // Neither of the above. If the kubelet is started in this hairpin mode + // and kube-proxy is running in iptables mode, hairpin packets will be + // dropped by the container bridge. + HairpinNone = "none" +) + +// ResourceChangeDetectionStrategy denotes a mode in which internal +// managers (secret, configmap) discover object changes. +type ResourceChangeDetectionStrategy string + +// Enum settings for different strategies of kubelet managers. +const ( + // GetChangeDetectionStrategy is a mode in which the kubelet fetches + // necessary objects directly from the apiserver. + GetChangeDetectionStrategy ResourceChangeDetectionStrategy = "Get" + // TTLCacheChangeDetectionStrategy is a mode in which the kubelet uses + // a TTL cache for objects directly fetched from the apiserver. + TTLCacheChangeDetectionStrategy ResourceChangeDetectionStrategy = "Cache" + // WatchChangeDetectionStrategy is a mode in which the kubelet uses + // watches to observe changes to objects that are of interest to it.
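register.go wires the group into the standard scheme-builder machinery, so a consumer registers the internal kubelet types like any other API group. A hedged sketch (import alias assumed):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
)

func main() {
	scheme := runtime.NewScheme()
	if err := kubeletconfig.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// The internal version registers under runtime.APIVersionInternal.
	gvk := kubeletconfig.SchemeGroupVersion.WithKind("KubeletConfiguration")
	fmt.Println(scheme.Recognizes(gvk)) // prints "true"
}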
+ WatchChangeDetectionStrategy ResourceChangeDetectionStrategy = "Watch" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeletConfiguration contains the configuration for the Kubelet +type KubeletConfiguration struct { + metav1.TypeMeta + + // staticPodPath is the path to the directory containing local (static) pods to + // run, or the path to a single static pod file. + StaticPodPath string + // syncFrequency is the max period between synchronizing running + // containers and config + SyncFrequency metav1.Duration + // fileCheckFrequency is the duration between checking config files for + // new data + FileCheckFrequency metav1.Duration + // httpCheckFrequency is the duration between checking http for new data + HTTPCheckFrequency metav1.Duration + // staticPodURL is the URL for accessing static pods to run + StaticPodURL string + // staticPodURLHeader is a map of slices with HTTP headers to use when accessing the podURL + StaticPodURLHeader map[string][]string + // address is the IP address for the Kubelet to serve on (set to 0.0.0.0 + // for all interfaces) + Address string + // port is the port for the Kubelet to serve on. + Port int32 + // readOnlyPort is the read-only port for the Kubelet to serve on with + // no authentication/authorization (set to 0 to disable) + ReadOnlyPort int32 + // tlsCertFile is the file containing x509 Certificate for HTTPS. (CA cert, + // if any, concatenated after server cert). If tlsCertFile and + // tlsPrivateKeyFile are not provided, a self-signed certificate + // and key are generated for the public address and saved to the directory + // passed to the Kubelet's --cert-dir flag. + TLSCertFile string + // tlsPrivateKeyFile is the file containing x509 private key matching tlsCertFile + TLSPrivateKeyFile string + // TLSCipherSuites is the list of allowed cipher suites for the server. + // Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). + TLSCipherSuites []string + // TLSMinVersion is the minimum TLS version supported. + // Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). + TLSMinVersion string + // rotateCertificates enables client certificate rotation. The Kubelet will request a + // new certificate from the certificates.k8s.io API. This requires an approver to approve the + // certificate signing requests. The RotateKubeletClientCertificate feature + // must be enabled. + RotateCertificates bool + // serverTLSBootstrap enables server certificate bootstrap. Instead of self + // signing a serving certificate, the Kubelet will request a certificate from + // the certificates.k8s.io API. This requires an approver to approve the + // certificate signing requests. The RotateKubeletServerCertificate feature + // must be enabled. + ServerTLSBootstrap bool + // authentication specifies how requests to the Kubelet's server are authenticated + Authentication KubeletAuthentication + // authorization specifies how requests to the Kubelet's server are authorized + Authorization KubeletAuthorization + // registryPullQPS is the limit of registry pulls per second. + // Set to 0 for no limit. + RegistryPullQPS int32 + // registryBurst is the maximum size of bursty pulls, temporarily allows + // pulls to burst to this number, while still not exceeding registryPullQPS. + // Only used if registryPullQPS > 0. + RegistryBurst int32 + // eventRecordQPS is the maximum event creations per second. If 0, there + // is no limit enforced. 
+ EventRecordQPS int32 + // eventBurst is the maximum size of a burst of event creations, temporarily + // allows event creations to burst to this number, while still not exceeding + // eventRecordQPS. Only used if eventRecordQPS > 0. + EventBurst int32 + // enableDebuggingHandlers enables server endpoints for log collection + // and local running of containers and commands + EnableDebuggingHandlers bool + // enableContentionProfiling enables lock contention profiling, if enableDebuggingHandlers is true. + EnableContentionProfiling bool + // healthzPort is the port of the localhost healthz endpoint (set to 0 to disable) + HealthzPort int32 + // healthzBindAddress is the IP address for the healthz server to serve on + HealthzBindAddress string + // oomScoreAdj is the oom-score-adj value for the kubelet process. Values + // must be within the range [-1000, 1000]. + OOMScoreAdj int32 + // clusterDomain is the DNS domain for this cluster. If set, kubelet will + // configure all containers to search this domain in addition to the + // host's search domains. + ClusterDomain string + // clusterDNS is a list of IP addresses for a cluster DNS server. If set, + // kubelet will configure all containers to use this for DNS resolution + // instead of the host's DNS servers. + ClusterDNS []string + // streamingConnectionIdleTimeout is the maximum time a streaming connection + // can be idle before the connection is automatically closed. + StreamingConnectionIdleTimeout metav1.Duration + // nodeStatusUpdateFrequency is the frequency that kubelet computes node + // status. If node lease feature is not enabled, it is also the frequency that + // kubelet posts node status to master. In that case, be cautious when + // changing the constant; it must work with nodeMonitorGracePeriod in nodecontroller. + NodeStatusUpdateFrequency metav1.Duration + // nodeStatusReportFrequency is the frequency that kubelet posts node + // status to master if node status does not change. Kubelet will ignore this + // frequency and post node status immediately if any change is detected. It is + // only used when node lease feature is enabled. + NodeStatusReportFrequency metav1.Duration + // nodeLeaseDurationSeconds is the duration the Kubelet will set on its corresponding Lease. + NodeLeaseDurationSeconds int32 + // imageMinimumGCAge is the minimum age for an unused image before it is + // garbage collected. + ImageMinimumGCAge metav1.Duration + // imageGCHighThresholdPercent is the percent of disk usage after which + // image garbage collection is always run. The percent is calculated as + // this field value out of 100. + ImageGCHighThresholdPercent int32 + // imageGCLowThresholdPercent is the percent of disk usage before which + // image garbage collection is never run. Lowest disk usage to garbage + // collect to. The percent is calculated as this field value out of 100. + ImageGCLowThresholdPercent int32 + // How frequently to calculate and cache volume disk usage for all pods + VolumeStatsAggPeriod metav1.Duration + // KubeletCgroups is the absolute name of cgroups to isolate the kubelet in + KubeletCgroups string + // SystemCgroups is the absolute name of cgroups in which to place + // all non-kernel processes that are not already in a container. Empty + // for no container. Rolling back the flag requires a reboot. + SystemCgroups string + // CgroupRoot is the root cgroup to use for pods. + // If CgroupsPerQOS is enabled, this is the root of the QoS cgroup hierarchy.
+	CgroupRoot string
+	// Enable QoS-based cgroup hierarchy: top-level cgroups for QoS classes,
+	// with all Burstable and BestEffort pods brought up under their
+	// specific top-level QoS cgroup.
+	CgroupsPerQOS bool
+	// driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd)
+	CgroupDriver string
+	// CPUManagerPolicy is the name of the policy to use.
+	// Requires the CPUManager feature gate to be enabled.
+	CPUManagerPolicy string
+	// CPU Manager reconciliation period.
+	// Requires the CPUManager feature gate to be enabled.
+	CPUManagerReconcilePeriod metav1.Duration
+	// Map of QoS resource reservation percentages (memory only for now).
+	// Requires the QOSReserved feature gate to be enabled.
+	QOSReserved map[string]string
+	// runtimeRequestTimeout is the timeout for all runtime requests except long running
+	// requests - pull, logs, exec and attach.
+	RuntimeRequestTimeout metav1.Duration
+	// hairpinMode specifies how the Kubelet should configure the container
+	// bridge for hairpin packets.
+	// Setting this flag allows endpoints in a Service to load balance back to
+	// themselves if they should try to access their own Service. Values:
+	// "promiscuous-bridge": make the container bridge promiscuous.
+	// "hairpin-veth": set the hairpin flag on container veth interfaces.
+	// "none": do nothing.
+	// Generally, one must set --hairpin-mode=hairpin-veth to achieve hairpin NAT,
+	// because promiscuous-bridge assumes the existence of a container bridge named cbr0.
+	HairpinMode string
+	// maxPods is the number of pods that can run on this Kubelet.
+	MaxPods int32
+	// The CIDR to use for pod IP addresses, only used in standalone mode.
+	// In cluster mode, this is obtained from the master.
+	PodCIDR string
+	// PodPidsLimit is the maximum number of pids in any pod.
+	PodPidsLimit int64
+	// ResolverConfig is the resolver configuration file used as the basis
+	// for the container DNS resolution configuration.
+	ResolverConfig string
+	// cpuCFSQuota enables CPU CFS quota enforcement for containers that
+	// specify CPU limits
+	CPUCFSQuota bool
+	// CPUCFSQuotaPeriod sets the CPU CFS quota period value, cpu.cfs_period_us, defaults to 100ms
+	CPUCFSQuotaPeriod metav1.Duration
+	// maxOpenFiles is the number of files that can be opened by the Kubelet process.
+	MaxOpenFiles int64
+	// contentType is the content type of requests sent to the apiserver.
+	ContentType string
+	// kubeAPIQPS is the QPS to use while talking with the kubernetes apiserver
+	KubeAPIQPS int32
+	// kubeAPIBurst is the burst to allow while talking with the kubernetes
+	// apiserver
+	KubeAPIBurst int32
+	// serializeImagePulls, when enabled, tells the Kubelet to pull images one at a time.
+	SerializeImagePulls bool
+	// Map of signal names to quantities that defines hard eviction thresholds. For example: {"memory.available": "300Mi"}.
+	EvictionHard map[string]string
+	// Map of signal names to quantities that defines soft eviction thresholds. For example: {"memory.available": "300Mi"}.
+	EvictionSoft map[string]string
+	// Map of signal names to quantities that defines grace periods for each soft eviction signal. For example: {"memory.available": "30s"}.
+	EvictionSoftGracePeriod map[string]string
+	// Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition.
+	EvictionPressureTransitionPeriod metav1.Duration
+	// Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.
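+	// For example: with EvictionSoft {"memory.available": "500Mi"},
+	// EvictionSoftGracePeriod {"memory.available": "90s"} and this field set
+	// to 60, the signal must stay below its threshold for 90s before pods are
+	// evicted, and each evicted pod gets at most 60s to terminate.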
+ EvictionMaxPodGracePeriod int32 + // Map of signal names to quantities that defines minimum reclaims, which describe the minimum + // amount of a given resource the kubelet will reclaim when performing a pod eviction while + // that resource is under pressure. For example: {"imagefs.available": "2Gi"} + EvictionMinimumReclaim map[string]string + // podsPerCore is the maximum number of pods per core. Cannot exceed MaxPods. + // If 0, this field is ignored. + PodsPerCore int32 + // enableControllerAttachDetach enables the Attach/Detach controller to + // manage attachment/detachment of volumes scheduled to this node, and + // disables kubelet from executing any attach/detach operations + EnableControllerAttachDetach bool + // protectKernelDefaults, if true, causes the Kubelet to error if kernel + // flags are not as it expects. Otherwise the Kubelet will attempt to modify + // kernel flags to match its expectation. + ProtectKernelDefaults bool + // If true, Kubelet ensures a set of iptables rules are present on host. + // These rules will serve as utility for various components, e.g. kube-proxy. + // The rules will be created based on IPTablesMasqueradeBit and IPTablesDropBit. + MakeIPTablesUtilChains bool + // iptablesMasqueradeBit is the bit of the iptables fwmark space to mark for SNAT + // Values must be within the range [0, 31]. Must be different from other mark bits. + // Warning: Please match the value of the corresponding parameter in kube-proxy. + // TODO: clean up IPTablesMasqueradeBit in kube-proxy + IPTablesMasqueradeBit int32 + // iptablesDropBit is the bit of the iptables fwmark space to mark for dropping packets. + // Values must be within the range [0, 31]. Must be different from other mark bits. + IPTablesDropBit int32 + // featureGates is a map of feature names to bools that enable or disable alpha/experimental + // features. This field modifies piecemeal the built-in default values from + // "k8s.io/kubernetes/pkg/features/kube_features.go". + FeatureGates map[string]bool + // Tells the Kubelet to fail to start if swap is enabled on the node. + FailSwapOn bool + // A quantity defines the maximum size of the container log file before it is rotated. For example: "5Mi" or "256Ki". + ContainerLogMaxSize string + // Maximum number of container log files that can be present for a container. + ContainerLogMaxFiles int32 + // ConfigMapAndSecretChangeDetectionStrategy is a mode in which config map and secret managers are running. + ConfigMapAndSecretChangeDetectionStrategy ResourceChangeDetectionStrategy + + /* the following fields are meant for Node Allocatable */ + + // A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs + // that describe resources reserved for non-kubernetes components. + // Currently only cpu and memory are supported. + // See http://kubernetes.io/docs/user-guide/compute-resources for more detail. + SystemReserved map[string]string + // A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs + // that describe resources reserved for kubernetes system components. + // Currently cpu, memory and local ephemeral storage for root file system are supported. + // See http://kubernetes.io/docs/user-guide/compute-resources for more detail. + KubeReserved map[string]string + // This flag helps kubelet identify absolute name of top level cgroup used to enforce `SystemReserved` compute resource reservation for OS system daemons. 
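+	// As an illustrative example: SystemReserved {"cpu": "500m", "memory": "1Gi"}
+	// combined with SystemReservedCgroup "/system.slice" carves that capacity out
+	// of the node's allocatable resources on behalf of OS daemons.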
+ // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. + SystemReservedCgroup string + // This flag helps kubelet identify absolute name of top level cgroup used to enforce `KubeReserved` compute resource reservation for Kubernetes node system daemons. + // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. + KubeReservedCgroup string + // This flag specifies the various Node Allocatable enforcements that Kubelet needs to perform. + // This flag accepts a list of options. Acceptable options are `pods`, `system-reserved` & `kube-reserved`. + // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. + EnforceNodeAllocatable []string +} + +type KubeletAuthorizationMode string + +const ( + // KubeletAuthorizationModeAlwaysAllow authorizes all authenticated requests + KubeletAuthorizationModeAlwaysAllow KubeletAuthorizationMode = "AlwaysAllow" + // KubeletAuthorizationModeWebhook uses the SubjectAccessReview API to determine authorization + KubeletAuthorizationModeWebhook KubeletAuthorizationMode = "Webhook" +) + +type KubeletAuthorization struct { + // mode is the authorization mode to apply to requests to the kubelet server. + // Valid values are AlwaysAllow and Webhook. + // Webhook mode uses the SubjectAccessReview API to determine authorization. + Mode KubeletAuthorizationMode + + // webhook contains settings related to Webhook authorization. + Webhook KubeletWebhookAuthorization +} + +type KubeletWebhookAuthorization struct { + // cacheAuthorizedTTL is the duration to cache 'authorized' responses from the webhook authorizer. + CacheAuthorizedTTL metav1.Duration + // cacheUnauthorizedTTL is the duration to cache 'unauthorized' responses from the webhook authorizer. + CacheUnauthorizedTTL metav1.Duration +} + +type KubeletAuthentication struct { + // x509 contains settings related to x509 client certificate authentication + X509 KubeletX509Authentication + // webhook contains settings related to webhook bearer token authentication + Webhook KubeletWebhookAuthentication + // anonymous contains settings related to anonymous authentication + Anonymous KubeletAnonymousAuthentication +} + +type KubeletX509Authentication struct { + // clientCAFile is the path to a PEM-encoded certificate bundle. If set, any request presenting a client certificate + // signed by one of the authorities in the bundle is authenticated with a username corresponding to the CommonName, + // and groups corresponding to the Organization in the client certificate. + ClientCAFile string +} + +type KubeletWebhookAuthentication struct { + // enabled allows bearer token authentication backed by the tokenreviews.authentication.k8s.io API + Enabled bool + // cacheTTL enables caching of authentication results + CacheTTL metav1.Duration +} + +type KubeletAnonymousAuthentication struct { + // enabled allows anonymous requests to the kubelet server. + // Requests that are not rejected by another authentication method are treated as anonymous requests. + // Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated. 
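+	// A minimal sketch (illustrative only, using the types in this file, with a
+	// hypothetical CA path): disable anonymous requests and delegate
+	// authentication/authorization to the API server:
+	//
+	//	auth := KubeletAuthentication{
+	//		X509:      KubeletX509Authentication{ClientCAFile: "/etc/kubernetes/pki/ca.crt"},
+	//		Webhook:   KubeletWebhookAuthentication{Enabled: true},
+	//		Anonymous: KubeletAnonymousAuthentication{Enabled: false},
+	//	}
+	//	authz := KubeletAuthorization{Mode: KubeletAuthorizationModeWebhook}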
+ Enabled bool +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SerializedNodeConfigSource allows us to serialize NodeConfigSource +// This type is used internally by the Kubelet for tracking checkpointed dynamic configs. +// It exists in the kubeletconfig API group because it is classified as a versioned input to the Kubelet. +type SerializedNodeConfigSource struct { + metav1.TypeMeta + // Source is the source that we are serializing + // +optional + Source v1.NodeConfigSource +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/zz_generated.deepcopy.go new file mode 100644 index 0000000000..1644f968c5 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/zz_generated.deepcopy.go @@ -0,0 +1,279 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package config + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletAnonymousAuthentication) DeepCopyInto(out *KubeletAnonymousAuthentication) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletAnonymousAuthentication. +func (in *KubeletAnonymousAuthentication) DeepCopy() *KubeletAnonymousAuthentication { + if in == nil { + return nil + } + out := new(KubeletAnonymousAuthentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletAuthentication) DeepCopyInto(out *KubeletAuthentication) { + *out = *in + out.X509 = in.X509 + out.Webhook = in.Webhook + out.Anonymous = in.Anonymous + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletAuthentication. +func (in *KubeletAuthentication) DeepCopy() *KubeletAuthentication { + if in == nil { + return nil + } + out := new(KubeletAuthentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletAuthorization) DeepCopyInto(out *KubeletAuthorization) { + *out = *in + out.Webhook = in.Webhook + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletAuthorization. +func (in *KubeletAuthorization) DeepCopy() *KubeletAuthorization { + if in == nil { + return nil + } + out := new(KubeletAuthorization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + out.SyncFrequency = in.SyncFrequency + out.FileCheckFrequency = in.FileCheckFrequency + out.HTTPCheckFrequency = in.HTTPCheckFrequency + if in.StaticPodURLHeader != nil { + in, out := &in.StaticPodURLHeader, &out.StaticPodURLHeader + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.TLSCipherSuites != nil { + in, out := &in.TLSCipherSuites, &out.TLSCipherSuites + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.Authentication = in.Authentication + out.Authorization = in.Authorization + if in.ClusterDNS != nil { + in, out := &in.ClusterDNS, &out.ClusterDNS + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.StreamingConnectionIdleTimeout = in.StreamingConnectionIdleTimeout + out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency + out.NodeStatusReportFrequency = in.NodeStatusReportFrequency + out.ImageMinimumGCAge = in.ImageMinimumGCAge + out.VolumeStatsAggPeriod = in.VolumeStatsAggPeriod + out.CPUManagerReconcilePeriod = in.CPUManagerReconcilePeriod + if in.QOSReserved != nil { + in, out := &in.QOSReserved, &out.QOSReserved + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.RuntimeRequestTimeout = in.RuntimeRequestTimeout + out.CPUCFSQuotaPeriod = in.CPUCFSQuotaPeriod + if in.EvictionHard != nil { + in, out := &in.EvictionHard, &out.EvictionHard + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.EvictionSoft != nil { + in, out := &in.EvictionSoft, &out.EvictionSoft + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.EvictionSoftGracePeriod != nil { + in, out := &in.EvictionSoftGracePeriod, &out.EvictionSoftGracePeriod + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod + if in.EvictionMinimumReclaim != nil { + in, out := &in.EvictionMinimumReclaim, &out.EvictionMinimumReclaim + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.SystemReserved != nil { + in, out := &in.SystemReserved, &out.SystemReserved + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.KubeReserved != nil { + in, out := &in.KubeReserved, &out.KubeReserved + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.EnforceNodeAllocatable != nil { + in, out := &in.EnforceNodeAllocatable, &out.EnforceNodeAllocatable + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfiguration. 
+func (in *KubeletConfiguration) DeepCopy() *KubeletConfiguration { + if in == nil { + return nil + } + out := new(KubeletConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeletConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletWebhookAuthentication) DeepCopyInto(out *KubeletWebhookAuthentication) { + *out = *in + out.CacheTTL = in.CacheTTL + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletWebhookAuthentication. +func (in *KubeletWebhookAuthentication) DeepCopy() *KubeletWebhookAuthentication { + if in == nil { + return nil + } + out := new(KubeletWebhookAuthentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletWebhookAuthorization) DeepCopyInto(out *KubeletWebhookAuthorization) { + *out = *in + out.CacheAuthorizedTTL = in.CacheAuthorizedTTL + out.CacheUnauthorizedTTL = in.CacheUnauthorizedTTL + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletWebhookAuthorization. +func (in *KubeletWebhookAuthorization) DeepCopy() *KubeletWebhookAuthorization { + if in == nil { + return nil + } + out := new(KubeletWebhookAuthorization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletX509Authentication) DeepCopyInto(out *KubeletX509Authentication) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletX509Authentication. +func (in *KubeletX509Authentication) DeepCopy() *KubeletX509Authentication { + if in == nil { + return nil + } + out := new(KubeletX509Authentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SerializedNodeConfigSource) DeepCopyInto(out *SerializedNodeConfigSource) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Source.DeepCopyInto(&out.Source) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializedNodeConfigSource. +func (in *SerializedNodeConfigSource) DeepCopy() *SerializedNodeConfigSource { + if in == nil { + return nil + } + out := new(SerializedNodeConfigSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *SerializedNodeConfigSource) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/BUILD b/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/BUILD new file mode 100644 index 0000000000..475d4233b4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/BUILD @@ -0,0 +1,42 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "register.go", + "types.go", + "zz_generated.deepcopy.go", + ], + importpath = "k8s.io/kubernetes/pkg/proxy/apis/config", + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/apis/config:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/proxy/apis/config/fuzzer:all-srcs", + "//pkg/proxy/apis/config/scheme:all-srcs", + "//pkg/proxy/apis/config/v1alpha1:all-srcs", + "//pkg/proxy/apis/config/validation:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/OWNERS b/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/OWNERS new file mode 100644 index 0000000000..34823356d2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/OWNERS @@ -0,0 +1,4 @@ +approvers: +- thockin +reviewers: +- sig-network-reviewers diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/doc.go b/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/doc.go new file mode 100644 index 0000000000..64cad5ced1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +groupName=kubeproxy.config.k8s.io + +package config // import "k8s.io/kubernetes/pkg/proxy/apis/config" diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/register.go b/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/register.go new file mode 100644 index 0000000000..fda569221c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/register.go @@ -0,0 +1,43 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "kubeproxy.config.k8s.io"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+var (
+	// SchemeBuilder is the scheme builder with scheme init functions to run for this API package
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	// AddToScheme is a global function that registers this API group & version to a scheme
+	AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// addKnownTypes registers known types to the given scheme
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&KubeProxyConfiguration{},
+	)
+	return nil
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/types.go b/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/types.go
new file mode 100644
index 0000000000..dacee32bb3
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/types.go
@@ -0,0 +1,253 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	apimachineryconfig "k8s.io/apimachinery/pkg/apis/config"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// KubeProxyIPTablesConfiguration contains iptables-related configuration
+// details for the Kubernetes proxy server.
+type KubeProxyIPTablesConfiguration struct {
+	// masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using
+	// the pure iptables proxy mode. Values must be within the range [0, 31].
+	MasqueradeBit *int32
+	// masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode.
+	MasqueradeAll bool
+	// syncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m',
+	// '2h22m'). Must be greater than 0.
+	SyncPeriod metav1.Duration
+	// minSyncPeriod is the minimum period that iptables rules are refreshed (e.g. '5s', '1m',
+	// '2h22m').
+	MinSyncPeriod metav1.Duration
+}
+
+// KubeProxyIPVSConfiguration contains ipvs-related configuration
+// details for the Kubernetes proxy server.
+type KubeProxyIPVSConfiguration struct {
+	// syncPeriod is the period that ipvs rules are refreshed (e.g. '5s', '1m',
+	// '2h22m'). Must be greater than 0.
+	SyncPeriod metav1.Duration
+	// minSyncPeriod is the minimum period that ipvs rules are refreshed (e.g. '5s', '1m',
+	// '2h22m').
+	MinSyncPeriod metav1.Duration
+	// scheduler is the ipvs scheduler to use
+	Scheduler string
+	// excludeCIDRs is a list of CIDRs which the ipvs proxier should not touch
+	// when cleaning up ipvs services.
+	ExcludeCIDRs []string
+}
+
+// KubeProxyConntrackConfiguration contains conntrack settings for
+// the Kubernetes proxy server.
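+//
+// A minimal sketch (illustrative only): track up to 128k connections and keep
+// idle established TCP flows for a day:
+//
+//	max := int32(131072)
+//	ct := KubeProxyConntrackConfiguration{
+//		Max:                   &max,
+//		TCPEstablishedTimeout: &metav1.Duration{Duration: 24 * time.Hour},
+//	}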
+type KubeProxyConntrackConfiguration struct {
+	// max is the maximum number of NAT connections to track (0 to
+	// leave as-is). This takes precedence over maxPerCore and min.
+	Max *int32
+	// maxPerCore is the maximum number of NAT connections to track
+	// per CPU core (0 to leave the limit as-is and ignore min).
+	MaxPerCore *int32
+	// min is the minimum number of connection-tracking records to allocate,
+	// regardless of maxPerCore (set maxPerCore=0 to leave the limit as-is).
+	Min *int32
+	// tcpEstablishedTimeout is how long an idle TCP connection will be kept open
+	// (e.g. '2s'). Must be greater than 0 to set.
+	TCPEstablishedTimeout *metav1.Duration
+	// tcpCloseWaitTimeout is how long an idle conntrack entry
+	// in CLOSE_WAIT state will remain in the conntrack
+	// table (e.g. '60s'). Must be greater than 0 to set.
+	TCPCloseWaitTimeout *metav1.Duration
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// KubeProxyConfiguration contains everything necessary to configure the
+// Kubernetes proxy server.
+type KubeProxyConfiguration struct {
+	metav1.TypeMeta
+
+	// featureGates is a map of feature names to bools that enable or disable alpha/experimental features.
+	FeatureGates map[string]bool
+
+	// bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0
+	// for all interfaces)
+	BindAddress string
+	// healthzBindAddress is the IP address and port for the health check server to serve on,
+	// defaulting to 0.0.0.0:10256
+	HealthzBindAddress string
+	// metricsBindAddress is the IP address and port for the metrics server to serve on,
+	// defaulting to 127.0.0.1:10249 (set to 0.0.0.0 for all interfaces)
+	MetricsBindAddress string
+	// enableProfiling enables profiling via the web interface on the /debug/pprof handler.
+	// Profiling handlers will be handled by the metrics server.
+	EnableProfiling bool
+	// clusterCIDR is the CIDR range of the pods in the cluster. It is used to
+	// bridge traffic coming from outside of the cluster. If not provided,
+	// no off-cluster bridging will be performed.
+	ClusterCIDR string
+	// hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname.
+	HostnameOverride string
+	// clientConnection specifies the kubeconfig file and client connection settings for the proxy
+	// server to use when communicating with the apiserver.
+	ClientConnection apimachineryconfig.ClientConnectionConfiguration
+	// iptables contains iptables-related configuration options.
+	IPTables KubeProxyIPTablesConfiguration
+	// ipvs contains ipvs-related configuration options.
+	IPVS KubeProxyIPVSConfiguration
+	// oomScoreAdj is the oom-score-adj value for the kube-proxy process. Values must be within
+	// the range [-1000, 1000]
+	OOMScoreAdj *int32
+	// mode specifies which proxy mode to use.
+	Mode ProxyMode
+	// portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed
+	// in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.
+	PortRange string
+	// resourceContainer is the absolute name of the resource-only container to create and run
+	// the Kube-proxy in (Default: /kube-proxy).
+	ResourceContainer string
+	// udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s').
+	// Must be greater than 0. Only applicable for proxyMode=userspace.
+	UDPIdleTimeout metav1.Duration
+	// conntrack contains conntrack-related configuration options.
+	Conntrack KubeProxyConntrackConfiguration
+	// configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater
+	// than 0.
+	ConfigSyncPeriod metav1.Duration
+	// nodePortAddresses is the --nodeport-addresses value for the kube-proxy process. Values must be valid
+	// IP blocks. These values are used as a parameter to select the interfaces on which NodePorts work.
+	// If someone would like to expose a service on localhost for local visits and on some other interfaces for
+	// particular purposes, a list of IP blocks would do that.
+	// If set to "127.0.0.0/8", kube-proxy will only select the loopback interface for NodePort.
+	// If set to a non-zero IP block, kube-proxy will filter that down to just the IPs that apply to the node.
+	// An empty string slice is meant to select all network interfaces.
+	NodePortAddresses []string
+}
+
+// Currently, three proxy modes are available on Linux: 'userspace' (older, going to be EOL), 'iptables'
+// (newer, faster), and 'ipvs' (newest, better in performance and scalability).
+//
+// Two proxy modes are available on Windows: 'userspace' (older, stable) and 'kernelspace' (newer, faster).
+//
+// On Linux, if the proxy mode is blank, the best-available proxy is used (currently iptables, but this may
+// change in the future). If the iptables proxy is selected, regardless of how, but the system's kernel or
+// iptables versions are insufficient, kube-proxy always falls back to the userspace proxy. IPVS mode is
+// enabled when the proxy mode is set to 'ipvs', and its fallback path is first iptables and then userspace.

+// On Windows, if the proxy mode is blank, the best-available proxy is used (currently userspace, but this
+// may change in the future). If the winkernel proxy is selected, regardless of how, but the Windows kernel
+// can't support this proxy mode, kube-proxy always falls back to the userspace proxy.
+type ProxyMode string
+
+const (
+	ProxyModeUserspace ProxyMode = "userspace"
+	ProxyModeIPTables ProxyMode = "iptables"
+	ProxyModeIPVS ProxyMode = "ipvs"
+	ProxyModeKernelspace ProxyMode = "kernelspace"
+)
+
+// IPVSSchedulerMethod is the algorithm for allocating TCP connections and
+// UDP datagrams to real servers. Scheduling algorithms are implemented as
+// kernel modules. Ten are shipped with the Linux Virtual Server.
+type IPVSSchedulerMethod string
+
+const (
+	// RoundRobin distributes jobs equally amongst the available real servers.
+	RoundRobin IPVSSchedulerMethod = "rr"
+	// WeightedRoundRobin assigns jobs to real servers proportionally to the real servers' weight.
+	// Servers with higher weights receive new jobs first and get more jobs than servers with lower weights.
+	// Servers with equal weights get an equal distribution of new jobs.
+	WeightedRoundRobin IPVSSchedulerMethod = "wrr"
+	// LeastConnection assigns more jobs to real servers with fewer active jobs.
+	LeastConnection IPVSSchedulerMethod = "lc"
+	// WeightedLeastConnection assigns more jobs to servers with fewer jobs,
+	// relative to the real servers' weight (Ci/Wi).
+	WeightedLeastConnection IPVSSchedulerMethod = "wlc"
+	// LocalityBasedLeastConnection assigns jobs destined for the same IP address to the same server if
+	// the server is not overloaded and available; otherwise it assigns jobs to servers with fewer jobs,
+	// and keeps it for future assignment.
+	LocalityBasedLeastConnection IPVSSchedulerMethod = "lblc"
+	// LocalityBasedLeastConnectionWithReplication assigns jobs destined for the same IP address to the
+	// least-connection node in the server set for the IP address. If all the nodes in the server set are overloaded,
+	// it picks up a node with fewer jobs in the cluster and adds it to the server set for the target.
+	// If the server set has not been modified for the specified time, the most loaded node is removed from the server set,
+	// in order to avoid a high degree of replication.
+	LocalityBasedLeastConnectionWithReplication IPVSSchedulerMethod = "lblcr"
+	// SourceHashing assigns jobs to servers by looking up a statically assigned hash table
+	// by their source IP addresses.
+	SourceHashing IPVSSchedulerMethod = "sh"
+	// DestinationHashing assigns jobs to servers by looking up a statically assigned hash table
+	// by their destination IP addresses.
+	DestinationHashing IPVSSchedulerMethod = "dh"
+	// ShortestExpectedDelay assigns an incoming job to the server with the shortest expected delay.
+	// The expected delay that the job will experience is (Ci + 1) / Ui if sent to the ith server, in which
+	// Ci is the number of jobs on the ith server and Ui is the fixed service rate (weight) of the ith server.
+	ShortestExpectedDelay IPVSSchedulerMethod = "sed"
+	// NeverQueue assigns an incoming job to an idle server if there is one, instead of waiting for a fast one;
+	// if all the servers are busy, it adopts the ShortestExpectedDelay policy to assign the job.
+	NeverQueue IPVSSchedulerMethod = "nq"
+)
+
+func (m *ProxyMode) Set(s string) error {
+	*m = ProxyMode(s)
+	return nil
+}
+
+func (m *ProxyMode) String() string {
+	if m != nil {
+		return string(*m)
+	}
+	return ""
+}
+
+func (m *ProxyMode) Type() string {
+	return "ProxyMode"
+}
+
+type ConfigurationMap map[string]string
+
+func (m *ConfigurationMap) String() string {
+	pairs := []string{}
+	for k, v := range *m {
+		pairs = append(pairs, fmt.Sprintf("%s=%s", k, v))
+	}
+	sort.Strings(pairs)
+	return strings.Join(pairs, ",")
+}
+
+func (m *ConfigurationMap) Set(value string) error {
+	for _, s := range strings.Split(value, ",") {
+		if len(s) == 0 {
+			continue
+		}
+		arr := strings.SplitN(s, "=", 2)
+		if len(arr) == 2 {
+			(*m)[strings.TrimSpace(arr[0])] = strings.TrimSpace(arr[1])
+		} else {
+			(*m)[strings.TrimSpace(arr[0])] = ""
+		}
+	}
+	return nil
+}
+
+func (*ConfigurationMap) Type() string {
+	return "mapStringString"
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..983b5113b7
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/zz_generated.deepcopy.go
@@ -0,0 +1,183 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+ +package config + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ConfigurationMap) DeepCopyInto(out *ConfigurationMap) { + { + in := &in + *out = make(ConfigurationMap, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigurationMap. +func (in ConfigurationMap) DeepCopy() ConfigurationMap { + if in == nil { + return nil + } + out := new(ConfigurationMap) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeProxyConfiguration) DeepCopyInto(out *KubeProxyConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.ClientConnection = in.ClientConnection + in.IPTables.DeepCopyInto(&out.IPTables) + in.IPVS.DeepCopyInto(&out.IPVS) + if in.OOMScoreAdj != nil { + in, out := &in.OOMScoreAdj, &out.OOMScoreAdj + *out = new(int32) + **out = **in + } + out.UDPIdleTimeout = in.UDPIdleTimeout + in.Conntrack.DeepCopyInto(&out.Conntrack) + out.ConfigSyncPeriod = in.ConfigSyncPeriod + if in.NodePortAddresses != nil { + in, out := &in.NodePortAddresses, &out.NodePortAddresses + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConfiguration. +func (in *KubeProxyConfiguration) DeepCopy() *KubeProxyConfiguration { + if in == nil { + return nil + } + out := new(KubeProxyConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeProxyConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeProxyConntrackConfiguration) DeepCopyInto(out *KubeProxyConntrackConfiguration) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(int32) + **out = **in + } + if in.MaxPerCore != nil { + in, out := &in.MaxPerCore, &out.MaxPerCore + *out = new(int32) + **out = **in + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(int32) + **out = **in + } + if in.TCPEstablishedTimeout != nil { + in, out := &in.TCPEstablishedTimeout, &out.TCPEstablishedTimeout + *out = new(v1.Duration) + **out = **in + } + if in.TCPCloseWaitTimeout != nil { + in, out := &in.TCPCloseWaitTimeout, &out.TCPCloseWaitTimeout + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConntrackConfiguration. +func (in *KubeProxyConntrackConfiguration) DeepCopy() *KubeProxyConntrackConfiguration { + if in == nil { + return nil + } + out := new(KubeProxyConntrackConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeProxyIPTablesConfiguration) DeepCopyInto(out *KubeProxyIPTablesConfiguration) { + *out = *in + if in.MasqueradeBit != nil { + in, out := &in.MasqueradeBit, &out.MasqueradeBit + *out = new(int32) + **out = **in + } + out.SyncPeriod = in.SyncPeriod + out.MinSyncPeriod = in.MinSyncPeriod + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPTablesConfiguration. +func (in *KubeProxyIPTablesConfiguration) DeepCopy() *KubeProxyIPTablesConfiguration { + if in == nil { + return nil + } + out := new(KubeProxyIPTablesConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeProxyIPVSConfiguration) DeepCopyInto(out *KubeProxyIPVSConfiguration) { + *out = *in + out.SyncPeriod = in.SyncPeriod + out.MinSyncPeriod = in.MinSyncPeriod + if in.ExcludeCIDRs != nil { + in, out := &in.ExcludeCIDRs, &out.ExcludeCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPVSConfiguration. +func (in *KubeProxyIPVSConfiguration) DeepCopy() *KubeProxyIPVSConfiguration { + if in == nil { + return nil + } + out := new(KubeProxyIPVSConfiguration) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/BUILD new file mode 100644 index 0000000000..3728f20744 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/BUILD @@ -0,0 +1,43 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = [ + "bitmap.go", + "interfaces.go", + "utils.go", + ], + importpath = "k8s.io/kubernetes/pkg/registry/core/service/allocator", +) + +go_test( + name = "go_default_test", + srcs = [ + "bitmap_test.go", + "utils_test.go", + ], + embed = [":go_default_library"], + deps = ["//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/registry/core/service/allocator/storage:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/bitmap.go b/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/bitmap.go new file mode 100644 index 0000000000..84c09eadfc --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/bitmap.go @@ -0,0 +1,236 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package allocator + +import ( + "errors" + "math/big" + "math/rand" + "sync" + "time" +) + +// AllocationBitmap is a contiguous block of resources that can be allocated atomically. +// +// Each resource has an offset. The internal structure is a bitmap, with a bit for each offset. +// +// If a resource is taken, the bit at that offset is set to one. +// r.count is always equal to the number of set bits and can be recalculated at any time +// by counting the set bits in r.allocated. +// +// TODO: use RLE and compact the allocator to minimize space. +type AllocationBitmap struct { + // strategy carries the details of how to choose the next available item out of the range + strategy bitAllocator + // max is the maximum size of the usable items in the range + max int + // rangeSpec is the range specifier, matching RangeAllocation.Range + rangeSpec string + + // lock guards the following members + lock sync.Mutex + // count is the number of currently allocated elements in the range + count int + // allocated is a bit array of the allocated items in the range + allocated *big.Int +} + +// AllocationBitmap implements Interface and Snapshottable +var _ Interface = &AllocationBitmap{} +var _ Snapshottable = &AllocationBitmap{} + +// bitAllocator represents a search strategy in the allocation map for a valid item. +type bitAllocator interface { + AllocateBit(allocated *big.Int, max, count int) (int, bool) +} + +// NewAllocationMap creates an allocation bitmap using the random scan strategy. +func NewAllocationMap(max int, rangeSpec string) *AllocationBitmap { + a := AllocationBitmap{ + strategy: randomScanStrategy{ + rand: rand.New(rand.NewSource(time.Now().UnixNano())), + }, + allocated: big.NewInt(0), + count: 0, + max: max, + rangeSpec: rangeSpec, + } + return &a +} + +// NewContiguousAllocationMap creates an allocation bitmap using the contiguous scan strategy. +func NewContiguousAllocationMap(max int, rangeSpec string) *AllocationBitmap { + a := AllocationBitmap{ + strategy: contiguousScanStrategy{}, + allocated: big.NewInt(0), + count: 0, + max: max, + rangeSpec: rangeSpec, + } + return &a +} + +// Allocate attempts to reserve the provided item. +// Returns true if it was allocated, false if it was already in use +func (r *AllocationBitmap) Allocate(offset int) (bool, error) { + r.lock.Lock() + defer r.lock.Unlock() + + if r.allocated.Bit(offset) == 1 { + return false, nil + } + r.allocated = r.allocated.SetBit(r.allocated, offset, 1) + r.count++ + return true, nil +} + +// AllocateNext reserves one of the items from the pool. +// (0, false, nil) may be returned if there are no items left. +func (r *AllocationBitmap) AllocateNext() (int, bool, error) { + r.lock.Lock() + defer r.lock.Unlock() + + next, ok := r.strategy.AllocateBit(r.allocated, r.max, r.count) + if !ok { + return 0, false, nil + } + r.count++ + r.allocated = r.allocated.SetBit(r.allocated, next, 1) + return next, true, nil +} + +// Release releases the item back to the pool. Releasing an +// unallocated item or an item out of the range is a no-op and +// returns no error. +func (r *AllocationBitmap) Release(offset int) error { + r.lock.Lock() + defer r.lock.Unlock() + + if r.allocated.Bit(offset) == 0 { + return nil + } + + r.allocated = r.allocated.SetBit(r.allocated, offset, 0) + r.count-- + return nil +} + +const ( + // Find the size of a big.Word in bytes. 
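+	// How this works: ^big.Word(0) is a word with all bits set, so each
+	// shift-and-mask term below contributes 1 exactly when the word is wider
+	// than 1, 2, or 4 bytes. wordPower is therefore log2 of the word size in
+	// bytes (2 on 32-bit platforms, 3 on 64-bit), and wordSize is 1<<wordPower.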
+ notZero = uint64(^big.Word(0)) + wordPower = (notZero>>8)&1 + (notZero>>16)&1 + (notZero>>32)&1 + wordSize = 1 << wordPower +) + +// ForEach calls the provided function for each allocated bit. The +// AllocationBitmap may not be modified while this loop is running. +func (r *AllocationBitmap) ForEach(fn func(int)) { + r.lock.Lock() + defer r.lock.Unlock() + + words := r.allocated.Bits() + for wordIdx, word := range words { + bit := 0 + for word > 0 { + if (word & 1) != 0 { + fn((wordIdx * wordSize * 8) + bit) + word = word &^ 1 + } + bit++ + word = word >> 1 + } + } +} + +// Has returns true if the provided item is already allocated and a call +// to Allocate(offset) would fail. +func (r *AllocationBitmap) Has(offset int) bool { + r.lock.Lock() + defer r.lock.Unlock() + + return r.allocated.Bit(offset) == 1 +} + +// Free returns the count of items left in the range. +func (r *AllocationBitmap) Free() int { + r.lock.Lock() + defer r.lock.Unlock() + return r.max - r.count +} + +// Snapshot saves the current state of the pool. +func (r *AllocationBitmap) Snapshot() (string, []byte) { + r.lock.Lock() + defer r.lock.Unlock() + + return r.rangeSpec, r.allocated.Bytes() +} + +// Restore restores the pool to the previously captured state. +func (r *AllocationBitmap) Restore(rangeSpec string, data []byte) error { + r.lock.Lock() + defer r.lock.Unlock() + + if r.rangeSpec != rangeSpec { + return errors.New("the provided range does not match the current range") + } + + r.allocated = big.NewInt(0).SetBytes(data) + r.count = countBits(r.allocated) + + return nil +} + +// randomScanStrategy chooses a random address from the provided big.Int, and then +// scans forward looking for the next available address (it will wrap the range if +// necessary). +type randomScanStrategy struct { + rand *rand.Rand +} + +func (rss randomScanStrategy) AllocateBit(allocated *big.Int, max, count int) (int, bool) { + if count >= max { + return 0, false + } + offset := rss.rand.Intn(max) + for i := 0; i < max; i++ { + at := (offset + i) % max + if allocated.Bit(at) == 0 { + return at, true + } + } + return 0, false +} + +var _ bitAllocator = randomScanStrategy{} + +// contiguousScanStrategy tries to allocate starting at 0 and filling in any gaps +type contiguousScanStrategy struct{} + +func (contiguousScanStrategy) AllocateBit(allocated *big.Int, max, count int) (int, bool) { + if count >= max { + return 0, false + } + for i := 0; i < max; i++ { + if allocated.Bit(i) == 0 { + return i, true + } + } + return 0, false +} + +var _ bitAllocator = contiguousScanStrategy{} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/interfaces.go b/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/interfaces.go new file mode 100644 index 0000000000..0d18feff20 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/interfaces.go @@ -0,0 +1,42 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package allocator + +// Interface manages the allocation of items out of a range. Interface +// should be threadsafe. +type Interface interface { + Allocate(int) (bool, error) + AllocateNext() (int, bool, error) + Release(int) error + ForEach(func(int)) + + // For testing + Has(int) bool + + // For testing + Free() int +} + +// Snapshottable is an Interface that can be snapshotted and restored. Snapshottable +// should be threadsafe. +type Snapshottable interface { + Interface + Snapshot() (string, []byte) + Restore(string, []byte) error +} + +type AllocatorFactory func(max int, rangeSpec string) Interface diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/utils.go b/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/utils.go new file mode 100644 index 0000000000..4691f57a15 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/utils.go @@ -0,0 +1,64 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package allocator + +import "math/big" + +// countBits returns the number of set bits in n +func countBits(n *big.Int) int { + var count int = 0 + for _, b := range n.Bytes() { + count += int(bitCounts[b]) + } + return count +} + +// bitCounts is all of the bits counted for each number between 0-255 +var bitCounts = []int8{ + 0, 1, 1, 2, 1, 2, 2, 3, + 1, 2, 2, 3, 2, 3, 3, 4, + 1, 2, 2, 3, 2, 3, 3, 4, + 2, 3, 3, 4, 3, 4, 4, 5, + 1, 2, 2, 3, 2, 3, 3, 4, + 2, 3, 3, 4, 3, 4, 4, 5, + 2, 3, 3, 4, 3, 4, 4, 5, + 3, 4, 4, 5, 4, 5, 5, 6, + 1, 2, 2, 3, 2, 3, 3, 4, + 2, 3, 3, 4, 3, 4, 4, 5, + 2, 3, 3, 4, 3, 4, 4, 5, + 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, + 3, 4, 4, 5, 4, 5, 5, 6, + 3, 4, 4, 5, 4, 5, 5, 6, + 4, 5, 5, 6, 5, 6, 6, 7, + 1, 2, 2, 3, 2, 3, 3, 4, + 2, 3, 3, 4, 3, 4, 4, 5, + 2, 3, 3, 4, 3, 4, 4, 5, + 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, + 3, 4, 4, 5, 4, 5, 5, 6, + 3, 4, 4, 5, 4, 5, 5, 6, + 4, 5, 5, 6, 5, 6, 6, 7, + 2, 3, 3, 4, 3, 4, 4, 5, + 3, 4, 4, 5, 4, 5, 5, 6, + 3, 4, 4, 5, 4, 5, 5, 6, + 4, 5, 5, 6, 5, 6, 6, 7, + 3, 4, 4, 5, 4, 5, 5, 6, + 4, 5, 5, 6, 5, 6, 6, 7, + 4, 5, 5, 6, 5, 6, 6, 7, + 5, 6, 6, 7, 6, 7, 7, 8, +} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/ipallocator/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/service/ipallocator/BUILD new file mode 100644 index 0000000000..10db164981 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/ipallocator/BUILD @@ -0,0 +1,44 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = ["allocator.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/service/ipallocator", + deps = [ + "//pkg/apis/core:go_default_library", + "//pkg/registry/core/service/allocator:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["allocator_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/apis/core:go_default_library", + 
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/registry/core/service/ipallocator/controller:all-srcs", + "//pkg/registry/core/service/ipallocator/storage:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/ipallocator/allocator.go b/vendor/k8s.io/kubernetes/pkg/registry/core/service/ipallocator/allocator.go new file mode 100644 index 0000000000..bb5c2984df --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/ipallocator/allocator.go @@ -0,0 +1,286 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ipallocator + +import ( + "errors" + "fmt" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/registry/core/service/allocator" + "math/big" + "net" +) + +// Interface manages the allocation of IP addresses out of a range. Interface +// should be threadsafe. +type Interface interface { + Allocate(net.IP) error + AllocateNext() (net.IP, error) + Release(net.IP) error + ForEach(func(net.IP)) + + // For testing + Has(ip net.IP) bool +} + +var ( + ErrFull = errors.New("range is full") + ErrAllocated = errors.New("provided IP is already allocated") + ErrMismatchedNetwork = errors.New("the provided network does not match the current range") +) + +type ErrNotInRange struct { + ValidRange string +} + +func (e *ErrNotInRange) Error() string { + return fmt.Sprintf("provided IP is not in the valid range. The range of valid IPs is %s", e.ValidRange) +} + +// Range is a contiguous block of IPs that can be allocated atomically. +// +// The internal structure of the range is: +// +// For CIDR 10.0.0.0/24 +// 254 addresses usable out of 256 total (minus base and broadcast IPs) +// The number of usable addresses is r.max +// +// CIDR base IP CIDR broadcast IP +// 10.0.0.0 10.0.0.255 +// | | +// 0 1 2 3 4 5 ... ... 253 254 255 +// | | +// r.base r.base + r.max +// | | +// offset #0 of r.allocated last offset of r.allocated +type Range struct { + net *net.IPNet + // base is a cached version of the start IP in the CIDR range as a *big.Int + base *big.Int + // max is the maximum size of the usable addresses in the range + max int + + alloc allocator.Interface +} + +// NewAllocatorCIDRRange creates a Range over a net.IPNet, calling allocatorFactory to construct the backing store. 
+func NewAllocatorCIDRRange(cidr *net.IPNet, allocatorFactory allocator.AllocatorFactory) *Range { + max := RangeSize(cidr) + base := bigForIP(cidr.IP) + rangeSpec := cidr.String() + + r := Range{ + net: cidr, + base: base.Add(base, big.NewInt(1)), // don't use the network base + max: maximum(0, int(max-2)), // don't use the network broadcast, + } + r.alloc = allocatorFactory(r.max, rangeSpec) + return &r +} + +// Helper that wraps NewAllocatorCIDRRange, for creating a range backed by an in-memory store. +func NewCIDRRange(cidr *net.IPNet) *Range { + return NewAllocatorCIDRRange(cidr, func(max int, rangeSpec string) allocator.Interface { + return allocator.NewAllocationMap(max, rangeSpec) + }) +} + +// NewFromSnapshot allocates a Range and initializes it from a snapshot. +func NewFromSnapshot(snap *api.RangeAllocation) (*Range, error) { + _, ipnet, err := net.ParseCIDR(snap.Range) + if err != nil { + return nil, err + } + r := NewCIDRRange(ipnet) + if err := r.Restore(ipnet, snap.Data); err != nil { + return nil, err + } + return r, nil +} + +func maximum(a, b int) int { + if a > b { + return a + } + return b +} + +// Free returns the count of IP addresses left in the range. +func (r *Range) Free() int { + return r.alloc.Free() +} + +// Used returns the count of IP addresses used in the range. +func (r *Range) Used() int { + return r.max - r.alloc.Free() +} + +// CIDR returns the CIDR covered by the range. +func (r *Range) CIDR() net.IPNet { + return *r.net +} + +// Allocate attempts to reserve the provided IP. ErrNotInRange or +// ErrAllocated will be returned if the IP is not valid for this range +// or has already been reserved. ErrFull will be returned if there +// are no addresses left. +func (r *Range) Allocate(ip net.IP) error { + ok, offset := r.contains(ip) + if !ok { + return &ErrNotInRange{r.net.String()} + } + + allocated, err := r.alloc.Allocate(offset) + if err != nil { + return err + } + if !allocated { + return ErrAllocated + } + return nil +} + +// AllocateNext reserves one of the IPs from the pool. ErrFull may +// be returned if there are no addresses left. +func (r *Range) AllocateNext() (net.IP, error) { + offset, ok, err := r.alloc.AllocateNext() + if err != nil { + return nil, err + } + if !ok { + return nil, ErrFull + } + return addIPOffset(r.base, offset), nil +} + +// Release releases the IP back to the pool. Releasing an +// unallocated IP or an IP out of the range is a no-op and +// returns no error. +func (r *Range) Release(ip net.IP) error { + ok, offset := r.contains(ip) + if !ok { + return nil + } + + return r.alloc.Release(offset) +} + +// ForEach calls the provided function for each allocated IP. +func (r *Range) ForEach(fn func(net.IP)) { + r.alloc.ForEach(func(offset int) { + ip, _ := GetIndexedIP(r.net, offset+1) // +1 because Range doesn't store IP 0 + fn(ip) + }) +} + +// Has returns true if the provided IP is already allocated and a call +// to Allocate(ip) would fail with ErrAllocated. +func (r *Range) Has(ip net.IP) bool { + ok, offset := r.contains(ip) + if !ok { + return false + } + + return r.alloc.Has(offset) +} + +// Snapshot saves the current state of the pool. +func (r *Range) Snapshot(dst *api.RangeAllocation) error { + snapshottable, ok := r.alloc.(allocator.Snapshottable) + if !ok { + return fmt.Errorf("not a snapshottable allocator") + } + rangeString, data := snapshottable.Snapshot() + dst.Range = rangeString + dst.Data = data + return nil +} + +// Restore restores the pool to the previously captured state. 
ErrMismatchedNetwork +// is returned if the provided IPNet range doesn't exactly match the previous range. +func (r *Range) Restore(net *net.IPNet, data []byte) error { + if !net.IP.Equal(r.net.IP) || net.Mask.String() != r.net.Mask.String() { + return ErrMismatchedNetwork + } + snapshottable, ok := r.alloc.(allocator.Snapshottable) + if !ok { + return fmt.Errorf("not a snapshottable allocator") + } + snapshottable.Restore(net.String(), data) + return nil +} + +// contains returns true and the offset if the ip is in the range, and false +// and nil otherwise. The first and last addresses of the CIDR are omitted. +func (r *Range) contains(ip net.IP) (bool, int) { + if !r.net.Contains(ip) { + return false, 0 + } + + offset := calculateIPOffset(r.base, ip) + if offset < 0 || offset >= r.max { + return false, 0 + } + return true, offset +} + +// bigForIP creates a big.Int based on the provided net.IP +func bigForIP(ip net.IP) *big.Int { + b := ip.To4() + if b == nil { + b = ip.To16() + } + return big.NewInt(0).SetBytes(b) +} + +// addIPOffset adds the provided integer offset to a base big.Int representing a +// net.IP +func addIPOffset(base *big.Int, offset int) net.IP { + return net.IP(big.NewInt(0).Add(base, big.NewInt(int64(offset))).Bytes()) +} + +// calculateIPOffset calculates the integer offset of ip from base such that +// base + offset = ip. It requires ip >= base. +func calculateIPOffset(base *big.Int, ip net.IP) int { + return int(big.NewInt(0).Sub(bigForIP(ip), base).Int64()) +} + +// RangeSize returns the size of a range in valid addresses. +func RangeSize(subnet *net.IPNet) int64 { + ones, bits := subnet.Mask.Size() + if bits == 32 && (bits-ones) >= 31 || bits == 128 && (bits-ones) >= 127 { + return 0 + } + // For IPv6, the max size will be limited to 65536 + // This is due to the allocator keeping track of all the + // allocated IP's in a bitmap. This will keep the size of + // the bitmap to 64k. + if bits == 128 && (bits-ones) >= 16 { + return int64(1) << uint(16) + } else { + return int64(1) << uint(bits-ones) + } +} + +// GetIndexedIP returns a net.IP that is subnet.IP + index in the contiguous IP space. +func GetIndexedIP(subnet *net.IPNet, index int) (net.IP, error) { + ip := addIPOffset(bigForIP(subnet.IP), index) + if !subnet.Contains(ip) { + return nil, fmt.Errorf("can't generate IP with index %d from subnet. subnet too small. 
subnet: %q", index, subnet) + } + return ip, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/version/.gitattributes b/vendor/k8s.io/kubernetes/pkg/version/.gitattributes new file mode 100644 index 0000000000..7e349eff60 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/version/.gitattributes @@ -0,0 +1 @@ +base.go export-subst diff --git a/vendor/k8s.io/kubernetes/pkg/version/BUILD b/vendor/k8s.io/kubernetes/pkg/version/BUILD new file mode 100644 index 0000000000..26b0a16123 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/version/BUILD @@ -0,0 +1,34 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "base.go", + "doc.go", + "version.go", + ], + importpath = "k8s.io/kubernetes/pkg/version", + deps = ["//staging/src/k8s.io/apimachinery/pkg/version:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/version/prometheus:all-srcs", + "//pkg/version/verflag:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/version/base.go b/vendor/k8s.io/kubernetes/pkg/version/base.go new file mode 100644 index 0000000000..730e79f03d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/version/base.go @@ -0,0 +1,63 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +// Base version information. +// +// This is the fallback data used when version information from git is not +// provided via go ldflags. It provides an approximation of the Kubernetes +// version for ad-hoc builds (e.g. `go build`) that cannot get the version +// information from git. +// +// If you are looking at these fields in the git tree, they look +// strange. They are modified on the fly by the build process. The +// in-tree values are dummy values used for "git archive", which also +// works for GitHub tar downloads. +// +// When releasing a new Kubernetes version, this file is updated by +// build/mark_new_version.sh to reflect the new version, and then a +// git annotated tag (using format vX.Y where X == Major version and Y +// == Minor version) is created to point to the commit that updates +// pkg/version/base.go +var ( + // TODO: Deprecate gitMajor and gitMinor, use only gitVersion + // instead. First step in deprecation, keep the fields but make + // them irrelevant. (Next we'll take it out, which may muck with + // scripts consuming the kubectl version output - but most of + // these should be looking at gitVersion already anyways.) 
+ gitMajor string // major version, always numeric + gitMinor string // minor version, numeric possibly followed by "+" + + // semantic version, derived by build scripts (see + // https://github.com/kubernetes/community/blob/master/contributors/design-proposals/release/versioning.md + // for a detailed discussion of this field) + // + // TODO: This field is still called "gitVersion" for legacy + // reasons. For prerelease versions, the build metadata on the + // semantic version is a git hash, but the version itself is no + // longer the direct output of "git describe", but a slight + // translation to be semver compliant. + + // NOTE: The $Format strings are replaced during 'git archive' thanks to the + // companion .gitattributes file containing 'export-subst' in this same + // directory. See also https://git-scm.com/docs/gitattributes + gitVersion = "v0.0.0-master+$Format:%h$" + gitCommit = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) + gitTreeState = "" // state of git tree, either "clean" or "dirty" + + buildDate = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') +) diff --git a/vendor/k8s.io/kubernetes/pkg/version/def.bzl b/vendor/k8s.io/kubernetes/pkg/version/def.bzl new file mode 100644 index 0000000000..302893b1b8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/version/def.bzl @@ -0,0 +1,39 @@ +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Implements hack/lib/version.sh's kube::version::ldflags() for Bazel. +def version_x_defs(): + # This should match the list of packages in kube::version::ldflag + stamp_pkgs = [ + "k8s.io/kubernetes/pkg/version", + "k8s.io/kubernetes/vendor/k8s.io/client-go/pkg/version", + ] + + # This should match the list of vars in kube::version::ldflags + # It should also match the list of vars set in hack/print-workspace-status.sh. + stamp_vars = [ + "buildDate", + "gitCommit", + "gitMajor", + "gitMinor", + "gitTreeState", + "gitVersion", + ] + + # Generate the cross-product. + x_defs = {} + for pkg in stamp_pkgs: + for var in stamp_vars: + x_defs["%s.%s" % (pkg, var)] = "{%s}" % var + return x_defs diff --git a/vendor/k8s.io/kubernetes/pkg/version/doc.go b/vendor/k8s.io/kubernetes/pkg/version/doc.go new file mode 100644 index 0000000000..a4a1c035fc --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/version/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:openapi-gen=true + +// Package version supplies version information collected at build time to +// kubernetes components. +package version // import "k8s.io/kubernetes/pkg/version" diff --git a/vendor/k8s.io/kubernetes/pkg/version/version.go b/vendor/k8s.io/kubernetes/pkg/version/version.go new file mode 100644 index 0000000000..8c8350d13b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/version/version.go @@ -0,0 +1,42 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "fmt" + "runtime" + + apimachineryversion "k8s.io/apimachinery/pkg/version" +) + +// Get returns the overall codebase version. It's for detecting +// what code a binary was built from. +func Get() apimachineryversion.Info { + // These variables typically come from -ldflags settings and in + // their absence fallback to the settings in pkg/version/base.go + return apimachineryversion.Info{ + Major: gitMajor, + Minor: gitMinor, + GitVersion: gitVersion, + GitCommit: gitCommit, + GitTreeState: gitTreeState, + BuildDate: buildDate, + GoVersion: runtime.Version(), + Compiler: runtime.Compiler, + Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), + } +} diff --git a/vendor/k8s.io/utils/LICENSE b/vendor/k8s.io/utils/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/k8s.io/utils/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/utils/exec/doc.go b/vendor/k8s.io/utils/exec/doc.go new file mode 100644 index 0000000000..cbb44bdb5d --- /dev/null +++ b/vendor/k8s.io/utils/exec/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package exec provides an injectable interface and implementations for running commands. 
+package exec // import "k8s.io/utils/exec" diff --git a/vendor/k8s.io/utils/exec/exec.go b/vendor/k8s.io/utils/exec/exec.go new file mode 100644 index 0000000000..96bec01ca8 --- /dev/null +++ b/vendor/k8s.io/utils/exec/exec.go @@ -0,0 +1,252 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package exec + +import ( + "context" + "io" + osexec "os/exec" + "syscall" + "time" +) + +// ErrExecutableNotFound is returned if the executable is not found. +var ErrExecutableNotFound = osexec.ErrNotFound + +// Interface is an interface that presents a subset of the os/exec API. Use this +// when you want to inject fakeable/mockable exec behavior. +type Interface interface { + // Command returns a Cmd instance which can be used to run a single command. + // This follows the pattern of package os/exec. + Command(cmd string, args ...string) Cmd + + // CommandContext returns a Cmd instance which can be used to run a single command. + // + // The provided context is used to kill the process if the context becomes done + // before the command completes on its own. For example, a timeout can be set in + // the context. + CommandContext(ctx context.Context, cmd string, args ...string) Cmd + + // LookPath wraps os/exec.LookPath + LookPath(file string) (string, error) +} + +// Cmd is an interface that presents an API that is very similar to Cmd from os/exec. +// As more functionality is needed, this can grow. Since Cmd is a struct, we will have +// to replace fields with get/set method pairs. +type Cmd interface { + // Run runs the command to the completion. + Run() error + // CombinedOutput runs the command and returns its combined standard output + // and standard error. This follows the pattern of package os/exec. + CombinedOutput() ([]byte, error) + // Output runs the command and returns standard output, but not standard err + Output() ([]byte, error) + SetDir(dir string) + SetStdin(in io.Reader) + SetStdout(out io.Writer) + SetStderr(out io.Writer) + SetEnv(env []string) + + // StdoutPipe and StderrPipe for getting the process' Stdout and Stderr as + // Readers + StdoutPipe() (io.ReadCloser, error) + StderrPipe() (io.ReadCloser, error) + + // Start and Wait are for running a process non-blocking + Start() error + Wait() error + + // Stops the command by sending SIGTERM. It is not guaranteed the + // process will stop before this function returns. If the process is not + // responding, an internal timer function will send a SIGKILL to force + // terminate after 10 seconds. + Stop() +} + +// ExitError is an interface that presents an API similar to os.ProcessState, which is +// what ExitError from os/exec is. This is designed to make testing a bit easier and +// probably loses some of the cross-platform properties of the underlying library. +type ExitError interface { + String() string + Error() string + Exited() bool + ExitStatus() int +} + +// Implements Interface in terms of really exec()ing. 
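+//
+// Editor's sketch (assumed typical usage, not part of the upstream file):
+// construct the real executor once at wiring time and inject Interface
+// everywhere else, so tests can substitute a fake:
+//
+//	var runner Interface = New()
+//	out, err := runner.Command("echo", "hello").CombinedOutput()
+//	if err != nil {
+//		// failures surface through handleError below, e.g. as ExitError
+//	}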
+type executor struct{} + +// New returns a new Interface which will os/exec to run commands. +func New() Interface { + return &executor{} +} + +// Command is part of the Interface interface. +func (executor *executor) Command(cmd string, args ...string) Cmd { + return (*cmdWrapper)(osexec.Command(cmd, args...)) +} + +// CommandContext is part of the Interface interface. +func (executor *executor) CommandContext(ctx context.Context, cmd string, args ...string) Cmd { + return (*cmdWrapper)(osexec.CommandContext(ctx, cmd, args...)) +} + +// LookPath is part of the Interface interface +func (executor *executor) LookPath(file string) (string, error) { + return osexec.LookPath(file) +} + +// Wraps exec.Cmd so we can capture errors. +type cmdWrapper osexec.Cmd + +var _ Cmd = &cmdWrapper{} + +func (cmd *cmdWrapper) SetDir(dir string) { + cmd.Dir = dir +} + +func (cmd *cmdWrapper) SetStdin(in io.Reader) { + cmd.Stdin = in +} + +func (cmd *cmdWrapper) SetStdout(out io.Writer) { + cmd.Stdout = out +} + +func (cmd *cmdWrapper) SetStderr(out io.Writer) { + cmd.Stderr = out +} + +func (cmd *cmdWrapper) SetEnv(env []string) { + cmd.Env = env +} + +func (cmd *cmdWrapper) StdoutPipe() (io.ReadCloser, error) { + r, err := (*osexec.Cmd)(cmd).StdoutPipe() + return r, handleError(err) +} + +func (cmd *cmdWrapper) StderrPipe() (io.ReadCloser, error) { + r, err := (*osexec.Cmd)(cmd).StderrPipe() + return r, handleError(err) +} + +func (cmd *cmdWrapper) Start() error { + err := (*osexec.Cmd)(cmd).Start() + return handleError(err) +} + +func (cmd *cmdWrapper) Wait() error { + err := (*osexec.Cmd)(cmd).Wait() + return handleError(err) +} + +// Run is part of the Cmd interface. +func (cmd *cmdWrapper) Run() error { + err := (*osexec.Cmd)(cmd).Run() + return handleError(err) +} + +// CombinedOutput is part of the Cmd interface. +func (cmd *cmdWrapper) CombinedOutput() ([]byte, error) { + out, err := (*osexec.Cmd)(cmd).CombinedOutput() + return out, handleError(err) +} + +func (cmd *cmdWrapper) Output() ([]byte, error) { + out, err := (*osexec.Cmd)(cmd).Output() + return out, handleError(err) +} + +// Stop is part of the Cmd interface. +func (cmd *cmdWrapper) Stop() { + c := (*osexec.Cmd)(cmd) + + if c.Process == nil { + return + } + + c.Process.Signal(syscall.SIGTERM) + + time.AfterFunc(10*time.Second, func() { + if !c.ProcessState.Exited() { + c.Process.Signal(syscall.SIGKILL) + } + }) +} + +func handleError(err error) error { + if err == nil { + return nil + } + + switch e := err.(type) { + case *osexec.ExitError: + return &ExitErrorWrapper{e} + case *osexec.Error: + if e.Err == osexec.ErrNotFound { + return ErrExecutableNotFound + } + } + + return err +} + +// ExitErrorWrapper is an implementation of ExitError in terms of os/exec ExitError. +// Note: standard exec.ExitError is type *os.ProcessState, which already implements Exited(). +type ExitErrorWrapper struct { + *osexec.ExitError +} + +var _ ExitError = &ExitErrorWrapper{} + +// ExitStatus is part of the ExitError interface. +func (eew ExitErrorWrapper) ExitStatus() int { + ws, ok := eew.Sys().(syscall.WaitStatus) + if !ok { + panic("can't call ExitStatus() on a non-WaitStatus exitErrorWrapper") + } + return ws.ExitStatus() +} + +// CodeExitError is an implementation of ExitError consisting of an error object +// and an exit code (the upper bits of os.exec.ExitStatus). 
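+//
+// Editor's sketch (illustrative only): a test fake can hand back
+//
+//	CodeExitError{Err: errors.New("exit status 2"), Code: 2}
+//
+// so that callers which assert err.(ExitError) observe ExitStatus() == 2
+// without spawning a real process.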
+type CodeExitError struct { + Err error + Code int +} + +var _ ExitError = CodeExitError{} + +func (e CodeExitError) Error() string { + return e.Err.Error() +} + +func (e CodeExitError) String() string { + return e.Err.Error() +} + +// Exited is to check if the process has finished +func (e CodeExitError) Exited() bool { + return true +} + +// ExitStatus is for checking the error code +func (e CodeExitError) ExitStatus() int { + return e.Code +} diff --git a/vendor/modules.txt b/vendor/modules.txt index c22f504bb9..5f7d70f267 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -128,7 +128,7 @@ github.com/gophercloud/utils/openstack/clientconfig # github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 github.com/gregjones/httpcache github.com/gregjones/httpcache/diskcache -# github.com/grpc-ecosystem/grpc-gateway v1.9.1 +# github.com/grpc-ecosystem/grpc-gateway v1.8.5 github.com/grpc-ecosystem/grpc-gateway/runtime github.com/grpc-ecosystem/grpc-gateway/utilities github.com/grpc-ecosystem/grpc-gateway/internal @@ -303,11 +303,11 @@ google.golang.org/appengine/internal/base google.golang.org/appengine/internal/datastore google.golang.org/appengine/internal/log google.golang.org/appengine/internal/remote_api -# google.golang.org/genproto v0.0.0-20190611190212-a7e196e89fd3 +# google.golang.org/genproto v0.0.0-20190530194941-fb225487d101 google.golang.org/genproto/googleapis/api/httpbody google.golang.org/genproto/protobuf/field_mask google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.21.1 +# google.golang.org/grpc v1.20.1 google.golang.org/grpc google.golang.org/grpc/credentials google.golang.org/grpc/metadata @@ -344,7 +344,7 @@ google.golang.org/grpc/internal/syscall gopkg.in/inf.v0 # gopkg.in/yaml.v2 v2.2.2 gopkg.in/yaml.v2 -# k8s.io/api v0.0.0-20190612125737-db0771252981 => k8s.io/api v0.0.0-20190222213804-5cb15d344471 +# k8s.io/api v0.0.0 => k8s.io/api v0.0.0-20190222213804-5cb15d344471 k8s.io/api/core/v1 k8s.io/api/autoscaling/v1 k8s.io/api/apps/v1 @@ -378,10 +378,10 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/admission/v1beta1 -# k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236 +# k8s.io/apiextensions-apiserver v0.0.0-00010101000000-000000000000 => k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions -# k8s.io/apimachinery v0.0.0-20190612125636-6a5db36e93ad => k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628 +# k8s.io/apimachinery v0.0.0 => k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628 k8s.io/apimachinery/pkg/util/wait k8s.io/apimachinery/pkg/runtime k8s.io/apimachinery/pkg/apis/meta/v1 @@ -390,11 +390,13 @@ k8s.io/apimachinery/pkg/util/json k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/types k8s.io/apimachinery/pkg/runtime/serializer/json +k8s.io/apimachinery/pkg/runtime/serializer k8s.io/apimachinery/pkg/util/runtime +k8s.io/apimachinery/pkg/util/errors +k8s.io/apimachinery/pkg/util/validation k8s.io/apimachinery/pkg/api/meta k8s.io/apimachinery/pkg/conversion k8s.io/apimachinery/pkg/conversion/queryparams -k8s.io/apimachinery/pkg/util/errors k8s.io/apimachinery/pkg/util/naming k8s.io/apimachinery/pkg/util/sets k8s.io/apimachinery/pkg/api/resource @@ -407,30 +409,30 @@ k8s.io/apimachinery/pkg/apis/meta/v1/validation k8s.io/apimachinery/pkg/util/validation/field k8s.io/apimachinery/pkg/util/clock 
k8s.io/apimachinery/pkg/util/strategicpatch -k8s.io/apimachinery/pkg/runtime/serializer k8s.io/apimachinery/pkg/apis/meta/v1/unstructured k8s.io/apimachinery/pkg/runtime/serializer/recognizer k8s.io/apimachinery/pkg/util/framer k8s.io/apimachinery/pkg/util/yaml -k8s.io/apimachinery/pkg/util/validation -k8s.io/apimachinery/pkg/version +k8s.io/apimachinery/pkg/runtime/serializer/protobuf +k8s.io/apimachinery/pkg/runtime/serializer/versioning k8s.io/apimachinery/pkg/util/net +k8s.io/apimachinery/pkg/util/version +k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/runtime/serializer/streaming k8s.io/apimachinery/pkg/apis/meta/v1beta1 k8s.io/apimachinery/pkg/util/uuid k8s.io/apimachinery/third_party/forked/golang/reflect k8s.io/apimachinery/pkg/util/mergepatch k8s.io/apimachinery/third_party/forked/golang/json -k8s.io/apimachinery/pkg/runtime/serializer/protobuf -k8s.io/apimachinery/pkg/runtime/serializer/versioning k8s.io/apimachinery/pkg/util/cache k8s.io/apimachinery/pkg/util/diff +k8s.io/apimachinery/pkg/apis/config k8s.io/apimachinery/pkg/apis/meta/internalversion -# k8s.io/client-go v0.0.0-20190228174230-b40b2a5939e4 +# k8s.io/client-go v0.0.0 => k8s.io/client-go v0.0.0-20190228174230-b40b2a5939e4 k8s.io/client-go/kubernetes +k8s.io/client-go/tools/clientcmd k8s.io/client-go/tools/record k8s.io/client-go/tools/clientcmd/api -k8s.io/client-go/tools/clientcmd k8s.io/client-go/discovery k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1 k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 @@ -466,14 +468,14 @@ k8s.io/client-go/kubernetes/typed/storage/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1beta1 k8s.io/client-go/rest k8s.io/client-go/util/flowcontrol +k8s.io/client-go/tools/auth +k8s.io/client-go/tools/clientcmd/api/latest +k8s.io/client-go/util/homedir k8s.io/client-go/kubernetes/scheme k8s.io/client-go/tools/leaderelection k8s.io/client-go/tools/leaderelection/resourcelock k8s.io/client-go/tools/reference k8s.io/client-go/dynamic -k8s.io/client-go/tools/auth -k8s.io/client-go/tools/clientcmd/api/latest -k8s.io/client-go/util/homedir k8s.io/client-go/plugin/pkg/client/auth k8s.io/client-go/pkg/version k8s.io/client-go/plugin/pkg/client/auth/exec @@ -482,10 +484,10 @@ k8s.io/client-go/tools/metrics k8s.io/client-go/transport k8s.io/client-go/util/cert k8s.io/client-go/util/integer +k8s.io/client-go/tools/clientcmd/api/v1 k8s.io/client-go/util/workqueue k8s.io/client-go/tools/cache k8s.io/client-go/restmapper -k8s.io/client-go/tools/clientcmd/api/v1 k8s.io/client-go/plugin/pkg/client/auth/azure k8s.io/client-go/plugin/pkg/client/auth/gcp k8s.io/client-go/plugin/pkg/client/auth/oidc @@ -499,10 +501,10 @@ k8s.io/client-go/util/buffer k8s.io/client-go/util/retry k8s.io/client-go/util/jsonpath k8s.io/client-go/third_party/forked/golang/template -# k8s.io/cluster-bootstrap v0.0.0-20190612131323-aa3fd9f69a09 +# k8s.io/cluster-bootstrap v0.0.0 => k8s.io/cluster-bootstrap v0.0.0-20190228181738-e96ff33745e4 k8s.io/cluster-bootstrap/token/api k8s.io/cluster-bootstrap/token/util -# k8s.io/code-generator v0.0.0-20190612125529-c522cb6c26aa +# k8s.io/code-generator v0.0.0 => k8s.io/code-generator v0.0.0-20181117043124-c2090bec4d9b k8s.io/code-generator/cmd/client-gen k8s.io/code-generator/cmd/conversion-gen k8s.io/code-generator/cmd/deepcopy-gen @@ -529,8 +531,7 @@ k8s.io/code-generator/cmd/client-gen/generators/fake k8s.io/code-generator/cmd/client-gen/generators/scheme k8s.io/code-generator/cmd/client-gen/generators/util 
k8s.io/code-generator/cmd/client-gen/path -k8s.io/code-generator/pkg/namer -# k8s.io/component-base v0.0.0-20190615090122-7edc45fbd48e +# k8s.io/component-base v0.0.0-00010101000000-000000000000 => k8s.io/component-base v0.0.0-20190620085130-185d68e6e6ea k8s.io/component-base/cli/flag # k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a k8s.io/gengo/args @@ -546,6 +547,19 @@ k8s.io/gengo/examples/set-gen/sets k8s.io/klog # k8s.io/kube-openapi v0.0.0-20190603182131-db7b694dc208 k8s.io/kube-openapi/pkg/util/proto +# k8s.io/kubernetes v1.13.4 +k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 +k8s.io/kubernetes/cmd/kubeadm/app/util +k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm +k8s.io/kubernetes/cmd/kubeadm/app/constants +k8s.io/kubernetes/pkg/version +k8s.io/kubernetes/pkg/kubelet/apis/config +k8s.io/kubernetes/pkg/proxy/apis/config +k8s.io/kubernetes/pkg/registry/core/service/ipallocator +k8s.io/kubernetes/pkg/apis/core +k8s.io/kubernetes/pkg/registry/core/service/allocator +# k8s.io/utils v0.0.0-20190801114015-581e00157fb1 +k8s.io/utils/exec # sigs.k8s.io/cluster-api v0.1.4 sigs.k8s.io/cluster-api/cmd/clusterctl/cmd sigs.k8s.io/cluster-api/pkg/apis/cluster/common @@ -573,17 +587,16 @@ sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/kind sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/minikube sigs.k8s.io/cluster-api/pkg/controller/noderefutil # sigs.k8s.io/controller-runtime v0.1.12 -sigs.k8s.io/controller-runtime/pkg/client/config sigs.k8s.io/controller-runtime/pkg/manager sigs.k8s.io/controller-runtime/pkg/runtime/signals sigs.k8s.io/controller-runtime/pkg/runtime/scheme sigs.k8s.io/controller-runtime/pkg/client sigs.k8s.io/controller-runtime/pkg/patch +sigs.k8s.io/controller-runtime/pkg/client/config sigs.k8s.io/controller-runtime/pkg/controller sigs.k8s.io/controller-runtime/pkg/handler sigs.k8s.io/controller-runtime/pkg/reconcile sigs.k8s.io/controller-runtime/pkg/source -sigs.k8s.io/controller-runtime/pkg/runtime/log sigs.k8s.io/controller-runtime/pkg/cache sigs.k8s.io/controller-runtime/pkg/client/apiutil sigs.k8s.io/controller-runtime/pkg/internal/recorder @@ -591,6 +604,7 @@ sigs.k8s.io/controller-runtime/pkg/leaderelection sigs.k8s.io/controller-runtime/pkg/metrics sigs.k8s.io/controller-runtime/pkg/recorder sigs.k8s.io/controller-runtime/pkg/runtime/inject +sigs.k8s.io/controller-runtime/pkg/runtime/log sigs.k8s.io/controller-runtime/pkg/webhook/admission sigs.k8s.io/controller-runtime/pkg/webhook/admission/types sigs.k8s.io/controller-runtime/pkg/internal/controller