diff --git a/config.toml b/config.toml index 071c527e98a16..0f85a554e7eaa 100644 --- a/config.toml +++ b/config.toml @@ -64,10 +64,10 @@ time_format_blog = "Monday, January 02, 2006" description = "Production-Grade Container Orchestration" showedit = true -latest = "v1.13" +latest = "v1.14" -fullversion = "v1.13.0" -version = "v1.13" +fullversion = "v1.14.0" +version = "v1.14" githubbranch = "master" docsbranch = "master" deprecated = false @@ -77,10 +77,10 @@ githubWebsiteRepo = "github.com/kubernetes/website" githubWebsiteRaw = "raw.githubusercontent.com/kubernetes/website" [[params.versions]] -fullversion = "v1.13.0" -version = "v1.13" -githubbranch = "v1.13.0" -docsbranch = "release-1.13" +fullversion = "v1.14.0" +version = "v1.14" +githubbranch = "v1.14.0" +docsbranch = "release-1.14" url = "https://kubernetes.io" [params.pushAssets] @@ -95,33 +95,33 @@ js = [ ] [[params.versions]] -fullversion = "v1.12.3" +fullversion = "v1.13.4" +version = "v1.13" +githubbranch = "v1.13.4" +docsbranch = "release-1.13" +url = "https://v1-13.docs.kubernetes.io" + +[[params.versions]] +fullversion = "v1.12.6" version = "v1.12" -githubbranch = "v1.12.3" +githubbranch = "v1.12.6" docsbranch = "release-1.12" url = "https://v1-12.docs.kubernetes.io" [[params.versions]] -fullversion = "v1.11.5" +fullversion = "v1.11.8" version = "v1.11" -githubbranch = "v1.11.5" +githubbranch = "v1.11.8" docsbranch = "release-1.11" url = "https://v1-11.docs.kubernetes.io" [[params.versions]] -fullversion = "v1.10.11" +fullversion = "v1.10.13" version = "v1.10" -githubbranch = "v1.10.11" +githubbranch = "v1.10.13" docsbranch = "release-1.10" url = "https://v1-10.docs.kubernetes.io" -[[params.versions]] -fullversion = "v1.9.11" -version = "v1.9" -githubbranch = "v1.9.11" -docsbranch = "release-1.9" -url = "https://v1-9.docs.kubernetes.io" - # Language definitions. [languages] diff --git a/content/en/blog/_posts/2018-11-07-grpc-load-balancing-with-linkerd.md.md b/content/en/blog/_posts/2018-11-07-grpc-load-balancing-with-linkerd.md similarity index 100% rename from content/en/blog/_posts/2018-11-07-grpc-load-balancing-with-linkerd.md.md rename to content/en/blog/_posts/2018-11-07-grpc-load-balancing-with-linkerd.md diff --git a/content/en/docs/concepts/cluster-administration/logging.md b/content/en/docs/concepts/cluster-administration/logging.md index 1040fae424584..67fa8a5699dac 100644 --- a/content/en/docs/concepts/cluster-administration/logging.md +++ b/content/en/docs/concepts/cluster-administration/logging.md @@ -35,7 +35,7 @@ a container that writes some text to standard output once per second. 
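For orientation, the `counter-pod.yaml` manifest referenced below is, roughly, a single busybox container running a shell loop that prints a counter and a timestamp once per second (a sketch based on the example's description; the file at the URL in the next command is canonical):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: counter
spec:
  containers:
  - name: count
    image: busybox
    # Print an incrementing counter and the current date once per second.
    args: [/bin/sh, -c,
           'i=0; while true; do echo "$i: $(date)"; i=$((i+1)); sleep 1; done']
```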
To run this pod, use the following command: ```shell -kubectl create -f https://k8s.io/examples/debug/counter-pod.yaml +kubectl apply -f https://k8s.io/examples/debug/counter-pod.yaml pod/counter created ``` diff --git a/content/en/docs/concepts/cluster-administration/manage-deployment.md b/content/en/docs/concepts/cluster-administration/manage-deployment.md index b216f8c8ae301..d5ef64ab99aaa 100644 --- a/content/en/docs/concepts/cluster-administration/manage-deployment.md +++ b/content/en/docs/concepts/cluster-administration/manage-deployment.md @@ -26,7 +26,7 @@ Many applications require multiple resources to be created, such as a Deployment Multiple resources can be created the same way as a single resource: ```shell -kubectl create -f https://k8s.io/examples/application/nginx-app.yaml +kubectl apply -f https://k8s.io/examples/application/nginx-app.yaml ``` ```shell @@ -36,16 +36,16 @@ deployment.apps/my-nginx created The resources will be created in the order they appear in the file. Therefore, it's best to specify the service first, since that will ensure the scheduler can spread the pods associated with the service as they are created by the controller(s), such as Deployment. -`kubectl create` also accepts multiple `-f` arguments: +`kubectl apply` also accepts multiple `-f` arguments: ```shell -kubectl create -f https://k8s.io/examples/application/nginx/nginx-svc.yaml -f https://k8s.io/examples/application/nginx/nginx-deployment.yaml +kubectl apply -f https://k8s.io/examples/application/nginx/nginx-svc.yaml -f https://k8s.io/examples/application/nginx/nginx-deployment.yaml ``` And a directory can be specified rather than or in addition to individual files: ```shell -kubectl create -f https://k8s.io/examples/application/nginx/ +kubectl apply -f https://k8s.io/examples/application/nginx/ ``` `kubectl` will read any files with suffixes `.yaml`, `.yml`, or `.json`. @@ -55,7 +55,7 @@ It is a recommended practice to put resources related to the same microservice o A URL can also be specified as a configuration source, which is handy for deploying directly from configuration files checked into github: ```shell -kubectl create -f https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/application/nginx/nginx-deployment.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/application/nginx/nginx-deployment.yaml ``` ```shell @@ -123,7 +123,7 @@ project/k8s/development By default, performing a bulk operation on `project/k8s/development` will stop at the first level of the directory, not processing any subdirectories. 
If we had tried to create the resources in this directory using the following command, we would have encountered an error: ```shell -kubectl create -f project/k8s/development +kubectl apply -f project/k8s/development ``` ```shell @@ -133,7 +133,7 @@ error: you must provide one or more resources by argument or filename (.json|.ya Instead, specify the `--recursive` or `-R` flag with the `--filename,-f` flag as such: ```shell -kubectl create -f project/k8s/development --recursive +kubectl apply -f project/k8s/development --recursive ``` ```shell @@ -147,7 +147,7 @@ The `--recursive` flag works with any operation that accepts the `--filename,-f` The `--recursive` flag also works when multiple `-f` arguments are provided: ```shell -kubectl create -f project/k8s/namespaces -f project/k8s/development --recursive +kubectl apply -f project/k8s/namespaces -f project/k8s/development --recursive ``` ```shell @@ -193,7 +193,7 @@ and The labels allow us to slice and dice our resources along any dimension specified by a label: ```shell -kubectl create -f examples/guestbook/all-in-one/guestbook-all-in-one.yaml +kubectl apply -f examples/guestbook/all-in-one/guestbook-all-in-one.yaml kubectl get pods -Lapp -Ltier -Lrole ``` @@ -366,7 +366,6 @@ This command will compare the version of the configuration that you're pushing w ```shell kubectl apply -f https://k8s.io/examples/application/nginx/nginx-deployment.yaml -``` ```shell deployment.apps/my-nginx configured ``` @@ -377,10 +376,6 @@ Currently, resources are created without this annotation, so the first invocatio All subsequent calls to `kubectl apply`, and other commands that modify the configuration, such as `kubectl replace` and `kubectl edit`, will update the annotation, allowing subsequent calls to `kubectl apply` to detect and perform deletions using a three-way diff. -{{< note >}} -To use apply, always create resource initially with either `kubectl apply` or `kubectl create --save-config`. -{{< /note >}} - ### kubectl edit Alternatively, you may also update resources with `kubectl edit`: @@ -430,8 +425,7 @@ deployment.apps/my-nginx replaced At some point, you'll eventually need to update your deployed application, typically by specifying a new image or image tag, as in the canary deployment scenario above. `kubectl` supports several update operations, each of which is applicable to different scenarios. -We'll guide you through how to create and update applications with Deployments. If your deployed application is managed by Replication Controllers, -you should read [how to use `kubectl rolling-update`](/docs/tasks/run-application/rolling-update-replication-controller/) instead. +We'll guide you through how to create and update applications with Deployments. Let's say you were running version 1.7.9 of nginx: diff --git a/content/en/docs/concepts/configuration/assign-pod-node.md b/content/en/docs/concepts/configuration/assign-pod-node.md index 70ec7f2938ea5..99129cfa0f8a2 100644 --- a/content/en/docs/concepts/configuration/assign-pod-node.md +++ b/content/en/docs/concepts/configuration/assign-pod-node.md @@ -69,7 +69,7 @@ Then add a nodeSelector like so: {{< codenew file="pods/pod-nginx.yaml" >}} -When you then run `kubectl create -f https://k8s.io/examples/pods/pod-nginx.yaml`, +When you then run `kubectl apply -f https://k8s.io/examples/pods/pod-nginx.yaml`, the Pod will get scheduled on the node that you attached the label to. 
You can verify that it worked by running `kubectl get pods -o wide` and looking at the "NODE" that the Pod was assigned to. @@ -83,8 +83,8 @@ with a standard set of labels. As of Kubernetes v1.4 these labels are * `failure-domain.beta.kubernetes.io/zone` * `failure-domain.beta.kubernetes.io/region` * `beta.kubernetes.io/instance-type` -* `beta.kubernetes.io/os` -* `beta.kubernetes.io/arch` +* `kubernetes.io/os` +* `kubernetes.io/arch` {{< note >}} The value of these labels is cloud provider specific and is not guaranteed to be reliable. diff --git a/content/en/docs/concepts/configuration/overview.md b/content/en/docs/concepts/configuration/overview.md index d1069d34d2393..1f67727322842 100644 --- a/content/en/docs/concepts/configuration/overview.md +++ b/content/en/docs/concepts/configuration/overview.md @@ -23,7 +23,7 @@ This is a living document. If you think of something that is not on this list bu - Group related objects into a single file whenever it makes sense. One file is often easier to manage than several. See the [guestbook-all-in-one.yaml](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/guestbook/all-in-one/guestbook-all-in-one.yaml) file as an example of this syntax. -- Note also that many `kubectl` commands can be called on a directory. For example, you can call `kubectl create` on a directory of config files. +- Note also that many `kubectl` commands can be called on a directory. For example, you can call `kubectl apply` on a directory of config files. - Don't specify default values unnecessarily: simple, minimal configuration will make errors less likely. @@ -100,7 +100,7 @@ The caching semantics of the underlying image provider make even `imagePullPolic ## Using kubectl -- Use `kubectl apply -f ` or `kubectl create -f `. This looks for Kubernetes configuration in all `.yaml`, `.yml`, and `.json` files in `` and passes it to `apply` or `create`. +- Use `kubectl apply -f `. This looks for Kubernetes configuration in all `.yaml`, `.yml`, and `.json` files in `` and passes it to `apply`. - Use label selectors for `get` and `delete` operations instead of specific object names. See the sections on [label selectors](/docs/concepts/overview/working-with-objects/labels/#label-selectors) and [using labels effectively](/docs/concepts/cluster-administration/manage-deployment/#using-labels-effectively). diff --git a/content/en/docs/concepts/configuration/pod-priority-preemption.md b/content/en/docs/concepts/configuration/pod-priority-preemption.md index 9ccfa2d53c558..ef5cf9b6c3a18 100644 --- a/content/en/docs/concepts/configuration/pod-priority-preemption.md +++ b/content/en/docs/concepts/configuration/pod-priority-preemption.md @@ -9,7 +9,7 @@ weight: 70 {{% capture overview %}} -{{< feature-state for_k8s_version="1.11" state="beta" >}} +{{< feature-state for_k8s_version="1.14" state="stable" >}} [Pods](/docs/user-guide/pods) can have _priority_. Priority indicates the importance of a Pod relative to other Pods. If a Pod cannot be scheduled, the @@ -19,8 +19,8 @@ pending Pod possible. In Kubernetes 1.9 and later, Priority also affects scheduling order of Pods and out-of-resource eviction ordering on the Node. -Pod priority and preemption are moved to beta since Kubernetes 1.11 and are -enabled by default in this release and later. +Pod priority and preemption graduated to beta in Kubernetes 1.11 and to GA in +Kubernetes 1.14. They have been enabled by default since 1.11. 
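To make the mechanism concrete, here is a minimal sketch of a GA (`scheduling.k8s.io/v1`) PriorityClass and a Pod that references it. The names and the priority value are illustrative; the PriorityClass section later on this page describes the fields in detail.

```yaml
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: high-priority          # illustrative name
value: 1000000                 # larger value means higher priority
globalDefault: false
description: "For workloads that may preempt lower-priority Pods."
---
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  priorityClassName: high-priority   # ties this Pod to the class above
  containers:
  - name: nginx
    image: nginx
```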
In Kubernetes versions where Pod priority and preemption is still an alpha-level feature, you need to explicitly enable it. To use these features in the older @@ -34,6 +34,7 @@ Kubernetes Version | Priority and Preemption State | Enabled by default 1.9 | alpha | no 1.10 | alpha | no 1.11 | beta | yes +1.14 | GA | yes {{< warning >}}In a cluster where not all users are trusted, a malicious user could create pods at the highest possible priorities, causing @@ -71,15 +72,15 @@ Pods. ## How to disable preemption {{< note >}} -In Kubernetes 1.11, critical pods (except DaemonSet pods, which are -still scheduled by the DaemonSet controller) rely on scheduler preemption to be -scheduled when a cluster is under resource pressure. For this reason, you will -need to run an older version of Rescheduler if you decide to disable preemption. -More on this is provided below. +In Kubernetes 1.12+, critical pods rely on scheduler preemption to be scheduled +when a cluster is under resource pressure. For this reason, it is not +recommended to disable preemption. {{< /note >}} In Kubernetes 1.11 and later, preemption is controlled by a kube-scheduler flag `disablePreemption`, which is set to `false` by default. +If you want to disable preemption despite the above note, you can set +`disablePreemption` to `true`. This option is available in component configs only and is not available in old-style command line options. Below is a sample component config to disable @@ -96,20 +97,6 @@ algorithmSource: disablePreemption: true ``` -### Start an older version of Rescheduler in the cluster - -When priority or preemption is disabled, we must run Rescheduler v0.3.1 (instead -of v0.4.0) to ensure that critical Pods are scheduled when nodes or cluster are -under resource pressure. Since critical Pod annotation is still supported in -this release, running Rescheduler should be enough and no other changes to the -configuration of Pods should be needed. - -Rescheduler images can be found at: -[gcr.io/k8s-image-staging/rescheduler](http://gcr.io/k8s-image-staging/rescheduler). - -In the code, changing the Rescheduler version back to v.0.3.1 is the reverse of -[this PR](https://github.com/kubernetes/kubernetes/pull/65454). - ## PriorityClass A PriorityClass is a non-namespaced object that defines a mapping from a diff --git a/content/en/docs/concepts/configuration/scheduler-perf-tuning.md b/content/en/docs/concepts/configuration/scheduler-perf-tuning.md index 41ce9dfb1ae15..b04d1522942e3 100644 --- a/content/en/docs/concepts/configuration/scheduler-perf-tuning.md +++ b/content/en/docs/concepts/configuration/scheduler-perf-tuning.md @@ -8,15 +8,15 @@ weight: 70 {{% capture overview %}} -{{< feature-state for_k8s_version="1.12" >}} +{{< feature-state for_k8s_version="1.14" state="beta" >}} Kube-scheduler is the Kubernetes default scheduler. It is responsible for placement of Pods on Nodes in a cluster. Nodes in a cluster that meet the scheduling requirements of a Pod are called "feasible" Nodes for the Pod. The scheduler finds feasible Nodes for a Pod and then runs a set of functions to score the feasible Nodes and picks a Node with the highest score among the -feasible ones to run the Pod. The scheduler then notifies the API server about this -decision in a process called "Binding". +feasible ones to run the Pod. The scheduler then notifies the API server about +this decision in a process called "Binding". {{% /capture %}} @@ -24,15 +24,23 @@ decision in a process called "Binding". 
## Percentage of Nodes to Score -Before Kubernetes 1.12, Kube-scheduler used to check the feasibility of all the -nodes in a cluster and then scored the feasible ones. Kubernetes 1.12 has a new -feature that allows the scheduler to stop looking for more feasible nodes once -it finds a certain number of them. This improves the scheduler's performance in -large clusters. The number is specified as a percentage of the cluster size and -is controlled by a configuration option called `percentageOfNodesToScore`. The -range should be between 1 and 100. Other values are considered as 100%. The -default value of this option is 50%. A cluster administrator can change this value by providing a -different value in the scheduler configuration. However, it may not be necessary to change this value. +Before Kubernetes 1.12, Kube-scheduler used to check the feasibility of all +nodes in a cluster and then scored the feasible ones. Kubernetes 1.12 added a +new feature that allows the scheduler to stop looking for more feasible nodes +once it finds a certain number of them. This improves the scheduler's +performance in large clusters. The number is specified as a percentage of the +cluster size. The percentage can be controlled by a configuration option called +`percentageOfNodesToScore`. The range should be between 1 and 100. Larger values +are considered as 100%. Zero is equivalent to not providing the config option. +Kubernetes 1.14 has logic to find the percentage of nodes to score based on the +size of the cluster if it is not specified in the configuration. It uses a +linear formula which yields 50% for a 100-node cluster. The formula yields 10% +for a 5000-node cluster. The lower bound for the automatic value is 5%. In other +words, the scheduler always scores at least 5% of the cluster no matter how +large the cluster is, unless the user provides the config option with a value +smaller than 5. + +Below is an example configuration that sets `percentageOfNodesToScore` to 50%. ```yaml apiVersion: componentconfig/v1alpha1 @@ -45,26 +53,22 @@ algorithmSource: percentageOfNodesToScore: 50 ``` -{{< note >}} -In clusters with zero or less than 50 feasible nodes, the -scheduler still checks all the nodes, simply because there are not enough -feasible nodes to stop the scheduler's search early. -{{< /note >}} +{{< note >}} In clusters with less than 50 feasible nodes, the scheduler still +checks all the nodes, simply because there are not enough feasible nodes to stop +the scheduler's search early. {{< /note >}} **To disable this feature**, you can set `percentageOfNodesToScore` to 100. ### Tuning percentageOfNodesToScore -`percentageOfNodesToScore` must be a value between 1 and 100 -with the default value of 50. There is also a hardcoded minimum value of 50 -nodes which is applied internally. The scheduler tries to find at -least 50 nodes regardless of the value of `percentageOfNodesToScore`. This means -that changing this option to lower values in clusters with several hundred nodes -will not have much impact on the number of feasible nodes that the scheduler -tries to find. This is intentional as this option is unlikely to improve -performance noticeably in smaller clusters. In large clusters with over a 1000 -nodes setting this value to lower numbers may show a noticeable performance -improvement. +`percentageOfNodesToScore` must be a value between 1 and 100 with the default +value being calculated based on the cluster size. There is also a hardcoded +minimum value of 50 nodes. 
This means that changing +this option to lower values in clusters with several hundred nodes will not have +much impact on the number of feasible nodes that the scheduler tries to find. +This is intentional as this option is unlikely to improve performance noticeably +in smaller clusters. In large clusters with over a 1000 nodes setting this value +to lower numbers may show a noticeable performance improvement. An important note to consider when setting this value is that when a smaller number of nodes in a cluster are checked for feasibility, some nodes are not @@ -72,14 +76,14 @@ sent to be scored for a given Pod. As a result, a Node which could possibly score a higher value for running the given Pod might not even be passed to the scoring phase. This would result in a less than ideal placement of the Pod. For this reason, the value should not be set to very low percentages. A general rule -of thumb is to never set the value to anything lower than 30. Lower values +of thumb is to never set the value to anything lower than 10. Lower values should be used only when the scheduler's throughput is critical for your application and the score of nodes is not important. In other words, you prefer to run the Pod on any Node as long as it is feasible. -It is not recommended to lower this value from its default if your cluster has -only several hundred Nodes. It is unlikely to improve the scheduler's -performance significantly. +If your cluster has several hundred Nodes or fewer, we do not recommend lowering +the default value of this configuration option. It is unlikely to improve the +scheduler's performance significantly. ### How the scheduler iterates over Nodes @@ -91,8 +95,8 @@ for running Pods, the scheduler iterates over the nodes in a round robin fashion. You can imagine that Nodes are in an array. The scheduler starts from the start of the array and checks feasibility of the nodes until it finds enough Nodes as specified by `percentageOfNodesToScore`. For the next Pod, the -scheduler continues from the point in the Node array that it stopped at when checking -feasibility of Nodes for the previous Pod. +scheduler continues from the point in the Node array that it stopped at when +checking feasibility of Nodes for the previous Pod. If Nodes are in multiple zones, the scheduler iterates over Nodes in various zones to ensure that Nodes from different zones are considered in the diff --git a/content/en/docs/concepts/configuration/secret.md b/content/en/docs/concepts/configuration/secret.md index 31b3aca072a81..419ad33f476a6 100644 --- a/content/en/docs/concepts/configuration/secret.md +++ b/content/en/docs/concepts/configuration/secret.md @@ -75,6 +75,12 @@ kubectl create secret generic db-user-pass --from-file=./username.txt --from-fil ``` secret "db-user-pass" created ``` +{{< note >}} +Special characters such as `$`, `\*`, and `!` require escaping. +If the password you are using has special characters, you need to escape them using the `\\` character. For example, if your actual password is `S!B\*d$zDsb`, you should execute the command this way: + kubectl create secret generic dev-db-secret --from-literal=username=devuser --from-literal=password=S\\!B\\\*d\\$zDsb + You do not need to escape special characters in passwords from files (`--from-file`). 
+{{< /note >}} You can check that the secret was created like this: @@ -143,10 +149,10 @@ data: password: MWYyZDFlMmU2N2Rm ``` -Now create the Secret using [`kubectl create`](/docs/reference/generated/kubectl/kubectl-commands#create): +Now create the Secret using [`kubectl apply`](/docs/reference/generated/kubectl/kubectl-commands#apply): ```shell -kubectl create -f ./secret.yaml +kubectl apply -f ./secret.yaml ``` ``` secret "mysecret" created @@ -184,7 +190,7 @@ stringData: ``` Your deployment tool could then replace the `{{username}}` and `{{password}}` -template variables before running `kubectl create`. +template variables before running `kubectl apply`. stringData is a write-only convenience field. It is never output when retrieving Secrets. For example, if you run the following command: @@ -254,6 +260,73 @@ using the `-b` option to split long lines. Conversely Linux users *should* add the option `-w 0` to `base64` commands or the pipeline `base64 | tr -d '\n'` if `-w` option is not available. +#### Creating a Secret from Generator +Kubectl supports [managing objects using Kustomize](/docs/concepts/overview/object-management-kubectl/kustomization/) +since 1.14. With this new feature, +you can also create a Secret from generators and then apply it to create the object on +the Apiserver. The generators +should be specified in a `kustomization.yaml` inside a directory. + +For example, to generate a Secret from files `./username.txt` and `./password.txt` +```shell +# Create a kustomization.yaml file with SecretGenerator +cat <./kustomization.yaml +secretGenerator: +- name: db-user-pass + files: + - username.txt + - password.txt +EOF +``` +Apply the kustomization directory to create the Secret object. +```shell +$ kubectl apply -k . +secret/db-user-pass-96mffmfh4k created +``` + +You can check that the secret was created like this: + +```shell +$ kubectl get secrets +NAME TYPE DATA AGE +db-user-pass-96mffmfh4k Opaque 2 51s + +$ kubectl describe secrets/db-user-pass-96mffmfh4k +Name: db-user-pass +Namespace: default +Labels: +Annotations: + +Type: Opaque + +Data +==== +password.txt: 12 bytes +username.txt: 5 bytes +``` + +For example, to generate a Secret from literals `username=admin` and `password=secret`, +you can specify the secret generator in `kusotmization.yaml` as +```shell +# Create a kustomization.yaml file with SecretGenerator +$ cat <./kustomization.yaml +secretGenerator: +- name: db-user-pass + literals: + - username=admin + - password=secret +EOF +``` +Apply the kustomization directory to create the Secret object. +```shell +$ kubectl apply -k . +secret/db-user-pass-dddghtt9b5 created +``` +{{< note >}} +The generated Secrets name has a suffix appended by hashing the contents. This ensures that a new +Secret is generated each time the contents is modified. +{{< /note >}} + #### Decoding a Secret Secrets can be retrieved via the `kubectl get secret` command. For example, to retrieve the secret created in the previous section: @@ -620,8 +693,7 @@ start until all the pod's volumes are mounted. ### Use-Case: Pod with ssh keys -Create a secret containing some ssh keys: - +Create a kustomization.yaml with SecretGenerator containing some ssh keys: ```shell kubectl create secret generic ssh-key-secret --from-file=ssh-privatekey=/path/to/.ssh/id_rsa --from-file=ssh-publickey=/path/to/.ssh/id_rsa.pub ``` @@ -673,8 +745,7 @@ This example illustrates a pod which consumes a secret containing prod credentials and another pod which consumes a secret with test environment credentials. 
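The steps that follow rework this use case around a SecretGenerator. As a sketch, assuming both the prod and the test credentials are meant to be generated this way (using the literal values that appear in the commands below), the corresponding `kustomization.yaml` entries might look like:

```yaml
secretGenerator:
- name: prod-db-secret
  literals:
  - username=produser
  - password=Y4nys7f11
- name: test-db-secret
  literals:
  - username=testuser
  - password=iluvtests
```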
-Make the secrets: - +Make the kustomization.yaml with SecretGenerator ```shell kubectl create secret generic prod-db-secret --from-literal=username=produser --from-literal=password=Y4nys7f11 ``` @@ -682,24 +753,10 @@ kubectl create secret generic prod-db-secret --from-literal=username=produser -- secret "prod-db-secret" created ``` -```shell -kubectl create secret generic test-db-secret --from-literal=username=testuser --from-literal=password=iluvtests -``` -``` -secret "test-db-secret" created -``` -{{< note >}} -Special characters such as `$`, `\*`, and `!` require escaping. -If the password you are using has special characters, you need to escape them using the `\\` character. For example, if your actual password is `S!B\*d$zDsb`, you should execute the command this way: - - kubectl create secret generic dev-db-secret --from-literal=username=devuser --from-literal=password=S\\!B\\\*d\\$zDsb - -You do not need to escape special characters in passwords from files (`--from-file`). -{{< /note >}} - Now make the pods: -```yaml +```shell +$ cat < pod.yaml apiVersion: v1 kind: List items: @@ -739,6 +796,21 @@ items: - name: secret-volume readOnly: true mountPath: "/etc/secret-volume" +EOF +``` + +Add the pods to the same kustomization.yaml +```shell +$ cat <> kustomization.yaml +resources: +- pod.yaml +EOF +``` + +Apply all those objects on the Apiserver by + +```shell +kubectl apply --k . ``` Both containers will have the following files present on their filesystems with the values for each container's environment: diff --git a/content/en/docs/concepts/containers/images.md b/content/en/docs/concepts/containers/images.md index ab6206925f567..6f22211cedc83 100644 --- a/content/en/docs/concepts/containers/images.md +++ b/content/en/docs/concepts/containers/images.md @@ -205,7 +205,7 @@ example, run these on your desktop/laptop: Verify by creating a pod that uses a private image, e.g.: ```yaml -kubectl create -f - < ./kustomization.yaml +secretGenerator: +- name: myregistrykey + type: docker-registry + literals: + - docker-server=DOCKER_REGISTRY_SERVER + - docker-username=DOCKER_USER + - docker-password=DOCKER_PASSWORD + - docker-email=DOCKER_EMAIL +EOF + +kubectl apply -k . +secret/myregistrykey-66h7d4d986 created ``` If you already have a Docker credentials file then, rather than using the above @@ -300,7 +311,8 @@ so this process needs to be done one time per namespace. Now, you can create pods which reference that secret by adding an `imagePullSecrets` section to a pod definition. -```yaml +```shell +cat < pod.yaml apiVersion: v1 kind: Pod metadata: @@ -312,6 +324,12 @@ spec: image: janedoe/awesomeapp:v1 imagePullSecrets: - name: myregistrykey +EOF + +cat <> ./kustomization.yaml +resources: +- pod.yaml +EOF ``` This needs to be done for each pod that is using a private registry. diff --git a/content/en/docs/concepts/containers/runtime-class.md b/content/en/docs/concepts/containers/runtime-class.md index b8363b2e1311d..b9815cfccb82d 100644 --- a/content/en/docs/concepts/containers/runtime-class.md +++ b/content/en/docs/concepts/containers/runtime-class.md @@ -9,10 +9,16 @@ weight: 20 {{% capture overview %}} -{{< feature-state for_k8s_version="v1.12" state="alpha" >}} +{{< feature-state for_k8s_version="v1.14" state="beta" >}} This page describes the RuntimeClass resource and runtime selection mechanism. +{{< warning >}} +RuntimeClass includes *breaking* changes in the beta upgrade in v1.14. 
If you were using +RuntimeClass prior to v1.14, see [Upgrading RuntimeClass from Alpha to +Beta](#upgrading-runtimeclass-from-alpha-to-beta). +{{< /warning >}} + {{% /capture %}} @@ -20,72 +26,51 @@ This page describes the RuntimeClass resource and runtime selection mechanism. ## Runtime Class -RuntimeClass is an alpha feature for selecting the container runtime configuration to use to run a -pod's containers. +RuntimeClass is a feature for selecting the container runtime configuration. The container runtime +configuration is used to run a Pod's containers. ### Set Up -As an early alpha feature, there are some additional setup steps that must be taken in order to use -the RuntimeClass feature: - -1. Enable the RuntimeClass feature gate (on apiservers & kubelets, requires version 1.12+) -2. Install the RuntimeClass CRD -3. Configure the CRI implementation on nodes (runtime dependent) -4. Create the corresponding RuntimeClass resources - -#### 1. Enable the RuntimeClass feature gate - -See [Feature Gates](/docs/reference/command-line-tools-reference/feature-gates/) for an explanation -of enabling feature gates. The `RuntimeClass` feature gate must be enabled on apiservers _and_ -kubelets. - -#### 2. Install the RuntimeClass CRD - -The RuntimeClass [CustomResourceDefinition][] (CRD) can be found in the addons directory of the -Kubernetes git repo: [kubernetes/cluster/addons/runtimeclass/runtimeclass_crd.yaml][runtimeclass_crd] - -Install the CRD with `kubectl apply -f runtimeclass_crd.yaml`. - -[CustomResourceDefinition]: /docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ -[runtimeclass_crd]: https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/runtimeclass/runtimeclass_crd.yaml +Ensure the RuntimeClass feature gate is enabled (it is by default). See [Feature +Gates](/docs/reference/command-line-tools-reference/feature-gates/) for an explanation of enabling +feature gates. The `RuntimeClass` feature gate must be enabled on apiservers _and_ kubelets. +1. Configure the CRI implementation on nodes (runtime dependent) +2. Create the corresponding RuntimeClass resources -#### 3. Configure the CRI implementation on nodes +#### 1. Configure the CRI implementation on nodes -The configurations to select between with RuntimeClass are CRI implementation dependent. See the -corresponding documentation for your CRI implementation for how to configure. As this is an alpha -feature, not all CRIs support multiple RuntimeClasses yet. +The configurations available through RuntimeClass are Container Runtime Interface (CRI) +implementation dependent. See the corresponding documentation ([below](#cri-documentation)) for your +CRI implementation for how to configure. {{< note >}} -RuntimeClass currently assumes a homogeneous node configuration across the cluster -(which means that all nodes are configured the same way with respect to container runtimes). Any heterogeneity (varying configurations) must be -managed independently of RuntimeClass through scheduling features (see [Assigning Pods to -Nodes](/docs/concepts/configuration/assign-pod-node/)). +RuntimeClass currently assumes a homogeneous node configuration across the cluster (which means that +all nodes are configured the same way with respect to container runtimes). Any heterogeneity +(varying configurations) must be managed independently of RuntimeClass through scheduling features +(see [Assigning Pods to Nodes](/docs/concepts/configuration/assign-pod-node/)). 
{{< /note >}} -The configurations have a corresponding `RuntimeHandler` name, referenced by the RuntimeClass. The -RuntimeHandler must be a valid DNS 1123 subdomain (alpha-numeric + `-` and `.` characters). +The configurations have a corresponding `handler` name, referenced by the RuntimeClass. The +handler must be a valid DNS 1123 label (alpha-numeric + `-` characters). -#### 4. Create the corresponding RuntimeClass resources +#### 2. Create the corresponding RuntimeClass resources -The configurations setup in step 3 should each have an associated `RuntimeHandler` name, which -identifies the configuration. For each RuntimeHandler (and optionally the empty `""` handler), -create a corresponding RuntimeClass object. +The configurations setup in step 1 should each have an associated `handler` name, which identifies +the configuration. For each handler, create a corresponding RuntimeClass object. The RuntimeClass resource currently only has 2 significant fields: the RuntimeClass name -(`metadata.name`) and the RuntimeHandler (`spec.runtimeHandler`). The object definition looks like this: +(`metadata.name`) and the handler (`handler`). The object definition looks like this: ```yaml -apiVersion: node.k8s.io/v1alpha1 # RuntimeClass is defined in the node.k8s.io API group +apiVersion: node.k8s.io/v1beta1 # RuntimeClass is defined in the node.k8s.io API group kind: RuntimeClass metadata: name: myclass # The name the RuntimeClass will be referenced by # RuntimeClass is a non-namespaced resource -spec: - runtimeHandler: myconfiguration # The name of the corresponding CRI configuration +handler: myconfiguration # The name of the corresponding CRI configuration ``` - {{< note >}} It is recommended that RuntimeClass write operations (create/update/patch/delete) be restricted to the cluster administrator. This is typically the default. See [Authorization @@ -116,4 +101,66 @@ error message. If no `runtimeClassName` is specified, the default RuntimeHandler will be used, which is equivalent to the behavior when the RuntimeClass feature is disabled. +### CRI Configuration + +For more details on setting up CRI runtimes, see [CRI installation](/docs/setup/cri/). + +#### dockershim + +Kubernetes built-in dockershim CRI does not support runtime handlers. + +#### [containerd](https://containerd.io/) + +Runtime handlers are configured through containerd's configuration at +`/etc/containerd/config.toml`. Valid handlers are configured under the runtimes section: + +``` +[plugins.cri.containerd.runtimes.${HANDLER_NAME}] +``` + +See containerd's config documentation for more details: +https://github.com/containerd/cri/blob/master/docs/config.md + +#### [cri-o](https://cri-o.io/) + +Runtime handlers are configured through cri-o's configuration at `/etc/crio/crio.conf`. Valid +handlers are configured under the [crio.runtime +table](https://github.com/kubernetes-sigs/cri-o/blob/master/docs/crio.conf.5.md#crioruntime-table): + +``` +[crio.runtime.runtimes.${HANDLER_NAME}] + runtime_path = "${PATH_TO_BINARY}" +``` + +See cri-o's config documentation for more details: +https://github.com/kubernetes-sigs/cri-o/blob/master/cmd/crio/config.go + + +### Upgrading RuntimeClass from Alpha to Beta + +The RuntimeClass Beta feature includes the following changes: + +- The `node.k8s.io` API group and `runtimeclasses.node.k8s.io` resource have been migrated to a + built-in API from a CustomResourceDefinition. +- The `spec` has been inlined in the RuntimeClass definition (i.e. there is no more + RuntimeClassSpec). 
+- The `runtimeHandler` field has been renamed `handler`. +- The `handler` field is now required in all API versions. This means the `runtimeHandler` field in + the Alpha API is also required. +- The `handler` field must be a valid DNS label ([RFC 1123](https://tools.ietf.org/html/rfc1123)), + meaning it can no longer contain `.` characters (in all versions). Valid handlers match the + following regular expression: `^[a-z0-9]([-a-z0-9]*[a-z0-9])?$`. + +**Action Required:** The following actions are required to upgrade from the alpha version of the +RuntimeClass feature to the beta version: + +- RuntimeClass resources must be recreated *after* upgrading to v1.14, and the + `runtimeclasses.node.k8s.io` CRD should be manually deleted: + ``` + kubectl delete customresourcedefinitions.apiextensions.k8s.io runtimeclasses.node.k8s.io + ``` +- Alpha RuntimeClasses with an unspecified or empty `runtimeHandler` or those using a `.` character + in the handler are no longer valid, and must be migrated to a valid handler configuration (see + above). + {{% /capture %}} diff --git a/content/en/docs/concepts/overview/object-management-kubectl/kustomization.md b/content/en/docs/concepts/overview/object-management-kubectl/kustomization.md new file mode 100644 index 0000000000000..87ec48daea4f0 --- /dev/null +++ b/content/en/docs/concepts/overview/object-management-kubectl/kustomization.md @@ -0,0 +1,758 @@ +--- +title: Declarative Management of Kubernetes Objects Using Kustomize +content_template: templates/concept +weight: 40 +--- + +{{% capture overview %}} +[Kustomize](https://github.com/kubernetes-sigs/kustomize) is a standalone tool +to customize Kubernetes objects +through a [kustomization file](https://github.com/kubernetes-sigs/kustomize/blob/master/docs/kustomization.yaml). +Since 1.14, Kubectl also +supports the management of Kubernetes objects using a kustomization file. +To view Resources found in a directory containing a kustomization file, run the following command: +```shell +kubectl kustomize +``` +To apply those Resources, run `kubectl apply` with `--kustomize` or `-k` flag: +```shell +kubectl apply -k +``` +{{% /capture %}} + +{{% capture body %}} + +## Overview of Kustomize +Kustomize is a tool for customizing Kubernetes configurations. It has the following features to manage application configuration files: + +* generating resources from other sources +* setting cross-cutting fields for resources +* composing and customizing collections of resources + +### Generating Resources +ConfigMap and Secret hold config or sensitive data that are used by other Kubernetes objects, such as Pods. The source +of truth of ConfigMap or Secret are usually from somewhere else, such as a `.properties` file or a ssh key file. +Kustomize has `secretGenerator` and `configMapGenerator`, which generate Secret and ConfigMap from files or literals. + + +#### configMapGenerator +To generate a ConfigMap from a file, add an entry to `files` list in `configMapGenerator`. Here is an example of generating a ConfigMap with a data item from a file content. 
+```shell +# Create a application.properties file +cat <application.properties +FOO=Bar +EOF + +cat <./kustomization.yaml +configMapGenerator: +- name: example-configmap-1 + files: + - application.properties +EOF +``` +The generated ConfigMap can be checked by the following command: +```shell +kubectl kustomize ./ +``` +The generated ConfigMap is +```yaml +apiVersion: v1 +data: + application.properties: | + FOO=Bar +kind: ConfigMap +metadata: + name: example-configmap-1-8mbdf7882g +``` + +ConfigMap can also be generated from literal key-value pairs. To generate a ConfigMap from a literal key-value pair, add an entry to `literals` list in configMapGenerator. Here is an example of generating a ConfigMap with a data item from a key-value pair. +```shell +cat <./kustomization.yaml +configMapGenerator: +- name: example-configmap-2 + literals: + - FOO=Bar +EOF +``` +The generated ConfigMap can be checked by the following command: +```shell +kubectl kustomize ./ +``` +The generated ConfigMap is +```yaml +apiVersion: v1 +data: + FOO: Bar +kind: ConfigMap +metadata: + name: example-configmap-2-g2hdhfc6tk +``` + +#### secretGenerator +Secret can also be generated from files or literal key-value pairs. To generate a Secret from a file, add an entry to `files` list in `secretGenerator`. Here is an example of generating a Secret with a data item from a file. +```shell +# Create a password.txt file +cat <./password.txt +username=admin +password=secret +EOF + +cat <./kustomization.yaml +secretGenerator: +- name: example-secret-1 + files: + - password.txt +EOF +``` +The generated Secret is as follows: +```yaml +apiVersion: v1 +data: + password.txt: dXNlcm5hbWU9YWRtaW4KcGFzc3dvcmQ9c2VjcmV0Cg== +kind: Secret +metadata: + name: example-secret-1-t2kt65hgtb +type: Opaque +``` +To generate a Secret from a literal key-value pair, add an entry to `literals` list in `secretGenerator`. Here is an example of generating a Secret with a data item from a key-value pair. +```shell +cat <./kustomization.yaml +secretGenerator: +- name: example-secret-2 + literals: + - username=admin + - password=secert +EOF +``` +The generated Secret is as follows: +```yaml +apiVersion: v1 +data: + password: c2VjZXJ0 + username: YWRtaW4= +kind: Secret +metadata: + name: example-secret-2-t52t6g96d8 +type: Opaque +``` + +#### generatorOptions +The generated ConfigMaps and Secrets have a suffix appended by hashing the contents. This ensures that a new ConfigMap or Secret is generated when the content is changed. To disable the behavior of appending a suffix, one can use `generatorOptions`. Besides that, it is also possible to specify cross-cutting options for generated ConfigMaps and Secrets. +```shell +cat <./kustomization.yaml +configMapGenerator: +- name: example-configmap-3 + literals: + - FOO=Bar +generatorOptions: + disableNameSuffixHash: true + labels: + type: generated + annotations: + note: generated +EOF +``` +Run`kubectl kustomize ./` to view the generated ConfigMap: +```yaml +apiVersion: v1 +data: + FOO: Bar +kind: ConfigMap +metadata: + annotations: + note: generated + labels: + type: generated + name: example-configmap-3 +``` + +### Setting cross-cutting fields +It is quite common to set cross-cutting fields for all Kubernetes resources in a project. 
+Some use cases for setting cross-cutting fields: + +* setting the same namespace for all Resource +* adding the same name prefix or suffix +* adding the same set of labels +* adding the same set of annotations + +Here is an example: +```shell +# Create a deployment.yaml +cat <./deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + labels: + app: nginx +spec: + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx +EOF + +cat <./kustomization.yaml +namespace: my-namespace +namePrefix: dev- +nameSuffix: "-001" +commonLabels: + app: bingo +commonAnnotations: + oncallPager: 800-555-1212 +resources: +- deployment.yaml +EOF +``` +Run `kubectl kustomize ./` to view those fields are all set in the Deployment Resource: +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + oncallPager: 800-555-1212 + labels: + app: bingo + name: dev-nginx-deployment-001 + namespace: my-namespace +spec: + selector: + matchLabels: + app: bingo + template: + metadata: + annotations: + oncallPager: 800-555-1212 + labels: + app: bingo + spec: + containers: + - image: nginx + name: nginx +``` + +### Composing and Customizing Resources +It is common to compose a set of Resources in a project and manage them inside +the same file or directory. +Kustomize offers composing Resources from different files and applying patches or other customization to them. + +#### Composing +Kustomize supports composition of different resources. The `resources` field, in the `kustomization.yaml` file, defines the list of resources to include in a configuration. Set the path to a resource's configuration file in the `resources` list. +Here is an example for an nginx application with a Deployment and a Service. +```shell +# Create a deployment.yaml file +cat < deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-nginx +spec: + selector: + matchLabels: + run: my-nginx + replicas: 2 + template: + metadata: + labels: + run: my-nginx + spec: + containers: + - name: my-nginx + image: nginx + ports: + - containerPort: 80 +EOF + +# Create a service.yaml file +cat < service.yaml +apiVersion: v1 +kind: Service +metadata: + name: my-nginx + labels: + run: my-nginx +spec: + ports: + - port: 80 + protocol: TCP + selector: + run: my-nginx +EOF + +# Create a kustomization.yaml composing them +cat <./kustomization.yaml +resources: +- deployment.yaml +- service.yaml +EOF +``` +The Resources from `kubectl kustomize ./` contains both the Deployment and the Service objects. + +#### Customizing +On top of Resources, one can apply different customizations by applying patches. Kustomize supports different patching +mechanisms through `patchesStrategicMerge` and `patchesJson6902`. `patchesStrategicMerge` is a list of file paths. Each file should be resolved to a [strategic merge patch](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md). The names inside the patches must match Resource names that are already loaded. Small patches that do one thing are recommended. For example, create one patch for increasing the deployment replica number and another patch for setting the memory limit. 
+```shell +# Create a deployment.yaml file +cat < deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-nginx +spec: + selector: + matchLabels: + run: my-nginx + replicas: 2 + template: + metadata: + labels: + run: my-nginx + spec: + containers: + - name: my-nginx + image: nginx + ports: + - containerPort: 80 +EOF + +# Create a patch increase_replicas.yaml +cat < increase_replicas.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-nginx +spec: + replicas: 3 +EOF + +# Create another patch set_memory.yaml +cat < set_memory.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-nginx +spec: + template: + spec: + containers: + - name: my-nginx + resources: + limits: + memory: 512Mi +EOF + +cat <./kustomization.yaml +resources: +- deployment.yaml +patchesStrategicMerge: +- increase_replicas.yaml +- set_memory.yaml +EOF +``` +Run `kubectl kustomize ./` to view the Deployment: +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-nginx +spec: + replicas: 3 + selector: + matchLabels: + run: my-nginx + template: + metadata: + labels: + run: my-nginx + spec: + containers: + - image: nginx + limits: + memory: 512Mi + name: my-nginx + ports: + - containerPort: 80 +``` +Not all Resources or fields support strategic merge patches. To support modifying arbitrary fields in arbitrary Resources, +Kustomize offers applying [JSON patch](https://tools.ietf.org/html/rfc6902) through `patchesJson6902`. +To find the correct Resource for a Json patch, the group, version, kind and name of that Resource need to be +specified in `kustomization.yaml`. For example, increasing the replica number of a Deployment object can also be done +through `patchesJson6902`. +```shell +# Create a deployment.yaml file +cat < deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-nginx +spec: + selector: + matchLabels: + run: my-nginx + replicas: 2 + template: + metadata: + labels: + run: my-nginx + spec: + containers: + - name: my-nginx + image: nginx + ports: + - containerPort: 80 +EOF + +# Create a json patch +cat < patch.yaml +- op: replace + path: /spec/replicas + value: 3 +EOF + +# Create a kustomization.yaml +cat <./kustomization.yaml +resources: +- deployment.yaml + +patchesJson6902: +- target: + group: apps + version: v1 + kind: Deployment + name: my-nginx + path: patch.yaml +EOF +``` +Run `kubectl kustomize ./` to see the `replicas` field is updated: +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-nginx +spec: + replicas: 3 + selector: + matchLabels: + run: my-nginx + template: + metadata: + labels: + run: my-nginx + spec: + containers: + - image: nginx + name: my-nginx + ports: + - containerPort: 80 +``` +In addition to patches, Kustomize also offers customizing container images or injecting field values from other objects into containers +without creating patches. For example, you can change the image used inside containers by specifying the new image in `images` field in `kustomization.yaml`. 
+```shell +cat < deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-nginx +spec: + selector: + matchLabels: + run: my-nginx + replicas: 2 + template: + metadata: + labels: + run: my-nginx + spec: + containers: + - name: my-nginx + image: nginx + ports: + - containerPort: 80 +EOF + +cat <./kustomization.yaml +resources: +- deployment.yaml +images: +- name: nginx + newName: my.image.registry/nginx + newTag: 1.4.0 +EOF +``` +Run `kubectl kustomize ./` to see that the image being used is updated: +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-nginx +spec: + replicas: 2 + selector: + matchLabels: + run: my-nginx + template: + metadata: + labels: + run: my-nginx + spec: + containers: + - image: my.image.registry/nginx:1.4.0 + name: my-nginx + ports: + - containerPort: 80 +``` +Sometimes, the application running in a Pod may need to use configuration values from other objects. For example, +a Pod from a Deployment object need to read the corresponding Service name from Env or as a command argument. +Since the Service name may change as `namePrefix` or `nameSuffix` is added in the `kustomization.yaml` file. It is +not recommended to hard code the Service name in the command argument. For this usage, Kustomize can inject the Service name into containers through `vars`. + +```shell +# Create a deployment.yaml file +cat < deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-nginx +spec: + selector: + matchLabels: + run: my-nginx + replicas: 2 + template: + metadata: + labels: + run: my-nginx + spec: + containers: + - name: my-nginx + image: nginx + command: ["start", "--host", "\$(MY_SERVICE_NAME)"] +EOF + +# Create a service.yaml file +cat < service.yaml +apiVersion: v1 +kind: Service +metadata: + name: my-nginx + labels: + run: my-nginx +spec: + ports: + - port: 80 + protocol: TCP + selector: + run: my-nginx +EOF + +cat <./kustomization.yaml +namePrefix: dev- +nameSuffix: "-001" + +resources: +- deployment.yaml +- service.yaml + +vars: +- name: MY_SERVICE_NAME + objref: + kind: Service + name: my-nginx + apiVersion: v1 +EOF +``` +Run `kubectl kustomize ./` to see that the Service name injected into containers is `dev-my-nginx-001`: +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: dev-my-nginx-001 +spec: + replicas: 2 + selector: + matchLabels: + run: my-nginx + template: + metadata: + labels: + run: my-nginx + spec: + containers: + - command: + - start + - --host + - dev-my-nginx-001 + image: nginx + name: my-nginx +``` + +## Bases and Overlays +Kustomize has the concepts of **bases** and **overlays**. A **base** is a directory with a `kustomization.yaml`, which contains a +set of resources and associated customization. A base could be either a local directory or a directory from a remote repo, +as long as a `kustomization.yaml` is present inside. An **overlay** is a directory with a `kustomization.yaml` that refers to other +kustomization directories as its `bases`. A **base** has no knowledge of an overlay and can be used in multiple overlays. +An overlay may have multiple bases and it composes all resources +from bases and may also have customization on top of them. + +Here is an example of a base. 
+```shell +# Create a directory to hold the base +mkdir base +# Create a base/deployment.yaml +cat < base/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-nginx +spec: + selector: + matchLabels: + run: my-nginx + replicas: 2 + template: + metadata: + labels: + run: my-nginx + spec: + containers: + - name: my-nginx + image: nginx +EOF + +# Create a base/service.yaml file +cat < base/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: my-nginx + labels: + run: my-nginx +spec: + ports: + - port: 80 + protocol: TCP + selector: + run: my-nginx +EOF +# Create a base/kustomization.yaml +cat < base/kustomization.yaml +resources: +- deployment.yaml +- service.yaml +``` +This base can be used in multiple overlays. You can add different `namePrefix` or other cross-cutting fields +in different overlays. Here are two overlays using the same base. +```shell +mkdir dev +cat < dev/kustomization.yaml +bases: +- ../base +namePrefix: dev- +EOF + +mkdir prod +cat < prod/kustomization.yaml +bases: +- ../base +namePrefix: prod- +EOF +``` + +## How to apply/view/delete objects using Kustomize +Use `--kustomize` or `-k` in `kubectl` commands to recognize Resources managed by `kustomization.yaml`. +Note that `-k` should point to a kustomization directory, such as + +```shell +kubectl apply -k / +``` +Given the following `kustomization.yaml`, +```shell +# Create a deployment.yaml file +cat < deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-nginx +spec: + selector: + matchLabels: + run: my-nginx + replicas: 2 + template: + metadata: + labels: + run: my-nginx + spec: + containers: + - name: my-nginx + image: nginx + ports: + - containerPort: 80 +EOF + +# Create a kustomization.yaml +cat <./kustomization.yaml +namePrefix: dev- +commonLabels: + app: my-nginx +resources: +- deployment.yaml +EOF +``` +Running the following command will apply the Deployment object `dev-my-nginx`: +```shell +> kubectl apply -k ./ +deployment.apps/dev-my-nginx created +``` +Running the following command will get he Deployment object `dev-my-nginx`: +```shell +kubectl get -k ./ +``` +or +```shell +kubectl describe -k ./ +``` +Running the following command will delete the Deployment object `dev-my-nginx`: +```shell +> kubectl delete -k ./ +deployment.apps "dev-my-nginx" deleted +``` + + +## Kustomize Feature List +Here is a list of all the features in Kustomize. 
+ +| Field | Type | Explanation | +|-----------------------|--------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------| +| namespace | string | add namespace to all resources | +| namePrefix | string | value of this field is prepended to the names of all resources | +| nameSuffix | string | value of this field is appended to the names of all resources | +| commonlabels | map[string]string | labels to add to all resources and selectors | +| commonAnnotations | map[string]string | annotations to add to all resources | +| resources | []string | each entry in this list must resolve to an existing resource configuration file | +| configmapGenerator | [][ConfigMapArgs](https://github.com/kubernetes-sigs/kustomize/blob/master/pkg/types/kustomization.go#L195) | Each entry in this list generates a ConfigMap | +| secretGenerator | [][SecretArgs](https://github.com/kubernetes-sigs/kustomize/blob/master/pkg/types/kustomization.go#L201) | Each entry in this list generates a Secret | +| generatorOptions | [GeneratorOptions](https://github.com/kubernetes-sigs/kustomize/blob/master/pkg/types/kustomization.go#L239) | Modify behaviors of all ConfigMap and Secret generatos | +| bases | []string | Each entry in this list should resolve to a directory containing a kustomization.yaml file | +| patchesStrategicMerge | []string | Each entry in this list should resolve a strategic merge patch of a Kubernetes object | +| patchesJson6902 | [][Json6902](https://github.com/kubernetes-sigs/kustomize/blob/master/pkg/patch/json6902.go#L23) | Each entry in this list should resolve to a Kubernetes object and a Json Patch | +| vars | [][Var](https://github.com/kubernetes-sigs/kustomize/blob/master/pkg/types/var.go#L31) | Each entry is to capture text from one resource's field | +| images | [][Image](https://github.com/kubernetes-sigs/kustomize/blob/master/pkg/image/image.go#L23) | Each entry is to modify the name, tags and/or digest for one image without creating patches | +| configurations | []string | Each entry in this list should resolve to a file containing [Kustomize transformer configurations](https://github.com/kubernetes-sigs/kustomize/tree/master/examples/transformerconfigs) | +| crds | []string | Each entry in this list should resolve to an OpenAPI definition file for Kubernetes types | + + + +{{% capture whatsnext %}} +- [Kustomize](https://github.com/kubernetes-sigs/kustomize) +- [Kubectl Book](https://kubectl.docs.kubernetes.io) +- [Kubectl Command Reference](/docs/reference/generated/kubectl/kubectl/) +- [Kubernetes API Reference](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) +{{% /capture %}} diff --git a/content/en/docs/concepts/overview/object-management-kubectl/overview.md b/content/en/docs/concepts/overview/object-management-kubectl/overview.md index 3987da4a71f00..723df86818c01 100644 --- a/content/en/docs/concepts/overview/object-management-kubectl/overview.md +++ b/content/en/docs/concepts/overview/object-management-kubectl/overview.md @@ -7,7 +7,8 @@ weight: 10 {{% capture overview %}} The `kubectl` command-line tool supports several different ways to create and manage Kubernetes objects. This document provides an overview of the different -approaches. +approaches. Read the [Kubectl book](https://kubectl.docs.kubernetes.io) for +details of managing objects by Kubectl. 
{{% /capture %}} {{% capture body %}} @@ -179,6 +180,7 @@ Disadvantages compared to imperative object configuration: - [Managing Kubernetes Objects Using Object Configuration (Imperative)](/docs/concepts/overview/object-management-kubectl/imperative-config/) - [Managing Kubernetes Objects Using Object Configuration (Declarative)](/docs/concepts/overview/object-management-kubectl/declarative-config/) - [Kubectl Command Reference](/docs/reference/generated/kubectl/kubectl-commands/) +- [Kubectl Book](https://kubectl.docs.kubernetes.io) - [Kubernetes API Reference](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) {{< comment >}} diff --git a/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md b/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md index 57d65343d0e8a..fec01aeb8e54f 100644 --- a/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md +++ b/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md @@ -42,11 +42,11 @@ Here's an example `.yaml` file that shows the required fields and object spec fo {{< codenew file="application/deployment.yaml" >}} One way to create a Deployment using a `.yaml` file like the one above is to use the -[`kubectl create`](/docs/reference/generated/kubectl/kubectl-commands#create) command +[`kubectl apply`](/docs/reference/generated/kubectl/kubectl-commands#apply) command in the `kubectl` command-line interface, passing the `.yaml` file as an argument. Here's an example: ```shell -kubectl create -f https://k8s.io/examples/application/deployment.yaml --record +kubectl apply -f https://k8s.io/examples/application/deployment.yaml --record ``` The output is similar to this: diff --git a/content/en/docs/concepts/services-networking/connect-applications-service.md b/content/en/docs/concepts/services-networking/connect-applications-service.md index a37f9d8b7cc56..073e83abdd185 100644 --- a/content/en/docs/concepts/services-networking/connect-applications-service.md +++ b/content/en/docs/concepts/services-networking/connect-applications-service.md @@ -35,7 +35,7 @@ Create an nginx Pod, and note that it has a container port specification: This makes it accessible from any node in your cluster. 
 Check the nodes the Pod is running on:
 ```shell
-kubectl create -f ./run-my-nginx.yaml
+kubectl apply -f ./run-my-nginx.yaml
 kubectl get pods -l run=my-nginx -o wide
 ```
 ```
@@ -71,7 +71,7 @@ kubectl expose deployment/my-nginx
 service/my-nginx exposed
 ```
 
-This is equivalent to `kubectl create -f` the following yaml:
+This is equivalent to `kubectl apply -f` the following yaml:
 
 {{< codenew file="service/networking/nginx-svc.yaml" >}}
 
@@ -232,7 +232,7 @@ You can acquire all these from the [nginx https example](https://github.com/kube
 
 ```shell
 make keys secret KEY=/tmp/nginx.key CERT=/tmp/nginx.crt SECRET=/tmp/secret.json
-kubectl create -f /tmp/secret.json
+kubectl apply -f /tmp/secret.json
 ```
 ```
 secret/nginxsecret created
@@ -269,7 +269,7 @@ data:
 Now create the secrets using the file:
 
 ```shell
-kubectl create -f nginxsecrets.yaml
+kubectl apply -f nginxsecrets.yaml
 kubectl get secrets
 ```
 ```
@@ -312,7 +312,7 @@ Let's test this from a pod (the same secret is being reused for simplicity, the
 
 {{< codenew file="service/networking/curlpod.yaml" >}}
 
 ```shell
-kubectl create -f ./curlpod.yaml
+kubectl apply -f ./curlpod.yaml
 kubectl get pods -l app=curlpod
 ```
 ```
diff --git a/content/en/docs/concepts/services-networking/dns-pod-service.md b/content/en/docs/concepts/services-networking/dns-pod-service.md
index d843027dfca31..54a75172e85c5 100644
--- a/content/en/docs/concepts/services-networking/dns-pod-service.md
+++ b/content/en/docs/concepts/services-networking/dns-pod-service.md
@@ -170,10 +170,10 @@ following pod-specific DNS policies. These policies are specified in the
   for details on how DNS queries are handled in those cases.
 - "`ClusterFirstWithHostNet`": For Pods running with hostNetwork, you should
   explicitly set its DNS policy "`ClusterFirstWithHostNet`".
-- "`None`": A new option value introduced in Kubernetes v1.9 (Beta in v1.10). It
-  allows a Pod to ignore DNS settings from the Kubernetes environment. All DNS
-  settings are supposed to be provided using the `dnsConfig` field in the Pod Spec.
-  See [DNS config](#dns-config) subsection below.
+- "`None`": It allows a Pod to ignore DNS settings from the Kubernetes
+  environment. All DNS settings are supposed to be provided using the
+  `dnsConfig` field in the Pod Spec.
+  See the [Pod's DNS config](#pod-s-dns-config) subsection below.
 
 {{< note >}}
 "Default" is not the default DNS policy. If `dnsPolicy` is not
@@ -205,13 +205,7 @@ spec:
 
 ### Pod's DNS Config
 
-Kubernetes v1.9 introduces an Alpha feature (Beta in v1.10) that allows users more
-control on the DNS settings for a Pod. This feature is enabled by default in v1.10.
-To enable this feature in v1.9, the cluster administrator
-needs to enable the `CustomPodDNS` feature gate on the apiserver and the kubelet,
-for example, "`--feature-gates=CustomPodDNS=true,...`".
-When the feature gate is enabled, users can set the `dnsPolicy` field of a Pod
-to "`None`" and they can add a new field `dnsConfig` to a Pod Spec.
+A Pod's DNS config allows users more control over the DNS settings for a Pod.
 
 The `dnsConfig` field is optional and it can work with any `dnsPolicy` settings.
 However, when a Pod's `dnsPolicy` is set to "`None`", the `dnsConfig` field has
@@ -257,6 +251,16 @@ search default.svc.cluster.local svc.cluster.local cluster.local
 options ndots:5
 ```
 
+### Feature availability
+
+The availability of Pod DNS Config and DNS Policy "`None`" is shown below.
+ +| k8s version | Feature support | +| :---------: |:-----------:| +| 1.14 | Stable | +| 1.10 | Beta (on by default)| +| 1.9 | Alpha | + {{% /capture %}} {{% capture whatsnext %}} diff --git a/content/en/docs/concepts/services-networking/ingress.md b/content/en/docs/concepts/services-networking/ingress.md index 10cf95e295865..19683544fe518 100644 --- a/content/en/docs/concepts/services-networking/ingress.md +++ b/content/en/docs/concepts/services-networking/ingress.md @@ -136,7 +136,7 @@ There are existing Kubernetes concepts that allow you to expose a single Service {{< codenew file="service/networking/ingress.yaml" >}} -If you create it using `kubectl create -f` you should see: +If you create it using `kubectl apply -f` you should see: ```shell kubectl get ingress test-ingress @@ -190,7 +190,7 @@ spec: servicePort: 8080 ``` -When you create the Ingress with `kubectl create -f`: +When you create the ingress with `kubectl apply -f`: ```shell kubectl describe ingress simple-fanout-example diff --git a/content/en/docs/concepts/storage/persistent-volumes.md b/content/en/docs/concepts/storage/persistent-volumes.md index ee66fdd81c0b4..6f471b8a228d4 100644 --- a/content/en/docs/concepts/storage/persistent-volumes.md +++ b/content/en/docs/concepts/storage/persistent-volumes.md @@ -179,8 +179,8 @@ However, the particular path specified in the custom recycler pod template in th ### Expanding Persistent Volumes Claims -{{< feature-state for_k8s_version="v1.8" state="alpha" >}} {{< feature-state for_k8s_version="v1.11" state="beta" >}} + Support for expanding PersistentVolumeClaims (PVCs) is now enabled by default. You can expand the following types of volumes: @@ -193,6 +193,7 @@ the following types of volumes: * Azure Disk * Portworx * FlexVolumes +* CSI You can only expand a PVC if its storage class's `allowVolumeExpansion` field is set to true. @@ -214,6 +215,13 @@ To request a larger volume for a PVC, edit the PVC object and specify a larger size. This triggers expansion of the volume that backs the underlying `PersistentVolume`. A new `PersistentVolume` is never created to satisfy the claim. Instead, an existing volume is resized. +#### CSI Volume expansion + +{{< feature-state for_k8s_version="v1.14" state="alpha" >}} + +CSI volume expansion requires enabling `ExpandCSIVolumes` feature gate and also requires specific CSI driver to support volume expansion. Please refer to documentation of specific CSI driver for more information. + + #### Resizing a volume containing a file system You can only resize volumes containing a file system if the file system is XFS, Ext3, or Ext4. @@ -312,7 +320,7 @@ Currently, storage size is the only resource that can be set or requested. Futu {{< feature-state for_k8s_version="v1.13" state="beta" >}} Prior to Kubernetes 1.9, all volume plugins created a filesystem on the persistent volume. -Now, you can set the value of `volumeMode` to `raw` to use a raw block device, or `filesystem` +Now, you can set the value of `volumeMode` to `block` to use a raw block device, or `filesystem` to use a filesystem. `filesystem` is the default if the value is omitted. This is an optional API parameter. 
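+
+For example, a PersistentVolumeClaim that requests a raw block device might look like the
+following sketch (the claim name and size are illustrative; the API value is spelled `Block`):
+
+```shell
+cat <<EOF | kubectl apply -f -
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: block-pvc
+spec:
+  accessModes:
+  - ReadWriteOnce
+  volumeMode: Block
+  resources:
+    requests:
+      storage: 10Gi
+EOF
+```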
diff --git a/content/en/docs/concepts/storage/storage-classes.md b/content/en/docs/concepts/storage/storage-classes.md index fe817d1aac5b6..b0df83ed47612 100644 --- a/content/en/docs/concepts/storage/storage-classes.md +++ b/content/en/docs/concepts/storage/storage-classes.md @@ -151,6 +151,11 @@ The following plugins support `WaitForFirstConsumer` with pre-created Persistent * All of the above * [Local](#local) +{{< feature-state state="beta" for_k8s_version="1.14" >}} +[CSI volumes](/docs/concepts/storage/volumes/#csi) are also supported with dynamic provisioning +and pre-created PVs, but you'll need to look at the documentation for a specific CSI driver +to see its supported topology keys and examples. The `CSINodeInfo` feature gate must be enabled. + ### Allowed Topologies When a cluster operator specifies the `WaitForFirstConsumer` volume binding mode, it is no longer necessary @@ -739,7 +744,7 @@ references it. ### Local -{{< feature-state for_k8s_version="v1.10" state="beta" >}} +{{< feature-state for_k8s_version="v1.14" state="stable" >}} ```yaml kind: StorageClass @@ -750,7 +755,7 @@ provisioner: kubernetes.io/no-provisioner volumeBindingMode: WaitForFirstConsumer ``` -Local volumes do not support dynamic provisioning yet, however a StorageClass +Local volumes do not currently support dynamic provisioning, however a StorageClass should still be created to delay volume binding until pod scheduling. This is specified by the `WaitForFirstConsumer` volume binding mode. diff --git a/content/en/docs/concepts/storage/volumes.md b/content/en/docs/concepts/storage/volumes.md index a479c9ebded6d..bcf6abe2d9c72 100644 --- a/content/en/docs/concepts/storage/volumes.md +++ b/content/en/docs/concepts/storage/volumes.md @@ -70,6 +70,7 @@ Kubernetes supports several types of Volumes: * [azureDisk](#azuredisk) * [azureFile](#azurefile) * [cephfs](#cephfs) + * [cinder](#cinder) * [configMap](#configmap) * [csi](#csi) * [downwardAPI](#downwardapi) @@ -148,6 +149,17 @@ spec: fsType: ext4 ``` +#### CSI Migration + +{{< feature-state for_k8s_version="v1.14" state="alpha" >}} + +The CSI Migration feature for awsElasticBlockStore, when enabled, shims all plugin operations +from the existing in-tree plugin to the `ebs.csi.aws.com` Container +Storage Interface (CSI) Driver. In order to use this feature, the [AWS EBS CSI +Driver](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) +must be installed on the cluster and the `CSIMigration` and `CSIMigrationAWS` +Alpha features must be enabled. + ### azureDisk {#azuredisk} A `azureDisk` is used to mount a Microsoft Azure [Data Disk](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-linux-about-disks-vhds/) into a Pod. @@ -176,6 +188,48 @@ You must have your own Ceph server running with the share exported before you ca See the [CephFS example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/volumes/cephfs/) for more details. +### cinder {#cinder} + +{{< note >}} +Prerequisite: Kubernetes with OpenStack Cloud Provider configured. For cloudprovider +configuration please refer [cloud provider openstack](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#openstack). +{{< /note >}} + +`cinder` is used to mount OpenStack Cinder Volume into your Pod. 
+ +#### Cinder Volume Example configuration + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: test-cinder +spec: + containers: + - image: k8s.gcr.io/test-webserver + name: test-cinder-container + volumeMounts: + - mountPath: /test-cinder + name: test-volume + volumes: + - name: test-volume + # This OpenStack volume must already exist. + cinder: + volumeID: + fsType: ext4 +``` + +#### CSI Migration + +{{< feature-state for_k8s_version="v1.14" state="alpha" >}} + +The CSI Migration feature for Cinder, when enabled, shims all plugin operations +from the existing in-tree plugin to the `cinder.csi.openstack.org` Container +Storage Interface (CSI) Driver. In order to use this feature, the [Openstack Cinder CSI +Driver](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/using-cinder-csi-plugin.md) +must be installed on the cluster and the `CSIMigration` and `CSIMigrationOpenStack` +Alpha features must be enabled. + ### configMap {#configmap} The [`configMap`](/docs/tasks/configure-pod-container/configure-pod-configmap/) resource @@ -401,6 +455,17 @@ spec: fsType: ext4 ``` +#### CSI Migration + +{{< feature-state for_k8s_version="v1.14" state="alpha" >}} + +The CSI Migration feature for GCE PD, when enabled, shims all plugin operations +from the existing in-tree plugin to the `pd.csi.storage.gke.io` Container +Storage Interface (CSI) Driver. In order to use this feature, the [GCE PD CSI +Driver](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) +must be installed on the cluster and the `CSIMigration` and `CSIMigrationGCE` +Alpha features must be enabled. + ### gitRepo (deprecated) {#gitrepo} {{< warning >}} @@ -535,14 +600,7 @@ See the [iSCSI example](https://github.com/kubernetes/examples/tree/{{< param "g ### local {#local} -{{< feature-state for_k8s_version="v1.10" state="beta" >}} - -{{< note >}} -The alpha PersistentVolume NodeAffinity annotation has been deprecated -and will be removed in a future release. Existing PersistentVolumes using this -annotation must be updated by the user to use the new PersistentVolume -`NodeAffinity` field. -{{< /note >}} +{{< feature-state for_k8s_version="v1.14" state="stable" >}} A `local` volume represents a mounted local storage device such as a disk, partition or directory. @@ -608,7 +666,8 @@ selectors, Pod affinity, and Pod anti-affinity. An external static provisioner can be run separately for improved management of the local volume lifecycle. Note that this provisioner does not support dynamic provisioning yet. For an example on how to run an external local provisioner, -see the [local volume provisioner user guide](https://github.com/kubernetes-incubator/external-storage/tree/master/local-volume). +see the [local volume provisioner user +guide](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner). {{< note >}} The local PersistentVolume requires manual cleanup and deletion by the @@ -1073,13 +1132,14 @@ spec: ### Using subPath with expanded environment variables -{{< feature-state for_k8s_version="v1.11" state="alpha" >}} +{{< feature-state for_k8s_version="v1.14" state="alpha" >}} -`subPath` directory names can also be constructed from Downward API environment variables. +Use the `subPathExpr` field to construct `subPath` directory names from Downward API environment variables. Before you use this feature, you must enable the `VolumeSubpathEnvExpansion` feature gate. +The `subPath` and `subPathExpr` properties are mutually exclusive. 
-In this example, a Pod uses `subPath` to create a directory `pod1` within the hostPath volume `/var/log/pods`, using the pod name from the Downward API. The host directory `/var/log/pods/pod1` is mounted at `/logs` in the container. +In this example, a Pod uses `subPathExpr` to create a directory `pod1` within the hostPath volume `/var/log/pods`, using the pod name from the Downward API. The host directory `/var/log/pods/pod1` is mounted at `/logs` in the container. ```yaml apiVersion: v1 @@ -1100,7 +1160,7 @@ spec: volumeMounts: - name: workdir1 mountPath: /logs - subPath: $(POD_NAME) + subPathExpr: $(POD_NAME) restartPolicy: Never volumes: - name: workdir1 @@ -1217,28 +1277,78 @@ persistent volume: #### CSI raw block volume support -{{< feature-state for_k8s_version="v1.11" state="alpha" >}} +{{< feature-state for_k8s_version="v1.14" state="beta" >}} Starting with version 1.11, CSI introduced support for raw block volumes, which relies on the raw block volume feature that was introduced in a previous version of Kubernetes. This feature will make it possible for vendors with external CSI drivers to implement raw block volumes support in Kubernetes workloads. -CSI block volume support is feature-gated and turned off by default. To run CSI with -block volume support enabled, a cluster administrator must enable the feature for each -Kubernetes component using the following feature gate flags: +CSI block volume support is feature-gated, but enabled by default. The two +feature gates which must be enabled for this feature are `BlockVolume` and +`CSIBlockVolume`. + +Learn how to +[setup your PV/PVC with raw block volume support](/docs/concepts/storage/persistent-volumes/#raw-block-volume-support). + +#### CSI ephemeral volumes + +{{< feature-state for_k8s_version="v1.14" state="alpha" >}} + +This feature allows CSI volumes to be directly embedded in the Pod specification instead of a PersistentVolume. Volumes specified in this way are ephemeral and do not persist across Pod restarts. + +Example: + +```yaml +kind: Pod +apiVersion: v1 +metadata: + name: my-csi-app +spec: + containers: + - name: my-frontend + image: busybox + volumeMounts: + - mountPath: "/data" + name: my-csi-inline-vol + command: [ "sleep", "1000000" ] + volumes: + - name: my-csi-inline-vol + csi: + driver: inline.storage.kubernetes.io + volumeAttributes: + foo: bar +``` + +This feature requires CSIInlineVolume feature gate to be enabled: ``` ---feature-gates=BlockVolume=true,CSIBlockVolume=true +--feature-gates=CSIInlineVolume=true ``` -Learn how to -[setup your PV/PVC with raw block volume support](/docs/concepts/storage/persistent-volumes/#raw-block-volume-support). +CSI ephemeral volumes are only supported by a subset of CSI drivers. Please see the list of CSI drivers [here](https://kubernetes-csi.github.io/docs/drivers.html). -#### Developer resources +# Developer resources For more information on how to develop a CSI driver, refer to the [kubernetes-csi documentation](https://kubernetes-csi.github.io/docs/) +#### Migrating to CSI drivers from in-tree plugins + +{{< feature-state for_k8s_version="v1.14" state="alpha" >}} + +The CSI Migration feature, when enabled, directs operations against existing in-tree +plugins to corresponding CSI plugins (which are expected to be installed and configured). +The feature implements the necessary translation logic and shims to re-route the +operations in a seamless fashion. 
As a result, operators do not have to make any +configuration changes to existing Storage Classes, PVs or PVCs (referring to +in-tree plugins) when transitioning to a CSI driver that supersedes an in-tree plugin. + +In the alpha state, the operations and features that are supported include +provisioning/delete, attach/detach and mount/unmount of volumes with `volumeMode` set to `filesystem` + +In-tree plugins that support CSI Migration and have a corresponding CSI driver implemented +are listed in the "Types of Volumes" section above. + ### Flexvolume {#flexVolume} Flexvolume is an out-of-tree plugin interface that has existed in Kubernetes diff --git a/content/en/docs/concepts/workloads/controllers/daemonset.md b/content/en/docs/concepts/workloads/controllers/daemonset.md index 33fd8af3702e1..3a1a875e2e2e3 100644 --- a/content/en/docs/concepts/workloads/controllers/daemonset.md +++ b/content/en/docs/concepts/workloads/controllers/daemonset.md @@ -42,7 +42,7 @@ You can describe a DaemonSet in a YAML file. For example, the `daemonset.yaml` f * Create a DaemonSet based on the YAML file: ``` -kubectl create -f https://k8s.io/examples/controllers/daemonset.yaml +kubectl apply -f https://k8s.io/examples/controllers/daemonset.yaml ``` ### Required Fields diff --git a/content/en/docs/concepts/workloads/controllers/deployment.md b/content/en/docs/concepts/workloads/controllers/deployment.md index 0910dfbdd19c7..5d91c8b878b9a 100644 --- a/content/en/docs/concepts/workloads/controllers/deployment.md +++ b/content/en/docs/concepts/workloads/controllers/deployment.md @@ -73,7 +73,7 @@ In this example: To create this Deployment, run the following command: ```shell -kubectl create -f https://k8s.io/examples/controllers/nginx-deployment.yaml +kubectl apply -f https://k8s.io/examples/controllers/nginx-deployment.yaml ``` {{< note >}} @@ -455,7 +455,7 @@ kubectl rollout history deployment.v1.apps/nginx-deployment ``` deployments "nginx-deployment" REVISION CHANGE-CAUSE -1 kubectl create --filename=https://k8s.io/examples/controllers/nginx-deployment.yaml --record=true +1 kubectl apply --filename=https://k8s.io/examples/controllers/nginx-deployment.yaml --record=true 2 kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.9.1 --record=true 3 kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.91 --record=true ``` diff --git a/content/en/docs/concepts/workloads/controllers/garbage-collection.md b/content/en/docs/concepts/workloads/controllers/garbage-collection.md index ee3fb1fcd5da8..ecd89ba87693b 100644 --- a/content/en/docs/concepts/workloads/controllers/garbage-collection.md +++ b/content/en/docs/concepts/workloads/controllers/garbage-collection.md @@ -39,7 +39,7 @@ If you create the ReplicaSet and then view the Pod metadata, you can see OwnerReferences field: ```shell -kubectl create -f https://k8s.io/examples/controllers/replicaset.yaml +kubectl apply -f https://k8s.io/examples/controllers/replicaset.yaml kubectl get pods --output=yaml ``` diff --git a/content/en/docs/concepts/workloads/controllers/jobs-run-to-completion.md b/content/en/docs/concepts/workloads/controllers/jobs-run-to-completion.md index 18e213f17e330..2155531342a47 100644 --- a/content/en/docs/concepts/workloads/controllers/jobs-run-to-completion.md +++ b/content/en/docs/concepts/workloads/controllers/jobs-run-to-completion.md @@ -39,7 +39,7 @@ It takes around 10s to complete. 
You can run the example with this command: ```shell -kubectl create -f https://k8s.io/examples/controllers/job.yaml +kubectl apply -f https://k8s.io/examples/controllers/job.yaml ``` ``` job "pi" created diff --git a/content/en/docs/concepts/workloads/controllers/replicaset.md b/content/en/docs/concepts/workloads/controllers/replicaset.md index 9ca82477fe7fd..e5db639861cd1 100644 --- a/content/en/docs/concepts/workloads/controllers/replicaset.md +++ b/content/en/docs/concepts/workloads/controllers/replicaset.md @@ -54,7 +54,7 @@ Saving this manifest into `frontend.yaml` and submitting it to a Kubernetes clus create the defined ReplicaSet and the Pods that it manages. ```shell -kubectl create -f http://k8s.io/examples/controllers/frontend.yaml +kubectl apply -f http://k8s.io/examples/controllers/frontend.yaml ``` You can then get the current ReplicaSets deployed: @@ -162,7 +162,7 @@ Suppose you create the Pods after the frontend ReplicaSet has been deployed and fulfill its replica count requirement: ```shell -kubectl create -f http://k8s.io/examples/pods/pod-rs.yaml +kubectl apply -f http://k8s.io/examples/pods/pod-rs.yaml ``` The new Pods will be acquired by the ReplicaSet, and then immediately terminated as the ReplicaSet would be over @@ -184,12 +184,12 @@ pod2 0/1 Terminating 0 4s If you create the Pods first: ```shell -kubectl create -f http://k8s.io/examples/pods/pod-rs.yaml +kubectl apply -f http://k8s.io/examples/pods/pod-rs.yaml ``` And then create the ReplicaSet however: ```shell -kubectl create -f http://k8s.io/examples/controllers/frontend.yaml +kubectl apply -f http://k8s.io/examples/controllers/frontend.yaml ``` You shall see that the ReplicaSet has acquired the Pods and has only created new ones according to its spec until the @@ -308,7 +308,7 @@ create the defined HPA that autoscales the target ReplicaSet depending on the CP of the replicated Pods. 
```shell -kubectl create -f https://k8s.io/examples/controllers/hpa-rs.yaml +kubectl apply -f https://k8s.io/examples/controllers/hpa-rs.yaml ``` Alternatively, you can use the `kubectl autoscale` command to accomplish the same diff --git a/content/en/docs/concepts/workloads/controllers/replicationcontroller.md b/content/en/docs/concepts/workloads/controllers/replicationcontroller.md index c77b2fee28092..499be034cb994 100644 --- a/content/en/docs/concepts/workloads/controllers/replicationcontroller.md +++ b/content/en/docs/concepts/workloads/controllers/replicationcontroller.md @@ -55,7 +55,7 @@ This example ReplicationController config runs three copies of the nginx web ser Run the example job by downloading the example file and then running this command: ```shell -kubectl create -f https://k8s.io/examples/controllers/replication.yaml +kubectl apply -f https://k8s.io/examples/controllers/replication.yaml ``` ``` replicationcontroller/nginx created diff --git a/content/en/docs/concepts/workloads/pods/init-containers.md b/content/en/docs/concepts/workloads/pods/init-containers.md index 70b5a60aa9c9a..c535d5b59e60b 100644 --- a/content/en/docs/concepts/workloads/pods/init-containers.md +++ b/content/en/docs/concepts/workloads/pods/init-containers.md @@ -180,7 +180,7 @@ spec: This Pod can be started and debugged with the following commands: ```shell -kubectl create -f myapp.yaml +kubectl apply -f myapp.yaml ``` ``` pod/myapp-pod created @@ -240,7 +240,7 @@ Once we start the `mydb` and `myservice` services, we can see the Init Container complete and the `myapp-pod` is created: ```shell -kubectl create -f services.yaml +kubectl apply -f services.yaml ``` ``` service/myservice created diff --git a/content/en/docs/concepts/workloads/pods/pod-lifecycle.md b/content/en/docs/concepts/workloads/pods/pod-lifecycle.md index 707a3d1658a90..d1e1f7e9f279b 100644 --- a/content/en/docs/concepts/workloads/pods/pod-lifecycle.md +++ b/content/en/docs/concepts/workloads/pods/pod-lifecycle.md @@ -193,7 +193,7 @@ Once Pod is assigned to a node by scheduler, kubelet starts creating containers ## Pod readiness gate -{{< feature-state for_k8s_version="v1.12" state="beta" >}} +{{< feature-state for_k8s_version="v1.14" state="stable" >}} In order to add extensibility to Pod readiness by enabling the injection of extra feedbacks or signals into `PodStatus`, Kubernetes 1.11 introduced a diff --git a/content/en/docs/getting-started-guides/windows/OVN_OVS_Windows_Installer.png b/content/en/docs/getting-started-guides/windows/OVN_OVS_Windows_Installer.png deleted file mode 100644 index 520f6ae9e6c54..0000000000000 Binary files a/content/en/docs/getting-started-guides/windows/OVN_OVS_Windows_Installer.png and /dev/null differ diff --git a/content/en/docs/getting-started-guides/windows/UpstreamRouting.png b/content/en/docs/getting-started-guides/windows/UpstreamRouting.png deleted file mode 100644 index 91189c36af3ba..0000000000000 Binary files a/content/en/docs/getting-started-guides/windows/UpstreamRouting.png and /dev/null differ diff --git a/content/en/docs/getting-started-guides/windows/_index.md b/content/en/docs/getting-started-guides/windows/_index.md deleted file mode 100644 index 9f27564b07b50..0000000000000 --- a/content/en/docs/getting-started-guides/windows/_index.md +++ /dev/null @@ -1,401 +0,0 @@ ---- -title: Using Windows Server Containers in Kubernetes -toc_hide: true ---- -{{< note >}} -These instructions were recently updated based on Windows Server platform enhancements and the Kubernetes v1.9 release 
-{{< /note >}} - -Kubernetes version 1.5 introduced Alpha support for Windows Server -Containers based on the Windows Server 2016 operating system. With the -release of Windows Server version 1709 and using Kubernetes v1.9 users -are able to deploy a Kubernetes cluster either on-premises or in a -private/public cloud using a number of different network topologies -and CNI plugins. Some key feature improvements for Windows Server -Containers on Kubernetes include: - -- Improved support for pods! Shared network namespace (compartment) with multiple Windows Server containers (shared kernel) -- Reduced network complexity by using a single network endpoint per pod -- Kernel-Based load-balancing using the Virtual Filtering Platform (VFP) Hyper-v Switch Extension (analogous to Linux iptables) -- Container Runtime Interface (CRI) pod and node level statistics -- Support for kubeadm commands to add Windows Server nodes to a Kubernetes environment - -The Kubernetes control plane (API Server, Scheduler, Controller Manager, etc) continue to run on Linux, while the kubelet and kube-proxy can be run on Windows Server 2016 or later - -{{< note >}} -Windows Server Containers on Kubernetes is a Beta feature in Kubernetes v1.9 -{{< /note >}} - -## Get Windows Binaries -We recommend using the release binaries that can be found at [https://github.com/kubernetes/kubernetes/releases/latest](https://github.com/kubernetes/kubernetes/releases/latest). Under the CHANGELOG you can find the Node Binaries link for Windows-amd64, which will include kubeadm, kubectl, kubelet and kube-proxy. - -If you wish to build the code yourself, please refer to detailed build instructions [here](https://docs.microsoft.com/en-us/virtualization/windowscontainers/kubernetes/compiling-kubernetes-binaries). - -## Prerequisites -In Kubernetes version 1.9 or later, Windows Server Containers for Kubernetes are supported using the following: - -1. Kubernetes control plane running on existing Linux infrastructure (version 1.9 or later). -2. Kubenet network plugin setup on the Linux nodes. -3. Windows Server 2016 RTM or later. Windows Server version 1709 or later is preferred; it unlocks key capabilities like shared network namespace. -4. Docker Version 17.06.1-ee-2 or later for Windows Server nodes (Linux nodes and Kubernetes control plane can run any Kubernetes supported Docker Version). - -## Networking -There are several supported network configurations with Kubernetes v1.9 on Windows, including both Layer-3 routed and overlay topologies using third-party network plugins. - -1. [Upstream L3 Routing](#upstream-l3-routing-topology) - IP routes configured in upstream ToR -2. [Host-Gateway](#host-gateway-topology) - IP routes configured on each host -3. [Open vSwitch (OVS) & Open Virtual Network (OVN) with Overlay](#using-ovn-with-ovs) - overlay networks (supports STT and Geneve tunneling types) -4. [Future - In Review] Overlay - VXLAN or IP-in-IP encapsulation using Flannel -5. [Future] Layer-3 Routing with BGP (Calico) - -The selection of which network configuration and topology to deploy depends on the physical network topology and a user's ability to configure routes, performance concerns with encapsulation, and requirement to integrate with third-party network plugins. - -### Future CNI Plugins -An additional two CNI plugins [win-l2bridge (host-gateway) and win-overlay (vxlan)] are in [PR review](https://github.com/containernetworking/plugins/pull/85). These two CNI plugins, when ready, can either be used directly or with Flannel. 
- -### Linux -The above networking approaches are already supported on Linux using a bridge interface, which essentially creates a private network local to the node. Similar to the Windows side, routes to all other pod CIDRs must be created in order to send packets via the "public" NIC. - -### Windows -Windows supports the CNI network model and uses plugins to interface with the Windows Host Networking Service (HNS) to configure host networking and policy. At the time of this writing, the only publicly available CNI plugin from Microsoft is built from a private repo and available here [wincni.exe](https://github.com/Microsoft/SDN/blob/master/Kubernetes/windows/cni/wincni.exe). It uses an l2bridge network created through the Windows Host Networking Service (HNS) by an administrator using HNS PowerShell commands on each node as documented in the [Windows Host Setup](#windows-host-setup) section below. Source code for the future CNI plugins will be made available publicly. - -#### Upstream L3 Routing Topology -In this topology, networking is achieved using L3 routing with static IP routes configured in an upstream Top of Rack (ToR) switch/router. Each cluster node is connected to the management network with a host IP. Additionally, each node uses a local 'l2bridge' network with a pod CIDR assigned. All pods on a given worker node will be connected to the pod CIDR subnet ('l2bridge' network). In order to enable network communication between pods running on different nodes, the upstream router has static routes configured with pod CIDR prefix => Host IP. - -The following example diagram illustrates the Windows Server networking setup for Kubernetes using Upstream L3 Routing Setup: -![K8s Cluster using L3 Routing with ToR](UpstreamRouting.png) - -#### Host-Gateway Topology -This topology is similar to the Upstream L3 Routing topology with the only difference being that static IP routes are configured directly on each cluster node and not in the upstream ToR. Each node uses a local 'l2bridge' network with a pod CIDR assigned as before and has routing table entries for all other pod CIDR subnets assigned to the remote cluster nodes. - -#### Using OVN with OVS -The following diagram gives a general overview of the architecture and interaction between components: - -![Overlay using OVN controller and OVS Switch Extension](ovn_kubernetes.png) - -(The above image is from [https://github.com/openvswitch/ovn-kubernetes#overlay-mode-architecture-diagram](https://github.com/openvswitch/ovn-kubernetes#overlay-mode-architecture-diagram)) - -Due to its architecture, OVN has a central component which stores your networking intent in a database. Other components i.e. kube-apiserver, kube-controller-manager, kube-scheduler etc. can be deployed on that central node as well. - -## Setting up Windows Server Containers on Kubernetes -To run Windows Server Containers on Kubernetes, you'll need to set up both your host machines and the Kubernetes node components for Windows. Depending on your network topology, routes may need to be set up for pod communication on different nodes. - -### Host Setup - -#### For 1. Upstream L3 Routing Topology and 2. Host-Gateway Topology - -##### Linux Host Setup - -1. Linux hosts should be setup according to their respective distro documentation and the requirements of the Kubernetes version you will be using. -2. 
Configure Linux Master node using steps [here](https://github.com/MicrosoftDocs/Virtualization-Documentation/blob/live/virtualization/windowscontainers/kubernetes/creating-a-linux-master.md) -3. [Optional] CNI network plugin installed. - -##### Windows Host Setup - - -1. Windows Server container host running the required Windows Server and Docker versions. Follow the setup instructions outlined by this help topic: https://docs.microsoft.com/en-us/virtualization/windowscontainers/quick-start/quick-start-windows-server. -2. [Get Windows Binaries](#get-windows-binaries) kubelet.exe, kube-proxy.exe, and kubectl.exe using instructions -3. Copy Node spec file (kube config) from Linux master node with X.509 keys -4. Create the HNS Network, ensure the correct CNI network config, and start kubelet.exe using this script [start-kubelet.ps1](https://github.com/Microsoft/SDN/blob/master/Kubernetes/windows/start-kubelet.ps1) -5. Start kube-proxy using this script [start-kubeproxy.ps1](https://github.com/Microsoft/SDN/blob/master/Kubernetes/windows/start-kubeproxy.ps1) -6. [Only required for #2 Host-Gateway mode] Add static routes on Windows host using this script [AddRoutes.ps1](https://github.com/Microsoft/SDN/blob/master/Kubernetes/windows/AddRoutes.ps1) - -More detailed instructions can be found [here](https://github.com/MicrosoftDocs/Virtualization-Documentation/blob/live/virtualization/windowscontainers/kubernetes/getting-started-kubernetes-windows.md). - -**Windows CNI Config Example** -Today, Windows CNI plugin is based on wincni.exe code with the following example, configuration file. This is based on the ToR example diagram shown above, specifying the configuration to apply to Windows node-1. Of special interest is Windows node-1 pod CIDR (10.10.187.64/26) and the associated gateway of cbr0 (10.10.187.66). The exception list is specifying the Service CIDR (11.0.0.0/8), Cluster CIDR (10.10.0.0/16), and Management (or Host) CIDR (10.127.132.128/25). - -Note: this file assumes that a user previous created 'l2bridge' host networks on each Windows node using `-HNSNetwork` cmdlets as shown in the `start-kubelet.ps1` and `start-kubeproxy.ps1` scripts linked above - -```json -{ - "cniVersion": "0.2.0", - "name": "l2bridge", - "type": "wincni.exe", - "master": "Ethernet", - "ipam": { - "environment": "azure", - "subnet": "10.10.187.64/26", - "routes": [{ - "GW": "10.10.187.66" - }] - }, - "dns": { - "Nameservers": [ - "11.0.0.10" - ] - }, - "AdditionalArgs": [{ - "Name": "EndpointPolicy", - "Value": { - "Type": "OutBoundNAT", - "ExceptionList": [ - "11.0.0.0/8", - "10.10.0.0/16", - "10.127.132.128/25" - ] - } - }, - { - "Name": "EndpointPolicy", - "Value": { - "Type": "ROUTE", - "DestinationPrefix": "11.0.0.0/8", - "NeedEncap": true - } - }, - { - "Name": "EndpointPolicy", - "Value": { - "Type": "ROUTE", - "DestinationPrefix": "10.127.132.213/32", - "NeedEncap": true - } - } - ] -} -``` - -#### DNS configurations - -DNS configurations for Windows containers are set by CNI plugins which support `dns` capabilities. To enable `dns` capabilities, the following options should be included in the CNI configuration file: - -```json -{ - ... - "capabilities": {"dns": true}, -} -``` - -The following DNS options from kubelet will be passed to CNI plugins: - -- servers: List of DNS servers. -- searches: List of DNS search domains. -- options: List of DNS options. - -e.g. 
- -```json -"dns" { - "servers": ["10.0.0.10"], - "searches": ["default.svc.cluster.local","svc.cluster.local","cluster.local"], - "options": [] -} -``` - -#### For 3. Open vSwitch (OVS) & Open Virtual Network (OVN) with Overlay - -{{< note >}} -Fully automated setup via Ansible playbooks is [available](https://github.com/openvswitch/ovn-kubernetes/tree/master/contrib). -{{< /note >}} - -For manual setup, continue the following steps. - -##### Linux Host Setup - -Setting up the central node and the components needed is out of scope of this document. You can read [these instructions](https://github.com/openvswitch/ovn-kubernetes#k8s-master-node-initialization) for that. - -Adding a Linux minion is also out of scope and you can read it here: [Linux minion](https://github.com/openvswitch/ovn-kubernetes#k8s-minion-node-initializations). - - -##### Windows Host Setup - -Adding a Windows minion requires you to install OVS and OVN binaries. Windows Server container host running the required Windows Server and Docker versions. Follow the setup instructions outlined by [this help topic](https://docs.microsoft.com/en-us/virtualization/windowscontainers/quick-start/quick-start-windows-server). This type of deployment is supported starting with Windows Server 2016 RTM. - -Compiling OVS and generating the installer will not be treated in this document. For a step by step instruction please visit [this link](http://docs.openvswitch.org/en/latest/intro/install/windows/#open-vswitch-on-windows). -For a prebuilt certified installer please visit [this link](https://cloudbase.it/openvswitch/#download) and download the latest version of it. - -The following guide uses the prebuilt certified installer. - -Installing OVS can be done either via the GUI dialogs or unattended. Adding a Windows host to your setup requires you to have `OVN Host` together with the default installation features. Below is the dialog image on what needs to be installed: - -![OVN OVS Windows Installer](OVN_OVS_Windows_Installer.png) - -For an unattended installation please use the following command: -``` -cmd /c 'msiexec /i openvswitch.msi ADDLOCAL="OpenvSwitchCLI,OpenvSwitchDriver,OVNHost" /qn' -``` - -The installer propagates new environment variables. Please open a new command shell or logoff/logon to ensure the environment variables are refreshed. - -For overlay, OVS on Windows requires a transparent docker network to function properly. Please use the following to create a transparent docker network which will be used by OVS. From powershell: -``` -docker network create -d transparent --gateway $GATEWAY_IP --subnet $SUBNET ` - -o com.docker.network.windowsshim.interface="$INTERFACE_ALIAS" external -``` -Where $SUBNET is the minion subnet which will be used to spawn pods on (the one which will be used by kubernetes), $GATEWAY_IP is the first IP of the $SUBNET and $INTERFACE_ALIAS is the interface used for creating the overlay tunnels (must have connectivity with the rests of the OVN hosts). -Example: -``` -docker network create -d transparent --gateway 10.0.1.1 --subnet 10.0.1.0/24 ` - -o com.docker.network.windowsshim.interface="Ethernet0" external -``` -After creating the docker network please run the next commands from powershell. 
(creates an OVS bridge, adds the interface under the bridge and enables the OVS forwarding switch extension) -``` -$a = Get-NetAdapter | where Name -Match HNSTransparent -Rename-NetAdapter $a[0].Name -NewName HNSTransparent -Stop-Service ovs-vswitchd -force; Disable-VMSwitchExtension "Cloudbase Open vSwitch Extension"; -ovs-vsctl --no-wait del-br br-ex -ovs-vsctl --no-wait --may-exist add-br br-ex -ovs-vsctl --no-wait add-port br-ex HNSTransparent -- set interface HNSTransparent type=internal -ovs-vsctl --no-wait add-port br-ex $INTERFACE_ALIAS -Enable-VMSwitchExtension "Cloudbase Open vSwitch Extension"; sleep 2; Restart-Service ovs-vswitchd -``` -Besides of the above, setting up a Windows host is the same as the Linux host. Follow the steps from [here](https://github.com/openvswitch/ovn-kubernetes#k8s-minion-node-initializations). - -**Windows CNI Setup** - -Today, Windows OVN&OVS CNI plugin is based on ovn_cni.exe which can be downloaded from [here](https://cloudbase.it/downloads/ovn_cni.exe). A sample of CNI config file is the following: -``` -{ - "name": "net", - "type": "ovn_cni.exe", - "bridge": "br-int", - "isGateway": "true", - "ipMasq": "false", - "ipam": { - "type": "host-local", - "subnet": "$SUBNET" - } -} -``` -Where $SUBNET is the subnet that was used in the previous ```docker network create``` command. - -For a complete guide on Google Cloud Platform (GCP), namely Google Compute Engine (GCE) visit [this](https://github.com/apprenda/kubernetes-ovn-heterogeneous-cluster#heterogeneous-kubernetes-cluster-on-top-of-ovn). - -For a complete guide on Amazon Web Services (AWS) visit [this](https://github.com/justeat/kubernetes-windows-aws-ovs#kubernetes-on-windows-in-aws-using-ovn). - -## Starting the Cluster -To start your cluster, you'll need to start both the Linux-based Kubernetes control plane, and the Windows Server-based Kubernetes node components (kubelet and kube-proxy). For the OVS & OVN only the kubelet is required. - -## Starting the Linux-based Control Plane -Use your preferred method to start Kubernetes cluster on Linux. Please note that Cluster CIDR might need to be updated. - -## Support for kubeadm join - -If your cluster has been created by [kubeadm](/docs/setup/independent/create-cluster-kubeadm/), -and your networking is setup correctly using one of the methods listed above (networking is setup outside of kubeadm), you can use kubeadm to add a Windows node to your cluster. At a high level, you first have to initialize the master with kubeadm (Linux), then set up the CNI based networking (outside of kubeadm), and finally start joining Windows or Linux worker nodes to the cluster. For additional documentation and reference material, visit the kubeadm link above. - -The kubeadm binary can be found at [Kubernetes Releases](https://github.com/kubernetes/kubernetes/releases), inside the node binaries archive. Adding a Windows node is not any different than adding a Linux node: - -`kubeadm.exe join --token : --discovery-token-ca-cert-hash sha256:` - -See [joining-your-nodes](/docs/setup/independent/create-cluster-kubeadm/#joining-your-nodes) for more details. - -## Supported Features - -The examples listed below assume running Windows nodes on Windows Server 1709. If you are running Windows Server 2016, the examples will need the image updated to specify `image: microsoft/windowsservercore:ltsc2016`. This is due to the requirement for container images to match the host operating system version when using process isolation. 
Not specifying a tag will implicitly use the `:latest` tag which can lead to surprising behaviors. Please consult with [https://hub.docker.com/r/microsoft/windowsservercore/](https://hub.docker.com/r/microsoft/windowsservercore/) for additional information on Windows Server Core image tagging. - -### Scheduling Pods on Windows -Because your cluster has both Linux and Windows nodes, you must explicitly set the `nodeSelector` constraint to be able to schedule pods to Windows nodes. You must set nodeSelector with the label `beta.kubernetes.io/os` to the value `windows`; see the following example: - -{{< codenew file="windows/simple-pod.yaml" >}} - -{{< note >}} -This example assumes you are running on Windows Server 1709, so uses the image tag to support that. If you are on a different version, you will need to update the tag. For example, if on Windows Server 2016, update to use `"image": "microsoft/iis"` which will default to that OS version. -{{< /note >}} - -### Secrets and ConfigMaps -Secrets and ConfigMaps can be utilized in Windows Server Containers, but must be used as environment variables. See limitations section below for additional details. - -**Examples:** - -Windows pod with secrets mapped to environment variables - -{{< codenew file="windows/secret-pod.yaml" >}} - -Windows Pod with configMap values mapped to environment variables - -{{< codenew file="windows/configmap-pod.yaml" >}} - -### Volumes -Some supported Volume Mounts are local, emptyDir, hostPath. One thing to remember is that paths must either be escaped, or use forward slashes, for example `mountPath: "C:\\etc\\foo"` or `mountPath: "C:/etc/foo"`. - -Persistent Volume Claims are supported for supported volume types. - -**Examples:** - -Windows pod with a hostPath volume - -{{< codenew file="windows/hostpath-volume-pod.yaml" >}} - -Windows pod with multiple emptyDir volumes - -{{< codenew file="windows/emptydir-pod.yaml" >}} - -### DaemonSets - -DaemonSets are supported - -{{< codenew file="windows/daemonset.yaml" >}} - -### Metrics - -Windows Stats use a hybrid model: pod and container level stats come from CRI (via dockershim), while node level stats come from the "winstats" package that exports cadvisor like data structures using windows specific perf counters from the node. - -### Container Resources - -Container resources (CPU and memory) could be set now for windows containers in v1.10. - -{{< codenew file="windows/deploy-resource.yaml" >}} - -### Hyper-V Containers - -Hyper-V containers are supported as experimental in v1.10. To create a Hyper-V container, kubelet should be started with feature gates `HyperVContainer=true` and Pod should include annotation `experimental.windows.kubernetes.io/isolation-type=hyperv`. - -{{< codenew file="windows/deploy-hyperv.yaml" >}} - -### Kubelet and kube-proxy can now run as Windows services - -Starting with kubernetes v1.11, kubelet and kube-proxy can run as Windows services. - -This means that you can now register them as Windows services via `sc` command. More details about how to create Windows services with `sc` can be found [here](https://support.microsoft.com/en-us/help/251192/how-to-create-a-windows-service-by-using-sc-exe). - -**Examples:** - -To create the service: -``` -PS > sc.exe create binPath= " --windows-service " -CMD > sc create binPath= " --windows-service " -``` -Please note that if the arguments contain spaces, it must be escaped. 
Example: -``` -PS > sc.exe create kubelet binPath= "C:\kubelet.exe --windows-service --hostname-override 'minion' " -CMD > sc create kubelet binPath= "C:\kubelet.exe --windows-service --hostname-override 'minion' " -``` -To start the service: -``` -PS > Start-Service kubelet; Start-Service kube-proxy -CMD > net start kubelet && net start kube-proxy -``` -To stop the service: -``` -PS > Stop-Service kubelet (-Force); Stop-Service kube-proxy (-Force) -CMD > net stop kubelet && net stop kube-proxy -``` -To query the service: -``` -PS > Get-Service kubelet; Get-Service kube-proxy; -CMD > sc.exe queryex kubelet && sc qc kubelet && sc.exe queryex kube-proxy && sc.exe qc kube-proxy -``` - -## Known Limitations for Windows Server Containers with v1.9 - -Some of these limitations will be addressed by the community in future releases of Kubernetes: - -- Shared network namespace (compartment) with multiple Windows Server containers (shared kernel) per pod is only supported on Windows Server 1709 or later -- Using Secrets and ConfigMaps as volume mounts is not supported -- Mount propagation is not supported on Windows -- The StatefulSet functionality for stateful applications is not supported -- Horizontal Pod Autoscaling for Windows Server Container pods has not been verified to work end-to-end -- Hyper-V isolated containers are not supported. -- Windows container OS must match the Host OS. If it does not, the pod will get stuck in a crash loop. -- Under the networking models of L3 or Host GW, Kubernetes Services are inaccessible to Windows nodes due to a Windows issue. This is not an issue if using OVN/OVS for networking. -- Windows kubelet.exe may fail to start when running on Windows Server under VMware Fusion [issue 57110](https://github.com/kubernetes/kubernetes/pull/57124) -- Flannel and Weavenet are not yet supported -- Some .Net Core applications expect environment variables with a colon (`:`) in the name. Kubernetes currently does not allow this. Replace colon (`:`) with double underscore (`__`) as documented [here](https://docs.microsoft.com/en-us/aspnet/core/fundamentals/configuration/?tabs=basicconfiguration#configuration-by-environment). -- As cgroups are not supported on windows, kubelet.exe should be started with the following additional arguments `--cgroups-per-qos=false --enforce-node-allocatable=""` [issue 61716](https://github.com/kubernetes/kubernetes/issues/61716) - -## Next steps and resources - -- Support for Windows is in Beta as of v1.9 and your feedback is welcome. 
For information on getting involved, please head to [SIG-Windows](https://github.com/kubernetes/community/blob/master/sig-windows/README.md) -- Troubleshooting and Common Problems: [Link](https://docs.microsoft.com/en-us/virtualization/windowscontainers/kubernetes/common-problems) diff --git a/content/en/docs/getting-started-guides/windows/ovn_kubernetes.png b/content/en/docs/getting-started-guides/windows/ovn_kubernetes.png deleted file mode 100644 index 739d75aad765c..0000000000000 Binary files a/content/en/docs/getting-started-guides/windows/ovn_kubernetes.png and /dev/null differ diff --git a/content/en/docs/getting-started-guides/windows/sample-l2bridge-wincni-config.json b/content/en/docs/getting-started-guides/windows/sample-l2bridge-wincni-config.json deleted file mode 100644 index f3842026ce28c..0000000000000 --- a/content/en/docs/getting-started-guides/windows/sample-l2bridge-wincni-config.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "cniVersion": "0.2.0", - "name": "l2bridge", - "type": "wincni.exe", - "master": "Ethernet", - "ipam": { - "environment": "azure", - "subnet": "10.10.187.64/26", - "routes": [ - { - "GW": "10.10.187.66" - } - ] - }, - "dns": { - "Nameservers": [ - "11.0.0.10" - ] - }, - "AdditionalArgs": [ - { - "Name": "EndpointPolicy", - "Value": { - "Type": "OutBoundNAT", - "ExceptionList": [ - "11.0.0.0/8", - "10.10.0.0/16", - "10.127.132.128/25" - ] - } - }, - { - "Name": "EndpointPolicy", - "Value": { - "Type": "ROUTE", - "DestinationPrefix": "11.0.0.0/8", - "NeedEncap": true - } - }, - { - "Name": "EndpointPolicy", - "Value": { - "Type": "ROUTE", - "DestinationPrefix": "10.127.132.213/32", - "NeedEncap": true - } - } - ] -} diff --git a/content/en/docs/getting-started-guides/windows/windows-setup.png b/content/en/docs/getting-started-guides/windows/windows-setup.png deleted file mode 100644 index e11c58d596e35..0000000000000 Binary files a/content/en/docs/getting-started-guides/windows/windows-setup.png and /dev/null differ diff --git a/content/en/docs/reference/access-authn-authz/admission-controllers.md b/content/en/docs/reference/access-authn-authz/admission-controllers.md index 7051356b1a421..e067c0dbbe958 100644 --- a/content/en/docs/reference/access-authn-authz/admission-controllers.md +++ b/content/en/docs/reference/access-authn-authz/admission-controllers.md @@ -429,9 +429,9 @@ This label prefix is reserved for administrators to label their `Node` objects f and kubelets will not be allowed to modify labels with that prefix. * **Allows** kubelets to add/remove/update these labels and label prefixes: * `kubernetes.io/hostname` - * `beta.kubernetes.io/arch` + * `kubernetes.io/arch` + * `kubernetes.io/os` * `beta.kubernetes.io/instance-type` - * `beta.kubernetes.io/os` * `failure-domain.beta.kubernetes.io/region` * `failure-domain.beta.kubernetes.io/zone` * `kubelet.kubernetes.io/`-prefixed labels diff --git a/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md b/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md index c405d2736682d..d2fb12a3e84b3 100644 --- a/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md +++ b/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md @@ -5,6 +5,8 @@ reviewers: - whitlockjc - caesarxuchao - deads2k +- liggitt +- mbohlool title: Dynamic Admission Control content_template: templates/concept weight: 40 @@ -19,16 +21,14 @@ the following: * They need to be compiled into kube-apiserver. 
 * They are only configurable when the apiserver starts up.
 
-Two features, *Admission Webhooks* (beta in 1.9) and *Initializers* (alpha),
-address these limitations. They allow admission controllers to be developed
-out-of-tree and configured at runtime.
+*Admission Webhooks* (beta in 1.9) address these limitations. They allow
+admission controllers to be developed out-of-tree and configured at runtime.
+
+This page describes how to use Admission Webhooks.
 
-This page describes how to use Admission Webhooks and Initializers.
 {{% /capture %}}
 
 {{% capture body %}}
-## Admission Webhooks
-
 ### What are admission webhooks?
 
 Admission webhooks are HTTP callbacks that receive admission requests and do
@@ -67,6 +67,13 @@ that is validated in a Kubernetes e2e test. The webhook handles the
 `admissionReview` requests sent by the apiservers, and sends back its decision
 wrapped in `admissionResponse`.
 
+The `admissionReview` request can have different versions (e.g. `v1beta1`, or `v1` in a future release).
+The webhook can declare which versions it accepts using the `admissionReviewVersions` field. The API
+server will use the first version in that list which it supports. If none of the versions specified
+in this list is supported by the API server, validation fails for this object. If the webhook
+configuration has already been persisted, calls to the webhook will fail and be
+subject to the failure policy.
+
 The example admission webhook server leaves the `ClientAuth` field
 [empty](https://github.com/kubernetes/kubernetes/blob/v1.13.0/test/images/webhook/config.go#L47-L48),
 which defaults to `NoClientCert`. This means that the webhook server does not
@@ -112,18 +119,32 @@ webhooks:
     - CREATE
     resources:
     - pods
+    scope: "Namespaced"
   clientConfig:
     service:
      namespace:
      name:
    caBundle:
+  admissionReviewVersions:
+  - v1beta1
+  timeoutSeconds: 1
 ```
 
+The `scope` field specifies whether only cluster-scoped resources ("Cluster") or namespace-scoped
+resources ("Namespaced") will match this rule. "*" means that there are no scope restrictions.
+
 {{< note >}}
 When using `clientConfig.service`, the server cert must be valid for
 `..svc`.
 {{< /note >}}
 
+{{< note >}}
+The default timeout for a webhook call is 30 seconds, but starting in Kubernetes 1.14 you
+can set the timeout explicitly; it is encouraged to use a short timeout for webhooks.
+If the webhook call times out, the request is handled according to the webhook's
+failure policy.
+{{< /note >}}
+
 When an apiserver receives a request that matches one of the `rules`, the
 apiserver sends an `admissionReview` request to webhook as specified in the
 `clientConfig`.
@@ -131,13 +152,6 @@ apiserver sends an `admissionReview` request to webhook as specified in the
 After you create the webhook configuration, the system will take a few seconds
 to honor the new configuration.
 
-{{< note >}}
-When the webhook plugin is deployed into the Kubernetes cluster as a
-service, it has to expose its service on the 443 port. The communication
-between the API server and the webhook service may fail if a different port
-is used.
-{{< /note >}}
-
 ### Authenticate apiservers
 
 If your admission webhooks require authentication, you can configure the
@@ -196,116 +210,4 @@ users:
 ```
 
 Of course you need to set up the webhook server to handle these authentications.
-
-## Initializers
-
-### What are initializers?
-
-*Initializer* has two meanings:
-
-* A list of pending pre-initialization tasks, stored in every object's metadata
-  (e.g., "AddMyCorporatePolicySidecar").
- -* A user customized controller, which actually performs those tasks. The name of the task - corresponds to the controller which performs the task. For clarity, we call - them *initializer controllers* in this page. - -Once the controller has performed its assigned task, it removes its name from -the list. For example, it may send a PATCH that inserts a container in a pod and -also removes its name from `metadata.initializers.pending`. Initializers may make -mutations to objects. - -Objects which have a non-empty initializer list are considered uninitialized, -and are not visible in the API unless specifically requested by using the query parameter, -`?includeUninitialized=true`. - -### When to use initializers? - -Initializers are useful for admins to force policies (e.g., the -[AlwaysPullImages](/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages) -admission controller), or to inject defaults (e.g., the -[DefaultStorageClass](/docs/reference/access-authn-authz/admission-controllers/#defaultstorageclass) -admission controller), etc. - -{{< note >}} -If your use case does not involve mutating objects, consider using -external admission webhooks, as they have better performance. -{{< /note >}} - -### How are initializers triggered? - -When an object is POSTed, it is checked against all existing -`initializerConfiguration` objects (explained below). For all that it matches, -all `spec.initializers[].name`s are appended to the new object's -`metadata.initializers.pending` field. - -An initializer controller should list and watch for uninitialized objects, by -using the query parameter `?includeUninitialized=true`. If using client-go, just -set -[listOptions.includeUninitialized](https://github.com/kubernetes/kubernetes/blob/v1.13.0/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go#L332) -to true. - -For the observed uninitialized objects, an initializer controller should first -check if its name matches `metadata.initializers.pending[0]`. If so, it should then -perform its assigned task and remove its name from the list. - -### Enable initializers alpha feature - -*Initializers* is an alpha feature, so it is disabled by default. To turn it on, -you need to: - -* Include "Initializers" in the `--enable-admission-plugins` flag when starting - `kube-apiserver`. If you have multiple `kube-apiserver` replicas, all should - have the same flag setting. - -* Enable the dynamic admission controller registration API by adding - `admissionregistration.k8s.io/v1alpha1` to the `--runtime-config` flag passed - to `kube-apiserver`, e.g. - `--runtime-config=admissionregistration.k8s.io/v1alpha1`. Again, all replicas - should have the same flag setting. - -### Deploy an initializer controller - -You should deploy an initializer controller via the [deployment -API](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#deployment-v1beta1-apps). - -### Configure initializers on the fly - -You can configure what initializers are enabled and what resources are subject -to the initializers by creating `initializerConfiguration` resources. - -You should first deploy the initializer controller and make sure that it is -working properly before creating the `initializerConfiguration`. Otherwise, any -newly created resources will be stuck in an uninitialized state. 
- -The following is an example `initializerConfiguration`: - -```yaml -apiVersion: admissionregistration.k8s.io/v1alpha1 -kind: InitializerConfiguration -metadata: - name: example-config -initializers: - # the name needs to be fully qualified, i.e., containing at least two "." - - name: podimage.example.com - rules: - # apiGroups, apiVersion, resources all support wildcard "*". - # "*" cannot be mixed with non-wildcard. - - apiGroups: - - "" - apiVersions: - - v1 - resources: - - pods -``` - -After you create the `initializerConfiguration`, the system will take a few -seconds to honor the new configuration. Then, `"podimage.example.com"` will be -appended to the `metadata.initializers.pending` field of newly created pods. You -should already have a ready "podimage" initializer controller that handles pods -whose `metadata.initializers.pending[0].name="podimage.example.com"`. Otherwise -the pods will be stuck in an uninitialized state. - -Make sure that all expansions of the `` tuple -in a `rule` are valid. If they are not, separate them in different `rules`. {{% /capture %}} diff --git a/content/en/docs/reference/access-authn-authz/rbac.md b/content/en/docs/reference/access-authn-authz/rbac.md index f2cb82bec97b6..8f6ad335f6e66 100644 --- a/content/en/docs/reference/access-authn-authz/rbac.md +++ b/content/en/docs/reference/access-authn-authz/rbac.md @@ -26,7 +26,7 @@ To enable RBAC, start the apiserver with `--authorization-mode=RBAC`. The RBAC API declares four top-level types which will be covered in this section. Users can interact with these resources as they would with any other API resource (via `kubectl`, API calls, etc.). For instance, -`kubectl create -f (resource).yml` can be used with any of these examples, +`kubectl apply -f (resource).yml` can be used with any of these examples, though readers who wish to follow along should review the section on bootstrapping first. @@ -489,13 +489,18 @@ NOTE: editing the role is not recommended as changes will be overwritten on API system:basic-user -system:authenticated and system:unauthenticated groups -Allows a user read-only access to basic information about themselves. +system:authenticated group +Allows a user read-only access to basic information about themselves. Prior to 1.14, this role was also bound to `system:unauthenticated` by default. system:discovery +system:authenticated group +Allows read-only access to API discovery endpoints needed to discover and negotiate an API level. Prior to 1.14, this role was also bound to `system:unauthenticated` by default. + + +system:public-info-viewer system:authenticated and system:unauthenticated groups -Allows read-only access to API discovery endpoints needed to discover and negotiate an API level. +Allows read-only access to non-sensitive information about the cluster. Introduced in 1.14. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates.md b/content/en/docs/reference/command-line-tools-reference/feature-gates.md index 9a8c7c504a7cd..8807d19b2d916 100644 --- a/content/en/docs/reference/command-line-tools-reference/feature-gates.md +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates.md @@ -55,9 +55,17 @@ different Kubernetes components. 
| `CPUManager` | `true` | Beta | 1.10 | | | `CRIContainerLogRotation` | `false` | Alpha | 1.10 | 1.10 | | `CRIContainerLogRotation` | `true` | Beta| 1.11 | | -| `CSIBlockVolume` | `false` | Alpha | 1.11 | | -| `CSIDriverRegistry` | `false` | Alpha | 1.12 | | -| `CSINodeInfo` | `false` | Alpha | 1.12 | | +| `CSIBlockVolume` | `false` | Alpha | 1.11 | 1.13 | +| `CSIBlockVolume` | `true` | Beta | 1.14 | | +| `CSIDriverRegistry` | `false` | Alpha | 1.12 | 1.13 | +| `CSIDriverRegistry` | `true` | Beta | 1.14 | | +| `CSIInlineVolume` | `false` | Alpha | 1.14 | - | +| `CSIMigration` | `false` | Alpha | 1.14 | | +| `CSIMigrationAWS` | `false` | Alpha | 1.14 | | +| `CSIMigrationGCE` | `false` | Alpha | 1.14 | | +| `CSIMigrationOpenStack` | `false` | Alpha | 1.14 | | +| `CSINodeInfo` | `false` | Alpha | 1.12 | 1.13 | +| `CSINodeInfo` | `true` | Beta | 1.14 | | | `CSIPersistentVolume` | `false` | Alpha | 1.9 | 1.9 | | `CSIPersistentVolume` | `true` | Beta | 1.10 | 1.12 | | `CSIPersistentVolume` | `true` | GA | 1.13 | - | @@ -79,7 +87,8 @@ different Kubernetes components. | `DynamicVolumeProvisioning` | `true` | Alpha | 1.3 | 1.7 | | `DynamicVolumeProvisioning` | `true` | GA | 1.8 | | | `EnableEquivalenceClassCache` | `false` | Alpha | 1.8 | | -| `ExpandInUsePersistentVolumes` | `false` | Alpha | 1.11 | | +| `ExpandCSIVolumes` | `false` | Alpha | 1.14 | | | +| `ExpandInUsePersistentVolumes` | `false` | Alpha | 1.11 | 1.13 | | | `ExpandPersistentVolumes` | `false` | Alpha | 1.8 | 1.10 | | `ExpandPersistentVolumes` | `true` | Beta | 1.11 | | | `ExperimentalCriticalPodAnnotation` | `false` | Alpha | 1.5 | | @@ -103,7 +112,8 @@ different Kubernetes components. | `MountPropagation` | `true` | GA | 1.12 | | | `NodeLease` | `false` | Alpha | 1.12 | | | `PersistentLocalVolumes` | `false` | Alpha | 1.7 | 1.9 | -| `PersistentLocalVolumes` | `true` | Beta | 1.10 | | +| `PersistentLocalVolumes` | `true` | Beta | 1.10 | 1.13 | +| `PersistentLocalVolumes` | `true` | GA | 1.14 | | | `PodPriority` | `false` | Alpha | 1.8 | 1.10 | | `PodPriority` | `true` | Beta | 1.11 | | | `PodReadinessGates` | `false` | Alpha | 1.11 | | @@ -118,9 +128,10 @@ different Kubernetes components. | `RotateKubeletClientCertificate` | `true` | Beta | 1.7 | | | `RotateKubeletServerCertificate` | `false` | Alpha | 1.7 | 1.11 | | `RotateKubeletServerCertificate` | `true` | Beta | 1.12 | | -| `RunAsGroup` | `false` | Alpha | 1.10 | | -| `RuntimeClass` | `false` | Alpha | 1.12 | | +| `RunAsGroup` | `true` | Beta | 1.14 | | +| `RuntimeClass` | `true` | Beta | 1.14 | | | `SCTPSupport` | `false` | Alpha | 1.12 | | +| `ServerSideApply` | `false` | Alpha | 1.14 | | | `ServiceNodeExclusion` | `false` | Alpha | 1.8 | | | `StorageObjectInUseProtection` | `true` | Beta | 1.10 | 1.10 | | `StorageObjectInUseProtection` | `true` | GA | 1.11 | | @@ -143,10 +154,11 @@ different Kubernetes components. 
| `VolumeScheduling` | `false` | Alpha | 1.9 | 1.9 | | `VolumeScheduling` | `true` | Beta | 1.10 | 1.12 | | `VolumeScheduling` | `true` | GA | 1.13 | | -| `VolumeSubpathEnvExpansion` | `false` | Alpha | 1.11 | | +| `VolumeSubpathEnvExpansion` | `false` | Alpha | 1.14 | | | `VolumeSnapshotDataSource` | `false` | Alpha | 1.12 | - | | `ScheduleDaemonSetPods` | `false` | Alpha | 1.11 | 1.11 | | `ScheduleDaemonSetPods` | `true` | Beta | 1.12 | | +| `WindowsGMSA` | `false` | Alpha | 1.14 | | ## Using a Feature @@ -213,11 +225,16 @@ Each feature gate is designed for enabling/disabling a specific feature: - `CRIContainerLogRotation`: Enable container log rotation for cri container runtime. - `CSIBlockVolume`: Enable external CSI volume drivers to support block storage. See the [`csi` raw block volume support](/docs/concepts/storage/volumes/#csi-raw-block-volume-support) documentation for more details. - `CSIDriverRegistry`: Enable all logic related to the CSIDriver API object in csi.storage.k8s.io. +- `CSIMigration`: Enables shims and translation logic to route volume operations from in-tree plugins to corresponding pre-installed CSI plugins +- `CSIMigrationAWS`: Enables shims and translation logic to route volume operations from the AWS-EBS in-tree plugin to EBS CSI plugin +- `CSIMigrationGCE`: Enables shims and translation logic to route volume operations from the GCE-PD in-tree plugin to PD CSI plugin +- `CSIMigrationOpenStack`: Enables shims and translation logic to route volume operations from the Cinder in-tree plugin to Cinder CSI plugin - `CSINodeInfo`: Enable all logic related to the CSINodeInfo API object in csi.storage.k8s.io. - `CSIPersistentVolume`: Enable discovering and mounting volumes provisioned through a [CSI (Container Storage Interface)](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/storage/container-storage-interface.md) compatible volume plugin. Check the [`csi` volume type](/docs/concepts/storage/volumes/#csi) documentation for more details. +- `CSIInlineVolume`: Enable CSI volumes to be directly embedded in Pod specifications instead of a PersistentVolume. - `CustomPodDNS`: Enable customizing the DNS settings for a Pod using its `dnsConfig` property. Check [Pod's DNS Config](/docs/concepts/services-networking/dns-pod-service/#pods-dns-config) for more details. @@ -286,6 +303,7 @@ Each feature gate is designed for enabling/disabling a specific feature: - `RuntimeClass`: Enable the [RuntimeClass](/docs/concepts/containers/runtime-class/) feature for selecting container runtime configurations. - `ScheduleDaemonSetPods`: Enable DaemonSet Pods to be scheduled by the default scheduler instead of the DaemonSet controller. - `SCTPSupport`: Enables the usage of SCTP as `protocol` value in `Service`, `Endpoint`, `NetworkPolicy` and `Pod` definitions +- `ServerSideApply`: Enables the [Sever Side Apply (SSA)](/docs/reference/using-api/api-concepts/#server-side-apply) path at the API Server. - `ServiceNodeExclusion`: Enable the exclusion of nodes from load balancers created by a cloud provider. A node is eligible for exclusion if annotated with "`alpha.service-controller.kubernetes.io/exclude-balancer`" key. - `StorageObjectInUseProtection`: Postpone the deletion of PersistentVolume or @@ -310,5 +328,7 @@ Each feature gate is designed for enabling/disabling a specific feature: enables the usage of [`local`](/docs/concepts/storage/volumes/#local) volume type when used together with the `PersistentLocalVolumes` feature gate. 
- `VolumeSnapshotDataSource`: Enable volume snapshot data source support. +- `VolumeSubpathEnvExpansion`: Enable `subPathExpr` field for expanding environment variables into a `subPath`. +- `WindowsGMSA`: Enables passing of GMSA credential specs from pods to container runtimes. {{% /capture %}} diff --git a/content/en/docs/reference/command-line-tools-reference/kubelet.md b/content/en/docs/reference/command-line-tools-reference/kubelet.md index dc90116c31c4f..02a2893eec41b 100644 --- a/content/en/docs/reference/command-line-tools-reference/kubelet.md +++ b/content/en/docs/reference/command-line-tools-reference/kubelet.md @@ -707,7 +707,7 @@ kubelet [flags] --kube-reserved mapStringString - A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=500Mi,ephemeral-storage=1Gi) pairs that describe resources reserved for kubernetes system components. Currently cpu, memory and local ephemeral storage for root file system are supported. See /docs/user-guide/compute-resources for more detail. [default=none] + A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=500Mi,ephemeral-storage=1Gi,pid=1000) pairs that describe resources reserved for kubernetes system components. Currently cpu, memory, pid, and local ephemeral storage for root file system are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. [default=none] @@ -1092,7 +1092,7 @@ kubelet [flags] --system-reserved mapStringString - A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=500Mi,ephemeral-storage=1Gi) pairs that describe resources reserved for non-kubernetes components. Currently only cpu and memory are supported. See /docs/user-guide/compute-resources for more detail. [default=none] + A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=500Mi,ephemeral-storage=1Gi,pid=1000) pairs that describe resources reserved for non-kubernetes components. Currently only cpu, memory, and pid are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. [default=none] diff --git a/content/en/docs/reference/glossary/security-context.md b/content/en/docs/reference/glossary/security-context.md index 7bdf99534ae0d..9812304e4dd2a 100755 --- a/content/en/docs/reference/glossary/security-context.md +++ b/content/en/docs/reference/glossary/security-context.md @@ -14,5 +14,4 @@ tags: -The securityContext field in a {{< glossary_tooltip term_id="pod" >}} (applying to all containers) or container is used to set the user (runAsUser) and group (fsGroup), capabilities, privilege settings, and security policies (SELinux/AppArmor/Seccomp) that container processes use. - +The securityContext field in a {{< glossary_tooltip term_id="pod" >}} (applying to all containers) or container is used to set the user, groups, capabilities, privilege settings, and security policies (SELinux/AppArmor/Seccomp) and more that container processes use. diff --git a/content/en/docs/reference/kubectl/cheatsheet.md b/content/en/docs/reference/kubectl/cheatsheet.md index 47f9c3017d1c7..be4dce189de26 100644 --- a/content/en/docs/reference/kubectl/cheatsheet.md +++ b/content/en/docs/reference/kubectl/cheatsheet.md @@ -75,21 +75,24 @@ kubectl config set-context gce --user=cluster-admin --namespace=foo \ kubectl config unset users.foo # delete user foo ``` +## Apply +`apply` manages applications through files defining Kubernetes resources. It creates and updates resources in a cluster through running `kubectl apply`. This is the recommended way of managing Kubernetes applications on production. 
See [Kubectl Book](https://kubectl.docs.kubernetes.io). + ## Creating Objects Kubernetes manifests can be defined in json or yaml. The file extension `.yaml`, `.yml`, and `.json` can be used. ```bash -kubectl create -f ./my-manifest.yaml # create resource(s) -kubectl create -f ./my1.yaml -f ./my2.yaml # create from multiple files -kubectl create -f ./dir # create resource(s) in all manifest files in dir -kubectl create -f https://git.io/vPieo # create resource(s) from url +kubectl apply -f ./my-manifest.yaml # create resource(s) +kubectl apply -f ./my1.yaml -f ./my2.yaml # create from multiple files +kubectl apply -f ./dir # create resource(s) in all manifest files in dir +kubectl apply -f https://git.io/vPieo # create resource(s) from url kubectl create deployment nginx --image=nginx # start a single instance of nginx kubectl explain pods,svc # get the documentation for pod and svc manifests # Create multiple YAML objects from stdin -cat < directory. -kubectl create -f +kubectl apply -f ``` `kubectl get` - List one or more resources. diff --git a/content/en/docs/reference/kubernetes-api/labels-annotations-taints.md b/content/en/docs/reference/kubernetes-api/labels-annotations-taints.md index 78219c3085823..6f958c4060d0a 100644 --- a/content/en/docs/reference/kubernetes-api/labels-annotations-taints.md +++ b/content/en/docs/reference/kubernetes-api/labels-annotations-taints.md @@ -11,23 +11,32 @@ This document serves both as a reference to the values, and as a coordination po {{% /capture %}} {{% capture body %}} -## beta.kubernetes.io/arch +## kubernetes.io/arch -Example: `beta.kubernetes.io/arch=amd64` +Example: `kubernetes.io/arch=amd64` Used on: Node Kubelet populates this with `runtime.GOARCH` as defined by Go. This can be handy if you are mixing arm and x86 nodes, for example. -## beta.kubernetes.io/os +## kubernetes.io/os -Example: `beta.kubernetes.io/os=linux` +Example: `kubernetes.io/os=linux` Used on: Node Kubelet populates this with `runtime.GOOS` as defined by Go. This can be handy if you are mixing operating systems -in your cluster (although currently Linux is the only OS supported by Kubernetes). +in your cluster (e.g., mixing Linux and Windows nodes). + +## beta.kubernetes.io/arch (deprecated) + +This label has been deprecated. Please use `kubernetes.io/arch` instead. + +## beta.kubernetes.io/os (deprecated) + +This label has been deprecated. Please use `kubernetes.io/os` instead. + ## kubernetes.io/hostname diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_preflight.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_preflight.md deleted file mode 100644 index d88f71b0d9204..0000000000000 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_preflight.md +++ /dev/null @@ -1,50 +0,0 @@ - -Commands related to pre-flight checks - -### Synopsis - - -This command is not meant to be run on its own. See list of available subcommands. - -### Options - - - - - - - - - - - - - - - - -
-h, --help
help for preflight
- - - -### Options inherited from parent commands - - - - - - - - - - - - - - - - -
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
- - - diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_preflight_node.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_preflight_node.md deleted file mode 100644 index 47be57c832538..0000000000000 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_preflight_node.md +++ /dev/null @@ -1,77 +0,0 @@ - -Run node pre-flight checks - -### Synopsis - - -Run node pre-flight checks, functionally equivalent to what implemented by kubeadm join. - -Alpha Disclaimer: this command is currently alpha. - -``` -kubeadm alpha preflight node [flags] -``` - -### Examples - -``` - # Run node pre-flight checks. - kubeadm alpha preflight node -``` - -### Options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--config string
Path to a kubeadm configuration file.
-h, --help
help for node
--ignore-preflight-errors stringSlice
A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.
- - - -### Options inherited from parent commands - - - - - - - - - - - - - - - - -
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
- - - diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-certs.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-certs.md new file mode 100644 index 0000000000000..aed553b8c3d1e --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-certs.md @@ -0,0 +1,27 @@ + +Upload certificates to kubeadm-certs + +### Synopsis + +This command is not meant to be run on its own. See list of available subcommands. + +``` +kubeadm init phase upload-certs [flags] +``` + +### Options + +``` + --certificate-key string Key used to encrypt the control-plane certificates in the kubeadm-certs Secret. + --config string Path to a kubeadm configuration file. + --experimental-upload-certs Upload control-plane certificates to the kubeadm-certs Secret. + -h, --help help for upload-certs + --skip-certificate-key-print Don't print the key used to encrypt the control-plane certificates. +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase.md new file mode 100644 index 0000000000000..a5562c5dc4e6c --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase.md @@ -0,0 +1,19 @@ + +use this command to invoke single phase of the join workflow + +### Synopsis + +use this command to invoke single phase of the join workflow + +### Options + +``` + -h, --help help for phase +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join.md new file mode 100644 index 0000000000000..e65c5248f44d4 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join.md @@ -0,0 +1,30 @@ + +Joins a machine as a control plane instance + +### Synopsis + +Joins a machine as a control plane instance + +``` +kubeadm join phase control-plane-join [flags] +``` + +### Examples + +``` + # Joins a machine as a control plane instance + kubeadm join phase control-plane-join all +``` + +### Options + +``` + -h, --help help for control-plane-join +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md new file mode 100644 index 0000000000000..d2f288fd98c59 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md @@ -0,0 +1,27 @@ + +Joins a machine as a control plane instance + +### Synopsis + +Joins a machine as a control plane instance + +``` +kubeadm join phase control-plane-join all [flags] +``` + +### Options + +``` + --apiserver-advertise-address string If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. 
+ --config string Path to kubeadm config file. + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for all + --node-name string Specify the node name. +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md new file mode 100644 index 0000000000000..05ebd37d41c17 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md @@ -0,0 +1,27 @@ + +Add a new local etcd member + +### Synopsis + +Add a new local etcd member + +``` +kubeadm join phase control-plane-join etcd [flags] +``` + +### Options + +``` + --apiserver-advertise-address string If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --config string Path to kubeadm config file. + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for etcd + --node-name string Specify the node name. +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_mark-control-plane.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_mark-control-plane.md new file mode 100644 index 0000000000000..9a06263e3876b --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_mark-control-plane.md @@ -0,0 +1,26 @@ + +Mark a node as a control-plane + +### Synopsis + +Mark a node as a control-plane + +``` +kubeadm join phase control-plane-join mark-control-plane [flags] +``` + +### Options + +``` + --config string Path to kubeadm config file. + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for mark-control-plane + --node-name string Specify the node name. +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md new file mode 100644 index 0000000000000..00a10bb606939 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md @@ -0,0 +1,27 @@ + +Register the new control-plane node into the ClusterStatus maintained in the kubeadm-config ConfigMap + +### Synopsis + +Register the new control-plane node into the ClusterStatus maintained in the kubeadm-config ConfigMap + +``` +kubeadm join phase control-plane-join update-status [flags] +``` + +### Options + +``` + --apiserver-advertise-address string If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --config string Path to kubeadm config file. 
+ --experimental-control-plane Create a new control plane instance on this node + -h, --help help for update-status + --node-name string Specify the node name. +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare.md new file mode 100644 index 0000000000000..1ed4d231ba2e9 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare.md @@ -0,0 +1,30 @@ + +Prepares the machine for serving a control plane. + +### Synopsis + +Prepares the machine for serving a control plane. + +``` +kubeadm join phase control-plane-prepare [flags] +``` + +### Examples + +``` + # Prepares the machine for serving a control plane + kubeadm join phase control-plane-prepare all +``` + +### Options + +``` + -h, --help help for control-plane-prepare +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md new file mode 100644 index 0000000000000..30e3351584f55 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md @@ -0,0 +1,35 @@ + +Prepares the machine for serving a control plane. + +### Synopsis + +Prepares the machine for serving a control plane. + +``` +kubeadm join phase control-plane-prepare all [api-server-endpoint] [flags] +``` + +### Options + +``` + --apiserver-advertise-address string If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 If the node should host a new control plane instance, the port for the API Server to bind to. (default 6443) + --certificate-key string Use this key to decrypt the certificate secrets uploaded by init. + --config string Path to kubeadm config file. + --discovery-file string For file-based discovery, a file or URL from which to load cluster information. + --discovery-token string For token-based discovery, the token used to validate cluster information fetched from the API server. + --discovery-token-ca-cert-hash strings For token-based discovery, validate that the root CA public key matches this hash (format: ":"). + --discovery-token-unsafe-skip-ca-verification For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for all + --node-name string Specify the node name. + --tls-bootstrap-token string Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. + --token string Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. 
+``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_certs.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_certs.md new file mode 100644 index 0000000000000..f429b7536cf6e --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_certs.md @@ -0,0 +1,33 @@ + +Generates the certificates for the new control plane components + +### Synopsis + +Generates the certificates for the new control plane components + +``` +kubeadm join phase control-plane-prepare certs [api-server-endpoint] [flags] +``` + +### Options + +``` + --apiserver-advertise-address string If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --config string Path to kubeadm config file. + --discovery-file string For file-based discovery, a file or URL from which to load cluster information. + --discovery-token string For token-based discovery, the token used to validate cluster information fetched from the API server. + --discovery-token-ca-cert-hash strings For token-based discovery, validate that the root CA public key matches this hash (format: ":"). + --discovery-token-unsafe-skip-ca-verification For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for certs + --node-name string Specify the node name. + --tls-bootstrap-token string Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. + --token string Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md new file mode 100644 index 0000000000000..cecc4b2a80ae8 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md @@ -0,0 +1,27 @@ + +Generates the manifests for the new control plane components + +### Synopsis + +Generates the manifests for the new control plane components + +``` +kubeadm join phase control-plane-prepare control-plane [flags] +``` + +### Options + +``` + --apiserver-advertise-address string If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 If the node should host a new control plane instance, the port for the API Server to bind to. (default 6443) + --config string Path to kubeadm config file. + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for control-plane +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. 
+``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_download-certs.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_download-certs.md new file mode 100644 index 0000000000000..cb87677c20600 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_download-certs.md @@ -0,0 +1,32 @@ + +[EXPERIMENTAL] Downloads certificates shared among control-plane nodes from the kubeadm-certs Secret + +### Synopsis + +[EXPERIMENTAL] Downloads certificates shared among control-plane nodes from the kubeadm-certs Secret + +``` +kubeadm join phase control-plane-prepare download-certs [api-server-endpoint] [flags] +``` + +### Options + +``` + --certificate-key string Use this key to decrypt the certificate secrets uploaded by init. + --config string Path to kubeadm config file. + --discovery-file string For file-based discovery, a file or URL from which to load cluster information. + --discovery-token string For token-based discovery, the token used to validate cluster information fetched from the API server. + --discovery-token-ca-cert-hash strings For token-based discovery, validate that the root CA public key matches this hash (format: ":"). + --discovery-token-unsafe-skip-ca-verification For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for download-certs + --tls-bootstrap-token string Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. + --token string Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_kubeconfig.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_kubeconfig.md new file mode 100644 index 0000000000000..558ed7fd33ccb --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_kubeconfig.md @@ -0,0 +1,32 @@ + +Generates the kubeconfig for the new control plane components + +### Synopsis + +Generates the kubeconfig for the new control plane components + +``` +kubeadm join phase control-plane-prepare kubeconfig [api-server-endpoint] [flags] +``` + +### Options + +``` + --certificate-key string Use this key to decrypt the certificate secrets uploaded by init. + --config string Path to kubeadm config file. + --discovery-file string For file-based discovery, a file or URL from which to load cluster information. + --discovery-token string For token-based discovery, the token used to validate cluster information fetched from the API server. + --discovery-token-ca-cert-hash strings For token-based discovery, validate that the root CA public key matches this hash (format: ":"). + --discovery-token-unsafe-skip-ca-verification For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. 
+ --experimental-control-plane Create a new control plane instance on this node + -h, --help help for kubeconfig + --tls-bootstrap-token string Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. + --token string Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_kubelet-start.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_kubelet-start.md new file mode 100644 index 0000000000000..6120e664bb255 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_kubelet-start.md @@ -0,0 +1,32 @@ + +Writes kubelet settings, certificates and (re)starts the kubelet + +### Synopsis + +Writes a file with KubeletConfiguration and an environment file with node specific kubelet settings, and then (re)starts kubelet. + +``` +kubeadm join phase kubelet-start [api-server-endpoint] [flags] +``` + +### Options + +``` + --config string Path to kubeadm config file. + --cri-socket string Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. + --discovery-file string For file-based discovery, a file or URL from which to load cluster information. + --discovery-token string For token-based discovery, the token used to validate cluster information fetched from the API server. + --discovery-token-ca-cert-hash strings For token-based discovery, validate that the root CA public key matches this hash (format: ":"). + --discovery-token-unsafe-skip-ca-verification For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. + -h, --help help for kubelet-start + --node-name string Specify the node name. + --tls-bootstrap-token string Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. + --token string Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_preflight.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_preflight.md new file mode 100644 index 0000000000000..70643a0da341a --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_preflight.md @@ -0,0 +1,44 @@ + +Run join pre-flight checks + +### Synopsis + +Run pre-flight checks for kubeadm join. + +``` +kubeadm join phase preflight [api-server-endpoint] [flags] +``` + +### Examples + +``` + # Run join pre-flight checks using a config file. + kubeadm join phase preflight --config kubeadm-config.yml +``` + +### Options + +``` + --apiserver-advertise-address string If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. + --apiserver-bind-port int32 If the node should host a new control plane instance, the port for the API Server to bind to. 
(default 6443) + --certificate-key string Use this key to decrypt the certificate secrets uploaded by init. + --config string Path to kubeadm config file. + --cri-socket string Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. + --discovery-file string For file-based discovery, a file or URL from which to load cluster information. + --discovery-token string For token-based discovery, the token used to validate cluster information fetched from the API server. + --discovery-token-ca-cert-hash strings For token-based discovery, validate that the root CA public key matches this hash (format: ":"). + --discovery-token-unsafe-skip-ca-verification For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. + --experimental-control-plane Create a new control plane instance on this node + -h, --help help for preflight + --ignore-preflight-errors strings A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. + --node-name string Specify the node name. + --tls-bootstrap-token string Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. + --token string Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +``` + +### Options inherited from parent commands + +``` + --rootfs string [EXPERIMENTAL] The path to the 'real' host root filesystem. +``` + diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md index da92919353a51..ad59320140614 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md @@ -48,15 +48,6 @@ to enable the DynamicKubeletConfiguration feature. {{< tab name="enable-dynamic" include="generated/kubeadm_alpha_kubelet_config_download.md" />}} {{< /tabs >}} -## kubeadm alpha preflight node {#cmd-phase-preflight} - -You can use the `node` sub command to run preflight checks on a worker node. - -{{< tabs name="tab-preflight" >}} -{{< tab name="preflight" include="generated/kubeadm_alpha_preflight.md" />}} -{{< tab name="node" include="generated/kubeadm_alpha_preflight_node.md" />}} -{{< /tabs >}} - ## kubeadm alpha selfhosting pivot {#cmd-selfhosting} diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md index 360ac57aac704..b5644d854c50d 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md @@ -79,7 +79,17 @@ Use the following phase to create a local etcd instance based on a static Pod fi {{< /tabs >}} -## kubeadm init phase mark-control-plane {#cmd-phase-control-plane} +## kubeadm init phase upload-certs {#cmd-phase-upload-certs} + +Use the following phase to upload control-plane certificates to the cluster. +By default the certs and encryption key expire after two hours. 
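+A minimal invocation might look like the following (illustrative only; the
+full list of flags is in the generated reference below):
+
+```shell
+# Upload the control-plane certificates to the kubeadm-certs Secret and print
+# the key that joining control-plane nodes can use to decrypt them.
+sudo kubeadm init phase upload-certs --experimental-upload-certs
+```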
+ +{{< tabs name="tab-upload-certs" >}} +{{< tab name="upload-certs" include="generated/kubeadm_init_phase_upload-certs.md" />}} +{{< /tabs >}} + + +## kubeadm init phase mark-control-plane {#cmd-phase-mark-control-plane} Use the following phase to label and taint the node with the `node-role.kubernetes.io/master=""` key-value pair. diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md index edeea8a1034ea..3c2300fab417c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md @@ -49,7 +49,7 @@ following steps: run there. 1. Generates the token that additional nodes can use to register - themselves with the master in the future. Optionally, the user can provide a + themselves with a control-plane in the future. Optionally, the user can provide a token via `--token`, as described in the [kubeadm token](/docs/reference/setup-tools/kubeadm/kubeadm-token/) docs. @@ -82,13 +82,13 @@ Note that by calling `kubeadm init` all of the phases and sub-phases will be exe Some phases have unique flags, so if you want to have a look at the list of available options add `--help`, for example: -```bash +```shell sudo kubeadm init phase control-plane controller-manager --help ``` You can also use `--help` to see the list of sub-phases for a certain parent phase: -```bash +```shell sudo kubeadm init phase control-plane --help ``` @@ -96,7 +96,7 @@ sudo kubeadm init phase control-plane --help An example: -```bash +```shell sudo kubeadm init phase control-plane all --config=configfile.yaml sudo kubeadm init phase etcd local --config=configfile.yaml # you can now modify the control plane and etcd manifest files @@ -117,9 +117,10 @@ configuration file options. This file is passed in the `--config` option. In Kubernetes 1.11 and later, the default configuration can be printed out using the [kubeadm config print](/docs/reference/setup-tools/kubeadm/kubeadm-config/) command. + It is **recommended** that you migrate your old `v1alpha3` configuration to `v1beta1` using the [kubeadm config migrate](/docs/reference/setup-tools/kubeadm/kubeadm-config/) command, -because `v1alpha3` will be removed in Kubernetes 1.14. +because `v1alpha3` will be removed in Kubernetes 1.15. For more details on each field in the `v1beta1` configuration you can navigate to our [API reference pages](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1). @@ -266,20 +267,6 @@ with the `kubeadm init` and `kubeadm join` workflow to deploy Kubernetes cluster You may also want to set `--cri-socket` to `kubeadm init` and `kubeadm reset` when using an external CRI implementation. -### Using internal IPs in your cluster - -In order to set up a cluster where the master and worker nodes communicate with internal IP addresses (instead of public ones), execute following steps. - -1. When running init, you must make sure you specify an internal IP for the API server's bind address, like so: - - `kubeadm init --apiserver-advertise-address=` - -2. When a master or worker node has been provisioned, add a flag to `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` that specifies the private IP of the worker node: - - `--node-ip=` - -3. Finally, when you run `kubeadm join`, make sure you provide the private IP of the API server addressed as defined in step 1. - ### Setting the node name By default, `kubeadm` assigns a node name based on a machine's host address. 
You can override this setting with the `--node-name`flag. @@ -296,27 +283,23 @@ manager, and scheduler run as [DaemonSet pods](/docs/concepts/workloads/controll configured via the Kubernetes API instead of [static pods](/docs/tasks/administer-cluster/static-pod/) configured in the kubelet via static files. -To create a self-hosted cluster, pass the flag `--feature-gates=SelfHosting=true` to `kubeadm init`. - -{{< caution >}} -`SelfHosting` is an alpha feature. It is deprecated in 1.12 -and will be removed in 1.13. -{{< /caution >}} +To create a self-hosted cluster see the `kubeadm alpha selfhosting` command. #### Caveats -Self-hosting in 1.8 and later has some important limitations. In particular, a -self-hosted cluster _cannot recover from a reboot of the control-plane node_ -without manual intervention. This and other limitations are expected to be -resolved before self-hosting graduates from alpha. +1. Self-hosting in 1.8 and later has some important limitations. In particular, a + self-hosted cluster _cannot recover from a reboot of the control-plane node_ + without manual intervention. -By default, self-hosted control plane Pods rely on credentials loaded from -[`hostPath`](/docs/concepts/storage/volumes/#hostpath) -volumes. Except for initial creation, these credentials are not managed by -kubeadm. +1. A self-hosted cluster is not upgradeable using `kubeadm upgrade`. -In kubeadm 1.8, the self-hosted portion of the control plane does not include etcd, -which still runs as a static Pod. +1. By default, self-hosted control plane Pods rely on credentials loaded from + [`hostPath`](https://kubernetes.io/docs/concepts/storage/volumes/#hostpath) + volumes. Except for initial creation, these credentials are not managed by + kubeadm. + +1. The self-hosted portion of the control plane does not include etcd, + which still runs as a static Pod. #### Process @@ -345,35 +328,16 @@ In summary, `kubeadm alpha selfhosting` works as follows: ### Running kubeadm without an internet connection -For running kubeadm without an internet connection you have to pre-pull the required master images for the version of choice: - -| Image Name | v1.10 release branch version | -|--------------------------------------------|------------------------------| -| k8s.gcr.io/kube-apiserver-${ARCH} | v1.10.x | -| k8s.gcr.io/kube-controller-manager-${ARCH} | v1.10.x | -| k8s.gcr.io/kube-scheduler-${ARCH} | v1.10.x | -| k8s.gcr.io/kube-proxy-${ARCH} | v1.10.x | -| k8s.gcr.io/etcd-${ARCH} | 3.1.12 | -| k8s.gcr.io/pause-${ARCH} | 3.1 | -| k8s.gcr.io/k8s-dns-sidecar-${ARCH} | 1.14.8 | -| k8s.gcr.io/k8s-dns-kube-dns-${ARCH} | 1.14.8 | -| k8s.gcr.io/k8s-dns-dnsmasq-nanny-${ARCH} | 1.14.8 | -| coredns/coredns | 1.0.6 | - -Here `v1.10.x` means the "latest patch release of the v1.10 branch". - -`${ARCH}` can be one of: `amd64`, `arm`, `arm64`, `ppc64le` or `s390x`. - -If you run Kubernetes version 1.10 or earlier, and if you set `--feature-gates=CoreDNS=true`, -you must also use the `coredns/coredns` image, instead of the three `k8s-dns-*` images. +For running kubeadm without an internet connection you have to pre-pull the required control-plane images. 
In Kubernetes 1.11 and later, you can list and pull the images using the `kubeadm config images` sub-command: -``` + +```shell kubeadm config images list kubeadm config images pull ``` -Starting with Kubernetes 1.12, the `k8s.gcr.io/kube-*`, `k8s.gcr.io/etcd` and `k8s.gcr.io/pause` images +In Kubernetes 1.12 and later, the `k8s.gcr.io/kube-*`, `k8s.gcr.io/etcd` and `k8s.gcr.io/pause` images don't require an `-${ARCH}` suffix. ### Automating kubeadm @@ -381,7 +345,7 @@ don't require an `-${ARCH}` suffix. Rather than copying the token you obtained from `kubeadm init` to each node, as in the [basic kubeadm tutorial](/docs/setup/independent/create-cluster-kubeadm/), you can parallelize the token distribution for easier automation. To implement this automation, you must -know the IP address that the master will have after it is started. +know the IP address that the control-plane node will have after it is started. 1. Generate a token. This token must have the form `<6 character string>.<16 character string>`. More formally, it must match the regex: @@ -389,7 +353,7 @@ know the IP address that the master will have after it is started. kubeadm can generate a token for you: - ```bash + ```shell kubeadm token generate ``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join-phase.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join-phase.md new file mode 100644 index 0000000000000..bb993fa113cc9 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join-phase.md @@ -0,0 +1,62 @@ +--- +title: kubeadm join phase +weight: 90 +--- +In v1.14.0, kubeadm introduces the `kubeadm join phase` command with the aim of making kubeadm more modular. This modularity enables you to invoke atomic sub-steps of the join process. +Hence, you can let kubeadm do some parts and fill in yourself where you need customizations. + +`kubeadm join phase` is consistent with the [kubeadm join workflow](/docs/reference/setup-tools/kubeadm/kubeadm-join/#join-workflow), +and behind the scene both use the same code. + +## kubeadm join phase {#cmd-join-phase} + +{{< tabs name="tab-phase" >}} +{{< tab name="phase" include="generated/kubeadm_join_phase.md" />}} +{{< /tabs >}} + +## kubeadm join phase preflight {#cmd-join-phase-preflight} + +Using this phase you can execute preflight checks on a joining node. + +{{< tabs name="tab-preflight" >}} +{{< tab name="preflight" include="generated/kubeadm_join_phase_preflight.md" />}} +{{< /tabs >}} + +## kubeadm join phase control-plane-prepare {#cmd-join-phase-control-plane-prepare} + +Using this phase you can prepare a node for serving a control-plane. + +{{< tabs name="tab-control-plane-prepare" >}} +{{< tab name="control-plane-prepare" include="generated/kubeadm_join_phase_control-plane-prepare.md" />}} +{{< tab name="all" include="generated/kubeadm_join_phase_control-plane-prepare_all.md" />}} +{{< tab name="download-certs" include="generated/kubeadm_join_phase_control-plane-prepare_download-certs.md" />}} +{{< tab name="certs" include="generated/kubeadm_join_phase_control-plane-prepare_certs.md" />}} +{{< tab name="kubeconfig" include="generated/kubeadm_join_phase_control-plane-prepare_kubeconfig.md" />}} +{{< tab name="control-plane" include="generated/kubeadm_join_phase_control-plane-prepare_control-plane.md" />}} +{{< /tabs >}} + +## kubeadm join phase kubelet-start {#cmd-join-phase-kubelet-start} + +Using this phase you can write the kubelet settings, certificates and (re)start the kubelet. 
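+For example (illustrative placeholder values; the available flags are listed
+in the generated reference below):
+
+```shell
+# Write kubelet settings and certificates, then (re)start the kubelet,
+# discovering the cluster through a bootstrap token.
+kubeadm join phase kubelet-start 1.2.3.4:6443 \
+    --discovery-token abcdef.1234567890abcdef \
+    --discovery-token-ca-cert-hash sha256:1234..cdef
+```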
+ +{{< tabs name="tab-kubelet-start" >}} +{{< tab name="kubelet-start" include="generated/kubeadm_join_phase_kubelet-start.md" />}} +{{< /tabs >}} + +## kubeadm join phase control-plane-join {#cmd-join-phase-control-plane-join} + +Using this phase you can join a node as a control-plane instance. + +{{< tabs name="tab-control-plane-join" >}} +{{< tab name="control-plane-join" include="generated/kubeadm_join_phase_control-plane-join.md" />}} +{{< tab name="all" include="generated/kubeadm_join_phase_control-plane-join_all.md" />}} +{{< tab name="etcd" include="generated/kubeadm_join_phase_control-plane-join_etcd.md" />}} +{{< tab name="update-status" include="generated/kubeadm_join_phase_control-plane-join_update-status.md" />}} +{{< tab name="mark-control-plane" include="generated/kubeadm_join_phase_control-plane-join_mark-control-plane.md" />}} +{{< /tabs >}} + +## What's next +* [kubeadm init](/docs/reference/setup-tools/kubeadm/kubeadm-init/) to bootstrap a Kubernetes control-plane node +* [kubeadm join](/docs/reference/setup-tools/kubeadm/kubeadm-join/) to connect a node to the cluster +* [kubeadm reset](/docs/reference/setup-tools/kubeadm/kubeadm-reset/) to revert any changes made to this host by `kubeadm init` or `kubeadm join` +* [kubeadm alpha](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/) to try experimental functionality diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md index 6c6de5a281606..7852e16af1e0e 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md @@ -14,23 +14,16 @@ This command initializes a Kubernetes worker node and joins it to the cluster. {{% capture body %}} {{< include "generated/kubeadm_join.md" >}} -### The joining workflow +### The join workflow {#join-workflow} -`kubeadm join` bootstraps a Kubernetes worker node and joins it to the cluster. -This action consists of the following steps: +`kubeadm join` bootstraps a Kubernetes worker node or a control-plane node and adds it to the cluster. +This action consists of the following steps for worker nodes: 1. kubeadm downloads necessary cluster information from the API server. By default, it uses the bootstrap token and the CA key hash to verify the authenticity of that data. The root CA can also be discovered directly via a file or URL. -1. If kubeadm is invoked with `--feature-gates=DynamicKubeletConfig` enabled, - it first retrieves the kubelet init configuration from the master and writes it to - the disk. When kubelet starts up, kubeadm updates the node `Node.spec.configSource` property of the node. - See [Set Kubelet parameters via a config file](/docs/tasks/administer-cluster/kubelet-config-file/) - and [Reconfigure a Node's Kubelet in a Live Cluster](/docs/tasks/administer-cluster/reconfigure-kubelet/) - for more information about Dynamic Kubelet Configuration. - 1. Once the cluster information is known, kubelet can start the TLS bootstrapping process. @@ -41,6 +34,40 @@ This action consists of the following steps: 1. Finally, kubeadm configures the local kubelet to connect to the API server with the definitive identity assigned to the node. +For control-plane nodes additional steps are performed: + +1. Downloading certificates shared among control-plane nodes from the cluster + (if explicitly requested by the user). + +1. Generating control-plane component manifests, certificates and kubeconfig. + +1. 
Adding new local etcd member. + +1. Adding this node to the ClusterStatus of the kubeadm cluster. + +### Using join phases with kubeadm {#join-phases} + +Kubeadm allows you join a node to the cluster in phases. The `kubeadm join phase` command was added in v1.14.0. + +To view the ordered list of phases and sub-phases you can call `kubeadm join --help`. The list will be located +at the top of the help screen and each phase will have a description next to it. +Note that by calling `kubeadm join` all of the phases and sub-phases will be executed in this exact order. + +Some phases have unique flags, so if you want to have a look at the list of available options add `--help`, for example: + +```shell +kubeadm join phase kubelet-start --help +``` + +Similar to the [kubeadm init phase](/docs/reference/setup-tools/kubeadm/kubeadm-init/#init-phases) +command, `kubadm join phase` allows you to skip a list of phases using the `--skip-phases` flag. + +For example: + +```shell +sudo kubeadm join --skip-phases=preflight --config=config.yaml +``` + ### Discovering what cluster CA to trust The kubeadm discovery has several options, each with security tradeoffs. @@ -56,27 +83,35 @@ that the API server certificate is valid under the root CA. The CA key hash has the format `sha256:`. By default, the hash value is returned in the `kubeadm join` command printed at the end of `kubeadm init` or in the output of `kubeadm token create --print-join-command`. It is in a standard format (see [RFC7469](https://tools.ietf.org/html/rfc7469#section-2.4)) and can also be calculated by 3rd party tools or provisioning systems. For example, using the OpenSSL CLI: -```bash +```shell openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //' ``` -**Example `kubeadm join` command:** +**Example `kubeadm join` commands:** -```bash +For worker nodes: + +```shell kubeadm join --discovery-token abcdef.1234567890abcdef --discovery-token-ca-cert-hash sha256:1234..cdef 1.2.3.4:6443 ``` +For control-plane nodes: + +```shell +kubeadm join --discovery-token abcdef.1234567890abcdef --discovery-token-ca-cert-hash sha256:1234..cdef --experimental-control-plane 1.2.3.4:6443 +``` + **Advantages:** - Allows bootstrapping nodes to securely discover a root of trust for the - master even if other worker nodes or the network are compromised. + control-plane node even if other worker nodes or the network are compromised. - Convenient to execute manually since all of the information required fits into a single `kubeadm join` command that is easy to copy and paste. **Disadvantages:** - - The CA hash is not normally known until the master has been provisioned, + - The CA hash is not normally known until the control-plane node has been provisioned, which can make it more difficult to build automated provisioning tools that use kubeadm. By generating your CA in beforehand, you may workaround this limitation though. @@ -86,13 +121,13 @@ kubeadm join --discovery-token abcdef.1234567890abcdef --discovery-token-ca-cert _This was the default in Kubernetes 1.7 and earlier_, but comes with some important caveats. This mode relies only on the symmetric token to sign (HMAC-SHA256) the discovery information that establishes the root of trust for -the master. It's still possible in Kubernetes 1.8 and above using the +the control-plane. 
It's still possible in Kubernetes 1.8 and above using the `--discovery-token-unsafe-skip-ca-verification` flag, but you should consider using one of the other modes if possible. **Example `kubeadm join` command:** -``` +```shell kubeadm join --token abcdef.1234567890abcdef --discovery-token-unsafe-skip-ca-verification 1.2.3.4:6443` ``` @@ -100,7 +135,7 @@ kubeadm join --token abcdef.1234567890abcdef --discovery-token-unsafe-skip-ca-ve - Still protects against many network-level attacks. - - The token can be generated ahead of time and shared with the master and + - The token can be generated ahead of time and shared with the control-plane node and worker nodes, which can then bootstrap in parallel without coordination. This allows it to be used in many provisioning scenarios. @@ -108,11 +143,11 @@ kubeadm join --token abcdef.1234567890abcdef --discovery-token-unsafe-skip-ca-ve - If an attacker is able to steal a bootstrap token via some vulnerability, they can use that token (along with network-level access) to impersonate the - master to other bootstrapping nodes. This may or may not be an appropriate + control-plane node to other bootstrapping nodes. This may or may not be an appropriate tradeoff in your environment. #### File or HTTPS-based discovery -This provides an out-of-band way to establish a root of trust between the master +This provides an out-of-band way to establish a root of trust between the control-plane node and bootstrapping nodes. Consider using this mode if you are building automated provisioning using kubeadm. @@ -125,12 +160,12 @@ using kubeadm. **Advantages:** - Allows bootstrapping nodes to securely discover a root of trust for the - master even if the network or other worker nodes are compromised. + control-plane node even if the network or other worker nodes are compromised. **Disadvantages:** - Requires that you have some way to carry the discovery information from - the master to the bootstrapping nodes. This might be possible, for example, + the control-plane node to the bootstrapping nodes. This might be possible, for example, via your cloud provider or provisioning tool. The information in this file is not secret, but HTTPS or equivalent is required to ensure its integrity. @@ -145,21 +180,21 @@ By default, there is a CSR auto-approver enabled that basically approves any cli for a kubelet when a Bootstrap Token was used when authenticating. 
If you don't want the cluster to automatically approve kubelet client certs, you can turn it off by executing this command: -```console +```shell $ kubectl delete clusterrolebinding kubeadm:node-autoapprove-bootstrap ``` After that, `kubeadm join` will block until the admin has manually approved the CSR in flight: -```console -$ kubectl get csr +```shell +kubectl get csr NAME AGE REQUESTOR CONDITION node-csr-c69HXe7aYcqkS1bKmH4faEnHAWxn6i2bHZ2mD04jZyQ 18s system:bootstrap:878f07 Pending -$ kubectl certificate approve node-csr-c69HXe7aYcqkS1bKmH4faEnHAWxn6i2bHZ2mD04jZyQ +kubectl certificate approve node-csr-c69HXe7aYcqkS1bKmH4faEnHAWxn6i2bHZ2mD04jZyQ certificatesigningrequest "node-csr-c69HXe7aYcqkS1bKmH4faEnHAWxn6i2bHZ2mD04jZyQ" approved -$ kubectl get csr +kubectl get csr NAME AGE REQUESTOR CONDITION node-csr-c69HXe7aYcqkS1bKmH4faEnHAWxn6i2bHZ2mD04jZyQ 1m system:bootstrap:878f07 Approved,Issued ``` @@ -169,15 +204,15 @@ Only after `kubectl certificate approve` has been run, `kubeadm join` can procee #### Turning off public access to the cluster-info ConfigMap In order to achieve the joining flow using the token as the only piece of validation information, a - ConfigMap with some data needed for validation of the master's identity is exposed publicly by + ConfigMap with some data needed for validation of the control-plane node's identity is exposed publicly by default. While there is no private data in this ConfigMap, some users might wish to turn it off regardless. Doing so will disable the ability to use the `--discovery-token` flag of the `kubeadm join` flow. Here are the steps to do so: * Fetch the `cluster-info` file from the API Server: -```console -$ kubectl -n kube-public get cm cluster-info -o yaml | grep "kubeconfig:" -A11 | grep "apiVersion" -A10 | sed "s/ //" | tee cluster-info.yaml +```shell +kubectl -n kube-public get cm cluster-info -o yaml | grep "kubeconfig:" -A11 | grep "apiVersion" -A10 | sed "s/ //" | tee cluster-info.yaml apiVersion: v1 clusters: - cluster: @@ -195,8 +230,8 @@ users: [] * Turn off public access to the `cluster-info` ConfigMap: -```console -$ kubectl -n kube-public delete rolebinding kubeadm:bootstrap-signer-clusterinfo +```shell +kubectl -n kube-public delete rolebinding kubeadm:bootstrap-signer-clusterinfo ``` These commands should be run after `kubeadm init` but before `kubeadm join`. @@ -214,7 +249,7 @@ contain a `JoinConfiguration` structure. To print the default values of `JoinConfiguration` run the following command: -```bash +```shell kubeadm config print-default --api-objects=JoinConfiguration ``` diff --git a/content/en/docs/reference/using-api/api-concepts.md b/content/en/docs/reference/using-api/api-concepts.md index 5438cd2a7dce1..b9aaada90cca7 100644 --- a/content/en/docs/reference/using-api/api-concepts.md +++ b/content/en/docs/reference/using-api/api-concepts.md @@ -317,6 +317,116 @@ Some values of an object are typically generated before the object is persisted. * Any field set by a mutating admission controller * For the `Service` resource: Ports or IPs that kube-apiserver assigns to v1.Service objects -{{% /capture %}} +## Server Side Apply + +{{< feature-state for_k8s_version="v1.14" state="alpha" >}} Server Side Apply allows clients other than kubectl to perform the Apply operation, and will eventually fully replace the complicated Client Side Apply logic that only exists in kubectl. If the Server Side Apply feature is enabled, the `PATCH` endpoint accepts the additional `application/apply-patch+yaml` content type. 
Users of Server Side Apply can send partially specified objects to this endpoint. An applied config should always include every field that the applier has an opinion about. + +### Enable the Server Side Apply alpha feature + +Server Side Apply is an alpha feature, so it is disabled by default. To turn this [feature gate](/docs/reference/command-line-tools-reference/feature-gates) on, +you need to include the `--feature-gates ServerSideApply=true` flag when starting `kube-apiserver`. +If you have multiple `kube-apiserver` replicas, all should have the same flag setting. + +### Field Management + +Compared to the `last-applied` annotation managed by `kubectl`, Server Side Apply uses a more declarative approach, which tracks a user's field management, rather than a user's last applied state. This means that as a side effect of using Server Side Apply, information about which field manager manages each field in an object also becomes available. + +For a user to manage a field, in the Server Side Apply sense, means that the user relies on and expects the value of the field not to change. The user who last made an assertion about the value of a field will be recorded as the current field manager. This can be done either by changing the value with `POST`, `PUT`, or non-apply `PATCH`, or by including the field in a config sent to the Server Side Apply endpoint. Any applier that tries to change the field which is managed by someone else will get its request rejected (if not forced, see the Conflicts section below). + +Field management is stored in a newly introduced `managedFields` field that is part of an object's [`metadata`](/docs/reference/generated/kubernetes-api/v1.14/#objectmeta-v1-meta). + +A simple example of an object created by Server Side Apply could look like this: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-cm + namespace: default + labels: + test-label: test + managedFields: + - manager: kubectl + operation: Apply + apiVersion: v1 + fields: + f:metadata: + f:labels: + f:test-label: {} + f:data: + f:key: {} +data: + key: some value +``` + +The above object contains a single manager in `metadata.managedFields`. The manager consists of basic information about the managing entity itself, like operation type, api version, and the fields managed by it. + +{{< note >}} This field is managed by the apiserver and should not be changed by the user. {{< /note >}} + +Nevertheless it is possible to change `metadata.managedFields` through an `Update` operation. Doing so is highly discouraged, but might be a reasonable option to try if, for example, the `managedFields` get into an inconsistent state (which clearly should not happen). + +### Operations + +The two operation types considered by this feature are `Apply` (`PATCH` with content type `application/apply-patch+yaml`) and `Update` (all other operations which modify the object). Both operations update the `managedFields`, but behave a little differently. + +For instance, only the apply operation fails on conflicts while update does not. Also, apply operations are required to identify themselves by providing a `fieldManager` query parameter, while the query parameter is optional for update operations. 
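As an illustration, an apply operation can be issued directly against the `PATCH` endpoint. The following is a hedged sketch, assuming `kubectl proxy` is running locally on its default port 8001 and using a hypothetical field manager name `my-manager`:

```shell
# Server Side Apply of a partial ConfigMap; the fieldManager query parameter
# identifies the applier and is required for apply operations.
curl -X PATCH 'http://127.0.0.1:8001/api/v1/namespaces/default/configmaps/test-cm?fieldManager=my-manager' \
  -H 'Content-Type: application/apply-patch+yaml' \
  --data '
apiVersion: v1
kind: ConfigMap
metadata:
  name: test-cm
  namespace: default
  labels:
    test-label: test
data:
  key: some value
'
```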
+ +An example object with multiple managers could look like this: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-cm + namespace: default + labels: + test-label: test + managedFields: + - manager: kubectl + operation: Apply + apiVersion: v1 + fields: + f:metadata: + f:labels: + f:test-label: {} + - manager: kube-controller-manager + operation: Update + apiVersion: v1 + time: '2019-03-30T16:00:00.000Z' + fields: + f:data: + f:key: {} +data: + key: new value +``` + +In this example, a second operation was run as an `Update` by the manager called `kube-controller-manager`. The update changed a value in the data field which caused the field's management to change to the `kube-controller-manager`. +{{< note >}}If this update would have been an `Apply` operation, the operation would have failed due to conflicting ownership.{{< /note >}} + +### Merge Strategy + +The merging strategy, implemented with Server Side Apply, provides a generally more stable object lifecycle. +Server Side Apply tries to merge fields based on the fact who manages them instead of overruling just based on values. +This way it is intended to make it easier and more stable for multiple actors updating the same object by causing less unexpected interference. + +When a user sends a partially specified object to the Server Side Apply endpoint, the server merges it with the live object favoring the value in the applied config if it is specified in both places. If the set of items present in the applied config is not a superset of the items applied by the same user last time, each missing item not managed by any other field manager is removed. For more information about how an object's schema is used to make decisions when merging, see [sigs.k8s.io/structured-merge-diff](https://sigs.k8s.io/structured-merge-diff). + +### Conflicts + +A conflict is a special status error that occurs when an `Apply` operation tries to change a field, which another user also claims to manage. This prevents an applier from unintentionally overwriting the value set by another user. When this occurs, the applier has 3 options to resolve the conflicts: + +* **Overwrite value, become sole manager:** If overwriting the value was intentional (or if the applier is an automated process like a controller) the applier should set the `force` query parameter to true and make the request again. This forces the operation to succeed, changes the value of the field, and removes the field from all other managers' entries in managedFields. +* **Don't overwrite value, give up management claim:** If the applier doesn't care about the value of the field anymore, they can remove it from their config and make the request again. This leaves the value unchanged, and causes the field to be removed from the applier's entry in managedFields. +* **Don't overwrite value, become shared manager:** If the applier still cares about the value of the field, but doesn't want to overwrite it, they can change the value of the field in their config to match the value of the object on the server, and make the request again. This leaves the value unchanged, and causes the field's management to be shared by the applier and all other field managers that already claimed to manage it. + +### Comparison with Client Side Apply + +A consequence of the conflict detection and resolution implemented by Server Side Apply is that an applier always has up to date field values in their local state. If they don't, they get a conflict the next time they apply. 
Any of the three options to resolve conflicts results in the applied config being an up to date subset of the object on the server's fields. + +This is different from Client Side Apply, where outdated values which have been overwritten by other users are left in an applier's local config. These values only become accurate when the user updates that specific field, if ever, and an applier has no way of knowing whether their next apply will overwrite other users' changes. + +Another difference is that an applier using Client Side Apply is unable to change the API version they are using, but Server Side Apply supports this use case. +### Custom Resources +Server Side Apply currently treats all custom resources as unstructured data. All keys are treated the same as struct fields, and all lists are considered atomic. In the future, it will use the validation field in Custom Resource Definitions to allow Custom Resource authors to define how to how to merge their own objects. diff --git a/content/en/docs/setup/independent/create-cluster-kubeadm.md b/content/en/docs/setup/independent/create-cluster-kubeadm.md index 5a7bc6956c4e3..a91c7e25b1141 100644 --- a/content/en/docs/setup/independent/create-cluster-kubeadm.md +++ b/content/en/docs/setup/independent/create-cluster-kubeadm.md @@ -117,6 +117,10 @@ communicates with). be passed to kubeadm initialization. Depending on which third-party provider you choose, you might need to set the `--pod-network-cidr` to a provider-specific value. See [Installing a pod network add-on](#pod-network). +1. (Optional) Since version 1.14, kubeadm will try to detect the container runtime on Linux +by using a list of well known domain socket paths. To use different container runtime or +if there are more than one installed on the provisioned node, specify the `--cri-socket` +argument to `kubeadm init`. See [Installing runtime](/docs/setup/independent/install-kubeadm/#installing-runtime). 1. (Optional) Unless otherwise specified, kubeadm uses the network interface associated with the default gateway to advertise the master's IP. To use a different network interface, specify the `--apiserver-advertise-address=` argument diff --git a/content/en/docs/setup/independent/high-availability.md b/content/en/docs/setup/independent/high-availability.md index 1ae27f0236e64..38f0425dfc530 100644 --- a/content/en/docs/setup/independent/high-availability.md +++ b/content/en/docs/setup/independent/high-availability.md @@ -19,15 +19,12 @@ control plane nodes and etcd members are separated. Before proceeding, you should carefully consider which approach best meets the needs of your applications and environment. [This comparison topic](/docs/setup/independent/ha-topology/) outlines the advantages and disadvantages of each. -Your clusters must run Kubernetes version 1.12 or later. You should also be aware that -setting up HA clusters with kubeadm is still experimental and will be further simplified -in future versions. You might encounter issues with upgrading your clusters, for example. +You should also be aware that setting up HA clusters with kubeadm is still experimental and will be further +simplified in future versions. You might encounter issues with upgrading your clusters, for example. We encourage you to try either approach, and provide us with feedback in the kubeadm [issue tracker](https://github.com/kubernetes/kubeadm/issues/new). -Note that the alpha feature gate `HighAvailability` is deprecated in v1.12 and removed in v1.13. 
- -See also [The HA upgrade documentation](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-ha-1-13). +See also [The upgrade documentation](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-14). {{< caution >}} This page does not address running your cluster on a cloud provider. In a cloud @@ -57,28 +54,12 @@ For the external etcd cluster only, you also need: - Three additional machines for etcd members -{{< note >}} -The following examples run Calico as the Pod networking provider. If you run another -networking provider, make sure to replace any default values as needed. -{{< /note >}} - {{% /capture %}} {{% capture steps %}} ## First steps for both methods -{{< note >}} -All commands on any control plane or etcd node should be -run as root. -{{< /note >}} - -- Some CNI network plugins like Calico require a CIDR such as `192.168.0.0/16` and - some like Weave do not. See the [CNI network - documentation](/docs/setup/independent/create-cluster-kubeadm/#pod-network). - To add a pod CIDR set the `podSubnet: 192.168.0.0/16` field under - the `networking` object of `ClusterConfiguration`. - ### Create load balancer for kube-apiserver {{< note >}} @@ -119,38 +100,6 @@ option. Your cluster requirements may need a different configuration. 1. Add the remaining control plane nodes to the load balancer target group. -### Configure SSH - -SSH is required if you want to control all nodes from a single machine. - -1. Enable ssh-agent on your main device that has access to all other nodes in - the system: - - ``` - eval $(ssh-agent) - ``` - -1. Add your SSH identity to the session: - - ``` - ssh-add ~/.ssh/path_to_private_key - ``` - -1. SSH between nodes to check that the connection is working correctly. - - - When you SSH to any node, make sure to add the `-A` flag: - - ``` - ssh -A 10.0.0.7 - ``` - - - When using sudo on any node, make sure to preserve the environment so SSH - forwarding works: - - ``` - sudo -E -s - ``` - ## Stacked control plane and etcd nodes ### Steps for the first control plane node @@ -160,9 +109,6 @@ SSH is required if you want to control all nodes from a single machine. apiVersion: kubeadm.k8s.io/v1beta1 kind: ClusterConfiguration kubernetesVersion: stable - apiServer: - certSANs: - - "LOAD_BALANCER_DNS" controlPlaneEndpoint: "LOAD_BALANCER_DNS:LOAD_BALANCER_PORT" - `kubernetesVersion` should be set to the Kubernetes version to use. This @@ -170,131 +116,124 @@ SSH is required if you want to control all nodes from a single machine. - `controlPlaneEndpoint` should match the address or DNS and port of the load balancer. - It's recommended that the versions of kubeadm, kubelet, kubectl and Kubernetes match. -1. Make sure that the node is in a clean state: +{{< note >}} +Some CNI network plugins like Calico require a CIDR such as `192.168.0.0/16` and +some like Weave do not. See the [CNI network +documentation](/docs/setup/independent/create-cluster-kubeadm/#pod-network). +To add a pod CIDR set the `podSubnet: 192.168.0.0/16` field under +the `networking` object of `ClusterConfiguration`. +{{< /note >}} + +1. Initialize the control plane: ```sh - sudo kubeadm init --config=kubeadm-config.yaml + sudo kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs ``` - - You should see something like: + - The `--experimental-upload-certs` flags is used to upload the certificates that should be shared + across all the control-plane instances to the cluster. 
If instead, you prefer to copy certs across + control-plane nodes manually or using automation tools, please remove this flag and refer to [Manual + certificate distribution](#manual-certs) section bellow. + + After the command completes you should see something like so: ```sh ... - You can now join any number of machines by running the following on each node - as root: + You can now join any number of control-plane node by running the following command on each as a root: + kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 --experimental-control-plane --certificate-key f8902e114ef118304e561c3ecd4d0b543adc226b7a07f675f56564185ffe0c07 - kubeadm join 192.168.0.200:6443 --token j04n3m.octy8zely83cy2ts --discovery-token-ca-cert-hash sha256:84938d2a22203a8e56a787ec0c6ddad7bc7dbd52ebabc62fd5f4dbea72b14d1f - ``` - -1. Copy this output to a text file. You will need it later to join other control plane nodes to the - cluster. - -1. Apply the Weave CNI plugin: + Please note that the certificate-key gives access to cluster sensitive data, keep it secret! + As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use kubeadm init phase upload-certs to reload certs afterward. - ```sh - kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" + Then you can join any number of worker nodes by running the following on each as root: + kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 ``` -1. Type the following and watch the pods of the components get started: + - Copy this output to a text file. You will need it later to join control plane and worker nodes to the cluster. + - When `--experimental-upload-certs` is used with `kubeadm init`, the certificates of the primary control plane + are encrypted and uploaded in the `kubeadm-certs` Secret. + - To re-upload the certificates and generate a new decryption key, use the following command on a control plane + node that is already joined to the cluster: - ```sh - kubectl get pod -n kube-system -w - ``` - - - It's recommended that you join new control plane nodes only after the first node has finished initializing. + ```sh + sudo kubeadm init phase upload-certs --experimental-upload-certs + ``` -1. Copy the certificate files from the first control plane node to the rest: - - In the following example, replace `CONTROL_PLANE_IPS` with the IP addresses of the - other control plane nodes. - ```sh - USER=ubuntu # customizable - CONTROL_PLANE_IPS="10.0.0.7 10.0.0.8" - for host in ${CONTROL_PLANE_IPS}; do - scp /etc/kubernetes/pki/ca.crt "${USER}"@$host: - scp /etc/kubernetes/pki/ca.key "${USER}"@$host: - scp /etc/kubernetes/pki/sa.key "${USER}"@$host: - scp /etc/kubernetes/pki/sa.pub "${USER}"@$host: - scp /etc/kubernetes/pki/front-proxy-ca.crt "${USER}"@$host: - scp /etc/kubernetes/pki/front-proxy-ca.key "${USER}"@$host: - scp /etc/kubernetes/pki/etcd/ca.crt "${USER}"@$host:etcd-ca.crt - scp /etc/kubernetes/pki/etcd/ca.key "${USER}"@$host:etcd-ca.key - scp /etc/kubernetes/admin.conf "${USER}"@$host: - done - ``` +{{< note >}} +The `kubeadm-certs` Secret and decryption key expire after two hours. +{{< /note >}} {{< caution >}} -Copy only the certificates in the above list. 
kubeadm will take care of generating the rest of the certificates -with the required SANs for the joining control-plane instances. If you copy all the certificates by mistake, -the creation of additional nodes could fail due to a lack of required SANs. +As stated in the command output, the certificate-key gives access to cluster sensitive data, keep it secret! {{< /caution >}} -### Steps for the rest of the control plane nodes +1. Apply the CNI plugin of your choice: + + [Follow these instructions](/docs/setup/independent/create-cluster-kubeadm/#pod-network) to install + the CNI provider. Make sure the configuration corresponds to the Pod CIDR specified in the kubeadm + configuration file if applicable. -1. Move the files created by the previous step where `scp` was used: + In this example we are using Weave Net: ```sh - USER=ubuntu # customizable - mkdir -p /etc/kubernetes/pki/etcd - mv /home/${USER}/ca.crt /etc/kubernetes/pki/ - mv /home/${USER}/ca.key /etc/kubernetes/pki/ - mv /home/${USER}/sa.pub /etc/kubernetes/pki/ - mv /home/${USER}/sa.key /etc/kubernetes/pki/ - mv /home/${USER}/front-proxy-ca.crt /etc/kubernetes/pki/ - mv /home/${USER}/front-proxy-ca.key /etc/kubernetes/pki/ - mv /home/${USER}/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt - mv /home/${USER}/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key - mv /home/${USER}/admin.conf /etc/kubernetes/admin.conf + kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" ``` - This process writes all the requested files in the `/etc/kubernetes` folder. - -1. Start `kubeadm join` on this node using the join command that was previously given to you by `kubeadm init` on - the first node. It should look something like this: +1. Type the following and watch the pods of the control plane components get started: ```sh - sudo kubeadm join 192.168.0.200:6443 --token j04n3m.octy8zely83cy2ts --discovery-token-ca-cert-hash sha256:84938d2a22203a8e56a787ec0c6ddad7bc7dbd52ebabc62fd5f4dbea72b14d1f --experimental-control-plane + kubectl get pod -n kube-system -w ``` - - Notice the addition of the `--experimental-control-plane` flag. This flag automates joining this - control plane node to the cluster. +### Steps for the rest of the control plane nodes + +{{< caution >}} +You must join new control plane nodes sequentially, only after the first node has finished initializing. +{{< /caution >}} -1. Type the following and watch the pods of the components get started: +For each additional control plane node you should: + +1. Execute the join command that was previously given to you by the `kubeadm init` output on the first node. + It should look something like this: ```sh - kubectl get pod -n kube-system -w + sudo kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 --experimental-control-plane --certificate-key f8902e114ef118304e561c3ecd4d0b543adc226b7a07f675f56564185ffe0c07 ``` -1. Repeat these steps for the rest of the control plane nodes. + - The `--experimental-control-plane` flag tells `kubeadm join` to create a new control plane. + - The `--certificate-key ...` will cause the control plane certificates to be downloaded + from the `kubeadm-certs` Secret in the cluster and be decrypted using the given key. 
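If the join commands above are lost, they can be regenerated later; a hedged sketch using commands that appear elsewhere in these documents:

```sh
# Print a fresh worker join command (a new token plus the CA certificate hash):
kubeadm token create --print-join-command
# Re-upload the control-plane certificates and print a new certificate key;
# append it as --certificate-key, together with --experimental-control-plane,
# to the join command printed above:
sudo kubeadm init phase upload-certs --experimental-upload-certs
```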
## External etcd nodes +Setting up a cluster with external etcd nodes is similar to the procedure used for stacked etcd +with the exception that you should setup etcd first, and you should pass the etcd information +in the kubeadm config file. + ### Set up the etcd cluster -- Follow [these instructions](/docs/setup/independent/setup-ha-etcd-with-kubeadm/) - to set up the etcd cluster. +1. Follow [these instructions](/docs/setup/independent/setup-ha-etcd-with-kubeadm/) + to set up the etcd cluster. -### Set up the first control plane node +1. Setup SSH as described [here](#manual-certs). -1. Copy the following files from any node from the etcd cluster to this node: +1. Copy the following files from any etcd node in the cluster to the first control plane node: ```sh export CONTROL_PLANE="ubuntu@10.0.0.7" - +scp /etc/kubernetes/pki/etcd/ca.crt "${CONTROL_PLANE}": - +scp /etc/kubernetes/pki/apiserver-etcd-client.crt "${CONTROL_PLANE}": - +scp /etc/kubernetes/pki/apiserver-etcd-client.key "${CONTROL_PLANE}": + scp /etc/kubernetes/pki/etcd/ca.crt "${CONTROL_PLANE}": + scp /etc/kubernetes/pki/apiserver-etcd-client.crt "${CONTROL_PLANE}": + scp /etc/kubernetes/pki/apiserver-etcd-client.key "${CONTROL_PLANE}": ``` - - Replace the value of `CONTROL_PLANE` with the `user@host` of this machine. + - Replace the value of `CONTROL_PLANE` with the `user@host` of the first control plane machine. + +### Set up the first control plane node 1. Create a file called `kubeadm-config.yaml` with the following contents: apiVersion: kubeadm.k8s.io/v1beta1 kind: ClusterConfiguration kubernetesVersion: stable - apiServer: - certSANs: - - "LOAD_BALANCER_DNS" controlPlaneEndpoint: "LOAD_BALANCER_DNS:LOAD_BALANCER_PORT" etcd: external: @@ -306,9 +245,13 @@ the creation of additional nodes could fail due to a lack of required SANs. certFile: /etc/kubernetes/pki/apiserver-etcd-client.crt keyFile: /etc/kubernetes/pki/apiserver-etcd-client.key - - The difference between stacked etcd and external etcd here is that we are using the `external` field for `etcd` in the kubeadm config. In the case of the stacked etcd topology this is managed automatically. +{{< note >}} +The difference between stacked etcd and external etcd here is that we are using +the `external` field for `etcd` in the kubeadm config. In the case of the stacked +etcd topology this is managed automatically. +{{< /note >}} - - Replace the following variables in the template with the appropriate values for your cluster: + - Replace the following variables in the config template with the appropriate values for your cluster: - `LOAD_BALANCER_DNS` - `LOAD_BALANCER_PORT` @@ -316,11 +259,13 @@ the creation of additional nodes could fail due to a lack of required SANs. - `ETCD_1_IP` - `ETCD_2_IP` -1. Run `kubeadm init --config kubeadm-config.yaml` on this node. +The following steps are exactly the same as described for stacked etcd setup: -1. Write the join command that is returned to a text file for later use. +1. Run `sudo kubeadm init --config kubeadm-config.yaml --experimental-upload-certs` on this node. -1. Apply the Weave CNI plugin: +1. Write the output join commands that are returned to a text file for later use. + +1. Apply the CNI plugin of your choice. The given example is for Weave Net: ```sh kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" @@ -328,27 +273,103 @@ the creation of additional nodes could fail due to a lack of required SANs. 
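Optionally, before joining additional nodes, you can check that the external etcd cluster is reachable with the client certificates referenced in the configuration above. This is a hedged sketch, assuming the `etcdctl` v3 client is installed on the node; `ETCD_0_IP` is a placeholder:

```sh
ETCDCTL_API=3 etcdctl \
  --cacert /etc/kubernetes/pki/etcd/ca.crt \
  --cert /etc/kubernetes/pki/apiserver-etcd-client.crt \
  --key /etc/kubernetes/pki/apiserver-etcd-client.key \
  --endpoints https://ETCD_0_IP:2379 endpoint health
```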
### Steps for the rest of the control plane nodes -To add the rest of the control plane nodes, follow [these instructions](#steps-for-the-rest-of-the-control-plane-nodes). -The steps are the same as for the stacked etcd setup, with the exception that a local -etcd member is not created. - -To summarize: +The steps are the same as for the stacked etcd setup: - Make sure the first control plane node is fully initialized. -- Copy certificates between the first control plane node and the other control plane nodes. -- Join each control plane node with the join command you saved to a text file, plus add the `--experimental-control-plane` flag. +- Join each control plane node with the join command you saved to a text file. It's recommended +to join the control plane nodes one at a time. +- Don't forget that the decryption key from `--certificate-key` expires after two hours, by default. ## Common tasks after bootstrapping control plane -### Install a pod network +### Install workers -[Follow these instructions](/docs/setup/independent/create-cluster-kubeadm/#pod-network) to install -the pod network. Make sure this corresponds to whichever pod CIDR you provided -in the master configuration file. +Worker nodes can be joined to the cluster with the command you stored previously +as the output from the `kubeadm init` command: -### Install workers +```sh +sudo kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 +``` + +## Manual certificate distribution {#manual-certs} + +If you choose to not use `kubeadm init` with the `--experimental-upload-certs` flag this means that +you are going to have to manually copy the certificates from the primary control plane node to the +joining control plane nodes. + +There are many ways to do this. In the following example we are using `ssh` and `scp`: + +SSH is required if you want to control all nodes from a single machine. + +1. Enable ssh-agent on your main device that has access to all other nodes in + the system: + + ``` + eval $(ssh-agent) + ``` + +1. Add your SSH identity to the session: + + ``` + ssh-add ~/.ssh/path_to_private_key + ``` + +1. SSH between nodes to check that the connection is working correctly. + + - When you SSH to any node, make sure to add the `-A` flag: + + ``` + ssh -A 10.0.0.7 + ``` -Each worker node can now be joined to the cluster with the command returned from any of the -`kubeadm init` commands. The flag `--experimental-control-plane` should not be added to worker nodes. + - When using sudo on any node, make sure to preserve the environment so SSH + forwarding works: + + ``` + sudo -E -s + ``` + +1. After configuring SSH on all the nodes you should run the following script on the first control plane node after + running `kubeadm init`. This script will copy the certificates from the first control plane node to the other + control plane nodes: + + In the following example, replace `CONTROL_PLANE_IPS` with the IP addresses of the + other control plane nodes. 
+ ```sh + USER=ubuntu # customizable + CONTROL_PLANE_IPS="10.0.0.7 10.0.0.8" + for host in ${CONTROL_PLANE_IPS}; do + scp /etc/kubernetes/pki/ca.crt "${USER}"@$host: + scp /etc/kubernetes/pki/ca.key "${USER}"@$host: + scp /etc/kubernetes/pki/sa.key "${USER}"@$host: + scp /etc/kubernetes/pki/sa.pub "${USER}"@$host: + scp /etc/kubernetes/pki/front-proxy-ca.crt "${USER}"@$host: + scp /etc/kubernetes/pki/front-proxy-ca.key "${USER}"@$host: + scp /etc/kubernetes/pki/etcd/ca.crt "${USER}"@$host:etcd-ca.crt + scp /etc/kubernetes/pki/etcd/ca.key "${USER}"@$host:etcd-ca.key + done + ``` + +{{< caution >}} +Copy only the certificates in the above list. kubeadm will take care of generating the rest of the certificates +with the required SANs for the joining control-plane instances. If you copy all the certificates by mistake, +the creation of additional nodes could fail due to a lack of required SANs. +{{< /caution >}} + +1. Then on each joining control plane node you have to run the following script before running `kubeadm join`. + This script will move the previously copied certificates from the home directory to `/etc/kuberentes/pki`: + + ```sh + USER=ubuntu # customizable + mkdir -p /etc/kubernetes/pki/etcd + mv /home/${USER}/ca.crt /etc/kubernetes/pki/ + mv /home/${USER}/ca.key /etc/kubernetes/pki/ + mv /home/${USER}/sa.pub /etc/kubernetes/pki/ + mv /home/${USER}/sa.key /etc/kubernetes/pki/ + mv /home/${USER}/front-proxy-ca.crt /etc/kubernetes/pki/ + mv /home/${USER}/front-proxy-ca.key /etc/kubernetes/pki/ + mv /home/${USER}/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt + mv /home/${USER}/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key + ``` {{% /capture %}} diff --git a/content/en/docs/setup/independent/install-kubeadm.md b/content/en/docs/setup/independent/install-kubeadm.md index 16570b2a36e82..db25539b94eee 100644 --- a/content/en/docs/setup/independent/install-kubeadm.md +++ b/content/en/docs/setup/independent/install-kubeadm.md @@ -83,10 +83,28 @@ The pod network plugin you use (see below) may also require certain ports to be open. Since this differs with each pod network plugin, please see the documentation for the plugins about what port(s) those need. -## Installing runtime +## Installing runtime {#installing-runtime} Since v1.6.0, Kubernetes has enabled the use of CRI, Container Runtime Interface, by default. -The container runtime used by default is Docker, which is enabled through the built-in + +Since v1.14.0, kubeadm will try to automatically detect the container runtime on Linux nodes +by scanning through a list of well known domain sockets. The detectable runtimes and the +socket paths, that are used, can be found in the table below. + +| Runtime | Domain Socket | +|------------|----------------------------------| +| Docker | /var/run/docker.sock | +| containerd | /run/containerd/containerd.sock | +| CRI-O | /var/run/crio/crio.sock | + +If both Docker and containerd are detected together, Docker takes precedence. This is +needed, because Docker 18.09 ships with containerd and both are detectable. +If any other two or more runtimes are detected, kubeadm will exit with an appropriate +error message. + +On non-Linux nodes the container runtime used by default is Docker. + +If the container runtime of choice is Docker, it is used through the built-in `dockershim` CRI implementation inside of the `kubelet`. 
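If more than one supported runtime is installed on a node, or you want to override the automatic detection, you can point kubeadm at a specific socket from the table above. A minimal sketch, assuming containerd is the desired runtime:

```shell
# The same flag is also accepted by `kubeadm join` on joining nodes.
sudo kubeadm init --cri-socket /run/containerd/containerd.sock
```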
Other CRI-based runtimes include: diff --git a/content/en/docs/setup/multiple-zones.md b/content/en/docs/setup/multiple-zones.md index 7b1af187acbe1..4beab2b3b9169 100644 --- a/content/en/docs/setup/multiple-zones.md +++ b/content/en/docs/setup/multiple-zones.md @@ -189,7 +189,7 @@ kubernetes-minion-wf8i Ready 2m v1.13.0 Create a volume using the dynamic volume creation (only PersistentVolumes are supported for zone affinity): ```json -kubectl create -f - <}} +The Kubernetes control plane, including the [master components](/docs/concepts/overview/components/), continues to run on Linux. There are no plans to have a Windows-only Kubernetes cluster. +{{< /note >}} + +{{< note >}} +In this document, when we talk about Windows containers we mean Windows containers with process isolation. Windows containers with [Hyper-V isolation](https://docs.microsoft.com/en-us/virtualization/windowscontainers/manage-containers/hyperv-container) is planned for a future release. +{{< /note >}} + +## Supported Functionality and Limitations + +### Supported Functionality + +#### Compute + +From an API and kubectl perspective, Windows containers behave in much the same way as Linux-based containers. However, there are some notable differences in key functionality which are outlined in the limitation section. + +Let's start with the operating system version. Refer to the following table for Windows operating system support in Kubernetes. A single heterogeneous Kubernetes cluster can have both Windows and Linux worker nodes. Windows containers have to be scheduled on Windows nodes and Linux containers on Linux nodes. + +| Kubernetes version | Host OS version (Kubernetes Node) | | | +| --- | --- | --- | --- | +| | *Windows Server 1709* | *Windows Server 1803* | *Windows Server 1809/Windows Server 2019* | +| *Kubernetes v1.14* | Not Supported | Not Supported| Supported for Windows Server containers Builds 17763.* with Docker EE-basic 18.09 | + +{{< note >}} +We don't expect all Windows customers to update the operating system for their apps frequently. Upgrading your applications is what dictates and necessitates upgrading or introducing new nodes to the cluster. For the customers that chose to upgrade their operating system for containers running on Kubernetes, we will offer guidance and step-by-step instructions when we add support for a new operating system version. This guidance will include recommended upgrade procedures for upgrading user applications together with cluster nodes. Windows nodes adhere to Kubernetes [version-skew policy](/docs/setup/version-skew-policy/) (node to control plane versioning) the same way as Linux nodes do today. +{{< /note >}} +{{< note >}} +The Windows Server Host Operating System is subject to the [Windows Server ](https://www.microsoft.com/en-us/cloud-platform/windows-server-pricing) licensing. The Windows Container images are subject to the [Supplemental License Terms for Windows containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/images-eula). +{{< /note >}} +{{< note >}} +Windows containers with process isolation have strict compatibility rules, [where the host OS version must match the container base image OS version](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/version-compatibility). Once we support Windows containers with Hyper-V isolation in Kubernetes, the limitation and compatibility rules will change. +{{< /note >}} + +Key Kubernetes elements work the same way in Windows as they do in Linux. 
In this section, we talk about some of the key workload enablers and how they map to Windows. + +* [Pods](/docs/concepts/workloads/pods/pod-overview/) + + A Pod is the basic building block of Kubernetes–the smallest and simplest unit in the Kubernetes object model that you create or deploy. The following Pod capabilities, properties and events are supported with Windows containers: + + * Single or multiple containers per Pod with process isolation and volume sharing + * Pod status fields + * Readiness and Liveness probes + * postStart & preStop container lifecycle events + * ConfigMap, Secrets: as environment variables or volumes + * EmptyDir + * Named pipe host mounts + * Resource limits +* [Controllers](/docs/concepts/workloads/controllers/) + + Kubernetes controllers handle the desired state of Pods. The following workload controllers are supported with Windows containers: + + * ReplicaSet + * ReplicationController + * Deployments + * StatefulSets + * DaemonSet + * Job + * CronJob +* [Services](/docs/concepts/services-networking/service/) + + A Kubernetes Service is an abstraction which defines a logical set of Pods and a policy by which to access them - sometimes called a micro-service. You can use services for cross-operating system connectivity. In Windows, services can utilize the following types, properties and capabilities: + + * Service Environment variables + * NodePort + * ClusterIP + * LoadBalancer + * ExternalName + * Headless services + +Pods, Controllers and Services are critical elements to managing Windows workloads on Kubernetes. However, on their own they are not enough to enable the proper lifecycle management of Windows workloads in a dynamic cloud native environment. We added support for the following features: + +* Pod and container metrics +* Horizontal Pod Autoscaler support +* kubectl Exec +* Resource Quotas +* Scheduler preemption + +#### Container Runtime + +Docker EE-basic 18.09 is required on Windows Server 2019 / 1809 nodes for Kubernetes. This works with the dockershim code included in the kubelet. Additional runtimes such as CRI-ContainerD may be supported in later Kubernetes versions. + +#### Storage + +Kubernetes Volumes enable complex applications with data persistence and Pod volume sharing requirements to be deployed on Kubernetes. Kubernetes on Windows supports the following types of [volumes](/docs/concepts/storage/volumes/): + +* FlexVolume out-of-tree plugin with [SMB and iSCSI](https://github.com/Microsoft/K8s-Storage-Plugins/tree/master/flexvolume/windows) support +* [azureDisk](/docs/concepts/storage/volumes/#azuredisk) +* [azureFile](/docs/concepts/storage/volumes/#azurefile) +* [gcePersistentDisk](/docs/concepts/storage/volumes/#gcepersistentdisk) + +#### Networking + +Networking for Windows containers is exposed through [CNI plugins](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/). Windows containers function similarly to virtual machines in regards to networking. Each container has a virtual network adapter (vNIC) which is connected to a Hyper-V virtual switch (vSwitch). The Host Networking Service (HNS) and the Host Compute Service (HCS) work together to create containers and attach container vNICs to networks. HCS is responsible for the management of containers whereas HNS is responsible for the management of networking resources such as: + +* Virtual networks (including creation of vSwitches) +* Endpoints / vNICs +* Namespaces +* Policies (Packet encapsulations, Load-balancing rules, ACLs, NAT'ing rules, etc.) 
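Services are used for Windows workloads the same way they are for Linux workloads. As a hedged illustration, a hypothetical Windows Deployment named `win-webserver` could be exposed with one of the Service types listed below:

```shell
# win-webserver is an illustrative name, not an object defined on this page.
kubectl expose deployment win-webserver --type=NodePort --port=80
```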
+ +The following service spec types are supported: + +* NodePort +* ClusterIP +* LoadBalancer +* ExternalName + +Windows supports five different networking drivers/modes: L2bridge, L2tunnel, Overlay, Transparent, and NAT. In a heterogeneous cluster with Windows and Linux worker nodes, you need to select a networking solution that is compatible on both Windows and Linux. The following out-of-tree plugins are supported on Windows, with recommendations on when to use each CNI: + +| Network Driver | Description | Container Packet Modifications | Network Plugins | Network Plugin Characteristics | +| -------------- | ----------- | ------------------------------ | --------------- | ------------------------------ | +| L2bridge | Containers are attached to an external vSwitch. Containers are attached to the underlay network, although the physical network doesn't need to learn the container MACs because they are rewritten on ingress/egress. Inter-container traffic is bridged inside the container host. | MAC is rewritten to host MAC, IP remains the same. | [win-bridge](https://github.com/containernetworking/plugins/tree/master/plugins/main/windows/win-bridge), [Azure-CNI](https://github.com/Azure/azure-container-networking/blob/master/docs/cni.md), Flannel host-gateway uses win-bridge | win-bridge uses L2bridge network mode, connects containers to the underlay of hosts, offering best performance. Requires L2 adjacency between container hosts | +| L2Tunnel | This is a special case of l2bridge, but only used on Azure. All packets are sent to the virtualization host where SDN policy is applied. | MAC rewritten, IP visible on the underlay network | [Azure-CNI](https://github.com/Azure/azure-container-networking/blob/master/docs/cni.md) | Azure-CNI allows integration of containers with Azure vNET, and allows them to leverage the set of capabilities that [Azure Virtual Network provides](https://azure.microsoft.com/en-us/services/virtual-network/). For example, securely connect to Azure services or use Azure NSGs. See [azure-cni for some examples](https://docs.microsoft.com/en-us/azure/aks/concepts-network#azure-cni-advanced-networking) | +| Overlay (Overlay networking for Windows in Kubernetes is in *alpha* stage) | Containers are given a vNIC connected to an external vSwitch. Each overlay network gets its own IP subnet, defined by a custom IP prefix.The overlay network driver uses VXLAN encapsulation. | Encapsulated with an outer header, inner packet remains the same. | [Win-overlay](https://github.com/containernetworking/plugins/tree/master/plugins/main/windows/win-overlay), Flannel VXLAN (uses win-overlay) | win-overlay should be used when virtual container networks are desired to be isolated from underlay of hosts (e.g. for security reasons). Allows for IPs to be re-used for different overlay networks (which have different VNID tags) if you are restricted on IPs in your datacenter. This option may be used when the container hosts are not L2 adjacent but have L3 connectivity | +| Transparent (special use case for [ovn-kubernetes](https://github.com/openvswitch/ovn-kubernetes)) | Requires an external vSwitch. Containers are attached to an external vSwitch which enables intra-pod communication via logical networks (logical switches and routers). | Packet is encapsulated either via [GENEVE](https://datatracker.ietf.org/doc/draft-gross-geneve/) or [STT](https://datatracker.ietf.org/doc/draft-davie-stt/) tunneling to reach pods which are not on the same host.
Packets are forwarded or dropped via the tunnel metadata information supplied by the ovn network controller.
NAT is done for north-south communication. | [ovn-kubernetes](https://github.com/openvswitch/ovn-kubernetes) | [Deploy via ansible](https://github.com/openvswitch/ovn-kubernetes/tree/master/contrib). Distributed ACLs can be applied via Kubernetes policies. IPAM support. Load-balancing can be achieved without kube-proxy. NATing is done without using iptables/netsh. | +| NAT (*not used in Kubernetes*) | Containers are given a vNIC connected to an internal vSwitch. DNS/DHCP is provided using an internal component called [WinNAT](https://blogs.technet.microsoft.com/virtualization/2016/05/25/windows-nat-winnat-capabilities-and-limitations/) | MAC and IP is rewritten to host MAC/IP. | [nat](https://github.com/Microsoft/windows-container-networking/tree/master/plugins/nat) | Included here for completeness | + +As outlined above, the [Flannel](https://github.com/coreos/flannel) CNI [meta plugin](https://github.com/containernetworking/plugins/tree/master/plugins/meta/flannel) is also supported on [Windows](https://github.com/containernetworking/plugins/tree/master/plugins/meta/flannel#windows-support-experimental) via the [VXLAN network backend](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan) (**alpha support** ; delegates to win-overlay) and [host-gateway network backend](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) (stable support; delegates to win-bridge). This plugin supports delegating to one of the reference CNI plugins (win-overlay, win-bridge), to work in conjunction with Flannel daemon on Windows (Flanneld) for automatic node subnet lease assignment and HNS network creation. This plugin reads in its own configuration file (net-conf.json), and aggregates it with the environment variables from the FlannelD generated subnet.env file. It then delegates to one of the reference CNI plugins for network plumbing, and sends the correct configuration containing the node-assigned subnet to the IPAM plugin (e.g. host-local). + +For the node, pod, and service objects, the following network flows are supported for TCP/UDP traffic: + +* Pod -> Pod (IP) +* Pod -> Pod (Name) +* Pod -> Service (Cluster IP) +* Pod -> Service (PQDN, but only if there are no ".") +* Pod -> Service (FQDN) +* Pod -> External (IP) +* Pod -> External (DNS) +* Node -> Pod +* Pod -> Node + +The following IPAM options are supported on Windows: + +* [Host-local](https://github.com/containernetworking/plugins/tree/master/plugins/ipam/host-local) +* HNS IPAM (Inbox platform IPAM, this is a fallback when no IPAM is set) +* [Azure-vnet-ipam](https://github.com/Azure/azure-container-networking/blob/master/docs/ipam.md) (for azure-cni only) + +### Limitations + +#### Control Plane + +Windows is only supported as a worker node in the Kubernetes architecture and component matrix. This means that a Kubernetes cluster must always include Linux master nodes, zero or more Linux worker nodes, and zero or more Windows worker nodes. + +#### Compute + +##### Resource management and process isolation + + Linux cgroups are used as a pod boundary for resource controls in Linux. Containers are created within that boundary for network, process and file system isolation. The cgroups APIs can be used to gather cpu/io/memory stats. In contrast, Windows uses a Job object per container with a system namespace filter to contain all processes in a container and provide logical isolation from the host. There is no way to run a Windows container without the namespace filtering in place. 
This means that system privileges cannot be asserted in the context of the host, and thus privileged containers are not available on Windows. Containers cannot assume an identity from the host because the Security Account Manager (SAM) is separate. + +##### Operating System Restrictions + +Windows has strict compatibility rules, where the host OS version must match the container base image OS version. Only Windows containers with a container operating system of Windows Server 2019 are supported. Hyper-V isolation of containers, enabling some backward compatibility of Windows container image versions, is planned for a future release. + +##### Feature Restrictions + +* TerminationGracePeriod: not implemented +* Single file mapping: to be implemented with CRI-ContainerD +* Termination message: to be implemented with CRI-ContainerD +* Privileged Containers: not currently supported in Windows containers +* HugePages: not currently supported in Windows containers +* The existing node problem detector is Linux-only and requires privileged containers. In general, we don't expect this to be used on Windows because privileged containers are not supported +* Not all features of shared namespaces are supported (see API section for more details) + +##### Memory Reservations and Handling + +Windows does not have an out-of-memory process killer as Linux does. Windows always treats all user-mode memory allocations as virtual, and pagefiles are mandatory. The net effect is that Windows won't reach out of memory conditions the same way Linux does, and processes page to disk instead of being subject to out of memory (OOM) termination. If memory is over-provisioned and all physical memory is exhausted, then paging can slow down performance. + +Keeping memory usage within reasonable bounds is possible with a two-step process. First, use the kubelet parameters `--kubelet-reserve` and/or `--system-reserve` to account for memory usage on the node (outside of containers). This reduces [NodeAllocatable](/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable)). As you deploy workloads, use resource limits (must set only limits or limits must equal requests) on containers. This also subtracts from NodeAllocatable and prevents the scheduler from adding more pods once a node is full. + +A best practice to avoid over-provisioning is to configure the kubelet with a system reserved memory of at least 2GB to account for Windows, Docker, and Kubernetes processes. + +The behavior of the flags behave differently as described below: + +* `--kubelet-reserve`, `--system-reserve` , and `--eviction-hard` flags update Node Allocatable +* Eviction by using `--enforce-node-allocable` is not implemented +* Eviction by using `--eviction-hard` and `--eviction-soft` are not implemented +* MemoryPressure Condition is not implemented +* There are no OOM eviction actions taken by the kubelet +* Kubelet running on the windows node does not have memory restrictions. `--kubelet-reserve` and `--system-reserve` do not set limits on kubelet or processes running the host. This means kubelet or a process on the host could cause memory resource starvation outside the node-allocatable and scheduler + +#### Storage + +Windows has a layered filesystem driver to mount container layers and create a copy filesystem based on NTFS. All file paths in the container are resolved only within the context of that container. 
+ +* Volume mounts can only target a directory in the container, and not an individual file +* Volume mounts cannot project files or directories back to the host filesystem +* Read-only filesystems are not supported because write access is always required for the Windows registry and SAM database. However, read-only volumes are supported +* Volume user-masks and permissions are not available. Because the SAM is not shared between the host and container, there's no mapping between them. All permissions are resolved within the context of the container + +As a result, the following storage functionality is not supported on Windows nodes: + +* Volume subpath mounts. Only the entire volume can be mounted in a Windows container. +* Subpath volume mounting for Secrets +* Host mount projection +* DefaultMode (due to UID/GID dependency) +* Read-only root filesystem. Mapped volumes still support readOnly +* Block device mapping +* Memory as the storage medium +* CSI plugins which require privileged containers +* File system features like uid/gid and per-user Linux filesystem permissions +* NFS based storage/volume support +* Expanding the mounted volume (resizefs) + +#### Networking + +Windows Container Networking differs in some important ways from Linux networking. The [Microsoft documentation for Windows Container Networking](https://docs.microsoft.com/en-us/virtualization/windowscontainers/container-networking/architecture) contains additional details and background. + +The Windows Host Networking Service (HNS) and the virtual switch implement namespacing and can create virtual NICs as needed for a pod or container. However, many configurations such as DNS, routes, and metrics are stored in the Windows registry database rather than in files under /etc as they are on Linux. The Windows registry for the container is separate from that of the host, so concepts like mapping /etc/resolv.conf from the host into a container don't have the same effect they would on Linux. These must be configured using Windows APIs run in the context of that container. Therefore, CNI implementations need to call HNS instead of relying on file mappings to pass network details into the pod or container. + +The following networking functionality is not supported on Windows nodes: + +* Host networking mode is not available for Windows pods +* Local NodePort access from the node itself fails (it works for other nodes or external clients) +* Accessing service VIPs from nodes will be available with a future release of Windows Server +* Overlay networking support in kube-proxy is an alpha release. In addition, it requires [KB4482887](https://support.microsoft.com/en-us/help/4482887/windows-10-update-kb4482887) to be installed on Windows Server 2019 +* `kubectl port-forward` +* Local Traffic Policy and DSR mode +* Windows containers connected to l2bridge, l2tunnel, or overlay networks do not support communicating over the IPv6 stack. There is outstanding Windows platform work required to enable these network drivers to consume IPv6 addresses, and subsequent Kubernetes work in kubelet, kube-proxy, and the CNI plugins. +* Outbound communication using the ICMP protocol via the win-overlay, win-bridge, and Azure-CNI plugins. Specifically, the Windows data plane ([VFP](https://www.microsoft.com/en-us/research/project/azure-virtual-filtering-platform/)) doesn't support ICMP packet transpositions. This means: + * ICMP packets directed to destinations within the same network (e.g.
pod to pod communication via ping) work as expected and without any limitations + * TCP/UDP packets work as expected and without any limitations + * ICMP packets directed to pass through a remote network (e.g. pod to external internet communication via ping) cannot be transposed and thus will not be routed back to their source + * Since TCP/UDP packets can still be transposed, one can substitute `ping ` with `curl ` to be able to debug connectivity to the outside world. + +##### CNI Plugins + +* Windows reference network plugins win-bridge and win-overlay do not currently implement [CNI spec](https://github.com/containernetworking/cni/blob/master/SPEC.md) v0.4.0 due to missing "CHECK" implementation. +* The Flannel VXLAN CNI has the following limitations on Windows: + +1. Node-pod connectivity isn't possible by design. It's only possible for local pods with Flannel [PR 1096](https://github.com/coreos/flannel/pull/1096) +2. We are restricted to using VNI 4096 and UDP port 4789. The VNI limitation is being worked on and will be overcome in a future release (open-source flannel changes). See the official [Flannel VXLAN](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan) backend docs for more details on these parameters. + +##### DNS {#dns-limitations} + +* ClusterFirstWithHostNet is not supported for DNS. Windows treats all names with a '.' as a FQDN and skips PQDN resolution +* On Linux, you have a DNS suffix list, which is used when trying to resolve PQDNs. On Windows, we only have 1 DNS suffix, which is the DNS suffix associated with that pod's namespace (mydns.svc.cluster.local for example). Windows can resolve FQDNs and services or names resolvable with just that suffix. For example, a pod spawned in the default namespace, will have the DNS suffix **default.svc.cluster.local**. On a Windows pod, you can resolve both **kubernetes.default.svc.cluster.local** and **kubernetes**, but not the in-betweens, like **kubernetes.default** or **kubernetes.default.svc**. + +##### Security + +Secrets are written in clear text on the node's volume (as compared to tmpfs/in-memory on linux). This means customers have to do two things + +1. Use file ACLs to secure the secrets file location +2. Use volume-level encryption using [BitLocker](https://docs.microsoft.com/en-us/windows/security/information-protection/bitlocker/bitlocker-how-to-deploy-on-windows-server) + +[RunAsUser ](/docs/concepts/policy/pod-security-policy/#users-and-groups)is not currently supported on Windows. The workaround is to create local accounts before packaging the container. The RunAsUsername capability may be added in a future release. + +Linux specific pod security context privileges such as SELinux, AppArmor, Seccomp, Capabilities (POSIX Capabilities), and others are not supported. + +In addition, as mentioned already, privileged containers are not supported on Windows. + +#### API + +There are no differences in how most of the Kubernetes APIs work for Windows. The subtleties around what's different come down to differences in the OS and container runtime. In certain situations, some properties on workload APIs such as Pod or Container were designed with an assumption that they are implemented on Linux, failing to run on Windows. + +At a high level, these OS concepts are different: + +* Identity - Linux uses userID (UID) and groupID (GID) which are represented as integer types. User and group names are not canonical - they are just an alias in `/etc/groups` or `/etc/passwd` back to UID+GID. 
Windows uses a larger binary security identifier (SID) which is stored in the Windows Security Account Manager (SAM) database. This database is not shared between the host and containers, or between containers. +* File permissions - Windows uses an access control list based on SIDs, rather than a bitmask of permissions and UID+GID +* File paths - convention on Windows is to use **\** instead of **/**. The Go IO libraries typically accept both and just make it work, but when you're setting a path or command line that's interpreted inside a container, **\** may be needed. +* Signals - Windows interactive apps handle termination differently, and can implement one or more of these: + * A UI thread handles well-defined messages including WM_CLOSE + * Console apps handle Ctrl-C or Ctrl-Break using a Control Handler + * Services register a Service Control Handler function that can accept SERVICE_CONTROL_STOP control codes + +Exit codes follow the same convention where 0 is success and nonzero is failure. The specific error codes may differ across Windows and Linux. However, exit codes passed from the Kubernetes components (kubelet, kube-proxy) are unchanged. + +##### V1.Container + +* V1.Container.ResourceRequirements.limits.cpu and V1.Container.ResourceRequirements.limits.memory - Windows doesn't use hard limits for CPU allocations. Instead, a share system is used. The existing fields based on millicores are scaled into relative shares that are followed by the Windows scheduler. [see: kuberuntime/helpers_windows.go](https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/kuberuntime/helpers_windows.go), [see: resource controls in Microsoft docs](https://docs.microsoft.com/en-us/virtualization/windowscontainers/manage-containers/resource-controls) + * Huge pages are not implemented in the Windows container runtime, and are not available. They require [asserting a user privilege](https://docs.microsoft.com/en-us/windows/desktop/Memory/large-page-support) that's not configurable for containers. +* V1.Container.ResourceRequirements.requests.cpu and V1.Container.ResourceRequirements.requests.memory - Requests are subtracted from node available resources, so they can be used to avoid overprovisioning a node. However, they cannot be used to guarantee resources in an overprovisioned node. They should be applied to all containers as a best practice if the operator wants to avoid overprovisioning entirely. +* V1.Container.SecurityContext.allowPrivilegeEscalation - not possible on Windows; none of the capabilities are hooked up +* V1.Container.SecurityContext.Capabilities - POSIX capabilities are not implemented on Windows +* V1.Container.SecurityContext.privileged - Windows doesn't support privileged containers +* V1.Container.SecurityContext.procMount - Windows doesn't have a /proc filesystem +* V1.Container.SecurityContext.readOnlyRootFilesystem - not possible on Windows; write access is required for registry and system processes to run inside the container +* V1.Container.SecurityContext.runAsGroup - not possible on Windows, no GID support +* V1.Container.SecurityContext.runAsNonRoot - Windows does not have a root user. The closest equivalent is ContainerAdministrator, which is an identity that doesn't exist on the node. +* V1.Container.SecurityContext.runAsUser - not possible on Windows, no UID support as int.
+* V1.Container.SecurityContext.seLinuxOptions - not possible on Windows, no SELinux +* V1.Container.terminationMessagePath - this has some limitations in that Windows doesn't support mapping single files. The default value is /dev/termination-log, which does work because it does not exist on Windows by default. + +##### V1.Pod + +* V1.Pod.hostIPC, v1.pod.hostpid - host namespace sharing is not possible on Windows +* V1.Pod.hostNetwork - There is no Windows OS support to share the host network +* V1.Pod.dnsPolicy - ClusterFirstWithHostNet - is not supported because Host Networking is not supported on Windows. +* V1.Pod.podSecurityContext - see V1.PodSecurityContext below +* V1.Pod.shareProcessNamespace - this is a beta feature, and depends on Linux namespaces which are not implemented on Windows. Windows cannot share process namespaces or the container's root filesystem. Only the network can be shared. +* V1.Pod.terminationGracePeriodSeconds - this is not fully implemented in Docker on Windows, see: [reference](https://github.com/moby/moby/issues/25982). The behavior today is that the ENTRYPOINT process is sent CTRL_SHUTDOWN_EVENT, then Windows waits 5 seconds by default, and finally shuts down all processes using the normal Windows shutdown behavior. The 5 second default is actually in the Windows registry [inside the container](https://github.com/moby/moby/issues/25982#issuecomment-426441183), so it can be overridden when the container is built. +* V1.Pod.volumeDevices - this is a beta feature, and is not implemented on Windows. Windows cannot attach raw block devices to pods. +* V1.Pod.volumes - EmptyDir, Secret, ConfigMap, HostPath - all work and have tests in TestGrid + * V1.emptyDirVolumeSource - the Node default medium is disk on Windows. Memory is not supported, as Windows does not have a built-in RAM disk. +* V1.VolumeMount.mountPropagation - only MountPropagationHostToContainer is available. Windows cannot create mounts within a pod or project them back to the node. + +##### V1.PodSecurityContext + +None of the PodSecurityContext fields work on Windows. They're listed here for reference. + +* V1.PodSecurityContext.SELinuxOptions - SELinux is not available on Windows +* V1.PodSecurityContext.RunAsUser - provides a UID, not available on Windows +* V1.PodSecurityContext.RunAsGroup - provides a GID, not available on Windows +* V1.PodSecurityContext.RunAsNonRoot - Windows does not have a root user. The closest equivalent is ContainerAdministrator which is an identity that doesn't exist on the node. +* V1.PodSecurityContext.SupplementalGroups - provides GID, not available on Windows +* V1.PodSecurityContext.Sysctls - these are part of the Linux sysctl interface. There's no equivalent on Windows. + +## Getting Help and Troubleshooting {#troubleshooting} + +Your main source of help for troubleshooting your Kubernetes cluster should start with this [section](/docs/tasks/debug-application-cluster/troubleshooting/). Some additional, Windows-specific troubleshooting help is included in this section. Logs are an important element of troubleshooting issues in Kubernetes. Make sure to include them any time you seek troubleshooting assistance from other contributors. Follow the instructions in the SIG-Windows [contributing guide on gathering logs](https://github.com/kubernetes/community/blob/master/sig-windows/CONTRIBUTING.md#gathering-logs). + +1. How do I know start.ps1 completed successfully? 
+ + You should see kubelet, kube-proxy, and (if you chose Flannel as your networking solution) flanneld host-agent processes running on your node, with their logs displayed in separate PowerShell windows. In addition to this, your Windows node should be listed as "Ready" in your Kubernetes cluster. + +1. Can I configure the Kubernetes node processes to run in the background as services? + + Kubelet and kube-proxy can run as native Windows Services, offering resiliency by restarting the services automatically in the event of failure (for example, a process crash). You have two options for configuring these node components as services. + + 1. As native Windows Services + + Kubelet and kube-proxy can be run as native Windows Services using `sc.exe`. + + ```powershell + # Create the services for kubelet and kube-proxy in two separate commands + sc.exe create <component_name> binPath= "<path_to_binary> --service <other_args>" + + # Please note that if the arguments contain spaces, they must be escaped. + sc.exe create kubelet binPath= "C:\kubelet.exe --service --hostname-override 'minion' " + + # Start the services + Start-Service kubelet + Start-Service kube-proxy + + # Stop the services + Stop-Service kubelet (-Force) + Stop-Service kube-proxy (-Force) + + # Query the service status + Get-Service kubelet + Get-Service kube-proxy + ``` + + 1. Using nssm.exe + + You can also use alternative service managers like [nssm.exe](https://nssm.cc/) to run these processes (flanneld, kubelet & kube-proxy) in the background for you. You can use this [sample script](https://github.com/Microsoft/SDN/tree/master/Kubernetes/flannel/register-svc.ps1), leveraging nssm.exe to register kubelet, kube-proxy, and flanneld.exe to run as Windows services in the background. + + ```powershell + register-svc.ps1 -NetworkMode <network mode> -ManagementIP <Windows node IP> -ClusterCIDR <cluster subnet> -KubeDnsServiceIP <kube-dns service IP> -LogDir <log directory> + + # NetworkMode = The network mode l2bridge (flannel host-gw, also the default value) or overlay (flannel vxlan) chosen as a network solution + # ManagementIP = The IP address assigned to the Windows node. You can use ipconfig to find this + # ClusterCIDR = The cluster subnet range. (Default value 10.244.0.0/16) + # KubeDnsServiceIP = The Kubernetes DNS service IP (Default value 10.96.0.10) + # LogDir = The directory where kubelet and kube-proxy logs are redirected into their respective output files (Default value C:\k) + ``` + + If the above referenced script is not suitable, you can manually configure nssm.exe using the following examples.
+ ```powershell + # Register flanneld.exe + nssm install flanneld C:\flannel\flanneld.exe + nssm set flanneld AppParameters --kubeconfig-file=c:\k\config --iface= --ip-masq=1 --kube-subnet-mgr=1 + nssm set flanneld AppEnvironmentExtra NODE_NAME= + nssm set flanneld AppDirectory C:\flannel + nssm start flanneld + + # Register kubelet.exe + # Microsoft releases the pause infrastructure container at mcr.microsoft.com/k8s/core/pause:1.0.0 + # For more info search for "pause" in the "Guide for adding Windows Nodes in Kubernetes" + nssm install kubelet C:\k\kubelet.exe + nssm set kubelet AppParameters --hostname-override= --v=6 --pod-infra-container-image=mcr.microsoft.com/k8s/core/pause:1.0.0 --resolv-conf="" --allow-privileged=true --enable-debugging-handlers --cluster-dns= --cluster-domain=cluster.local --kubeconfig=c:\k\config --hairpin-mode=promiscuous-bridge --image-pull-progress-deadline=20m --cgroups-per-qos=false --log-dir= --logtostderr=false --enforce-node-allocatable="" --network-plugin=cni --cni-bin-dir=c:\k\cni --cni-conf-dir=c:\k\cni\config + nssm set kubelet AppDirectory C:\k + nssm start kubelet + + # Register kube-proxy.exe (l2bridge / host-gw) + nssm install kube-proxy C:\k\kube-proxy.exe + nssm set kube-proxy AppDirectory c:\k + nssm set kube-proxy AppParameters --v=4 --proxy-mode=kernelspace --hostname-override=--kubeconfig=c:\k\config --enable-dsr=false --log-dir= --logtostderr=false + nssm.exe set kube-proxy AppEnvironmentExtra KUBE_NETWORK=cbr0 + nssm set kube-proxy DependOnService kubelet + nssm start kube-proxy + + # Register kube-proxy.exe (overlay / vxlan) + nssm install kube-proxy C:\k\kube-proxy.exe + nssm set kube-proxy AppDirectory c:\k + nssm set kube-proxy AppParameters --v=4 --proxy-mode=kernelspace --feature-gates="WinOverlay=true" --hostname-override= --kubeconfig=c:\k\config --network-name=vxlan0 --source-vip= --enable-dsr=false --log-dir= --logtostderr=false + nssm set kube-proxy DependOnService kubelet + nssm start kube-proxy + ``` + + + For initial troubleshooting, you can use the following flags in [nssm.exe](https://nssm.cc/) to redirect stdout and stderr to a output file: + + ```powershell + nssm set AppStdout C:\k\mysvc.log + nssm set AppStderr C:\k\mysvc.log + ``` + + For additional details, see official [nssm usage](https://nssm.cc/usage) docs. + +1. My Windows Pods do not have network connectivity + + If you are using virtual machines, ensure that MAC spoofing is enabled on all the VM network adapter(s). + +1. My Windows Pods cannot ping external resources + + Windows Pods do not have outbound rules programmed for the ICMP protocol today. However, TCP/UDP is supported. When trying to demonstrate connectivity to resources outside of the cluster, please substitute `ping ` with corresponding `curl ` commands. + + If you are still facing problems, most likely your network configuration in [cni.conf](https://github.com/Microsoft/SDN/blob/master/Kubernetes/flannel/l2bridge/cni/config/cni.conf) deserves some extra attention. You can always edit this static file. The configuration update will apply to any newly created Kubernetes resources. + + One of the Kubernetes networking requirements (see [Kubernetes model](/docs/concepts/cluster-administration/networking/)) is for cluster communication to occur without NAT internally. To honor this requirement, there is an [ExceptionList](https://github.com/Microsoft/SDN/blob/master/Kubernetes/flannel/l2bridge/cni/config/cni.conf#L20) for all the communication where we do not want outbound NAT to occur. 
However, this also means that you need to exclude the external IP you are trying to query from the ExceptionList. Only then will the traffic originating from your Windows pods be SNAT'ed correctly to receive a response from the outside world. In this regard, your ExceptionList in `cni.conf` should look as follows: + + ```conf + "ExceptionList": [ + "10.244.0.0/16", # Cluster subnet + "10.96.0.0/12", # Service subnet + "10.127.130.0/24" # Management (host) subnet + ] + ``` + +1. My Windows node cannot access NodePort service + + Local NodePort access from the node itself fails. This is a known limitation. NodePort access works from other nodes or external clients. + +1. vNICs and HNS endpoints of containers are being deleted + + This issue can be caused when the `hostname-override` parameter is not passed to [kube-proxy](/docs/reference/command-line-tools-reference/kube-proxy/). To resolve it, users need to pass the hostname to kube-proxy as follows: + + ```powershell + C:\k\kube-proxy.exe --hostname-override=$(hostname) + ``` + +1. With flannel my nodes are having issues after rejoining a cluster + + Whenever a previously deleted node is being re-joined to the cluster, flannelD tries to assign a new pod subnet to the node. Users should remove the old pod subnet configuration files in the following paths: + + ```powershell + Remove-Item C:\k\SourceVip.json + Remove-Item C:\k\SourceVipRequest.json + ``` + +1. After launching `start.ps1`, flanneld is stuck in "Waiting for the Network to be created" + + There are numerous reports of this [issue which are being investigated](https://github.com/coreos/flannel/issues/1066); most likely it is a timing issue for when the management IP of the flannel network is set. A workaround is to simply relaunch start.ps1 or relaunch it manually as follows: + + ```powershell + PS C:> [Environment]::SetEnvironmentVariable("NODE_NAME", "") + PS C:> C:\flannel\flanneld.exe --kubeconfig-file=c:\k\config --iface= --ip-masq=1 --kube-subnet-mgr=1 + ``` + +1. My Windows Pods cannot launch because of missing `/run/flannel/subnet.env` + + This indicates that Flannel didn't launch correctly. You can either try to restart flanneld.exe or you can copy the files over manually from `/run/flannel/subnet.env` on the Kubernetes master to` C:\run\flannel\subnet.env` on the Windows worker node and modify the `FLANNEL_SUBNET` row to a different number. For example, if node subnet 10.244.4.1/24 is desired: + + ```env + FLANNEL_NETWORK=10.244.0.0/16 + FLANNEL_SUBNET=10.244.4.1/24 + FLANNEL_MTU=1500 + FLANNEL_IPMASQ=true + ``` + +1. My Windows node cannot access my services using the service IP + + This is a known limitation of the current networking stack on Windows. Windows Pods are able to access the service IP however. + +1. No network adapter is found when starting kubelet + + The Windows networking stack needs a virtual adapter for Kubernetes networking to work. If the following commands return no results (in an admin shell), virtual network creation — a necessary prerequisite for Kubelet to work — has failed: + + ```powershell + Get-HnsNetwork | ? Name -ieq "cbr0" + Get-NetAdapter | ? Name -Like "vEthernet (Ethernet*" + ``` + + Often it is worthwhile to modify the [InterfaceName](https://github.com/Microsoft/SDN/blob/master/Kubernetes/flannel/l2bridge/start.ps1#L6) parameter of the start.ps1 script, in cases where the host's network adapter isn't "Ethernet". 
Otherwise, consult the output of the `start-kubelet.ps1` script to see if there are errors during virtual network creation. + +1. My Pods are stuck at "ContainerCreating" or restarting over and over + + Check that your pause image is compatible with your OS version. The [instructions](https://docs.microsoft.com/en-us/virtualization/windowscontainers/kubernetes/deploying-resources) assume that both the OS and the containers are version 1803. If you have a later version of Windows, such as an Insider build, you need to adjust the images accordingly. Please refer to Microsoft's [Docker repository](https://hub.docker.com/u/microsoft/) for images. Regardless, both the pause image Dockerfile and the sample service expect the image to be tagged as `:latest`. + + Starting with Kubernetes v1.14, Microsoft releases the pause infrastructure container at `mcr.microsoft.com/k8s/core/pause:1.0.0`. For more information, search for "pause" in the [Guide for adding Windows Nodes in Kubernetes](../user-guide-windows-nodes). + +1. DNS resolution is not working properly + + Check the DNS limitations for Windows in this [section](#dns-limitations). + +### Further investigation + +If these steps don't resolve your problem, you can get help running Windows containers on Windows nodes in Kubernetes through: + +* StackOverflow [Windows Server Container](https://stackoverflow.com/questions/tagged/windows-server-container) topic +* Kubernetes Official Forum [discuss.kubernetes.io](https://discuss.kubernetes.io/) +* Kubernetes Slack [#SIG-Windows Channel](https://kubernetes.slack.com/messages/sig-windows) + +## Reporting Issues and Feature Requests + +If you have what looks like a bug, or you would like to make a feature request, please use the [GitHub issue tracking system](https://github.com/kubernetes/kubernetes/issues). You can open issues on [GitHub](https://github.com/kubernetes/kubernetes/issues/new/choose) and assign them to SIG-Windows. First search the list of issues in case it was reported previously; if so, comment with your experience and add any additional logs. SIG-Windows Slack is also a great avenue to get some initial support and troubleshooting ideas prior to creating a ticket. + +If filing a bug, please include detailed information about how to reproduce the problem, such as: + +* Kubernetes version: the output of `kubectl version` +* Environment details: Cloud provider, OS distro, networking choice and configuration, and Docker version +* Detailed steps to reproduce the problem +* [Relevant logs](https://github.com/kubernetes/community/blob/master/sig-windows/CONTRIBUTING.md#gathering-logs) +* Tag the issue sig/windows by commenting on the issue with `/sig windows` to bring it to a SIG-Windows member's attention + +{{% /capture %}} + +{{% capture whatsnext %}} + +We have a lot of features in our roadmap. An abbreviated high-level list is included below, but we encourage you to view our [roadmap project](https://github.com/orgs/kubernetes/projects/8) and help us make Windows support better by [contributing](https://github.com/kubernetes/community/blob/master/sig-windows/). + +### CRI-ContainerD + +ContainerD is another OCI-compliant runtime that recently graduated as a CNCF project. It's currently tested on Linux, but version 1.3 will bring support for Windows and Hyper-V. [[reference](https://blog.docker.com/2019/02/containerd-graduates-within-the-cncf/)] + +The CRI-ContainerD interface will be able to manage sandboxes based on Hyper-V.
This provides a foundation where RuntimeClass could be implemented for new use cases including: + +* Hypervisor-based isolation between pods for additional security +* Backwards compatibility allowing a node to run a newer Windows Server version without requiring containers to be rebuilt +* Specific CPU/NUMA settings for a pod +* Memory isolation and reservations + +### Hyper-V isolation + +The existing Hyper-V isolation support, an experimental feature as of v1.10, will be deprecated in the future in favor of the CRI-ContainerD and RuntimeClass features mentioned above. To use the current features and create a Hyper-V isolated container, the kubelet should be started with feature gates `HyperVContainer=true` and the Pod should include the annotation `experimental.windows.kubernetes.io/isolation-type=hyperv`. In the experiemental release, this feature is limited to 1 container per Pod. + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: iis +spec: + selector: + matchLabels: + app: iis + replicas: 3 + template: + metadata: + labels: + app: iis + annotations: + experimental.windows.kubernetes.io/isolation-type: hyperv + spec: + containers: + - name: iis + image: microsoft/iis + ports: + - containerPort: 80 +``` + +### Deployment with kubeadm and cluster API + +Kubeadm is becoming the de facto standard for users to deploy a Kubernetes cluster. Windows node support in kubeadm will come in a future release. We are also making investments in cluster API to ensure Windows nodes are properly provisioned. + +### A few other key features +* Beta support for Group Managed Service Accounts +* More CNIs +* More Storage Plugins + +{{% /capture %}} diff --git a/content/en/docs/setup/windows/user-guide-windows-containers.md b/content/en/docs/setup/windows/user-guide-windows-containers.md new file mode 100644 index 0000000000000..0928f394f8d92 --- /dev/null +++ b/content/en/docs/setup/windows/user-guide-windows-containers.md @@ -0,0 +1,140 @@ +--- +reviewers: +- michmike +- patricklang +title: Guide for scheduling Windows containers in Kubernetes +content_template: templates/concept +weight: 75 +--- + +{{% capture overview %}} + +Windows applications constitute a large portion of the services and applications that run in many organizations. This guide walks you through the steps to configure and deploy a Windows container in Kubernetes. + +{{% /capture %}} + +{{% capture body %}} + +## Objectives + +* Configure an example deployment to run Windows containers on the Windows node +* (Optional) Configure an Active Directory Identity for your Pod using Group Managed Service Accounts (GMSA) + +## Before you begin + +* Create a Kubernetes cluster that includes a [master and a worker node running Windows Server](../user-guide-windows-nodes) +* It is important to note that creating and deploying services and workloads on Kubernetes behaves in much the same way for Linux and Windows containers. [Kubectl commands](/docs/reference/kubectl/overview/) to interface with the cluster are identical. The example in the section below is provided simply to jumpstart your experience with Windows containers. + +## Getting Started: Deploying a Windows container + +To deploy a Windows container on Kubernetes, you must first create an example application. The example YAML file below creates a simple webserver application. 
Create a service spec named `win-webserver.yaml` with the contents below: + +```yaml + apiVersion: v1 + kind: Service + metadata: + name: win-webserver + labels: + app: win-webserver + spec: + ports: + # the port that this service should serve on + - port: 80 + targetPort: 80 + selector: + app: win-webserver + type: NodePort + --- + apiVersion: extensions/v1beta1 + kind: Deployment + metadata: + labels: + app: win-webserver + name: win-webserver + spec: + replicas: 2 + template: + metadata: + labels: + app: win-webserver + name: win-webserver + spec: + containers: + - name: windowswebserver + image: mcr.microsoft.com/windows/servercore:ltsc2019 + command: + - powershell.exe + - -command + - "<#code used from https://gist.github.com/wagnerandrade/5424431#> ; $$listener = New-Object System.Net.HttpListener ; $$listener.Prefixes.Add('http://*:80/') ; $$listener.Start() ; $$callerCounts = @{} ; Write-Host('Listening at http://*:80/') ; while ($$listener.IsListening) { ;$$context = $$listener.GetContext() ;$$requestUrl = $$context.Request.Url ;$$clientIP = $$context.Request.RemoteEndPoint.Address ;$$response = $$context.Response ;Write-Host '' ;Write-Host('> {0}' -f $$requestUrl) ; ;$$count = 1 ;$$k=$$callerCounts.Get_Item($$clientIP) ;if ($$k -ne $$null) { $$count += $$k } ;$$callerCounts.Set_Item($$clientIP, $$count) ;$$ip=(Get-NetAdapter | Get-NetIpAddress); $$header='

<html><body><H1>Windows Container Web Server</H1>' ;$$callerCountsString='' ;$$callerCounts.Keys | % { $$callerCountsString+='<p>
IP {0} callerCount {1} ' -f $$ip[1].IPAddress,$$callerCounts.Item($$_) } ;$$footer='' ;$$content='{0}{1}{2}' -f $$header,$$callerCountsString,$$footer ;Write-Output $$content ;$$buffer = [System.Text.Encoding]::UTF8.GetBytes($$content) ;$$response.ContentLength64 = $$buffer.Length ;$$response.OutputStream.Write($$buffer, 0, $$buffer.Length) ;$$response.Close() ;$$responseStatus = $$response.StatusCode ;Write-Host('< {0}' -f $$responseStatus) } ; " + nodeSelector: + beta.kubernetes.io/os: windows +``` + +{{< note >}} +Port mapping is also supported, but for simplicity in this example the container port 80 is exposed directly to the service. +{{< /note >}} + +1. Check that all nodes are healthy: + + ```bash + kubectl get nodes + ``` + +1. Deploy the service and watch for pod updates: + + ```bash + kubectl apply -f win-webserver.yaml + kubectl get pods -o wide -w + ``` + + When the service is deployed correctly both Pods are marked as Ready. To exit the watch command, press Ctrl+C. + +1. Check that the deployment succeeded. To verify: + + * Two containers per pod on the Windows node, use `docker ps` + * Two pods listed from the Linux master, use `kubectl get pods` + * Node-to-pod communication across the network, `curl` port 80 of your pod IPs from the Linux master to check for a web server response + * Pod-to-pod communication, ping between pods (and across hosts, if you have more than one Windows node) using docker exec or kubectl exec + * Service-to-pod communication, `curl` the virtual service IP (seen under `kubectl get services`) from the Linux master and from individual pods + * Service discovery, `curl` the service name with the Kubernetes [default DNS suffix](/docs/concepts/services-networking/dns-pod-service/#services) + * Inbound connectivity, `curl` the NodePort from the Linux master or machines outside of the cluster + * Outbound connectivity, `curl` external IPs from inside the pod using kubectl exec + +{{< note >}} +Windows container hosts are not able to access the IP of services scheduled on them due to current platform limitations of the Windows networking stack. Only Windows pods are able to access service IPs. +{{< /note >}} + +## Managing Workload Identity with Group Managed Service Accounts + +Starting with Kubernetes v1.14, Windows container workloads can be configured to use Group Managed Service Accounts (GMSA). Group Managed Service Accounts are a specific type of Active Directory account that provides automatic password management, simplified service principal name (SPN) management, and the ability to delegate the management to other administrators across multiple servers. Containers configured with a GMSA can access external Active Directory Domain resources while carrying the identity configured with the GMSA. Learn more about configuring and using GMSA for Windows containers [here](/docs/tasks/configure-pod-container/configure-gmsa/). + +## Taints and Tolerations + +Users today need to use some combination of taints and node selectors in order to keep Linux and Windows workloads on their respective OS-specific nodes. This likely imposes a burden only on Windows users. The recommended approach is outlined below, with one of its main goals being that this approach should not break compatibility for existing Linux workloads. + +### Ensuring OS-specific workloads land on the appropriate container host + +Users can ensure Windows containers can be scheduled on the appropriate host using Taints and Tolerations. 
All Kubernetes nodes today have the following default labels: + +* beta.kubernetes.io/os = [windows|linux] +* beta.kubernetes.io/arch = [amd64|arm64|...] + +If a Pod specification does not specify a nodeSelector like `"beta.kubernetes.io/os": windows`, it is possible the Pod can be scheduled on any host, Windows or Linux. This can be problematic since a Windows container can only run on Windows and a Linux container can only run on Linux. The best practice is to use a nodeSelector. + +However, we understand that in many cases users have a pre-existing large number of deployments for Linux containers, as well as an ecosystem of off-the-shelf configurations, such as community Helm charts, and programmatic Pod generation cases, such as with Operators. In those situations, you may be hesitant to make the configuration change to add nodeSelectors. The alternative is to use Taints. Because the kubelet can set Taints during registration, it could easily be modified to automatically add a taint when running on Windows only. + +For example: `--register-with-taints='os=Win1809:NoSchedule'` + +By adding a taint to all Windows nodes, nothing will be scheduled on them (that includes existing Linux Pods). In order for a Windows Pod to be scheduled on a Windows node, it would need both the nodeSelector to choose Windows, and the appropriate matching toleration. + +```yaml +nodeSelector: + "beta.kubernetes.io/os": windows +tolerations: + - key: "os" + operator: "Equal" + value: "Win1809" + effect: "NoSchedule" +``` + +{{% /capture %}} diff --git a/content/en/docs/setup/windows/user-guide-windows-nodes.md b/content/en/docs/setup/windows/user-guide-windows-nodes.md new file mode 100644 index 0000000000000..454e67084b33e --- /dev/null +++ b/content/en/docs/setup/windows/user-guide-windows-nodes.md @@ -0,0 +1,273 @@ +--- +reviewers: +- michmike +- patricklang +title: Guide for adding Windows Nodes in Kubernetes +content_template: templates/concept +weight: 70 +--- + +{{% capture overview %}} + +The Kubernetes platform can now be used to run both Linux and Windows containers. One or more Windows nodes can be registered to a cluster. This guide shows how to: + +* Register a Windows node to the cluster +* Configure networking so pods on Linux and Windows can communicate + +{{% /capture %}} + +{{% capture body %}} + +## Before you begin + +* Obtain a [Windows Server license](https://www.microsoft.com/en-us/cloud-platform/windows-server-pricing) in order to configure the Windows node that hosts Windows containers. You can use your organization's licenses for the cluster, or acquire one from Microsoft, a reseller, or via the major cloud providers such as GCP, AWS, and Azure by provisioning a virtual machine running Windows Server through their marketplaces. A [time-limited trial](https://www.microsoft.com/en-us/cloud-platform/windows-server-trial) is also available. +* Build a Linux-based Kubernetes cluster in which you have access to the control plane (some examples include [Getting Started from Scratch](/docs/setup/scratch/), [kubeadm](/docs/setup/independent/create-cluster-kubeadm/), [AKS Engine](/docs/setup/turnkey/azure/), [GCE](/docs/setup/turnkey/gce/), [AWS](/docs/setup/turnkey/aws/)). + +## Getting Started: Adding a Windows Node to Your Cluster + +### Plan IP Addressing + +Kubernetes cluster management requires careful planning of your IP addresses so that you do not inadvertently cause network collision. 
This guide assumes that you are familiar with the [Kubernetes networking concepts](/docs/concepts/cluster-administration/networking/). + +In order to deploy your cluster you need the following address spaces: + +| Subnet / address range | Description | Default value | +| --- | --- | --- | +| Service Subnet | A non-routable, purely virtual subnet that is used by pods to uniformly access services without caring about the network topology. It is translated to/from routable address space by `kube-proxy` running on the nodes. | 10.96.0.0/12 | +| Cluster Subnet | This is a global subnet that is used by all pods in the cluster. Each node is assigned a smaller /24 subnet from this for their pods to use. It must be large enough to accommodate all pods used in your cluster. To calculate *minimumsubnet* size: `(number of nodes) + (number of nodes * maximum pods per node that you configure)`. Example: for a 5 node cluster for 100 pods per node: `(5) + (5 * 100) = 505.` | 10.244.0.0/16 | +| Kubernetes DNS Service IP | IP address of `kube-dns` service that is used for DNS resolution & cluster service discovery. | 10.96.0.10 | + +Review the networking options supported in 'Intro to Windows containers in Kubernetes: Supported Functionality: Networking' to determine how you need to allocate IP addresses for your cluster. + +### Components that run on Windows + +While the Kubernetes control plane runs on your Linux node(s), the following components are configured and run on your Windows node(s). + +1. kubelet +2. kube-proxy +3. kubectl (optional) +4. Container runtime + +Get the latest binaries from [https://github.com/kubernetes/kubernetes/releases](https://github.com/kubernetes/kubernetes/releases), starting with v1.14 or later. The Windows-amd64 binaries for kubeadm, kubectl, kubelet, and kube-proxy can be found under the CHANGELOG link. + +### Networking Configuration + +Once you have a Linux-based Kubernetes master node you are ready to choose a networking solution. This guide illustrates using Flannel in VXLAN mode for simplicity. + +#### Configuring Flannel in VXLAN mode on the Linux controller + +1. Prepare Kubernetes master for Flannel + + Some minor preparation is recommended on the Kubernetes master in our cluster. It is recommended to enable bridged IPv4 traffic to iptables chains when using Flannel. This can be done using the following command: + + ```bash + sudo sysctl net.bridge.bridge-nf-call-iptables=1 + ``` + +1. Download & configure Flannel + + Download the most recent Flannel manifest: + + ```bash + wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml + ``` + + There are two sections you should modify to enable the vxlan networking backend: + + After applying the steps below, the `net-conf.json` section of `kube-flannel.yml` should look as follows: + + ```json + net-conf.json: | + { + "Network": "10.244.0.0/16", + "Backend": { + "Type": "vxlan", + "VNI" : 4096, + "Port": 4789 + } + } + ``` + + {{< note >}} + The VNI must be set to 4096 and port 4789 for Flannel on Linux to interoperate with Flannel on Windows. Support for other VNIs is coming soon. See the VXLAN documentation at https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan for an explanation of these fields. + {{< /note >}} + +1. In the `net-conf.json` section of your `kube-flannel.yml`, double-check: + 1. The cluster subnet (e.g. "10.244.0.0/16") is set as per your IP plan. + * VNI 4096 is set in the backend + * Port 4789 is set in the backend + 2. 
In the `cni-conf.json` section of your `kube-flannel.yml`, change the network name to `vxlan0`. + + + Your `cni-conf.json` should look as follows: + + ```json + cni-conf.json: | + { + "name": "vxlan0", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + ``` + +1. Apply the Flannel yaml and Validate + + Let's apply the Flannel configuration: + + ```bash + kubectl apply -f kube-flannel.yml + ``` + + Next, since the Flannel pods are Linux-based, apply a NodeSelector patch, which can be found [here](https://github.com/Microsoft/SDN/blob/1d5c055bb195fecba07ad094d2d7c18c188f9d2d/Kubernetes/flannel/l2bridge/manifests/node-selector-patch.yml), to the Flannel DaemonSet pod: + + ```bash + kubectl patch ds/kube-flannel-ds-amd64 --patch "$(cat node-selector-patch.yml)" -n=kube-system + ``` + + After a few minutes, you should see all the pods as running if the Flannel pod network was deployed. + + ```bash + kubectl get pods --all-namespaces + ``` + + ![alt_text](../flannel-master-kubeclt-get-pods.png "flannel master kubectl get pods screen capture") + + Verify that the Flannel DaemonSet has the NodeSelector applied. + + ```bash + kubectl get ds -n kube-system + ``` + + ![alt_text](../flannel-master-kubectl-get-ds.png "flannel master kubectl get ds screen capture") + +#### Join Windows Worker + +In this section we'll cover configuring a Windows node from scratch to join a cluster on-prem. If your cluster is on a cloud you'll likely want to follow the cloud specific guides in the next section. + +#### Preparing a Windows Node +{{< note >}} +All code snippets in Windows sections are to be run in a PowerShell environment with elevated permissions (Admin). +{{< /note >}} + +1. Install Docker (requires a system reboot) + + Kubernetes uses [Docker](https://www.docker.com/) as its container engine, so we need to install it. You can follow the [official Docs instructions](https://docs.microsoft.com/en-us/virtualization/windowscontainers/manage-docker/configure-docker-daemon#install-docker), the [Docker instructions](https://store.docker.com/editions/enterprise/docker-ee-server-windows), or try the following *recommended* steps: + + ```PowerShell + Enable-WindowsOptionalFeature -FeatureName Containers + Restart-Computer -Force + Install-Module -Name DockerMsftProvider -Repository PSGallery -Force + Install-Package -Name Docker -ProviderName DockerMsftProvider + ``` + + If you are behind a proxy, the following PowerShell environment variables must be defined: + + ```PowerShell + [Environment]::SetEnvironmentVariable("HTTP_PROXY", "http://proxy.example.com:80/", [EnvironmentVariableTarget]::Machine) + [Environment]::SetEnvironmentVariable("HTTPS_PROXY", "http://proxy.example.com:443/", [EnvironmentVariableTarget]::Machine) + ``` + + If after reboot you see the following error, you need to restart the docker service manually + + ![alt_text](../windows-docker-error.png "windows docker error screen capture") + + ```PowerShell + Start-Service docker + ``` + + {{< note >}} + The "pause" (infrastructure) image is hosted on Microsoft Container Registry (MCR). You can access it using "docker pull mcr.microsoft.com/k8s/core/pause:1.0.0". The DOCKERFILE is available at https://github.com/Microsoft/SDN/blob/master/Kubernetes/windows/Dockerfile. + {{< /note >}} + +1. 
Prepare a Windows directory for Kubernetes + + Create a "Kubernetes for Windows" directory to store Kubernetes binaries as well as any deployment scripts and config files. + + ```PowerShell + mkdir c:\k + ``` + +1. Copy Kubernetes certificate + + Copy the Kubernetes certificate file `$HOME/.kube/config` [from the Linux controller](https://docs.microsoft.com/en-us/virtualization/windowscontainers/kubernetes/creating-a-linux-master#collect-cluster-information) to this new `C:\k` directory on your Windows node. + + Tip: You can use tools such as [xcopy](https://docs.microsoft.com/en-us/windows-server/administration/windows-commands/xcopy), [WinSCP](https://winscp.net/eng/download.php), or this [PowerShell wrapper for WinSCP](https://www.powershellgallery.com/packages/WinSCP/5.13.2.0) to transfer the config file between nodes. + +1. Download Kubernetes binaries + + To be able to run Kubernetes, you first need to download the `kubelet` and `kube-proxy` binaries. You download these from the Node Binaries links in the CHANGELOG.md file of the [latest releases](https://github.com/kubernetes/kubernetes/releases/). For example 'kubernetes-node-windows-amd64.tar.gz'. You may also optionally download `kubectl` to run on Windows which you can find under Client Binaries. + + Use the [Expand-Archive](https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.archive/expand-archive?view=powershell-6) PowerShell command to extract the archive and place the binaries into `C:\k`. + +#### Join the Windows node to the Flannel cluster + +The Flannel overlay deployment scripts and documentation are available in [this repository](https://github.com/Microsoft/SDN/tree/master/Kubernetes/flannel/overlay). The following steps are a simple walkthrough of the more comprehensive instructions available there. + +Download the [Flannel start.ps1](https://github.com/Microsoft/SDN/blob/master/Kubernetes/flannel/start.ps1) script, the contents of which should be extracted to `C:\k`: + +```PowerShell +cd c:\k +[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 +wget https://raw.githubusercontent.com/Microsoft/SDN/master/Kubernetes/flannel/start.ps1 -o c:\k\start.ps1 +``` + +{{< note >}} +[start.ps1](https://github.com/Microsoft/SDN/blob/master/Kubernetes/flannel/start.ps1) references [install.ps1](https://github.com/Microsoft/SDN/blob/master/Kubernetes/windows/install.ps1), which downloads additional files such as the `flanneld` executable and the [Dockerfile for infrastructure pod](https://github.com/Microsoft/SDN/blob/master/Kubernetes/windows/Dockerfile) and install those for you. For overlay networking mode, the [firewall](https://github.com/Microsoft/SDN/blob/master/Kubernetes/windows/helper.psm1#L111) is opened for local UDP port 4789. There may be multiple powershell windows being opened/closed as well as a few seconds of network outage while the new external vSwitch for the pod network is being created the first time. Run the script using the arguments as specified below: +{{< /note >}} + +```PowerShell +.\start.ps1 -ManagementIP -NetworkMode overlay -ClusterCIDR -ServiceCIDR -KubeDnsServiceIP -LogDir +``` + +| Parameter | Default Value | Notes | +| --- | --- | --- | +| -ManagementIP | N/A (required) | The IP address assigned to the Windows node. You can use `ipconfig` to find this. 
| +| -NetworkMode | l2bridge | We're using `overlay` here | +| -ClusterCIDR | 10.244.0.0/16 | Refer to your cluster IP plan | +| -ServiceCIDR | 10.96.0.0/12 | Refer to your cluster IP plan | +| -KubeDnsServiceIP | 10.96.0.10 | | +| -InterfaceName | Ethernet | The name of the network interface of the Windows host. You can use ipconfig to find this. | +| -LogDir | C:\k | The directory where kubelet and kube-proxy logs are redirected into their respective output files. | + +Now you can view the Windows nodes in your cluster by running the following: + +```bash +kubectl get nodes +``` + +{{< note >}} +You may want to configure your Windows node components like kubelet and kube-proxy to run as services. View the services and background processes section under [troubleshooting](#troubleshooting) for additional instructions. Once you are running the node components as services, collecting logs becomes an important part of troubleshooting. View the [gathering logs](https://github.com/kubernetes/community/blob/master/sig-windows/CONTRIBUTING.md#gathering-logs) section of the contributing guide for further instructions. +{{< /note >}} + +### Public Cloud Providers + +#### Azure + +AKS-Engine can deploy a complete, customizable Kubernetes cluster with both Linux & Windows nodes. There is a step-by-step walkthrough available in the [docs on GitHub](https://github.com/Azure/aks-engine/blob/master/docs/topics/windows.md). + +#### GCP + +Users can easily deploy a complete Kubernetes cluster on GCE following this step-by-step walkthrough on [GitHub](https://github.com/kubernetes/kubernetes/blob/master/cluster/gce/windows/README-GCE-Windows-kube-up.md) + +#### Deployment with kubeadm and cluster API + +Kubeadm is becoming the de facto standard for users to deploy a Kubernetes cluster. Windows node support in kubeadm will come in a future release. We are also making investments in cluster API to ensure Windows nodes are properly provisioned. + +### Next Steps + +Now that you've configured a Windows worker in your cluster to run Windows containers you may want to add one or more Linux nodes as well to run Linux containers. You are now ready to schedule Windows containers on your cluster. + +{{% /capture %}} diff --git a/content/en/docs/setup/windows/windows-docker-error.png b/content/en/docs/setup/windows/windows-docker-error.png new file mode 100644 index 0000000000000..d00528c0d4cc4 Binary files /dev/null and b/content/en/docs/setup/windows/windows-docker-error.png differ diff --git a/content/en/docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume.md b/content/en/docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume.md index edf999f3caeb0..27a7d5fabdc7b 100644 --- a/content/en/docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume.md +++ b/content/en/docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume.md @@ -44,7 +44,7 @@ directory of the nginx server. 
Create the Pod and the two Containers: - kubectl create -f https://k8s.io/examples/pods/two-container-pod.yaml + kubectl apply -f https://k8s.io/examples/pods/two-container-pod.yaml View information about the Pod and the Containers: diff --git a/content/en/docs/tasks/access-application-cluster/connecting-frontend-backend.md b/content/en/docs/tasks/access-application-cluster/connecting-frontend-backend.md index c35dd3571b4d6..3cb90383c7abb 100644 --- a/content/en/docs/tasks/access-application-cluster/connecting-frontend-backend.md +++ b/content/en/docs/tasks/access-application-cluster/connecting-frontend-backend.md @@ -49,7 +49,7 @@ file for the backend Deployment: Create the backend Deployment: ```shell -kubectl create -f https://k8s.io/examples/service/access/hello.yaml +kubectl apply -f https://k8s.io/examples/service/access/hello.yaml ``` View information about the backend Deployment: @@ -113,7 +113,7 @@ that have the labels `app: hello` and `tier: backend`. Create the `hello` Service: ```shell -kubectl create -f https://k8s.io/examples/service/access/hello-service.yaml +kubectl apply -f https://k8s.io/examples/service/access/hello-service.yaml ``` At this point, you have a backend Deployment running, and you have a @@ -140,7 +140,7 @@ the Service uses the default load balancer of your cloud provider. Create the frontend Deployment and Service: ```shell -kubectl create -f https://k8s.io/examples/service/access/frontend.yaml +kubectl apply -f https://k8s.io/examples/service/access/frontend.yaml ``` The output verifies that both resources were created: diff --git a/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md b/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md index a303b9a8780b6..f05ee8297ba83 100644 --- a/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md +++ b/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md @@ -28,7 +28,7 @@ for database debugging. 1. Create a Redis deployment: - kubectl create -f https://k8s.io/examples/application/guestbook/redis-master-deployment.yaml + kubectl apply -f https://k8s.io/examples/application/guestbook/redis-master-deployment.yaml The output of a successful command verifies that the deployment was created: @@ -64,7 +64,7 @@ for database debugging. 2. Create a Redis service: - kubectl create -f https://k8s.io/examples/application/guestbook/redis-master-service.yaml + kubectl apply -f https://k8s.io/examples/application/guestbook/redis-master-service.yaml The output of a successful command verifies that the service was created: diff --git a/content/en/docs/tasks/access-application-cluster/web-ui-dashboard.md b/content/en/docs/tasks/access-application-cluster/web-ui-dashboard.md index e62aeca24e7c2..f51eac0f71244 100644 --- a/content/en/docs/tasks/access-application-cluster/web-ui-dashboard.md +++ b/content/en/docs/tasks/access-application-cluster/web-ui-dashboard.md @@ -30,7 +30,7 @@ Dashboard also provides information on the state of Kubernetes resources in your The Dashboard UI is not deployed by default. 
To deploy it, run the following command: ``` -kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended/kubernetes-dashboard.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended/kubernetes-dashboard.yaml ``` ## Accessing the Dashboard UI diff --git a/content/en/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definition-versioning.md b/content/en/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definition-versioning.md index 4e8effea64637..8bed722a7599f 100644 --- a/content/en/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definition-versioning.md +++ b/content/en/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definition-versioning.md @@ -87,10 +87,10 @@ spec: ``` You can save the CustomResourceDefinition in a YAML file, then use -`kubectl create` to create it. +`kubectl apply` to create it. ```shell -kubectl create -f my-versioned-crontab.yaml +kubectl apply -f my-versioned-crontab.yaml ``` After creation, the API server starts to serve each enabled version at an HTTP diff --git a/content/en/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions.md b/content/en/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions.md index ab18190bf5436..29af0df9d81d3 100644 --- a/content/en/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions.md +++ b/content/en/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions.md @@ -70,7 +70,7 @@ spec: And create it: ```shell -kubectl create -f resourcedefinition.yaml +kubectl apply -f resourcedefinition.yaml ``` Then a new namespaced RESTful API endpoint is created at: @@ -112,7 +112,7 @@ spec: and create it: ```shell -kubectl create -f my-crontab.yaml +kubectl apply -f my-crontab.yaml ``` You can then manage your CronTab objects using kubectl. For example: @@ -288,7 +288,7 @@ spec: And create it: ```shell -kubectl create -f resourcedefinition.yaml +kubectl apply -f resourcedefinition.yaml ``` A request to create a custom object of kind `CronTab` will be rejected if there are invalid values in its fields. @@ -313,7 +313,7 @@ spec: and create it: ```shell -kubectl create -f my-crontab.yaml +kubectl apply -f my-crontab.yaml ``` you will get an error: @@ -343,10 +343,62 @@ spec: And create it: ```shell -kubectl create -f my-crontab.yaml +kubectl apply -f my-crontab.yaml crontab "my-new-cron-object" created ``` +### Publish Validation Schema in OpenAPI v2 + +{{< feature-state state="alpha" for_kubernetes_version="1.14" >}} + +Starting with Kubernetes 1.14, [custom resource validation schema](#validation) can be published as part +of [OpenAPI v2 spec](/docs/concepts/overview/kubernetes-api/#openapi-and-swagger-definitions) from +Kubernetes API server. + +[kubectl](/docs/reference/kubectl/overview) consumes the published schema to perform client-side validation +(`kubectl create` and `kubectl apply`), schema explanation (`kubectl explain`) on custom resources. +The published schema can be consumed for other purposes. The feature is Alpha in 1.14 and disabled by default. 
+You can enable the feature using the `CustomResourcePublishOpenAPI` feature gate on the
+[kube-apiserver](/docs/admin/kube-apiserver):
+
+```
+--feature-gates=CustomResourcePublishOpenAPI=true
+```
+
+The custom resource validation schema will be converted to an OpenAPI v2 schema, and
+will show up in the `definitions` and `paths` fields in the [OpenAPI v2 spec](/docs/concepts/overview/kubernetes-api/#openapi-and-swagger-definitions).
+The following modifications are applied during the conversion to keep backwards compatibility with
+kubectl in version 1.13 and earlier. These modifications prevent kubectl from being over-strict and rejecting
+valid OpenAPI schemas that it doesn't understand. The conversion won't modify the validation schema defined in the CRD,
+and therefore won't affect [validation](#validation) in the API server.
+
+1. The following fields are removed as they aren't supported by OpenAPI v2 (in future versions OpenAPI v3 will be used without these restrictions)
+   - The fields `oneOf`, `anyOf` and `not` are removed
+2. The following fields are removed as they aren't allowed by kubectl in
+   version 1.13 and earlier
+   - For a schema with a `$ref`
+     - the fields `properties` and `type` are removed
+     - if the `$ref` is outside of the `definitions`, the field `$ref` is removed
+   - For a schema of a primitive data type (which means the field `type` has two elements: one type and one format)
+     - if any one of the two elements is `null`, the field `type` is removed
+     - otherwise, the fields `type` and `properties` are removed
+   - For a schema of more than two types
+     - the fields `type` and `properties` are removed
+   - For a schema of `null` type
+     - the field `type` is removed
+   - For a schema of `array` type
+     - if the schema doesn't have exactly one item, the fields `type` and `items` are
+       removed
+   - For a schema with no type specified
+     - the field `properties` is removed
+3. The following fields are removed as they aren't supported by the OpenAPI protobuf implementation
+   - The fields `id`, `schema`, `definitions`, `additionalItems`, `dependencies`,
+     and `patternProperties` are removed
+   - For a schema with `externalDocs`
+     - if the `externalDocs` has `url` defined, the field `externalDocs` is removed
+   - For a schema with `items` defined
+     - if the field `items` has multiple schemas, the field `items` is removed
+
 ### Additional printer columns
 
 Starting with Kubernetes 1.11, kubectl uses server-side printing. The server decides which
@@ -387,7 +439,7 @@ columns.
 2. Create the CustomResourceDefinition:
 
    ```shell
-   kubectl create -f resourcedefinition.yaml
+   kubectl apply -f resourcedefinition.yaml
   ```
 
 3. Create an instance using the `my-crontab.yaml` from the previous section.
@@ -560,7 +612,7 @@ spec:
 
 And create it:
 
 ```shell
-kubectl create -f resourcedefinition.yaml
+kubectl apply -f resourcedefinition.yaml
 ```
 
 After the CustomResourceDefinition object has been created, you can create custom objects.
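As a quick illustration of the schema publishing added above (not part of the patch itself), the following sketch checks that a CRD's schema shows up in the aggregated OpenAPI v2 document and is usable by `kubectl explain`. It assumes the `CustomResourcePublishOpenAPI` feature gate is enabled and uses the CronTab example from this page; the exact definition name inside the OpenAPI document is an assumption and may differ.

```shell
# Rough check that the CronTab schema appears in the published OpenAPI v2 document
# (the exact definition key depends on the CRD's group, version, and kind).
kubectl get --raw /openapi/v2 | grep -c CronTab

# With the schema published, kubectl explain can describe custom resource fields.
kubectl explain crontab.spec
```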
@@ -665,7 +717,7 @@ spec: and create it: ```shell -kubectl create -f my-crontab.yaml +kubectl apply -f my-crontab.yaml ``` You can specify the category using `kubectl get`: diff --git a/content/en/docs/tasks/administer-cluster/coredns.md b/content/en/docs/tasks/administer-cluster/coredns.md index 00cbe26aa685e..7055b21f6b302 100644 --- a/content/en/docs/tasks/administer-cluster/coredns.md +++ b/content/en/docs/tasks/administer-cluster/coredns.md @@ -29,7 +29,9 @@ deployment, or by using tools like kubeadm that will deploy and upgrade the clus For manual deployment or replacement of kube-dns, see the documentation at the [CoreDNS GitHub project.](https://github.com/coredns/deployment/tree/master/kubernetes) -## Upgrading an existing cluster with kubeadm +## Migrating to CoreDNS + +### Upgrading an existing cluster with kubeadm In Kubernetes version 1.10 and later, you can also move to CoreDNS when you use `kubeadm` to upgrade a cluster that is using `kube-dns`. In this case, `kubeadm` will generate the CoreDNS configuration @@ -53,7 +55,8 @@ customizations after the new ConfigMap is up and running. If you are running CoreDNS in Kubernetes version 1.11 and later, during upgrade, your existing Corefile will be retained. -## Installing kube-dns instead of CoreDNS with kubeadm + +### Installing kube-dns instead of CoreDNS with kubeadm {{< note >}} In Kubernetes 1.11, CoreDNS has graduated to General Availability (GA) @@ -69,6 +72,14 @@ kubeadm init --feature-gates=CoreDNS=false For versions 1.13 and later, follow the guide outlined [here](/docs/reference/setup-tools/kubeadm/kubeadm-init-phase#cmd-phase-addon). +## Upgrading CoreDNS + +CoreDNS is available in Kubernetes since v1.9. +You can check the version of CoreDNS shipped with Kubernetes and the changes made to CoreDNS [here](https://github.com/coredns/deployment/blob/master/kubernetes/CoreDNS-k8s_version.md). + +CoreDNS can be upgraded manually in case you want to only upgrade CoreDNS or use your own custom image. +There is a helpful [guideline and walkthrough](https://github.com/coredns/deployment/blob/master/kubernetes/Upgrading_CoreDNS.md) available to ensure a smooth upgrade. + ## Tuning CoreDNS When resource utilisation is a concern, it may be useful to tune the configuration of CoreDNS. 
For more details, check out the diff --git a/content/en/docs/tasks/administer-cluster/declare-network-policy.md b/content/en/docs/tasks/administer-cluster/declare-network-policy.md index 164bb358c20d1..9579dac9980a0 100644 --- a/content/en/docs/tasks/administer-cluster/declare-network-policy.md +++ b/content/en/docs/tasks/administer-cluster/declare-network-policy.md @@ -110,7 +110,7 @@ spec: Use kubectl to create a NetworkPolicy from the above nginx-policy.yaml file: ```console -kubectl create -f nginx-policy.yaml +kubectl apply -f nginx-policy.yaml ``` ```none diff --git a/content/en/docs/tasks/administer-cluster/dns-debugging-resolution.md b/content/en/docs/tasks/administer-cluster/dns-debugging-resolution.md index c521b0291d43b..8dc097b2d2525 100644 --- a/content/en/docs/tasks/administer-cluster/dns-debugging-resolution.md +++ b/content/en/docs/tasks/administer-cluster/dns-debugging-resolution.md @@ -27,7 +27,7 @@ Create a file named busybox.yaml with the following contents: Then create a pod using this file and verify its status: ```shell -kubectl create -f https://k8s.io/examples/admin/dns/busybox.yaml +kubectl apply -f https://k8s.io/examples/admin/dns/busybox.yaml pod/busybox created kubectl get pods busybox diff --git a/content/en/docs/tasks/administer-cluster/dns-horizontal-autoscaling.md b/content/en/docs/tasks/administer-cluster/dns-horizontal-autoscaling.md index 6a18263b31d95..487a2a0fe345e 100644 --- a/content/en/docs/tasks/administer-cluster/dns-horizontal-autoscaling.md +++ b/content/en/docs/tasks/administer-cluster/dns-horizontal-autoscaling.md @@ -98,7 +98,7 @@ In the file, replace `` with your scale target. Go to the directory that contains your configuration file, and enter this command to create the Deployment: - kubectl create -f dns-horizontal-autoscaler.yaml + kubectl apply -f dns-horizontal-autoscaler.yaml The output of a successful command is: diff --git a/content/en/docs/tasks/administer-cluster/ip-masq-agent.md b/content/en/docs/tasks/administer-cluster/ip-masq-agent.md index 5821536e1fd57..3cce9c7153328 100644 --- a/content/en/docs/tasks/administer-cluster/ip-masq-agent.md +++ b/content/en/docs/tasks/administer-cluster/ip-masq-agent.md @@ -61,7 +61,7 @@ By default, in GCE/Google Kubernetes Engine starting with Kubernetes version 1.7 To create an ip-masq-agent, run the following kubectl command: ` -kubectl create -f https://raw.githubusercontent.com/kubernetes-incubator/ip-masq-agent/master/ip-masq-agent.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes-incubator/ip-masq-agent/master/ip-masq-agent.yaml ` You must also apply the appropriate node label to any nodes in your cluster that you want the agent to run on. diff --git a/content/en/docs/tasks/administer-cluster/kms-provider.md b/content/en/docs/tasks/administer-cluster/kms-provider.md index cfd0e23d6ee51..b07f13f7ccafd 100644 --- a/content/en/docs/tasks/administer-cluster/kms-provider.md +++ b/content/en/docs/tasks/administer-cluster/kms-provider.md @@ -31,7 +31,8 @@ To configure a KMS provider on the API server, include a provider of type ```kms * `name`: Display name of the KMS plugin. * `endpoint`: Listen address of the gRPC server (KMS plugin). The endpoint is a UNIX domain socket. - * `cachesize`: Number of data encryption keys (DEKs) to be cached in the clear. When cached, DEKs can be used without another call to the KMS; whereas DEKs that are not cached require a call to the KMS to unwrap.. + * `cachesize`: Number of data encryption keys (DEKs) to be cached in the clear. 
When cached, DEKs can be used without another call to the KMS; whereas DEKs that are not cached require a call to the KMS to unwrap. + * `timeout`: How long should kube-apiserver wait for kms-plugin to respond before returning an error (default is 3 seconds). See [Understanding the encryption at rest configuration.](/docs/tasks/administer-cluster/encrypt-data) @@ -89,6 +90,7 @@ resources: name: myKmsPlugin endpoint: unix:///tmp/socketfile.sock cachesize: 100 + timeout: 3s - identity: {} ``` diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-11.md b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-11.md deleted file mode 100644 index 6589c9cbfe170..0000000000000 --- a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-11.md +++ /dev/null @@ -1,279 +0,0 @@ ---- -reviewers: -- sig-cluster-lifecycle -title: Upgrading kubeadm clusters from v1.10 to v1.11 -content_template: templates/task ---- - -{{% capture overview %}} - -This page explains how to upgrade a Kubernetes cluster created with `kubeadm` from version 1.10.x to version 1.11.x, and from version 1.11.x to 1.11.y, where `y > x`. - -{{% /capture %}} - -{{% capture prerequisites %}} - -- You need to have a `kubeadm` Kubernetes cluster running version 1.10.0 or later. Swap must be disabled. The cluster should use a static control plane and etcd pods. -- Make sure you read the [release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.11.md) carefully. -- Make sure to back up any important components, such as app-level state stored in a database. `kubeadm upgrade` does not touch your workloads, only components internal to Kubernetes, but backups are always a best practice. - -### Additional information - -- All containers are restarted after upgrade, because the container spec hash value is changed. -- You can upgrade only from one minor version to the next minor version. That is, you cannot skip versions when you upgrade. For example, you can upgrade only from 1.10 to 1.11, not from 1.9 to 1.11. -- The default DNS provider in version 1.11 is [CoreDNS](https://coredns.io/) rather than [kube-dns](https://github.com/kubernetes/dns). -To keep `kube-dns`, pass `--feature-gates=CoreDNS=false` to `kubeadm upgrade apply`. - -{{% /capture %}} - -{{% capture steps %}} - -## Upgrade the control plane - -1. On your master node, run the following (as root): - - export VERSION=$(curl -sSL https://dl.k8s.io/release/stable.txt) # or manually specify a released Kubernetes version - export ARCH=amd64 # or: arm, arm64, ppc64le, s390x - curl -sSL https://dl.k8s.io/release/${VERSION}/bin/linux/${ARCH}/kubeadm > /usr/bin/kubeadm - chmod a+rx /usr/bin/kubeadm - - Note that upgrading the `kubeadm` package on your system prior to upgrading the control plane causes a failed upgrade. Even though `kubeadm` ships in the Kubernetes repositories, it's important to install it manually. The kubeadm team is working on fixing this limitation. - -1. Verify that the download works and has the expected version: - - ```shell - kubeadm version - ``` - -1. On the master node, run: - - ```shell - kubeadm upgrade plan - ``` - - You should see output similar to this: - - - - ```shell - [preflight] Running pre-flight checks. - [upgrade] Making sure the cluster is healthy: - [upgrade/config] Making sure the configuration is correct: - [upgrade/config] Reading configuration from the cluster... 
- [upgrade/config] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml' - I0618 20:32:32.950358 15307 feature_gate.go:230] feature gates: &{map[]} - [upgrade] Fetching available versions to upgrade to - [upgrade/versions] Cluster version: v1.10.4 - [upgrade/versions] kubeadm version: v1.11.0-beta.2.78+e0b33dbc2bde88 - - Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply': - COMPONENT CURRENT AVAILABLE - Kubelet 1 x v1.10.4 v1.11.0 - - Upgrade to the latest version in the v1.10 series: - - COMPONENT CURRENT AVAILABLE - API Server v1.10.4 v1.11.0 - Controller Manager v1.10.4 v1.11.0 - Scheduler v1.10.4 v1.11.0 - Kube Proxy v1.10.4 v1.11.0 - CoreDNS 1.1.3 - Kube DNS 1.14.8 - Etcd 3.1.12 3.2.18 - - You can now apply the upgrade by executing the following command: - - kubeadm upgrade apply v1.11.0 - - Note: Before you can perform this upgrade, you have to update kubeadm to v1.11.0. - - _____________________________________________________________________ - ``` - - This command checks that your cluster can be upgraded, and fetches the versions you can upgrade to. - -1. Choose a version to upgrade to, and run the appropriate command. For example: - - ```shell - kubeadm upgrade apply v1.11.0 - ``` - - If you currently use `kube-dns` and wish to continue doing so, add `--feature-gates=CoreDNS=false`. - - You should see output similar to this: - - - - ```shell - [preflight] Running pre-flight checks. - [upgrade] Making sure the cluster is healthy: - [upgrade/config] Making sure the configuration is correct: - [upgrade/config] Reading configuration from the cluster... - [upgrade/config] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml' - I0614 20:56:08.320369 30918 feature_gate.go:230] feature gates: &{map[]} - [upgrade/apply] Respecting the --cri-socket flag that is set with higher priority than the config file. - [upgrade/version] You have chosen to change the cluster version to "v1.11.0-beta.2.78+e0b33dbc2bde88" - [upgrade/versions] Cluster version: v1.10.4 - [upgrade/versions] kubeadm version: v1.11.0-beta.2.78+e0b33dbc2bde88 - [upgrade/confirm] Are you sure you want to proceed with the upgrade? [y/N]: y - [upgrade/prepull] Will prepull images for components [kube-apiserver kube-controller-manager kube-scheduler etcd] - [upgrade/apply] Upgrading your Static Pod-hosted control plane to version "v1.11.0-beta.2.78+e0b33dbc2bde88"... - Static pod: kube-apiserver-ip-172-31-85-18 hash: 7a329408b21bc0c44d7b3b78ff8187bf - Static pod: kube-controller-manager-ip-172-31-85-18 hash: 24fd3157627c7567b687968967c6a5e8 - Static pod: kube-scheduler-ip-172-31-85-18 hash: 5179266fb24d4c1834814c4f69486371 - Static pod: etcd-ip-172-31-85-18 hash: 9dfc197f444be11fcc70ab1467b030b8 - [etcd] Wrote Static Pod manifest for a local etcd instance to "/etc/kubernetes/tmp/kubeadm-upgraded-manifests089436939/etcd.yaml" - [certificates] Using the existing etcd/ca certificate and key. - [certificates] Using the existing etcd/server certificate and key. - [certificates] Using the existing etcd/peer certificate and key. - [certificates] Using the existing etcd/healthcheck-client certificate and key. 
- [upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/etcd.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2018-06-14-20-56-11/etcd.yaml" - [upgrade/staticpods] Waiting for the kubelet to restart the component - Static pod: etcd-ip-172-31-85-18 hash: 9dfc197f444be11fcc70ab1467b030b8 - < snip > - [apiclient] Found 1 Pods for label selector component=etcd - [upgrade/staticpods] Component "etcd" upgraded successfully! - [upgrade/etcd] Waiting for etcd to become available - [util/etcd] Waiting 0s for initial delay - [util/etcd] Attempting to see if all cluster endpoints are available 1/10 - [upgrade/staticpods] Writing new Static Pod manifests to "/etc/kubernetes/tmp/kubeadm-upgraded-manifests089436939" - [controlplane] wrote Static Pod manifest for component kube-apiserver to "/etc/kubernetes/tmp/kubeadm-upgraded-manifests089436939/kube-apiserver.yaml" - [controlplane] wrote Static Pod manifest for component kube-controller-manager to "/etc/kubernetes/tmp/kubeadm-upgraded-manifests089436939/kube-controller-manager.yaml" - [controlplane] wrote Static Pod manifest for component kube-scheduler to "/etc/kubernetes/tmp/kubeadm-upgraded-manifests089436939/kube-scheduler.yaml" - [certificates] Using the existing etcd/ca certificate and key. - [certificates] Using the existing apiserver-etcd-client certificate and key. - [upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-apiserver.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2018-06-14-20-56-11/kube-apiserver.yaml" - [upgrade/staticpods] Waiting for the kubelet to restart the component - Static pod: kube-apiserver-ip-172-31-85-18 hash: 7a329408b21bc0c44d7b3b78ff8187bf - < snip > - [apiclient] Found 1 Pods for label selector component=kube-apiserver - [upgrade/staticpods] Component "kube-apiserver" upgraded successfully! - [upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-controller-manager.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2018-06-14-20-56-11/kube-controller-manager.yaml" - [upgrade/staticpods] Waiting for the kubelet to restart the component - Static pod: kube-controller-manager-ip-172-31-85-18 hash: 24fd3157627c7567b687968967c6a5e8 - Static pod: kube-controller-manager-ip-172-31-85-18 hash: 63992ff14733dcb9dcfa6ac0a3b8031a - [apiclient] Found 1 Pods for label selector component=kube-controller-manager - [upgrade/staticpods] Component "kube-controller-manager" upgraded successfully! - [upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-scheduler.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2018-06-14-20-56-11/kube-scheduler.yaml" - [upgrade/staticpods] Waiting for the kubelet to restart the component - Static pod: kube-scheduler-ip-172-31-85-18 hash: 5179266fb24d4c1834814c4f69486371 - Static pod: kube-scheduler-ip-172-31-85-18 hash: 831e4b9425f758e572392976311e56d9 - [apiclient] Found 1 Pods for label selector component=kube-scheduler - [upgrade/staticpods] Component "kube-scheduler" upgraded successfully! 
- [uploadconfig] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace - [kubelet] Creating a ConfigMap "kubelet-config-1.11" in namespace kube-system with the configuration for the kubelets in the cluster - [kubelet] Downloading configuration for the kubelet from the "kubelet-config-1.11" ConfigMap in the kube-system namespace - [kubelet] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" - [patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "ip-172-31-85-18" as an annotation - [bootstraptoken] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials - [bootstraptoken] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token - [bootstraptoken] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster - [addons] Applied essential addon: CoreDNS - [addons] Applied essential addon: kube-proxy - - [upgrade/successful] SUCCESS! Your cluster was upgraded to "v1.11.0-beta.2.78+e0b33dbc2bde88". Enjoy! - - [upgrade/kubelet] Now that your control plane is upgraded, please proceed with upgrading your kubelets if you haven't already done so. - ``` - -1. Manually upgrade your Software Defined Network (SDN). - - Your Container Network Interface (CNI) provider may have its own upgrade instructions to follow. - Check the [addons](/docs/concepts/cluster-administration/addons/) page to - find your CNI provider and see whether additional upgrade steps are required. - -## Upgrade master and node packages - -1. Prepare each host for maintenance, marking it unschedulable and evicting the workload: - - ```shell - kubectl drain $HOST --ignore-daemonsets - ``` - - On the master host, you must add `--ignore-daemonsets`: - - ```shell - kubectl drain ip-172-31-85-18 - node "ip-172-31-85-18" cordoned - error: unable to drain node "ip-172-31-85-18", aborting command... - - There are pending nodes to be drained: - ip-172-31-85-18 - error: DaemonSet-managed pods (use --ignore-daemonsets to ignore): calico-node-5798d, kube-proxy-thjp9 - ``` - - ``` - kubectl drain ip-172-31-85-18 --ignore-daemonsets - node "ip-172-31-85-18" already cordoned - WARNING: Ignoring DaemonSet-managed pods: calico-node-5798d, kube-proxy-thjp9 - node "ip-172-31-85-18" drained - ``` - -1. Upgrade the Kubernetes package version on each `$HOST` node by running the Linux package manager for your distribution: - - {{< tabs name="k8s_install" >}} - {{% tab name="Ubuntu, Debian or HypriotOS" %}} - apt-get update - apt-get upgrade -y kubelet kubeadm - {{% /tab %}} - {{% tab name="CentOS, RHEL or Fedora" %}} - yum upgrade -y kubelet kubeadm --disableexcludes=kubernetes - {{% /tab %}} - {{< /tabs >}} - -## Upgrade kubelet on each node - -1. On each node except the master node, upgrade the kubelet config: - - ```shell - sudo kubeadm upgrade node config --kubelet-version $(kubelet --version | cut -d ' ' -f 2) - ``` - -1. Restart the kubectl process: - - ```shell - sudo systemctl restart kubelet - ``` - -1. Verify that the new version of the `kubelet` is running on the host: - - ```shell - systemctl status kubelet - ``` - -1. Bring the host back online by marking it schedulable: - - ```shell - kubectl uncordon $HOST - ``` - -1. 
After the kubelet is upgraded on all hosts, verify that all nodes are available again by running the following command from anywhere -- for example, from outside the cluster: - - ```shell - kubectl get nodes - ``` - - The `STATUS` column should show `Ready` for all your hosts, and the version number should be updated. - -{{% /capture %}} - -## Recovering from a failure state - -If `kubeadm upgrade` fails and does not roll back, for example because of an unexpected shutdown during execution, -you can run `kubeadm upgrade` again. This command is idempotent and eventually makes sure that the actual state is the desired state you declare. - -To recover from a bad state, you can also run `kubeadm upgrade --force` without changing the version that your cluster is running. - -## How it works - -`kubeadm upgrade apply` does the following: - -- Checks that your cluster is in an upgradeable state: - - The API server is reachable, - - All nodes are in the `Ready` state - - The control plane is healthy -- Enforces the version skew policies. -- Makes sure the control plane images are available or available to pull to the machine. -- Upgrades the control plane components or rollbacks if any of them fails to come up. -- Applies the new `kube-dns` and `kube-proxy` manifests and enforces that all necessary RBAC rules are created. -- Creates new certificate and key files of the API server and backs up old files if they're about to expire in 180 days. diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-14.md b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-14.md new file mode 100644 index 0000000000000..7b4f54ee3c1c8 --- /dev/null +++ b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-14.md @@ -0,0 +1,382 @@ +--- +reviewers: +- sig-cluster-lifecycle +title: Upgrading kubeadm clusters from v1.13 to v1.14 +content_template: templates/task +--- + +{{% capture overview %}} + +This page explains how to upgrade a Kubernetes cluster created with kubeadm from version 1.13.x to version 1.14.x, +and from version 1.14.x to 1.14.y (where `y > x`). + +The upgrade workflow at high level is the following: + +1. Upgrade the primary control plane node. +1. Upgrade additional control plane nodes. +1. Upgrade worker nodes. + +{{< note >}} +With the release of Kubernetes v1.14, the kubeadm instructions for upgrading both HA and single control plane clusters +are merged into a single document. +{{}} + +{{% /capture %}} + +{{% capture prerequisites %}} + +- You need to have a kubeadm Kubernetes cluster running version 1.13.0 or later. +- [Swap must be disabled](https://serverfault.com/questions/684771/best-way-to-disable-swap-in-linux). +- The cluster should use a static control plane and etcd pods. +- Make sure you read the [release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.14.md) carefully. +- Make sure to back up any important components, such as app-level state stored in a database. + `kubeadm upgrade` does not touch your workloads, only components internal to Kubernetes, but backups are always a best practice. + +### Additional information + +- All containers are restarted after upgrade, because the container spec hash value is changed. +- You only can upgrade from one MINOR version to the next MINOR version, + or between PATCH versions of the same MINOR. That is, you cannot skip MINOR versions when you upgrade. + For example, you can upgrade from 1.y to 1.y+1, but not from 1.y to 1.y+2. 
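Before picking a target version, it can help to confirm what the cluster is currently running. A minimal sketch, not part of the patch, assuming `kubectl` and `kubeadm` are already installed on the node:

```shell
# Check the current client and server versions, and the version of each node,
# before deciding which 1.14 patch release to upgrade to.
kubectl version --short
kubectl get nodes -o wide
kubeadm version -o short
```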
+
+{{% /capture %}}
+
+{{% capture steps %}}
+
+## Determine which version to upgrade to
+
+1.  Find the latest stable 1.14 version:
+
+    {{< tabs name="k8s_install_versions" >}}
+    {{% tab name="Ubuntu, Debian or HypriotOS" %}}
+    apt update
+    apt-cache policy kubeadm
+    # find the latest 1.14 version in the list
+    # it should look like 1.14.x-00, where x is the latest patch
+    {{% /tab %}}
+    {{% tab name="CentOS, RHEL or Fedora" %}}
+    yum list --showduplicates kubeadm --disableexcludes=kubernetes
+    # find the latest 1.14 version in the list
+    # it should look like 1.14.x-0, where x is the latest patch
+    {{% /tab %}}
+    {{< /tabs >}}
+
+## Upgrade the first control plane node
+
+1.  On your first control plane node, upgrade kubeadm:
+
+    {{< tabs name="k8s_install_kubeadm_first_cp" >}}
+    {{% tab name="Ubuntu, Debian or HypriotOS" %}}
+    # replace x in 1.14.x-00 with the latest patch version
+    apt-mark unhold kubeadm && \
+    apt-get update && apt-get install -y kubeadm=1.14.x-00 && \
+    apt-mark hold kubeadm
+    {{% /tab %}}
+    {{% tab name="CentOS, RHEL or Fedora" %}}
+    # replace x in 1.14.x-0 with the latest patch version
+    yum install -y kubeadm-1.14.x-0 --disableexcludes=kubernetes
+    {{% /tab %}}
+    {{< /tabs >}}
+
+1.  Verify that the download works and has the expected version:
+
+    ```shell
+    kubeadm version
+    ```
+
+1.  On the control plane node, run:
+
+    ```shell
+    sudo kubeadm upgrade plan
+    ```
+
+    You should see output similar to this:
+
+    ```shell
+    [preflight] Running pre-flight checks.
+    [upgrade] Making sure the cluster is healthy:
+    [upgrade/config] Making sure the configuration is correct:
+    [upgrade/config] Reading configuration from the cluster...
+    [upgrade/config] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
+    [upgrade] Fetching available versions to upgrade to
+    [upgrade/versions] Cluster version: v1.13.3
+    [upgrade/versions] kubeadm version: v1.14.0
+
+    Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply':
+    COMPONENT   CURRENT       AVAILABLE
+    Kubelet     2 x v1.13.3   v1.14.0
+
+    Upgrade to the latest version in the v1.13 series:
+
+    COMPONENT            CURRENT   AVAILABLE
+    API Server           v1.13.3   v1.14.0
+    Controller Manager   v1.13.3   v1.14.0
+    Scheduler            v1.13.3   v1.14.0
+    Kube Proxy           v1.13.3   v1.14.0
+    CoreDNS              1.2.6     1.3.1
+    Etcd                 3.2.24    3.3.10
+
+    You can now apply the upgrade by executing the following command:
+
+        kubeadm upgrade apply v1.14.0
+
+    _____________________________________________________________________
+    ```
+
+    This command checks that your cluster can be upgraded, and fetches the versions you can upgrade to.
+
+1.  Choose a version to upgrade to, and run the appropriate command. For example:
+
+    ```shell
+    sudo kubeadm upgrade apply v1.14.x
+    ```
+
+    - Replace `x` with the patch version you picked for this upgrade.
+
+    You should see output similar to this:
+
+    ```shell
+    [preflight] Running pre-flight checks.
+    [upgrade] Making sure the cluster is healthy:
+    [upgrade/config] Making sure the configuration is correct:
+    [upgrade/config] Reading configuration from the cluster...
+    [upgrade/config] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
+    [upgrade/version] You have chosen to change the cluster version to "v1.14.0"
+    [upgrade/versions] Cluster version: v1.13.3
+    [upgrade/versions] kubeadm version: v1.14.0
+    [upgrade/confirm] Are you sure you want to proceed with the upgrade?
[y/N]: y + [upgrade/prepull] Will prepull images for components [kube-apiserver kube-controller-manager kube-scheduler etcd] + [upgrade/prepull] Prepulling image for component etcd. + [upgrade/prepull] Prepulling image for component kube-scheduler. + [upgrade/prepull] Prepulling image for component kube-apiserver. + [upgrade/prepull] Prepulling image for component kube-controller-manager. + [apiclient] Found 0 Pods for label selector k8s-app=upgrade-prepull-etcd + [apiclient] Found 0 Pods for label selector k8s-app=upgrade-prepull-kube-scheduler + [apiclient] Found 0 Pods for label selector k8s-app=upgrade-prepull-kube-controller-manager + [apiclient] Found 0 Pods for label selector k8s-app=upgrade-prepull-kube-apiserver + [apiclient] Found 1 Pods for label selector k8s-app=upgrade-prepull-etcd + [apiclient] Found 1 Pods for label selector k8s-app=upgrade-prepull-kube-controller-manager + [apiclient] Found 1 Pods for label selector k8s-app=upgrade-prepull-kube-scheduler + [apiclient] Found 1 Pods for label selector k8s-app=upgrade-prepull-kube-apiserver + [upgrade/prepull] Prepulled image for component etcd. + [upgrade/prepull] Prepulled image for component kube-apiserver. + [upgrade/prepull] Prepulled image for component kube-scheduler. + [upgrade/prepull] Prepulled image for component kube-controller-manager. + [upgrade/prepull] Successfully prepulled the images for all the control plane components + [upgrade/apply] Upgrading your Static Pod-hosted control plane to version "v1.14.0"... + Static pod: kube-apiserver-myhost hash: 6436b0d8ee0136c9d9752971dda40400 + Static pod: kube-controller-manager-myhost hash: 8ee730c1a5607a87f35abb2183bf03f2 + Static pod: kube-scheduler-myhost hash: 4b52d75cab61380f07c0c5a69fb371d4 + [upgrade/etcd] Upgrading to TLS for etcd + Static pod: etcd-myhost hash: 877025e7dd7adae8a04ee20ca4ecb239 + [upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/etcd.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2019-03-14-20-52-44/etcd.yaml" + [upgrade/staticpods] Waiting for the kubelet to restart the component + [upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s) + Static pod: etcd-myhost hash: 877025e7dd7adae8a04ee20ca4ecb239 + Static pod: etcd-myhost hash: 877025e7dd7adae8a04ee20ca4ecb239 + Static pod: etcd-myhost hash: 64a28f011070816f4beb07a9c96d73b6 + [apiclient] Found 1 Pods for label selector component=etcd + [upgrade/staticpods] Component "etcd" upgraded successfully! 
+ [upgrade/etcd] Waiting for etcd to become available + [upgrade/staticpods] Writing new Static Pod manifests to "/etc/kubernetes/tmp/kubeadm-upgraded-manifests043818770" + [upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-apiserver.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2019-03-14-20-52-44/kube-apiserver.yaml" + [upgrade/staticpods] Waiting for the kubelet to restart the component + [upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s) + Static pod: kube-apiserver-myhost hash: 6436b0d8ee0136c9d9752971dda40400 + Static pod: kube-apiserver-myhost hash: 6436b0d8ee0136c9d9752971dda40400 + Static pod: kube-apiserver-myhost hash: 6436b0d8ee0136c9d9752971dda40400 + Static pod: kube-apiserver-myhost hash: b8a6533e241a8c6dab84d32bb708b8a1 + [apiclient] Found 1 Pods for label selector component=kube-apiserver + [upgrade/staticpods] Component "kube-apiserver" upgraded successfully! + [upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-controller-manager.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2019-03-14-20-52-44/kube-controller-manager.yaml" + [upgrade/staticpods] Waiting for the kubelet to restart the component + [upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s) + Static pod: kube-controller-manager-myhost hash: 8ee730c1a5607a87f35abb2183bf03f2 + Static pod: kube-controller-manager-myhost hash: 6f77d441d2488efd9fc2d9a9987ad30b + [apiclient] Found 1 Pods for label selector component=kube-controller-manager + [upgrade/staticpods] Component "kube-controller-manager" upgraded successfully! + [upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-scheduler.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2019-03-14-20-52-44/kube-scheduler.yaml" + [upgrade/staticpods] Waiting for the kubelet to restart the component + [upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s) + Static pod: kube-scheduler-myhost hash: 4b52d75cab61380f07c0c5a69fb371d4 + Static pod: kube-scheduler-myhost hash: a24773c92bb69c3748fcce5e540b7574 + [apiclient] Found 1 Pods for label selector component=kube-scheduler + [upgrade/staticpods] Component "kube-scheduler" upgraded successfully! + [upload-config] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace + [kubelet] Creating a ConfigMap "kubelet-config-1.14" in namespace kube-system with the configuration for the kubelets in the cluster + [kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.14" ConfigMap in the kube-system namespace + [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" + [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials + [bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token + [bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster + [addons] Applied essential addon: CoreDNS + [addons] Applied essential addon: kube-proxy + + [upgrade/successful] SUCCESS! Your cluster was upgraded to "v1.14.0". Enjoy! 
+
+    [upgrade/kubelet] Now that your control plane is upgraded, please proceed with upgrading your kubelets if you haven't already done so.
+    ```
+
+1.  Manually upgrade your CNI provider plugin.
+
+    Your Container Network Interface (CNI) provider may have its own upgrade instructions to follow.
+    Check the [addons](/docs/concepts/cluster-administration/addons/) page to
+    find your CNI provider and see whether additional upgrade steps are required.
+
+1.  Upgrade the kubelet and kubectl on the control plane node:
+
+    {{< tabs name="k8s_install_kubelet" >}}
+    {{% tab name="Ubuntu, Debian or HypriotOS" %}}
+    # replace x in 1.14.x-00 with the latest patch version
+    apt-mark unhold kubelet && \
+    apt-get update && apt-get install -y kubelet=1.14.x-00 kubectl=1.14.x-00 && \
+    apt-mark hold kubelet
+    {{% /tab %}}
+    {{% tab name="CentOS, RHEL or Fedora" %}}
+    # replace x in 1.14.x-0 with the latest patch version
+    yum install -y kubelet-1.14.x-0 kubectl-1.14.x-0 --disableexcludes=kubernetes
+    {{% /tab %}}
+    {{< /tabs >}}
+
+1.  Restart the kubelet
+
+    ```shell
+    sudo systemctl restart kubelet
+    ```
+
+## Upgrade additional control plane nodes
+
+1.  Same as the first control plane node, but use:
+
+```
+sudo kubeadm upgrade node experimental-control-plane
+```
+
+instead of:
+
+```
+sudo kubeadm upgrade apply
+```
+
+Also, `sudo kubeadm upgrade plan` is not needed.
+
+## Upgrade worker nodes
+
+The upgrade procedure on worker nodes should be executed one node at a time or a few nodes at a time,
+without compromising the minimum required capacity for running your workloads.
+
+### Upgrade kubeadm
+
+1.  Upgrade kubeadm on all worker nodes:
+
+    {{< tabs name="k8s_install_kubeadm_worker_nodes" >}}
+    {{% tab name="Ubuntu, Debian or HypriotOS" %}}
+    # replace x in 1.14.x-00 with the latest patch version
+    apt-mark unhold kubeadm && \
+    apt-get update && apt-get install -y kubeadm=1.14.x-00 && \
+    apt-mark hold kubeadm
+    {{% /tab %}}
+    {{% tab name="CentOS, RHEL or Fedora" %}}
+    # replace x in 1.14.x-0 with the latest patch version
+    yum install -y kubeadm-1.14.x-0 --disableexcludes=kubernetes
+    {{% /tab %}}
+    {{< /tabs >}}
+
+### Cordon the node
+
+1.  Prepare the node for maintenance by marking it unschedulable and evicting the workloads. Run:
+
+    ```shell
+    kubectl drain $NODE --ignore-daemonsets
+    ```
+
+    You should see output similar to this:
+
+    ```shell
+    kubectl drain ip-172-31-85-18
+    node "ip-172-31-85-18" cordoned
+    error: unable to drain node "ip-172-31-85-18", aborting command...
+
+    There are pending nodes to be drained:
+     ip-172-31-85-18
+    error: DaemonSet-managed pods (use --ignore-daemonsets to ignore): calico-node-5798d, kube-proxy-thjp9
+    ```
+
+### Upgrade the kubelet config
+
+1.  Upgrade the kubelet config:
+
+    ```shell
+    sudo kubeadm upgrade node config --kubelet-version v1.14.x
+    ```
+
+    Replace `x` with the patch version you picked for this upgrade.
+
+
+### Upgrade kubelet and kubectl
+
+1.  Upgrade the Kubernetes package version by running the Linux package manager for your distribution:
+
+    {{< tabs name="k8s_kubelet_and_kubectl" >}}
+    {{% tab name="Ubuntu, Debian or HypriotOS" %}}
+    # replace x in 1.14.x-00 with the latest patch version
+    apt-get update
+    apt-get install -y kubelet=1.14.x-00 kubectl=1.14.x-00
+    {{% /tab %}}
+    {{% tab name="CentOS, RHEL or Fedora" %}}
+    # replace x in 1.14.x-0 with the latest patch version
+    yum install -y kubelet-1.14.x-0 kubectl-1.14.x-0 --disableexcludes=kubernetes
+    {{% /tab %}}
+    {{< /tabs >}}
+
+1.
Restart the kubelet
+
+    ```shell
+    sudo systemctl restart kubelet
+    ```
+
+### Uncordon the node
+
+1.  Bring the node back online by marking it schedulable:
+
+    ```shell
+    kubectl uncordon $NODE
+    ```
+
+## Verify the status of the cluster
+
+After the kubelet is upgraded on all nodes, verify that all nodes are available again by running the following command from anywhere kubectl can access the cluster:
+
+```shell
+kubectl get nodes
+```
+
+The `STATUS` column should show `Ready` for all your nodes, and the version number should be updated.
+
+{{% /capture %}}
+
+## Recovering from a failure state
+
+If `kubeadm upgrade` fails and does not roll back, for example because of an unexpected shutdown during execution, you can run `kubeadm upgrade` again.
+This command is idempotent and eventually makes sure that the actual state is the desired state you declare.
+
+To recover from a bad state, you can also run `kubeadm upgrade --force` without changing the version that your cluster is running.
+
+## How it works
+
+`kubeadm upgrade apply` does the following:
+
+- Checks that your cluster is in an upgradeable state:
+  - The API server is reachable
+  - All nodes are in the `Ready` state
+  - The control plane is healthy
+- Enforces the version skew policies.
+- Makes sure the control plane images are available on the machine or can be pulled to it.
+- Upgrades the control plane components, or rolls them back if any of them fails to come up.
+- Applies the new `kube-dns` and `kube-proxy` manifests and makes sure that all necessary RBAC rules are created.
+- Creates new certificate and key files of the API server and backs up old files if they're about to expire in 180 days.
+
+`kubeadm upgrade node experimental-control-plane` does the following on additional control plane nodes:
+- Fetches the kubeadm `ClusterConfiguration` from the cluster.
+- Optionally backs up the kube-apiserver certificate.
+- Upgrades the static Pod manifests for the control plane components.
diff --git a/content/en/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace.md b/content/en/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace.md
index 249265300f13c..78693d4405410 100644
--- a/content/en/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace.md
+++ b/content/en/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace.md
@@ -45,7 +45,7 @@ Here's the configuration file for a LimitRange:
 Create the LimitRange:
 
 ```shell
-kubectl create -f https://k8s.io/examples/admin/resource/cpu-constraints.yaml --namespace=constraints-cpu-example
+kubectl apply -f https://k8s.io/examples/admin/resource/cpu-constraints.yaml --namespace=constraints-cpu-example
 ```
 
 View detailed information about the LimitRange:
@@ -96,7 +96,7 @@ minimum and maximum CPU constraints imposed by the LimitRange.
 Create the Pod:
 
 ```shell
-kubectl create -f https://k8s.io/examples/admin/resource/cpu-constraints-pod.yaml --namespace=constraints-cpu-example
+kubectl apply -f https://k8s.io/examples/admin/resource/cpu-constraints-pod.yaml --namespace=constraints-cpu-example
 ```
 
 Verify that the Pod's Container is running:
@@ -138,7 +138,7 @@ CPU request of 500 millicpu and a cpu limit of 1.5 cpu.
Attempt to create the Pod: ```shell -kubectl create -f https://k8s.io/examples/admin/resource/cpu-constraints-pod-2.yaml --namespace=constraints-cpu-example +kubectl apply -f https://k8s.io/examples/admin/resource/cpu-constraints-pod-2.yaml --namespace=constraints-cpu-example ``` The output shows that the Pod does not get created, because the Container specifies a CPU limit that is @@ -159,7 +159,7 @@ CPU request of 100 millicpu and a CPU limit of 800 millicpu. Attempt to create the Pod: ```shell -kubectl create -f https://k8s.io/examples/admin/resource/cpu-constraints-pod-3.yaml --namespace=constraints-cpu-example +kubectl apply -f https://k8s.io/examples/admin/resource/cpu-constraints-pod-3.yaml --namespace=constraints-cpu-example ``` The output shows that the Pod does not get created, because the Container specifies a CPU @@ -180,7 +180,7 @@ specify a CPU request, and it does not specify a CPU limit. Create the Pod: ```shell -kubectl create -f https://k8s.io/examples/admin/resource/cpu-constraints-pod-4.yaml --namespace=constraints-cpu-example +kubectl apply -f https://k8s.io/examples/admin/resource/cpu-constraints-pod-4.yaml --namespace=constraints-cpu-example ``` View detailed information about the Pod: diff --git a/content/en/docs/tasks/administer-cluster/manage-resources/cpu-default-namespace.md b/content/en/docs/tasks/administer-cluster/manage-resources/cpu-default-namespace.md index 53edcd7f25c36..df5ec46de5298 100644 --- a/content/en/docs/tasks/administer-cluster/manage-resources/cpu-default-namespace.md +++ b/content/en/docs/tasks/administer-cluster/manage-resources/cpu-default-namespace.md @@ -40,7 +40,7 @@ a default CPU request and a default CPU limit. Create the LimitRange in the default-cpu-example namespace: ```shell -kubectl create -f https://k8s.io/examples/admin/resource/cpu-defaults.yaml --namespace=default-cpu-example +kubectl apply -f https://k8s.io/examples/admin/resource/cpu-defaults.yaml --namespace=default-cpu-example ``` Now if a Container is created in the default-cpu-example namespace, and the @@ -56,7 +56,7 @@ does not specify a CPU request and limit. Create the Pod. 
```shell -kubectl create -f https://k8s.io/examples/admin/resource/cpu-defaults-pod.yaml --namespace=default-cpu-example +kubectl apply -f https://k8s.io/examples/admin/resource/cpu-defaults-pod.yaml --namespace=default-cpu-example ``` View the Pod's specification: @@ -91,7 +91,7 @@ Create the Pod: ```shell -kubectl create -f https://k8s.io/examples/admin/resource/cpu-defaults-pod-2.yaml --namespace=default-cpu-example +kubectl apply -f https://k8s.io/examples/admin/resource/cpu-defaults-pod-2.yaml --namespace=default-cpu-example ``` View the Pod specification: @@ -121,7 +121,7 @@ specifies a CPU request, but not a limit: Create the Pod: ```shell -kubectl create -f https://k8s.io/examples/admin/resource/cpu-defaults-pod-3.yaml --namespace=default-cpu-example +kubectl apply -f https://k8s.io/examples/admin/resource/cpu-defaults-pod-3.yaml --namespace=default-cpu-example ``` View the Pod specification: diff --git a/content/en/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace.md b/content/en/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace.md index aca790e87ac5f..e6a6e1c2b0d39 100644 --- a/content/en/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace.md +++ b/content/en/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace.md @@ -45,7 +45,7 @@ Here's the configuration file for a LimitRange: Create the LimitRange: ```shell -kubectl create -f https://k8s.io/examples/admin/resource/memory-constraints.yaml --namespace=constraints-mem-example +kubectl apply -f https://k8s.io/examples/admin/resource/memory-constraints.yaml --namespace=constraints-mem-example ``` View detailed information about the LimitRange: @@ -90,7 +90,7 @@ minimum and maximum memory constraints imposed by the LimitRange. Create the Pod: ```shell -kubectl create -f https://k8s.io/examples/admin/resource/memory-constraints-pod.yaml --namespace=constraints-mem-example +kubectl apply -f https://k8s.io/examples/admin/resource/memory-constraints-pod.yaml --namespace=constraints-mem-example ``` Verify that the Pod's Container is running: @@ -132,7 +132,7 @@ memory request of 800 MiB and a memory limit of 1.5 GiB. Attempt to create the Pod: ```shell -kubectl create -f https://k8s.io/examples/admin/resource/memory-constraints-pod-2.yaml --namespace=constraints-mem-example +kubectl apply -f https://k8s.io/examples/admin/resource/memory-constraints-pod-2.yaml --namespace=constraints-mem-example ``` The output shows that the Pod does not get created, because the Container specifies a memory limit that is @@ -153,7 +153,7 @@ memory request of 100 MiB and a memory limit of 800 MiB. Attempt to create the Pod: ```shell -kubectl create -f https://k8s.io/examples/admin/resource/memory-constraints-pod-3.yaml --namespace=constraints-mem-example +kubectl apply -f https://k8s.io/examples/admin/resource/memory-constraints-pod-3.yaml --namespace=constraints-mem-example ``` The output shows that the Pod does not get created, because the Container specifies a memory @@ -176,7 +176,7 @@ specify a memory request, and it does not specify a memory limit. 
Create the Pod: ```shell -kubectl create -f https://k8s.io/examples/admin/resource/memory-constraints-pod-4.yaml --namespace=constraints-mem-example +kubectl apply -f https://k8s.io/examples/admin/resource/memory-constraints-pod-4.yaml --namespace=constraints-mem-example ``` View detailed information about the Pod: diff --git a/content/en/docs/tasks/administer-cluster/manage-resources/memory-default-namespace.md b/content/en/docs/tasks/administer-cluster/manage-resources/memory-default-namespace.md index 94f07c040c6eb..197d61171734e 100644 --- a/content/en/docs/tasks/administer-cluster/manage-resources/memory-default-namespace.md +++ b/content/en/docs/tasks/administer-cluster/manage-resources/memory-default-namespace.md @@ -42,7 +42,7 @@ a default memory request and a default memory limit. Create the LimitRange in the default-mem-example namespace: ```shell -kubectl create -f https://k8s.io/examples/admin/resource/memory-defaults.yaml --namespace=default-mem-example +kubectl apply -f https://k8s.io/examples/admin/resource/memory-defaults.yaml --namespace=default-mem-example ``` Now if a Container is created in the default-mem-example namespace, and the @@ -58,7 +58,7 @@ does not specify a memory request and limit. Create the Pod. ```shell -kubectl create -f https://k8s.io/examples/admin/resource/memory-defaults-pod.yaml --namespace=default-mem-example +kubectl apply -f https://k8s.io/examples/admin/resource/memory-defaults-pod.yaml --namespace=default-mem-example ``` View detailed information about the Pod: @@ -99,7 +99,7 @@ Create the Pod: ```shell -kubectl create -f https://k8s.io/examples/admin/resource/memory-defaults-pod-2.yaml --namespace=default-mem-example +kubectl apply -f https://k8s.io/examples/admin/resource/memory-defaults-pod-2.yaml --namespace=default-mem-example ``` View detailed information about the Pod: @@ -129,7 +129,7 @@ specifies a memory request, but not a limit: Create the Pod: ```shell -kubectl create -f https://k8s.io/examples/admin/resource/memory-defaults-pod-3.yaml --namespace=default-mem-example +kubectl apply -f https://k8s.io/examples/admin/resource/memory-defaults-pod-3.yaml --namespace=default-mem-example ``` View the Pod's specification: diff --git a/content/en/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace.md b/content/en/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace.md index 4bdc646742f54..9558766410663 100644 --- a/content/en/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace.md +++ b/content/en/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace.md @@ -44,7 +44,7 @@ Here is the configuration file for a ResourceQuota object: Create the ResourceQuota: ```shell -kubectl create -f https://k8s.io/examples/admin/resource/quota-mem-cpu.yaml --namespace=quota-mem-cpu-example +kubectl apply -f https://k8s.io/examples/admin/resource/quota-mem-cpu.yaml --namespace=quota-mem-cpu-example ``` View detailed information about the ResourceQuota: @@ -71,7 +71,7 @@ Here is the configuration file for a Pod: Create the Pod: ```shell -kubectl create -f https://k8s.io/examples/admin/resource/quota-mem-cpu-pod.yaml --namespace=quota-mem-cpu-example +kubectl apply -f https://k8s.io/examples/admin/resource/quota-mem-cpu-pod.yaml --namespace=quota-mem-cpu-example ``` Verify that the Pod's Container is running: @@ -117,7 +117,7 @@ request exceeds the memory request quota. 600 MiB + 700 MiB > 1 GiB. 
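As a side note (not part of the patch), the rejection described in the hunk above comes down to simple arithmetic: the quota allows 1 GiB (1024 MiB) of memory requests, and the existing 600 MiB request plus the new 700 MiB request exceeds it.

```shell
# Spelling out the arithmetic behind the quota rejection (1 GiB = 1024 MiB).
echo $((600 + 700))   # total memory requested: 1300 MiB
echo $((1 * 1024))    # memory request quota:   1024 MiB
```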
Attempt to create the Pod: ```shell -kubectl create -f https://k8s.io/examples/admin/resource/quota-mem-cpu-pod-2.yaml --namespace=quota-mem-cpu-example +kubectl apply -f https://k8s.io/examples/admin/resource/quota-mem-cpu-pod-2.yaml --namespace=quota-mem-cpu-example ``` The second Pod does not get created. The output shows that creating the second Pod diff --git a/content/en/docs/tasks/administer-cluster/manage-resources/quota-pod-namespace.md b/content/en/docs/tasks/administer-cluster/manage-resources/quota-pod-namespace.md index 1cad0ee7bd5dc..31cac82cf1016 100644 --- a/content/en/docs/tasks/administer-cluster/manage-resources/quota-pod-namespace.md +++ b/content/en/docs/tasks/administer-cluster/manage-resources/quota-pod-namespace.md @@ -42,7 +42,7 @@ Here is the configuration file for a ResourceQuota object: Create the ResourceQuota: ```shell -kubectl create -f https://k8s.io/examples/admin/resource/quota-pod.yaml --namespace=quota-pod-example +kubectl apply -f https://k8s.io/examples/admin/resource/quota-pod.yaml --namespace=quota-pod-example ``` View detailed information about the ResourceQuota: @@ -74,7 +74,7 @@ In the configuration file, `replicas: 3` tells Kubernetes to attempt to create t Create the Deployment: ```shell -kubectl create -f https://k8s.io/examples/admin/resource/quota-pod-deployment.yaml --namespace=quota-pod-example +kubectl apply -f https://k8s.io/examples/admin/resource/quota-pod-deployment.yaml --namespace=quota-pod-example ``` View detailed information about the Deployment: diff --git a/content/en/docs/tasks/administer-cluster/quota-api-object.md b/content/en/docs/tasks/administer-cluster/quota-api-object.md index 971a4901eb0e9..c6cff9076901e 100644 --- a/content/en/docs/tasks/administer-cluster/quota-api-object.md +++ b/content/en/docs/tasks/administer-cluster/quota-api-object.md @@ -43,7 +43,7 @@ Here is the configuration file for a ResourceQuota object: Create the ResourceQuota: ```shell -kubectl create -f https://k8s.io/examples/admin/resource/quota-objects.yaml --namespace=quota-object-example +kubectl apply -f https://k8s.io/examples/admin/resource/quota-objects.yaml --namespace=quota-object-example ``` View detailed information about the ResourceQuota: @@ -77,7 +77,7 @@ Here is the configuration file for a PersistentVolumeClaim object: Create the PersistentVolumeClaim: ```shell -kubectl create -f https://k8s.io/examples/admin/resource/quota-objects-pvc.yaml --namespace=quota-object-example +kubectl apply -f https://k8s.io/examples/admin/resource/quota-objects-pvc.yaml --namespace=quota-object-example ``` Verify that the PersistentVolumeClaim was created: @@ -102,7 +102,7 @@ Here is the configuration file for a second PersistentVolumeClaim: Attempt to create the second PersistentVolumeClaim: ```shell -kubectl create -f https://k8s.io/examples/admin/resource/quota-objects-pvc-2.yaml --namespace=quota-object-example +kubectl apply -f https://k8s.io/examples/admin/resource/quota-objects-pvc-2.yaml --namespace=quota-object-example ``` The output shows that the second PersistentVolumeClaim was not created, diff --git a/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md b/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md index 2cd6afa3049a5..c259369bf0309 100644 --- a/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md +++ b/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md @@ -88,7 +88,7 @@ be configured to use the `systemd` cgroup driver. 
### Kube Reserved -- **Kubelet Flag**: `--kube-reserved=[cpu=100m][,][memory=100Mi][,][ephemeral-storage=1Gi]` +- **Kubelet Flag**: `--kube-reserved=[cpu=100m][,][memory=100Mi][,][ephemeral-storage=1Gi][,][pid=1000]` - **Kubelet Flag**: `--kube-reserved-cgroup=` `kube-reserved` is meant to capture resource reservation for kubernetes system @@ -102,6 +102,10 @@ post](https://kubernetes.io/blog/2016/11/visualize-kubelet-performance-with-node explains how the dashboard can be interpreted to come up with a suitable `kube-reserved` reservation. +In addition to `cpu`, `memory`, and `ephemeral-storage`, `pid` may be +specified to reserve the specified number of process IDs for +kubernetes system daemons. + To optionally enforce `kube-reserved` on system daemons, specify the parent control group for kube daemons as the value for `--kube-reserved-cgroup` kubelet flag. @@ -118,7 +122,7 @@ exist. Kubelet will fail if an invalid cgroup is specified. ### System Reserved -- **Kubelet Flag**: `--system-reserved=[cpu=100m][,][memory=100Mi][,][ephemeral-storage=1Gi]` +- **Kubelet Flag**: `--system-reserved=[cpu=100m][,][memory=100Mi][,][ephemeral-storage=1Gi][,][pid=1000]` - **Kubelet Flag**: `--system-reserved-cgroup=` @@ -128,6 +132,10 @@ like `sshd`, `udev`, etc. `system-reserved` should reserve `memory` for the Reserving resources for user login sessions is also recommended (`user.slice` in systemd world). +In addition to `cpu`, `memory`, and `ephemeral-storage`, `pid` may be +specified to reserve the specified number of process IDs for OS system +daemons. + To optionally enforce `system-reserved` on system daemons, specify the parent control group for OS system daemons as the value for `--system-reserved-cgroup` kubelet flag. @@ -182,7 +190,8 @@ container runtime. However, Kubelet cannot burst and use up all available Node resources if `kube-reserved` is enforced. Be extra careful while enforcing `system-reserved` reservation since it can lead -to critical system services being CPU starved or OOM killed on the node. The +to critical system services being CPU starved, OOM killed, or unable +to fork on the node. The recommendation is to enforce `system-reserved` only if a user has profiled their nodes exhaustively to come up with precise estimates and is confident in their ability to recover if any process in that group is oom_killed. diff --git a/content/en/docs/tasks/configure-pod-container/assign-cpu-resource.md b/content/en/docs/tasks/configure-pod-container/assign-cpu-resource.md index 55abb118dd4de..3ebd53370bc02 100644 --- a/content/en/docs/tasks/configure-pod-container/assign-cpu-resource.md +++ b/content/en/docs/tasks/configure-pod-container/assign-cpu-resource.md @@ -77,7 +77,7 @@ The `-cpus "2"` argument tells the Container to attempt to use 2 CPUs. Create the Pod: ```shell -kubectl create -f https://k8s.io/examples/pods/resource/cpu-request-limit.yaml --namespace=cpu-example +kubectl apply -f https://k8s.io/examples/pods/resource/cpu-request-limit.yaml --namespace=cpu-example ``` Verify that the Pod Container is running: @@ -168,7 +168,7 @@ capacity of any Node in your cluster. 
Create the Pod: ```shell -kubectl create -f https://k8s.io/examples/pods/resource/cpu-request-limit-2.yaml --namespace=cpu-example +kubectl apply -f https://k8s.io/examples/pods/resource/cpu-request-limit-2.yaml --namespace=cpu-example ``` View the Pod status: diff --git a/content/en/docs/tasks/configure-pod-container/assign-memory-resource.md b/content/en/docs/tasks/configure-pod-container/assign-memory-resource.md index df19079cbe2e3..0f418370c30c6 100644 --- a/content/en/docs/tasks/configure-pod-container/assign-memory-resource.md +++ b/content/en/docs/tasks/configure-pod-container/assign-memory-resource.md @@ -76,7 +76,7 @@ The `"--vm-bytes", "150M"` arguments tell the Container to attempt to allocate 1 Create the Pod: ```shell -kubectl create -f https://k8s.io/examples/pods/resource/memory-request-limit.yaml --namespace=mem-example +kubectl apply -f https://k8s.io/examples/pods/resource/memory-request-limit.yaml --namespace=mem-example ``` Verify that the Pod Container is running: @@ -146,7 +146,7 @@ will attempt to allocate 250 MiB of memory, which is well above the 100 MiB limi Create the Pod: ```shell -kubectl create -f https://k8s.io/examples/pods/resource/memory-request-limit-2.yaml --namespace=mem-example +kubectl apply -f https://k8s.io/examples/pods/resource/memory-request-limit-2.yaml --namespace=mem-example ``` View detailed information about the Pod: @@ -252,7 +252,7 @@ of any Node in your cluster. Create the Pod: ```shell -kubectl create -f https://k8s.io/examples/pods/resource/memory-request-limit-3.yaml --namespace=mem-example +kubectl apply -f https://k8s.io/examples/pods/resource/memory-request-limit-3.yaml --namespace=mem-example ``` View the Pod status: diff --git a/content/en/docs/tasks/configure-pod-container/assign-pods-nodes.md b/content/en/docs/tasks/configure-pod-container/assign-pods-nodes.md index 25247a1a00e57..55c5a0754bbf2 100644 --- a/content/en/docs/tasks/configure-pod-container/assign-pods-nodes.md +++ b/content/en/docs/tasks/configure-pod-container/assign-pods-nodes.md @@ -71,7 +71,7 @@ a `disktype=ssd` label. chosen node: ```shell - kubectl create -f https://k8s.io/examples/pods/pod-nginx.yaml + kubectl apply -f https://k8s.io/examples/pods/pod-nginx.yaml ``` 1. Verify that the pod is running on your chosen node: diff --git a/content/en/docs/tasks/configure-pod-container/attach-handler-lifecycle-event.md b/content/en/docs/tasks/configure-pod-container/attach-handler-lifecycle-event.md index 3c461a2ecbd86..67aedfafca051 100644 --- a/content/en/docs/tasks/configure-pod-container/attach-handler-lifecycle-event.md +++ b/content/en/docs/tasks/configure-pod-container/attach-handler-lifecycle-event.md @@ -38,7 +38,7 @@ nginx gracefully. 
This is helpful if the Container is being terminated because o Create the Pod: - kubectl create -f https://k8s.io/examples/pods/lifecycle-events.yaml + kubectl apply -f https://k8s.io/examples/pods/lifecycle-events.yaml Verify that the Container in the Pod is running: diff --git a/content/en/docs/tasks/configure-pod-container/configure-gmsa.md b/content/en/docs/tasks/configure-pod-container/configure-gmsa.md new file mode 100644 index 0000000000000..c6f43f96a8f49 --- /dev/null +++ b/content/en/docs/tasks/configure-pod-container/configure-gmsa.md @@ -0,0 +1,204 @@ +--- +title: Configure GMSA for Windows pods and containers +content_template: templates/task +weight: 20 +--- + +{{% capture overview %}} + +{{< feature-state for_k8s_version="v1.14" state="alpha" >}} + +This page shows how to configure [Group Managed Service Accounts](https://docs.microsoft.com/en-us/windows-server/security/group-managed-service-accounts/group-managed-service-accounts-overview) (GMSA) for pods and containers that will run on Windows nodes. Group Managed Service Accounts are a specific type of Active Directory account that provides automatic password management, simplified service principal name (SPN) management, and the ability to delegate the management to other administrators across multiple servers. + +In Kubernetes, GMSA credential specs are configured at a Kubernetes cluster-wide scope as custom resources. Windows pods, as well as individual containers within a pod, can be configured to use a GMSA for domain based functions (e.g. Kerberos authentication) when interacting with other Windows services. As of v1.14, the only container runtime interface that supports GMSA for Windows workloads is Dockershim. Implementation of GMSA through CRI and other runtimes is planned for the future. + +{{< note >}} +Currently this feature is in alpha state. While the overall goals and functionality will not change, the way in which the GMSA credspec references are specified in pod specs may change from annotations to API fields. Please take this into consideration when testing or adopting this feature. +{{< /note >}} + +{{% /capture %}} + +{{% capture prerequisites %}} + +You need to have a Kubernetes cluster and the kubectl command-line tool must be configured to communicate with your cluster. The cluster is expected to have Windows worker nodes where pods with containers running Windows workloads requiring GMSA credentials will get scheduled. This section covers a set of initial steps required once for each cluster: + +### Enable the WindowsGMSA feature gate +In the alpha state, the `WindowsGMSA` feature gate needs to be enabled on kubelet on Windows nodes. This is required to pass down the GMSA credential specs from the cluster scoped configurations to the container runtime. See [Feature Gates](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/) for an explanation of enabling feature gates. Please make sure `--feature-gates=WindowsGMSA=true` parameter exists in the kubelet.exe command line. + +### Install the GMSACredentialSpec CRD +A [CustomResourceDefinition](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/) (CRD) for GMSA credential spec resources needs to be configured on the cluster to define the custom resource type `GMSACredentialSpec`. Download the GMSA CRD [YAML](https://github.com/kubernetes-sigs/windows-gmsa/blob/master/admission-webhook/deploy/gmsa-crd.yml) and save it as gmsa-crd.yaml. 
+Next, install the CRD with `kubectl apply -f gmsa-crd.yaml` + +### Install webhooks to validate GMSA users +Two webhooks need to be configured on the Kubernetes cluster to populate and validate GMSA credential spec references at the pod or container level: + +1. A mutating webhook that expands references to GMSAs (by name from a pod specification) into the full credential spec in JSON form within the pod spec. + +1. A validating webhook ensures all references to GMSAs are authorized to be used by the pod service account. + +Installing the above webhooks and associated objects require the steps below: + +1. Create a certificate key pair (that will be used to allow the webhook container to communicate to the cluster) + +1. Install a secret with the certificate from above. + +1. Create a deployment for the core webhook logic. + +1. Create the validating and mutating webhook configurations referring to the deployment. + +A [script](https://github.com/kubernetes-sigs/windows-gmsa/blob/master/admission-webhook/deploy/deploy-gmsa-webhook.sh) can be used to deploy and configure the GMSA webhooks and associated objects mentioned above. The script can be run with a ```--dry-run``` option to allow you to review the changes that would be made to your cluster. + +The [YAML template](https://github.com/kubernetes-sigs/windows-gmsa/blob/master/admission-webhook/deploy/gmsa-webhook.yml.tpl) used by the script may also be used to deploy the webhooks and associated objects manually (with appropriate substitutions for the parameters) + +{{% /capture %}} + +{{% capture steps %}} + +## Configure GMSAs and Windows nodes in Active Directory +Before pods in Kubernetes can be configured to use GMSAs, the desired GMSAs need to be provisioned in Active Directory as described [here](https://docs.microsoft.com/en-us/windows-server/security/group-managed-service-accounts/getting-started-with-group-managed-service-accounts#BKMK_Step1). Windows worker nodes (that are part of the Kubernetes cluster) need to be configured in Active Directory to access the secret credentials associated with the desired GMSA as described [here](https://docs.microsoft.com/en-us/windows-server/security/group-managed-service-accounts/getting-started-with-group-managed-service-accounts#to-add-member-hosts-using-the-set-adserviceaccount-cmdlet) + +## Create GMSA credential spec resources +With the GMSACredentialSpec CRD installed (as described earlier), custom resources containing GMSA credential specs can be configured. The GMSA credential spec does not contain secret or sensitive data. It is information that a container runtime can use to describe the desired GMSA of a container to Windows. GMSA credential specs can be generated in YAML format with a utility [PowerShell script](https://github.com/kubernetes-sigs/windows-gmsa/tree/master/scripts/GenerateCredentialSpecResource.ps1). + +Following are the steps for generating a GMSA credential spec YAML manually in JSON format and then converting it: + +1. Import the CredentialSpec [module](https://github.com/MicrosoftDocs/Virtualization-Documentation/blob/live/windows-server-container-tools/ServiceAccounts/CredentialSpec.psm1): `ipmo CredentialSpec.psm1` + +1. Create a credential spec in JSON format using `New-CredentialSpec`. To create a GMSA credential spec named WebApp1, invoke `New-CredentialSpec -Name WebApp1 -AccountName WebApp1 -Domain $(Get-ADDomain -Current LocalComputer)` + +1. Use `Get-CredentialSpec` to show the path of the JSON file. + +1. 
Convert the credspec file from JSON to YAML format and add the necessary header fields `apiVersion`, `kind`, `metadata`, and `credspec` to make it a GMSACredentialSpec custom resource that can be configured in Kubernetes. + +The following YAML configuration describes a GMSA credential spec named `gmsa-WebApp1`: + +``` +apiVersion: windows.k8s.io/v1alpha1 +kind: GMSACredentialSpec +metadata: + name: gmsa-WebApp1 #This is an arbitrary name but it will be used as a reference +credspec: + ActiveDirectoryConfig: + GroupManagedServiceAccounts: + - Name: WebApp1 #Username of the GMSA account + Scope: CONTOSO #NETBIOS Domain Name + - Name: WebApp1 #Username of the GMSA account + Scope: contoso.com #DNS Domain Name + CmsPlugins: + - ActiveDirectory + DomainJoinConfig: + DnsName: contoso.com #DNS Domain Name + DnsTreeName: contoso.com #DNS Domain Name Root + Guid: 244818ae-87ac-4fcd-92ec-e79e5252348a #GUID + MachineAccountName: WebApp1 #Username of the GMSA account + NetBiosName: CONTOSO #NETBIOS Domain Name + Sid: S-1-5-21-2126449477-2524075714-3094792973 #SID of GMSA +``` + +The above credential spec resource may be saved as `gmsa-Webapp1-credspec.yaml` and applied to the cluster using: `kubectl apply -f gmsa-Webapp1-credspec.yaml` + +## Configure cluster role to enable RBAC on specific GMSA credential specs +A cluster role needs to be defined for each GMSA credential spec resource. This authorizes the `use` verb on a specific GMSA resource by a subject, which is typically a service account. The following example shows a cluster role that authorizes usage of the `gmsa-WebApp1` credential spec from above. Save the file as gmsa-webapp1-role.yaml and apply using `kubectl apply -f gmsa-webapp1-role.yaml` + +``` +#Create the Role to read the credspec +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: webapp1-role +rules: +- apiGroups: ["windows.k8s.io"] + resources: ["gmsacredentialspecs"] + verbs: ["use"] + resourceNames: ["gmsa-WebApp1"] +``` + +## Assign role to service accounts to use specific GMSA credspecs +A service account (that pods will be configured with) needs to be bound to the cluster role created above. This authorizes the service account to "use" the desired GMSA credential spec resource. The following shows the default service account being bound to the cluster role `webapp1-role` to use the `gmsa-WebApp1` credential spec resource created above. + +``` +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: allow-default-svc-account-read-on-gmsa-WebApp1 + namespace: default +subjects: +- kind: ServiceAccount + name: default + namespace: default +roleRef: + kind: ClusterRole + name: webapp1-role + apiGroup: rbac.authorization.k8s.io +``` + +## Configure GMSA credential spec reference in pod spec +In the alpha stage of the feature, the annotation `pod.alpha.windows.kubernetes.io/gmsa-credential-spec-name` is used to specify references to desired GMSA credential spec custom resources in pod specs. This configures all containers in the pod spec to use the specified GMSA.
A sample pod spec with the annotation populated to refer to `gmsa-WebApp1`: + +``` +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + labels: + run: with-creds + name: with-creds + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + run: with-creds + template: + metadata: + labels: + run: with-creds + annotations: + pod.alpha.windows.kubernetes.io/gmsa-credential-spec-name: gmsa-WebApp1 # This must be the name of the cred spec you created + spec: + containers: + - image: mcr.microsoft.com/windows/servercore/iis:windowsservercore-ltsc2019 + imagePullPolicy: Always + name: iis + nodeSelector: + beta.kubernetes.io/os: windows +``` + +Individual containers in a pod spec can also specify the desired GMSA credspec using annotation `.container.alpha.windows.kubernetes.io/gmsa-credential-spec`. For example: + +``` +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + labels: + run: with-creds + name: with-creds + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + run: with-creds + template: + metadata: + labels: + run: with-creds + annotations: + iis.container.alpha.windows.kubernetes.io/gmsa-credential-spec-name: gmsa-WebApp1 # This must be the name of the cred spec you created + spec: + containers: + - image: mcr.microsoft.com/windows/servercore/iis:windowsservercore-ltsc2019 + imagePullPolicy: Always + name: iis + nodeSelector: + beta.kubernetes.io/os: windows +``` + +As pod specs with GMSA annotations (as described above) are applied in a cluster configured for GMSA, the following sequence of events take place: + +1. The mutating webhook resolves and expands all references to GMSA credential spec resources to the contents of the GMSA credential spec. + +1. The validating webhook ensures the service account associated with the pod is authorized for the "use" verb on the specified GMSA credential spec. + +1. The container runtime configures each Windows container with the specified GMSA credential spec so that the container can assume the identity of the GMSA in Active Directory and access services in the domain using that identity. + +{{% /capture %}} diff --git a/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-probes.md b/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-probes.md index 36c4f758ac070..f2ba952f7b02d 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-probes.md +++ b/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-probes.md @@ -62,7 +62,7 @@ code. After 30 seconds, `cat /tmp/healthy` returns a failure code. Create the Pod: ```shell -kubectl create -f https://k8s.io/examples/pods/probe/exec-liveness.yaml +kubectl apply -f https://k8s.io/examples/pods/probe/exec-liveness.yaml ``` Within 30 seconds, view the Pod events: @@ -163,7 +163,7 @@ checks will fail, and the kubelet will kill and restart the Container. To try the HTTP liveness check, create a Pod: ```shell -kubectl create -f https://k8s.io/examples/pods/probe/http-liveness.yaml +kubectl apply -f https://k8s.io/examples/pods/probe/http-liveness.yaml ``` After 10 seconds, view Pod events to verify that liveness probes have failed and @@ -204,7 +204,7 @@ will be restarted. 
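One way to view those events is to describe the Pod; a sketch, assuming the Pod in the HTTP liveness manifest is named `liveness-http` (the manifest itself is not shown here):

```shell
# Failed probes appear in the Events section of the output, followed by
# the kubelet killing and restarting the Container
kubectl describe pod liveness-http
```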
To try the TCP liveness check, create a Pod: ```shell -kubectl create -f https://k8s.io/examples/pods/probe/tcp-liveness-readiness.yaml +kubectl apply -f https://k8s.io/examples/pods/probe/tcp-liveness-readiness.yaml ``` After 15 seconds, view Pod events to verify that liveness probes: diff --git a/content/en/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md b/content/en/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md index cc3f7a3fde61c..d51327390a47b 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md +++ b/content/en/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md @@ -73,7 +73,7 @@ PersistentVolumeClaim requests to this PersistentVolume. Create the PersistentVolume: - kubectl create -f https://k8s.io/examples/pods/storage/pv-volume.yaml + kubectl apply -f https://k8s.io/examples/pods/storage/pv-volume.yaml View information about the PersistentVolume: @@ -98,7 +98,7 @@ Here is the configuration file for the PersistentVolumeClaim: Create the PersistentVolumeClaim: - kubectl create -f https://k8s.io/examples/pods/storage/pv-claim.yaml + kubectl apply -f https://k8s.io/examples/pods/storage/pv-claim.yaml After you create the PersistentVolumeClaim, the Kubernetes control plane looks for a PersistentVolume that satisfies the claim's requirements. If the control @@ -138,7 +138,7 @@ is a volume. Create the Pod: - kubectl create -f https://k8s.io/examples/pods/storage/pv-pod.yaml + kubectl apply -f https://k8s.io/examples/pods/storage/pv-pod.yaml Verify that the Container in the Pod is running; diff --git a/content/en/docs/tasks/configure-pod-container/configure-pod-configmap.md b/content/en/docs/tasks/configure-pod-container/configure-pod-configmap.md index c8c61064385ba..90b9e07d52c9a 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-pod-configmap.md +++ b/content/en/docs/tasks/configure-pod-container/configure-pod-configmap.md @@ -21,7 +21,10 @@ ConfigMaps allow you to decouple configuration artifacts from image content to k {{% capture steps %}} -## Create a ConfigMap +## Create a ConfigMap +You can use either `kubectl create configmap` or a ConfigMap generator in `kustomization.yaml` to create a ConfigMap. Note that `kubectl` starts to support `kustomization.yaml` since 1.14. + +### Create a ConfigMap Using kubectl create configmap Use the `kubectl create configmap` command to create configmaps from [directories](#create-configmaps-from-directories), [files](#create-configmaps-from-files), or [literal values](#create-configmaps-from-literal-values): @@ -40,7 +43,7 @@ You can use [`kubectl describe`](/docs/reference/generated/kubectl/kubectl-comma [`kubectl get`](/docs/reference/generated/kubectl/kubectl-commands/#get) to retrieve information about a ConfigMap. -### Create ConfigMaps from directories +#### Create ConfigMaps from directories You can use `kubectl create configmap` to create a ConfigMap from multiple files in the same directory. @@ -117,7 +120,7 @@ metadata: uid: b4952dc3-d670-11e5-8cd0-68f728db1985 ``` -### Create ConfigMaps from files +#### Create ConfigMaps from files You can use `kubectl create configmap` to create a ConfigMap from an individual file, or from multiple files. 
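As a quick sketch of the single-file case (the ConfigMap name `game-config-demo` is arbitrary, and the path matches the sample file used later on this page):

```shell
# The key defaults to the file's basename (game.properties);
# use --from-file=<my-key>=<path> to choose a different key
kubectl create configmap game-config-demo --from-file=configure-pod-container/configmap/kubectl/game.properties
```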
@@ -300,7 +303,7 @@ metadata: uid: 05f8da22-d671-11e5-8cd0-68f728db1985 ``` -### Create ConfigMaps from literal values +#### Create ConfigMaps from literal values You can use `kubectl create configmap` with the `--from-literal` argument to define a literal value from the command line: @@ -330,6 +333,101 @@ metadata: uid: dadce046-d673-11e5-8cd0-68f728db1985 ``` +### Create a ConfigMap from a generator +`kubectl` supports `kustomization.yaml` since 1.14. +You can also create a ConfigMap from generators and then apply it to create the object on +the API server. The generators +should be specified in a `kustomization.yaml` inside a directory. + +#### Generate ConfigMaps from files +For example, to generate a ConfigMap from the file `configure-pod-container/configmap/kubectl/game.properties`: +```shell +# Create a kustomization.yaml file with ConfigMapGenerator +cat <<EOF >./kustomization.yaml +configMapGenerator: +- name: game-config-4 + files: + - configure-pod-container/configmap/kubectl/game.properties +EOF +``` + +Apply the kustomization directory to create the ConfigMap object. +```shell +kubectl apply -k . +configmap/game-config-4-m9dm2f92bt created +``` + +You can check that the ConfigMap was created like this: + +```shell +kubectl get configmap +NAME DATA AGE +game-config-4-m9dm2f92bt 1 37s + + +kubectl describe configmaps/game-config-4-m9dm2f92bt +Name: game-config-4-m9dm2f92bt +Namespace: default +Labels: +Annotations: kubectl.kubernetes.io/last-applied-configuration: + {"apiVersion":"v1","data":{"game.properties":"enemies=aliens\nlives=3\nenemies.cheat=true\nenemies.cheat.level=noGoodRotten\nsecret.code.p... + +Data +==== +game.properties: +---- +enemies=aliens +lives=3 +enemies.cheat=true +enemies.cheat.level=noGoodRotten +secret.code.passphrase=UUDDLRLRBABAS +secret.code.allowed=true +secret.code.lives=30 +Events: +``` + +Note that the generated ConfigMap name has a suffix appended by hashing the contents. This ensures that a +new ConfigMap is generated each time the content is modified. + +#### Define the key to use when generating a ConfigMap from a file +You can define a key other than the file name to use in the ConfigMap generator. +For example, to generate a ConfigMap from the file `configure-pod-container/configmap/kubectl/game.properties` +with the key `game-special-key`: + +```shell +# Create a kustomization.yaml file with ConfigMapGenerator +cat <<EOF >./kustomization.yaml +configMapGenerator: +- name: game-config-5 + files: + - game-special-key=configure-pod-container/configmap/kubectl/game.properties +EOF +``` + +Apply the kustomization directory to create the ConfigMap object. +```shell +kubectl apply -k . +configmap/game-config-5-m67dt67794 created +``` + +#### Generate ConfigMaps from literals +To generate a ConfigMap from literals `special.type=charm` and `special.how=very`, +you can specify the ConfigMap generator in `kustomization.yaml` as follows: +```shell +# Create a kustomization.yaml file with ConfigMapGenerator +cat <<EOF >./kustomization.yaml +configMapGenerator: +- name: special-config-2 + literals: + - special.how=very + - special.type=charm +EOF +``` +Apply the kustomization directory to create the ConfigMap object. +```shell +kubectl apply -k .
+configmap/special-config-2-c92b5mmcf2 created +``` ## Define container environment variables using ConfigMap data diff --git a/content/en/docs/tasks/configure-pod-container/configure-pod-initialization.md b/content/en/docs/tasks/configure-pod-container/configure-pod-initialization.md index 6ab76afd09f8e..a418a8d7c0bc6 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-pod-initialization.md +++ b/content/en/docs/tasks/configure-pod-container/configure-pod-initialization.md @@ -43,7 +43,7 @@ of the nginx server. Create the Pod: - kubectl create -f https://k8s.io/examples/pods/init-containers.yaml + kubectl apply -f https://k8s.io/examples/pods/init-containers.yaml Verify that the nginx container is running: diff --git a/content/en/docs/tasks/configure-pod-container/configure-projected-volume-storage.md b/content/en/docs/tasks/configure-pod-container/configure-projected-volume-storage.md index 122f3e0beb49d..cb7e2957e3eac 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-projected-volume-storage.md +++ b/content/en/docs/tasks/configure-pod-container/configure-projected-volume-storage.md @@ -42,7 +42,7 @@ Here is the configuration file for the Pod: ``` 1. Create the Pod: ```shell - kubectl create -f https://k8s.io/examples/pods/storage/projected.yaml + kubectl apply -f https://k8s.io/examples/pods/storage/projected.yaml ``` 1. Verify that the Pod's Container is running, and then watch for changes to the Pod: diff --git a/content/en/docs/tasks/configure-pod-container/configure-service-account.md b/content/en/docs/tasks/configure-pod-container/configure-service-account.md index 2cc040aa288f9..0d2e7e4114ff2 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-service-account.md +++ b/content/en/docs/tasks/configure-pod-container/configure-service-account.md @@ -90,7 +90,7 @@ default 1 1d You can create additional ServiceAccount objects like this: ```shell -kubectl create -f - <}} In the configuration file, the `runAsUser` field specifies that for any Containers in -the Pod, the first process runs with user ID 1000. The `fsGroup` field specifies that -group ID 2000 is associated with all Containers in the Pod. Group ID 2000 is also -associated with the volume mounted at `/data/demo` and with any files created in that -volume. +the Pod, all processes run with user ID 1000. The `runAsGroup` field specifies the primary group ID of 3000 for +all processes within any containers of the Pod. If this field is omitted, the primary group ID of the containers +will be root (0). Any files created will also be owned by user 1000 and group 3000 when `runAsGroup` is specified. +Since the `fsGroup` field is specified, all processes of the container are also part of the supplementary group ID 2000. +The owner for volume `/data/demo` and any files created in that volume will be group ID 2000. Create the Pod: ```shell -kubectl create -f https://k8s.io/examples/pods/security/security-context.yaml +kubectl apply -f https://k8s.io/examples/pods/security/security-context.yaml ``` Verify that the Pod's Container is running: @@ -123,6 +124,16 @@ The output shows that `testfile` has group ID 2000, which is the value of `fsGro -rw-r--r-- 1 1000 2000 6 Jun 6 20:08 testfile ``` +Run the following command: + +```shell +$ id +uid=1000 gid=3000 groups=2000 +``` +You will see that the gid is 3000, which is the same as the `runAsGroup` field.
If the `runAsGroup` field was omitted, the gid would +remain 0 (root), and the process would be able to interact with files that are owned by the root (0) group and that have +the required group permissions for the root (0) group. + Exit your shell: ```shell @@ -146,7 +157,7 @@ and the Container have a `securityContext` field: Create the Pod: ```shell -kubectl create -f https://k8s.io/examples/pods/security/security-context-2.yaml +kubectl apply -f https://k8s.io/examples/pods/security/security-context-2.yaml ``` Verify that the Pod's Container is running: @@ -199,7 +210,7 @@ Here is configuration file that does not add or remove any Container capabilitie Create the Pod: ```shell -kubectl create -f https://k8s.io/examples/pods/security/security-context-3.yaml +kubectl apply -f https://k8s.io/examples/pods/security/security-context-3.yaml ``` Verify that the Pod's Container is running: @@ -261,7 +272,7 @@ adds the `CAP_NET_ADMIN` and `CAP_SYS_TIME` capabilities: Create the Pod: ```shell -kubectl create -f https://k8s.io/examples/pods/security/security-context-4.yaml +kubectl apply -f https://k8s.io/examples/pods/security/security-context-4.yaml ``` Get a shell into the running Container: @@ -357,5 +368,3 @@ After you specify an MCS label for a Pod, all Pods with the same label can acces {{% /capture %}} - - diff --git a/content/en/docs/tasks/configure-pod-container/share-process-namespace.md b/content/en/docs/tasks/configure-pod-container/share-process-namespace.md index b2b97815f08fa..3564ba1ff0e62 100644 --- a/content/en/docs/tasks/configure-pod-container/share-process-namespace.md +++ b/content/en/docs/tasks/configure-pod-container/share-process-namespace.md @@ -43,7 +43,7 @@ Process Namespace Sharing is enabled using the `ShareProcessNamespace` field of 1. Create the pod `nginx` on your cluster: - kubectl create -f https://k8s.io/examples/pods/share-process-namespace.yaml + kubectl apply -f https://k8s.io/examples/pods/share-process-namespace.yaml 1. Attach to the `shell` container and run `ps`: diff --git a/content/en/docs/tasks/configure-pod-container/translate-compose-kubernetes.md b/content/en/docs/tasks/configure-pod-container/translate-compose-kubernetes.md index 867b4bcd0d920..a6396b3dc30b9 100644 --- a/content/en/docs/tasks/configure-pod-container/translate-compose-kubernetes.md +++ b/content/en/docs/tasks/configure-pod-container/translate-compose-kubernetes.md @@ -124,7 +124,7 @@ you need is an existing `docker-compose.yml` file. ```bash $ kompose up We are going to create Kubernetes Deployments, Services and PersistentVolumeClaims for your Dockerized application. - If you need different kind of resources, use the 'kompose convert' and 'kubectl create -f' commands instead. + If you need different kind of resources, use the 'kompose convert' and 'kubectl apply -f' commands instead. INFO Successfully created Service: redis INFO Successfully created Service: web @@ -135,7 +135,7 @@ you need is an existing `docker-compose.yml` file. ``` 3. To convert the `docker-compose.yml` file to files that you can use with - `kubectl`, run `kompose convert` and then `kubectl create -f `. + `kubectl`, run `kompose convert` and then `kubectl apply -f `. ```bash $ kompose convert @@ -148,7 +148,7 @@ you need is an existing `docker-compose.yml` file.
``` ```bash - $ kubectl create -f frontend-service.yaml,redis-master-service.yaml,redis-slave-service.yaml,frontend-deployment.yaml,redis-master-deployment.yaml,redis-slave-deployment.yaml + $ kubectl apply -f frontend-service.yaml,redis-master-service.yaml,redis-slave-service.yaml,frontend-deployment.yaml,redis-master-deployment.yaml,redis-slave-deployment.yaml service/frontend created service/redis-master created service/redis-slave created @@ -309,7 +309,7 @@ Kompose supports a straightforward way to deploy your "composed" application to ```sh $ kompose --file ./examples/docker-guestbook.yml up We are going to create Kubernetes deployments and services for your Dockerized application. -If you need different kind of resources, use the 'kompose convert' and 'kubectl create -f' commands instead. +If you need different kind of resources, use the 'kompose convert' and 'kubectl apply -f' commands instead. INFO Successfully created service: redis-master INFO Successfully created service: redis-slave @@ -341,7 +341,7 @@ pod/redis-slave-2504961300-nve7b 1/1 Running 0 4m **Note**: - You must have a running Kubernetes cluster with a pre-configured kubectl context. -- Only deployments and services are generated and deployed to Kubernetes. If you need different kind of resources, use the `kompose convert` and `kubectl create -f` commands instead. +- Only deployments and services are generated and deployed to Kubernetes. If you need different kind of resources, use the `kompose convert` and `kubectl apply -f` commands instead. ### OpenShift ```sh @@ -426,7 +426,7 @@ INFO Image 'docker.io/foo/bar' from directory 'build' built successfully INFO Pushing image 'foo/bar:latest' to registry 'docker.io' INFO Attempting authentication credentials 'https://index.docker.io/v1/ INFO Successfully pushed image 'foo/bar:latest' to registry 'docker.io' -INFO We are going to create Kubernetes Deployments, Services and PersistentVolumeClaims for your Dockerized application. If you need different kind of resources, use the 'kompose convert' and 'kubectl create -f' commands instead. +INFO We are going to create Kubernetes Deployments, Services and PersistentVolumeClaims for your Dockerized application. If you need different kind of resources, use the 'kompose convert' and 'kubectl apply -f' commands instead. 
INFO Deploying application in "default" namespace INFO Successfully created Service: foo diff --git a/content/en/docs/tasks/debug-application-cluster/debug-application-introspection.md b/content/en/docs/tasks/debug-application-cluster/debug-application-introspection.md index a04200e9a0984..2ae12d2d48f97 100644 --- a/content/en/docs/tasks/debug-application-cluster/debug-application-introspection.md +++ b/content/en/docs/tasks/debug-application-cluster/debug-application-introspection.md @@ -26,7 +26,7 @@ For this example we'll use a Deployment to create two pods, similar to the earli Create deployment by running following command: ```shell -kubectl create -f https://k8s.io/examples/application/nginx-with-request.yaml +kubectl apply -f https://k8s.io/examples/application/nginx-with-request.yaml ``` ```none @@ -293,8 +293,8 @@ kubectl describe node kubernetes-node-861h ```none Name: kubernetes-node-861h Role -Labels: beta.kubernetes.io/arch=amd64 - beta.kubernetes.io/os=linux +Labels: kubernetes.io/arch=amd64 + kubernetes.io/os=linux kubernetes.io/hostname=kubernetes-node-861h Annotations: node.alpha.kubernetes.io/ttl=0 volumes.kubernetes.io/controller-managed-attach-detach=true diff --git a/content/en/docs/tasks/debug-application-cluster/debug-application.md b/content/en/docs/tasks/debug-application-cluster/debug-application.md index eb69129790e7d..c3b8afb6a752e 100644 --- a/content/en/docs/tasks/debug-application-cluster/debug-application.md +++ b/content/en/docs/tasks/debug-application-cluster/debug-application.md @@ -107,7 +107,7 @@ For example, if you misspelled `command` as `commnd` then the pod will be create will not use the command line you intended it to use. The first thing to do is to delete your pod and try creating it again with the `--validate` option. -For example, run `kubectl create --validate -f mypod.yaml`. +For example, run `kubectl apply --validate -f mypod.yaml`. If you misspelled `command` as `commnd` then will give an error like this: ```shell diff --git a/content/en/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md b/content/en/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md index b684ef2ddfff9..1353420d63d7d 100644 --- a/content/en/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md +++ b/content/en/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md @@ -38,7 +38,7 @@ the container starts. 1. Create a Pod based on the YAML configuration file: - kubectl create -f https://k8s.io/examples/debug/termination.yaml + kubectl apply -f https://k8s.io/examples/debug/termination.yaml In the YAML file, in the `cmd` and `args` fields, you can see that the container sleeps for 10 seconds and then writes "Sleep expired" to diff --git a/content/en/docs/tasks/debug-application-cluster/events-stackdriver.md b/content/en/docs/tasks/debug-application-cluster/events-stackdriver.md index 9f81b3ee900a6..e3e9150f330f4 100644 --- a/content/en/docs/tasks/debug-application-cluster/events-stackdriver.md +++ b/content/en/docs/tasks/debug-application-cluster/events-stackdriver.md @@ -59,7 +59,7 @@ average, approximately 100Mb RAM and 100m CPU is needed. 
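As an aside on the node labels shown in the `kubectl describe node` output above, the updated `kubernetes.io/arch` and `kubernetes.io/os` keys can be used directly as label selectors, for example:

```shell
# List only the Linux amd64 nodes using the non-deprecated label keys
kubectl get nodes -l kubernetes.io/os=linux,kubernetes.io/arch=amd64
```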
Deploy event exporter to your cluster using the following command: ```shell -kubectl create -f https://k8s.io/examples/debug/event-exporter.yaml +kubectl apply -f https://k8s.io/examples/debug/event-exporter.yaml ``` Since event exporter accesses the Kubernetes API, it requires permissions to diff --git a/content/en/docs/tasks/debug-application-cluster/get-shell-running-container.md b/content/en/docs/tasks/debug-application-cluster/get-shell-running-container.md index 8b97c679f12cc..f3ff92c1964b2 100644 --- a/content/en/docs/tasks/debug-application-cluster/get-shell-running-container.md +++ b/content/en/docs/tasks/debug-application-cluster/get-shell-running-container.md @@ -33,7 +33,7 @@ runs the nginx image. Here is the configuration file for the Pod: Create the Pod: ```shell -kubectl create -f https://k8s.io/examples/application/shell-demo.yaml +kubectl apply -f https://k8s.io/examples/application/shell-demo.yaml ``` Verify that the Container is running: diff --git a/content/en/docs/tasks/debug-application-cluster/logging-stackdriver.md b/content/en/docs/tasks/debug-application-cluster/logging-stackdriver.md index 2b1233efeb531..d075944516953 100644 --- a/content/en/docs/tasks/debug-application-cluster/logging-stackdriver.md +++ b/content/en/docs/tasks/debug-application-cluster/logging-stackdriver.md @@ -97,7 +97,7 @@ than Google Kubernetes Engine. Proceed at your own risk. 1. Deploy a `ConfigMap` with the logging agent configuration by running the following command: ``` - kubectl create -f https://k8s.io/examples/debug/fluentd-gcp-configmap.yaml + kubectl apply -f https://k8s.io/examples/debug/fluentd-gcp-configmap.yaml ``` The command creates the `ConfigMap` in the `default` namespace. You can download the file @@ -106,7 +106,7 @@ than Google Kubernetes Engine. Proceed at your own risk. 1. Deploy the logging agent `DaemonSet` by running the following command: ``` - kubectl create -f https://k8s.io/examples/debug/fluentd-gcp-ds.yaml + kubectl apply -f https://k8s.io/examples/debug/fluentd-gcp-ds.yaml ``` You can download and edit this file before using it as well. @@ -139,7 +139,7 @@ that writes out the value of a counter and the datetime once per second, and runs indefinitely. Let's create this pod in the default namespace. ```shell -kubectl create -f https://k8s.io/examples/debug/counter-pod.yaml +kubectl apply -f https://k8s.io/examples/debug/counter-pod.yaml ``` You can observe the running pod: diff --git a/content/en/docs/tasks/debug-application-cluster/monitor-node-health.md b/content/en/docs/tasks/debug-application-cluster/monitor-node-health.md index 43b3c8fc860d3..6db5585f92dfe 100644 --- a/content/en/docs/tasks/debug-application-cluster/monitor-node-health.md +++ b/content/en/docs/tasks/debug-application-cluster/monitor-node-health.md @@ -73,7 +73,7 @@ OS distro.*** * **Step 2:** Start node problem detector with `kubectl`: ```shell - kubectl create -f https://k8s.io/examples/debug/node-problem-detector.yaml + kubectl apply -f https://k8s.io/examples/debug/node-problem-detector.yaml ``` ### Addon Pod @@ -105,7 +105,7 @@ node-problem-detector-config --from-file=config/`. 
```shell kubectl delete -f https://k8s.io/examples/debug/node-problem-detector.yaml # If you have a node-problem-detector running - kubectl create -f https://k8s.io/examples/debug/node-problem-detector-configmap.yaml + kubectl apply -f https://k8s.io/examples/debug/node-problem-detector-configmap.yaml ``` ***Notice that this approach only applies to node problem detector started with `kubectl`.*** diff --git a/content/en/docs/tasks/debug-application-cluster/core-metrics-pipeline.md b/content/en/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md similarity index 98% rename from content/en/docs/tasks/debug-application-cluster/core-metrics-pipeline.md rename to content/en/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md index 19f612a5dc4f4..813613b499dd0 100644 --- a/content/en/docs/tasks/debug-application-cluster/core-metrics-pipeline.md +++ b/content/en/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md @@ -2,7 +2,7 @@ reviewers: - fgrzadkowski - piosz -title: Core metrics pipeline +title: Resource metrics pipeline content_template: templates/concept --- diff --git a/content/en/docs/tasks/debug-application-cluster/resource-usage-monitoring.md b/content/en/docs/tasks/debug-application-cluster/resource-usage-monitoring.md index 805e77a847f42..928992232ad39 100644 --- a/content/en/docs/tasks/debug-application-cluster/resource-usage-monitoring.md +++ b/content/en/docs/tasks/debug-application-cluster/resource-usage-monitoring.md @@ -46,7 +46,7 @@ monitoring statistics by default: ### Kubelet -The Kubelet acts as a bridge between the Kubernetes master and the nodes. It manages the pods and containers running on a machine. Kubelet translates each pod into its constituent containers and fetches individual container usage statistics from cAdvisor. It then exposes the aggregated pod resource usage statistics via a REST API. +The Kubelet acts as a bridge between the Kubernetes master and the nodes. It manages the pods and containers running on a machine. Kubelet translates each pod into its constituent containers and fetches individual container usage statistics from the container runtime, through the container runtime interface. For the legacy docker integration, it fetches this information from cAdvisor. It then exposes the aggregated pod resource usage statistics through the kubelet resource metrics api. This api is served at `/metrics/resource/v1alpha1` on the kubelet's authenticated and read-only ports. ### cAdvisor diff --git a/content/en/docs/tasks/extend-kubectl/kubectl-plugins.md b/content/en/docs/tasks/extend-kubectl/kubectl-plugins.md index 3d142e51af9b0..a78bab5360050 100644 --- a/content/en/docs/tasks/extend-kubectl/kubectl-plugins.md +++ b/content/en/docs/tasks/extend-kubectl/kubectl-plugins.md @@ -9,7 +9,7 @@ content_template: templates/task {{% capture overview %}} -{{< feature-state state="beta" >}} +{{< feature-state state="stable" >}} This guide demonstrates how to install and write extensions for [kubectl](/docs/reference/kubectl/kubectl/). By thinking of core `kubectl` commands as essential building blocks for interacting with a Kubernetes cluster, a cluster administrator can think of plugins as a means of utilizing these building blocks to create more complex behavior. Plugins extend `kubectl` with new sub-commands, allowing for new and custom features not included in the main distribution of `kubectl`. @@ -24,8 +24,6 @@ You need to have a working `kubectl` binary installed. 
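For orientation before the steps that follow: a plugin is simply an executable whose name begins with `kubectl-` and that sits somewhere on your `PATH`. A minimal sketch (the plugin name `kubectl-hello` and the install path are made up for illustration):

```shell
# Any executable named kubectl-<name> on PATH becomes `kubectl <name>`
cat <<'EOF' >/usr/local/bin/kubectl-hello
#!/bin/sh
echo "hello from a kubectl plugin"
EOF
chmod +x /usr/local/bin/kubectl-hello

# Invoke it through kubectl
kubectl hello
```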
Plugins were officially introduced as an alpha feature in the v1.8.0 release. They have been re-worked in the v1.12.0 release to support a wider range of use-cases. So, while some parts of the plugins feature were already available in previous versions, a `kubectl` version of 1.12.0 or later is recommended if you are following these docs. {{< /note >}} -Until a GA version is released, plugins should be considered unstable, and their underlying mechanism is prone to change. - {{% /capture %}} {{% capture steps %}} diff --git a/content/en/docs/tasks/federation/set-up-placement-policies-federation.md b/content/en/docs/tasks/federation/set-up-placement-policies-federation.md index 957702d02e9e8..4329245d95c54 100644 --- a/content/en/docs/tasks/federation/set-up-placement-policies-federation.md +++ b/content/en/docs/tasks/federation/set-up-placement-policies-federation.md @@ -33,7 +33,7 @@ After deploying the Federation control plane, you must configure an Admission Controller in the Federation API server that enforces placement decisions received from the external policy engine. - kubectl create -f scheduling-policy-admission.yaml + kubectl apply -f scheduling-policy-admission.yaml Shown below is an example ConfigMap for the Admission Controller: @@ -83,7 +83,7 @@ decisions in the Federation control plane. Create a Service in the host cluster to contact the external policy engine: - kubectl create -f policy-engine-service.yaml + kubectl apply -f policy-engine-service.yaml Shown below is an example Service for OPA. @@ -91,7 +91,7 @@ Shown below is an example Service for OPA. Create a Deployment in the host cluster with the Federation control plane: - kubectl create -f policy-engine-deployment.yaml + kubectl apply -f policy-engine-deployment.yaml Shown below is an example Deployment for OPA. diff --git a/content/en/docs/tasks/inject-data-application/define-command-argument-container.md b/content/en/docs/tasks/inject-data-application/define-command-argument-container.md index f7f2e2035fb40..66ebd69c134ab 100644 --- a/content/en/docs/tasks/inject-data-application/define-command-argument-container.md +++ b/content/en/docs/tasks/inject-data-application/define-command-argument-container.md @@ -47,7 +47,7 @@ file for the Pod defines a command and two arguments: 1. Create a Pod based on the YAML configuration file: ```shell - kubectl create -f https://k8s.io/examples/pods/commands.yaml + kubectl apply -f https://k8s.io/examples/pods/commands.yaml ``` 1. List the running Pods: diff --git a/content/en/docs/tasks/inject-data-application/define-environment-variable-container.md b/content/en/docs/tasks/inject-data-application/define-environment-variable-container.md index ec70cae998771..d10bbd323f67c 100644 --- a/content/en/docs/tasks/inject-data-application/define-environment-variable-container.md +++ b/content/en/docs/tasks/inject-data-application/define-environment-variable-container.md @@ -37,7 +37,7 @@ Pod: 1. Create a Pod based on the YAML configuration file: ```shell - kubectl create -f https://k8s.io/examples/pods/inject/envars.yaml + kubectl apply -f https://k8s.io/examples/pods/inject/envars.yaml ``` 1. 
List the running Pods: diff --git a/content/en/docs/tasks/inject-data-application/distribute-credentials-secure.md b/content/en/docs/tasks/inject-data-application/distribute-credentials-secure.md index a2533ac9c0d77..ff77fab7d77e8 100644 --- a/content/en/docs/tasks/inject-data-application/distribute-credentials-secure.md +++ b/content/en/docs/tasks/inject-data-application/distribute-credentials-secure.md @@ -42,7 +42,7 @@ username and password: 1. Create the Secret ```shell - kubectl create -f https://k8s.io/examples/pods/inject/secret.yaml + kubectl apply -f https://k8s.io/examples/pods/inject/secret.yaml ``` 1. View information about the Secret: @@ -98,7 +98,7 @@ Here is a configuration file you can use to create a Pod: 1. Create the Pod: ```shell - kubectl create -f https://k8s.io/examples/pods/inject/secret-pod.yaml + kubectl apply -f https://k8s.io/examples/pods/inject/secret-pod.yaml ``` 1. Verify that your Pod is running: @@ -153,7 +153,7 @@ Here is a configuration file you can use to create a Pod: 1. Create the Pod: ```shell - kubectl create -f https://k8s.io/examples/pods/inject/secret-envars-pod.yaml + kubectl apply -f https://k8s.io/examples/pods/inject/secret-envars-pod.yaml ``` 1. Verify that your Pod is running: diff --git a/content/en/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information.md b/content/en/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information.md index d9bcccdd9e04c..2fe432a7a4d4a 100644 --- a/content/en/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information.md +++ b/content/en/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information.md @@ -56,7 +56,7 @@ fields of the Container in the Pod. Create the Pod: ```shell -kubectl create -f https://k8s.io/examples/pods/inject/dapi-volume.yaml +kubectl apply -f https://k8s.io/examples/pods/inject/dapi-volume.yaml ``` Verify that Container in the Pod is running: @@ -172,7 +172,7 @@ default value of `1` which means cores for cpu and bytes for memory. Create the Pod: ```shell -kubectl create -f https://k8s.io/examples/pods/inject/dapi-volume-resources.yaml +kubectl apply -f https://k8s.io/examples/pods/inject/dapi-volume-resources.yaml ``` Get a shell into the Container that is running in your Pod: diff --git a/content/en/docs/tasks/inject-data-application/environment-variable-expose-pod-information.md b/content/en/docs/tasks/inject-data-application/environment-variable-expose-pod-information.md index ddb32c380a34e..b808dd9e2e219 100644 --- a/content/en/docs/tasks/inject-data-application/environment-variable-expose-pod-information.md +++ b/content/en/docs/tasks/inject-data-application/environment-variable-expose-pod-information.md @@ -55,7 +55,7 @@ Container in the Pod. Create the Pod: ```shell -kubectl create -f https://k8s.io/examples/pods/inject/dapi-envars-pod.yaml +kubectl apply -f https://k8s.io/examples/pods/inject/dapi-envars-pod.yaml ``` Verify that the Container in the Pod is running: @@ -130,7 +130,7 @@ from Container fields. 
Create the Pod: ```shell -kubectl create -f https://k8s.io/examples/pods/inject/dapi-envars-container.yaml +kubectl apply -f https://k8s.io/examples/pods/inject/dapi-envars-container.yaml ``` Verify that the Container in the Pod is running: diff --git a/content/en/docs/tasks/inject-data-application/podpreset.md b/content/en/docs/tasks/inject-data-application/podpreset.md index 0655907797ba1..beb57754c3661 100644 --- a/content/en/docs/tasks/inject-data-application/podpreset.md +++ b/content/en/docs/tasks/inject-data-application/podpreset.md @@ -36,7 +36,7 @@ Preset. Create the PodPreset: ```shell -kubectl create -f https://k8s.io/examples/podpreset/preset.yaml +kubectl apply -f https://k8s.io/examples/podpreset/preset.yaml ``` Examine the created PodPreset: diff --git a/content/en/docs/tasks/job/coarse-parallel-processing-work-queue.md b/content/en/docs/tasks/job/coarse-parallel-processing-work-queue.md index 7f81ed1c0c3f5..0399e24c13785 100644 --- a/content/en/docs/tasks/job/coarse-parallel-processing-work-queue.md +++ b/content/en/docs/tasks/job/coarse-parallel-processing-work-queue.md @@ -245,7 +245,7 @@ done. So we set, `.spec.completions: 8` for the example, since we put 8 items i So, now run the Job: ```shell -kubectl create -f ./job.yaml +kubectl apply -f ./job.yaml ``` Now wait a bit, then check on the job. diff --git a/content/en/docs/tasks/job/fine-parallel-processing-work-queue.md b/content/en/docs/tasks/job/fine-parallel-processing-work-queue.md index 80cff2bd9eba4..16b9327c76790 100644 --- a/content/en/docs/tasks/job/fine-parallel-processing-work-queue.md +++ b/content/en/docs/tasks/job/fine-parallel-processing-work-queue.md @@ -182,7 +182,7 @@ too. So, now run the Job: ```shell -kubectl create -f ./job.yaml +kubectl apply -f ./job.yaml ``` Now wait a bit, then check on the job. diff --git a/content/en/docs/tasks/job/parallel-processing-expansion.md b/content/en/docs/tasks/job/parallel-processing-expansion.md index 9a20fccc308cf..1567ac6f9de2d 100644 --- a/content/en/docs/tasks/job/parallel-processing-expansion.md +++ b/content/en/docs/tasks/job/parallel-processing-expansion.md @@ -178,7 +178,7 @@ cat job.yaml.jinja2 | render_template > jobs.yaml Or sent directly to kubectl, like this: ```shell -cat job.yaml.jinja2 | render_template | kubectl create -f - +cat job.yaml.jinja2 | render_template | kubectl apply -f - ``` ## Alternatives diff --git a/content/en/docs/tasks/manage-daemon/update-daemon-set.md b/content/en/docs/tasks/manage-daemon/update-daemon-set.md index 4b380f772d4d3..38baee076e8c3 100644 --- a/content/en/docs/tasks/manage-daemon/update-daemon-set.md +++ b/content/en/docs/tasks/manage-daemon/update-daemon-set.md @@ -56,7 +56,7 @@ If you haven't created the DaemonSet in the system, check your DaemonSet manifest with the following command instead: ```shell -kubectl create -f ds.yaml --dry-run -o go-template='{{.spec.updateStrategy.type}}{{"\n"}}' +kubectl apply -f ds.yaml --dry-run -o go-template='{{.spec.updateStrategy.type}}{{"\n"}}' ``` The output from both commands should be: @@ -76,7 +76,7 @@ step 3. 
After verifying the update strategy of the DaemonSet manifest, create the DaemonSet: ```shell -kubectl create -f ds.yaml +kubectl apply -f ds.yaml ``` Alternatively, use `kubectl apply` to create the same DaemonSet if you plan to diff --git a/content/en/docs/tasks/manage-hugepages/scheduling-hugepages.md b/content/en/docs/tasks/manage-hugepages/scheduling-hugepages.md index 82b3c34d279c2..50044d907f081 100644 --- a/content/en/docs/tasks/manage-hugepages/scheduling-hugepages.md +++ b/content/en/docs/tasks/manage-hugepages/scheduling-hugepages.md @@ -6,10 +6,10 @@ content_template: templates/task --- {{% capture overview %}} -{{< feature-state state="beta" >}} +{{< feature-state state="stable" >}} Kubernetes supports the allocation and consumption of pre-allocated huge pages -by applications in a Pod as a **beta** feature. This page describes how users +by applications in a Pod as a **GA** feature. This page describes how users can consume huge pages and the current limitations. {{% /capture %}} diff --git a/content/en/docs/tasks/run-application/configure-pdb.md b/content/en/docs/tasks/run-application/configure-pdb.md index 7f994032ddaee..c11ee1eb5e7f9 100644 --- a/content/en/docs/tasks/run-application/configure-pdb.md +++ b/content/en/docs/tasks/run-application/configure-pdb.md @@ -167,7 +167,7 @@ automatically responds to changes in the number of replicas of the corresponding ## Create the PDB object -You can create the PDB object with a command like `kubectl create -f mypdb.yaml`. +You can create the PDB object with a command like `kubectl apply -f mypdb.yaml`. You cannot update PDB objects. They must be deleted and re-created. diff --git a/content/en/docs/tasks/run-application/run-replicated-stateful-application.md b/content/en/docs/tasks/run-application/run-replicated-stateful-application.md index 8ba5248b09dee..1c74858f247a4 100644 --- a/content/en/docs/tasks/run-application/run-replicated-stateful-application.md +++ b/content/en/docs/tasks/run-application/run-replicated-stateful-application.md @@ -60,7 +60,7 @@ and a StatefulSet. Create the ConfigMap from the following YAML configuration file: ```shell -kubectl create -f https://k8s.io/examples/application/mysql/mysql-configmap.yaml +kubectl apply -f https://k8s.io/examples/application/mysql/mysql-configmap.yaml ``` {{< codenew file="application/mysql/mysql-configmap.yaml" >}} @@ -80,7 +80,7 @@ based on information provided by the StatefulSet controller. Create the Services from the following YAML configuration file: ```shell -kubectl create -f https://k8s.io/examples/application/mysql/mysql-services.yaml +kubectl apply -f https://k8s.io/examples/application/mysql/mysql-services.yaml ``` {{< codenew file="application/mysql/mysql-services.yaml" >}} @@ -106,7 +106,7 @@ writes. Finally, create the StatefulSet from the following YAML configuration file: ```shell -kubectl create -f https://k8s.io/examples/application/mysql/mysql-statefulset.yaml +kubectl apply -f https://k8s.io/examples/application/mysql/mysql-statefulset.yaml ``` {{< codenew file="application/mysql/mysql-statefulset.yaml" >}} diff --git a/content/en/docs/tasks/run-application/run-single-instance-stateful-application.md b/content/en/docs/tasks/run-application/run-single-instance-stateful-application.md index 275dcfec0735a..87f0b01ad0b32 100644 --- a/content/en/docs/tasks/run-application/run-single-instance-stateful-application.md +++ b/content/en/docs/tasks/run-application/run-single-instance-stateful-application.md @@ -53,11 +53,11 @@ for a secure solution. 1. 
Deploy the PV and PVC of the YAML file: - kubectl create -f https://k8s.io/examples/application/mysql/mysql-pv.yaml + kubectl apply -f https://k8s.io/examples/application/mysql/mysql-pv.yaml 1. Deploy the contents of the YAML file: - kubectl create -f https://k8s.io/examples/application/mysql/mysql-deployment.yaml + kubectl apply -f https://k8s.io/examples/application/mysql/mysql-deployment.yaml 1. Display information about the Deployment: diff --git a/content/en/docs/tasks/run-application/scale-stateful-set.md b/content/en/docs/tasks/run-application/scale-stateful-set.md index c47fd8f47d13b..462025836dafe 100644 --- a/content/en/docs/tasks/run-application/scale-stateful-set.md +++ b/content/en/docs/tasks/run-application/scale-stateful-set.md @@ -50,7 +50,7 @@ kubectl scale statefulsets --replicas= Alternatively, you can do [in-place updates](/docs/concepts/cluster-administration/manage-deployment/#in-place-updates-of-resources) on your StatefulSets. -If your StatefulSet was initially created with `kubectl apply` or `kubectl create --save-config`, +If your StatefulSet was initially created with `kubectl apply`, update `.spec.replicas` of the StatefulSet manifests, and then do a `kubectl apply`: ```shell diff --git a/content/en/docs/tasks/run-application/update-api-object-kubectl-patch.md b/content/en/docs/tasks/run-application/update-api-object-kubectl-patch.md index d5c3f68213973..d77108977b767 100644 --- a/content/en/docs/tasks/run-application/update-api-object-kubectl-patch.md +++ b/content/en/docs/tasks/run-application/update-api-object-kubectl-patch.md @@ -31,7 +31,7 @@ is a Pod that has one container: Create the Deployment: ```shell -kubectl create -f https://k8s.io/examples/application/deployment-patch.yaml +kubectl apply -f https://k8s.io/examples/application/deployment-patch.yaml ``` View the Pods associated with your Deployment: diff --git a/content/en/docs/tasks/tls/managing-tls-in-a-cluster.md b/content/en/docs/tasks/tls/managing-tls-in-a-cluster.md index ccb7dd854c13a..70816ca4d8615 100644 --- a/content/en/docs/tasks/tls/managing-tls-in-a-cluster.md +++ b/content/en/docs/tasks/tls/managing-tls-in-a-cluster.md @@ -104,7 +104,7 @@ Generate a CSR yaml blob and send it to the apiserver by running the following command: ```shell -cat <}} {{< version-check >}} +* The example shown on this page works with `kubectl` 1.14 and above. * Understand [Configure Containers Using a ConfigMap](/docs/tasks/configure-pod-container/configure-pod-configmap/). {{% /capture %}} @@ -35,49 +37,48 @@ This page provides a real world example of how to configure Redis using a Config You can follow the steps below to configure a Redis cache using data stored in a ConfigMap. 
-First create a ConfigMap from the `redis-config` file: +First, create a `kustomization.yaml` containing a ConfigMap generator from the `redis-config` file: {{< codenew file="pods/config/redis-config" >}} ```shell curl -OL https://k8s.io/examples/pods/config/redis-config -kubectl create configmap example-redis-config --from-file=redis-config -``` -```shell -configmap/example-redis-config created +cat <<EOF >./kustomization.yaml +configMapGenerator: +- name: example-redis-config + files: + - redis-config +EOF ``` -Examine the created ConfigMap: +Add the pod resource config to the `kustomization.yaml`: + +{{< codenew file="pods/config/redis-pod.yaml" >}} ```shell -kubectl get configmap example-redis-config -o yaml -``` +curl -OL https://k8s.io/examples/pods/config/redis-pod.yaml -```yaml -apiVersion: v1 -data: - redis-config: | - maxmemory 2mb - maxmemory-policy allkeys-lru -kind: ConfigMap -metadata: - creationTimestamp: 2016-03-30T18:14:41Z - name: example-redis-config - namespace: default - resourceVersion: "24686" - selfLink: /api/v1/namespaces/default/configmaps/example-redis-config - uid: 460a2b6e-f6a3-11e5-8ae5-42010af00002 +cat <<EOF >>./kustomization.yaml +resources: +- redis-pod.yaml +EOF ``` -Now create a pod specification that uses the config data stored in the ConfigMap: +Apply the kustomization directory to create both the ConfigMap and Pod objects: -{{< codenew file="pods/config/redis-pod.yaml" >}} - -Create the pod: +```shell +kubectl apply -k . +``` +Examine the created objects by running: ```shell -kubectl create -f https://k8s.io/examples/pods/config/redis-pod.yaml +> kubectl get -k . +NAME DATA AGE +configmap/example-redis-config-dgh9dg555m 1 52s + +NAME READY STATUS RESTARTS AGE +pod/redis 1/1 Running 0 52s ``` In the example, the config volume is mounted at `/redis-master`. diff --git a/content/en/docs/tutorials/stateful-application/basic-stateful-set.md b/content/en/docs/tutorials/stateful-application/basic-stateful-set.md index c7ed8641bc43c..678d2219379f7 100644 --- a/content/en/docs/tutorials/stateful-application/basic-stateful-set.md +++ b/content/en/docs/tutorials/stateful-application/basic-stateful-set.md @@ -73,11 +73,11 @@ kubectl get pods -w -l app=nginx ``` In the second terminal, use -[`kubectl create`](/docs/reference/generated/kubectl/kubectl-commands/#create) to create the +[`kubectl apply`](/docs/reference/generated/kubectl/kubectl-commands/#apply) to create the Headless Service and StatefulSet defined in `web.yaml`. ```shell -kubectl create -f web.yaml +kubectl apply -f web.yaml service/nginx created statefulset.apps/web created ``` @@ -783,7 +783,7 @@ you deleted the `nginx` Service ( which you should not have ), you will see an error indicating that the Service already exists. ```shell -kubectl create -f web.yaml +kubectl apply -f web.yaml statefulset.apps/web created Error from server (AlreadyExists): error when creating "web.yaml": services "nginx" already exists ``` @@ -883,7 +883,7 @@ service "nginx" deleted Recreate the StatefulSet and Headless Service one more time. ```shell -kubectl create -f web.yaml +kubectl apply -f web.yaml service/nginx created statefulset.apps/web created ``` @@ -947,7 +947,7 @@ kubectl get po -l app=nginx -w In another terminal, create the StatefulSet and Service in the manifest.
```shell -kubectl create -f web-parallel.yaml +kubectl apply -f web-parallel.yaml service/nginx created statefulset.apps/web created ``` diff --git a/content/en/docs/tutorials/stateful-application/cassandra.md b/content/en/docs/tutorials/stateful-application/cassandra.md index 7313f8c0e0879..f2169a5d8caf3 100644 --- a/content/en/docs/tutorials/stateful-application/cassandra.md +++ b/content/en/docs/tutorials/stateful-application/cassandra.md @@ -76,7 +76,7 @@ The following `Service` is used for DNS lookups between Cassandra Pods and clien 1. Create a Service to track all Cassandra StatefulSet nodes from the `cassandra-service.yaml` file: ```shell - kubectl create -f https://k8s.io/examples/application/cassandra/cassandra-service.yaml + kubectl apply -f https://k8s.io/examples/application/cassandra/cassandra-service.yaml ``` ### Validating (optional) @@ -110,7 +110,7 @@ This example uses the default provisioner for Minikube. Please update the follow 1. Create the Cassandra StatefulSet from the `cassandra-statefulset.yaml` file: ```shell - kubectl create -f https://k8s.io/examples/application/cassandra/cassandra-statefulset.yaml + kubectl apply -f https://k8s.io/examples/application/cassandra/cassandra-statefulset.yaml ``` ## Validating The Cassandra StatefulSet diff --git a/content/en/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume.md b/content/en/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume.md index f518c4bb0ddc6..f471de069dac6 100644 --- a/content/en/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume.md +++ b/content/en/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume.md @@ -27,16 +27,19 @@ The files provided in this tutorial are using GA Deployment APIs and are specifi {{% capture objectives %}} * Create PersistentVolumeClaims and PersistentVolumes -* Create a Secret -* Deploy MySQL -* Deploy WordPress +* Create a `kustomization.yaml` with + * a Secret generator + * MySQL resource configs + * WordPress resource configs +* Apply the kustomization directory with `kubectl apply -k ./` * Clean up {{% /capture %}} {{% capture prerequisites %}} -{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} +The example shown on this page works with `kubectl` 1.14 and above. Download the following configuration files: @@ -68,44 +71,70 @@ If you are bringing up a cluster that needs to use the `hostPath` provisioner, t If you have a Kubernetes cluster running on Google Kubernetes Engine, please follow [this guide](https://cloud.google.com/kubernetes-engine/docs/tutorials/persistent-disk). {{< /note >}} -## Create a Secret for MySQL Password +## Create a kustomization.yaml -A [Secret](/docs/concepts/configuration/secret/) is an object that stores a piece of sensitive data like a password or key. The manifest files are already configured to use a Secret, but you have to create your own Secret. +### Add a Secret generator +A [Secret](/docs/concepts/configuration/secret/) is an object that stores a piece of sensitive data like a password or key. Since Kubernetes 1.14, `kubectl` supports the management of Kubernetes objects using a kustomization file. You can create a Secret with a generator in `kustomization.yaml`. -1. Create the Secret object from the following command. You will need to replace - `YOUR_PASSWORD` with the password you want to use. +Add a Secret generator to `kustomization.yaml` with the following command.
You will need to replace `YOUR_PASSWORD` with the password you want to use. + +```shell +cat <<EOF >./kustomization.yaml +secretGenerator: +- name: mysql-pass + literals: + - password=YOUR_PASSWORD +EOF +``` + +## Add resource configs for MySQL and WordPress + +The following manifest describes a single-instance MySQL Deployment. The MySQL container mounts the PersistentVolume at /var/lib/mysql. The `MYSQL_ROOT_PASSWORD` environment variable sets the database password from the Secret. + +{{< codenew file="application/wordpress/mysql-deployment.yaml" >}} + +1. Download the MySQL deployment configuration file. ```shell - kubectl create secret generic mysql-pass --from-literal=password=YOUR_PASSWORD + curl -LO https://k8s.io/examples/application/wordpress/mysql-deployment.yaml ``` - -2. Verify that the Secret exists by running the following command: + +2. Download the WordPress configuration file. ```shell - kubectl get secrets + curl -LO https://k8s.io/examples/application/wordpress/wordpress-deployment.yaml ``` + +3. Add them to the `kustomization.yaml` file. - The response should be like this: - - ``` - NAME TYPE DATA AGE - mysql-pass Opaque 1 42s + ```shell + cat <<EOF >>./kustomization.yaml + resources: + - mysql-deployment.yaml + - wordpress-deployment.yaml + EOF ``` -{{< note >}} -To protect the Secret from exposure, neither `get` nor `describe` show its contents. -{{< /note >}} +## Apply and Verify +The `kustomization.yaml` contains all the resources for deploying a WordPress site and a +MySQL database. You can apply the directory by running: +```shell +kubectl apply -k ./ +``` -## Deploy MySQL +Now you can verify that all objects exist. -The following manifest describes a single-instance MySQL Deployment. The MySQL container mounts the PersistentVolume at /var/lib/mysql. The `MYSQL_ROOT_PASSWORD` environment variable sets the database password from the Secret. +1. Verify that the Secret exists by running the following command: -{{< codenew file="application/wordpress/mysql-deployment.yaml" >}} + ```shell + kubectl get secrets + ``` -1. Deploy MySQL from the `mysql-deployment.yaml` file: + The response should be like this: ```shell - kubectl create -f https://k8s.io/examples/application/wordpress/mysql-deployment.yaml + NAME TYPE DATA AGE + mysql-pass-c57bb4t7mf Opaque 1 9s ``` 2. Verify that a PersistentVolume got dynamically provisioned. @@ -120,9 +149,10 @@ The following manifest describes a single-instance MySQL Deployment. The MySQL c The response should be like this: - ``` - NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE - mysql-pv-claim Bound pvc-91e44fbf-d477-11e7-ac6a-42010a800002 20Gi RWO standard 29s + ```shell + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE + mysql-pv-claim Bound pvc-8cbd7b2e-4044-11e9-b2bb-42010a800002 20Gi RWO standard 77s + wp-pv-claim Bound pvc-8cd0df54-4044-11e9-b2bb-42010a800002 20Gi RWO standard 77s ``` 3. Verify that the Pod is running by running the following command: @@ -142,36 +172,7 @@ The following manifest describes a single-instance MySQL Deployment. The MySQL c wordpress-mysql-1894417608-x5dzt 1/1 Running 0 40s ``` -## Deploy WordPress - -The following manifest describes a single-instance WordPress Deployment and Service. It uses many of the same features like a PVC for persistent storage and a Secret for the password. But it also uses a different setting: `type: LoadBalancer`. This setting exposes WordPress to traffic from outside of the cluster. - -{{< codenew file="application/wordpress/wordpress-deployment.yaml" >}} - -1.
Create a WordPress Service and Deployment from the `wordpress-deployment.yaml` file: - - ```shell - kubectl create -f https://k8s.io/examples/application/wordpress/wordpress-deployment.yaml - ``` - -2. Verify that a PersistentVolume got dynamically provisioned: - - ```shell - kubectl get pvc - ``` - - {{< note >}} - It can take up to a few minutes for the PVs to be provisioned and bound. - {{< /note >}} - - The response should be like this: - - ``` - NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE - wp-pv-claim Bound pvc-e69d834d-d477-11e7-ac6a-42010a800002 20Gi RWO standard 7s - ``` - -3. Verify that the Service is running by running the following command: +4. Verify that the Service is running by running the following command: ```shell kubectl get services wordpress @@ -188,7 +189,7 @@ The following manifest describes a single-instance WordPress Deployment and Serv Minikube can only expose Services through `NodePort`. The EXTERNAL-IP is always pending. {{< /note >}} -4. Run the following command to get the IP Address for the WordPress Service: +5. Run the following command to get the IP Address for the WordPress Service: ```shell minikube service wordpress --url @@ -200,7 +201,7 @@ The following manifest describes a single-instance WordPress Deployment and Serv http://1.2.3.4:32406 ``` -5. Copy the IP address, and load the page in your browser to view your site. +6. Copy the IP address, and load the page in your browser to view your site. You should see the WordPress set up page similar to the following screenshot. @@ -214,23 +215,10 @@ Do not leave your WordPress installation on this page. If another user finds it, {{% capture cleanup %}} -1. Run the following command to delete your Secret: - - ```shell - kubectl delete secret mysql-pass - ``` - -2. Run the following commands to delete all Deployments and Services: - - ```shell - kubectl delete deployment -l app=wordpress - kubectl delete service -l app=wordpress - ``` - -3. Run the following commands to delete the PersistentVolumeClaims. The dynamically provisioned PersistentVolumes will be automatically deleted. +1. 
Run the following command to delete your Secret, Deployments, Services and PersistentVolumeClaims: ```shell - kubectl delete pvc -l app=wordpress + kubectl delete -k ./ ``` {{% /capture %}} diff --git a/content/en/docs/user-journeys/users/cluster-operator/foundational.md b/content/en/docs/user-journeys/users/cluster-operator/foundational.md index 888d8b47f88b1..0615652e81d33 100644 --- a/content/en/docs/user-journeys/users/cluster-operator/foundational.md +++ b/content/en/docs/user-journeys/users/cluster-operator/foundational.md @@ -80,7 +80,7 @@ You can see the status of the core of kubernetes with the command `kubectl get c Some additional resources for getting information about your cluster and how it is operating include: * [Tools for Monitoring Compute, Storage, and Network Resources](/docs/tasks/debug-application-cluster/resource-usage-monitoring/) -* [Core metrics pipeline](/docs/tasks/debug-application-cluster/core-metrics-pipeline/) +* [Resource metrics pipeline](/docs/tasks/debug-application-cluster/resource-metrics-pipeline/) * [Metrics](/docs/concepts/cluster-administration/controller-metrics/) ## Explore additional resources diff --git a/content/en/docs/user-journeys/users/cluster-operator/intermediate.md b/content/en/docs/user-journeys/users/cluster-operator/intermediate.md index b3c02c8824872..5421b5822b091 100644 --- a/content/en/docs/user-journeys/users/cluster-operator/intermediate.md +++ b/content/en/docs/user-journeys/users/cluster-operator/intermediate.md @@ -82,7 +82,7 @@ Start with the [basics on Kubernetes logging](/docs/concepts/cluster-administrat * [Logging Using Stackdriver](/docs/tasks/debug-application-cluster/logging-stackdriver/) Like log aggregation, many clusters utilize additional software to help capture metrics and display them. There is an overview of tools at [Tools for Monitoring Compute, Storage, and Network Resources](/docs/tasks/debug-application-cluster/resource-usage-monitoring/). -Kubernetes also supports a [core metrics pipeline](/docs/tasks/debug-application-cluster/core-metrics-pipeline/) which can be used by Horizontal Pod Autoscaler with custom metrics. +Kubernetes also supports a [resource metrics pipeline](/docs/tasks/debug-application-cluster/resource-metrics-pipeline/) which can be used by Horizontal Pod Autoscaler with custom metrics. [Prometheus](https://prometheus.io/), which is another CNCF project, is a common choice to support capture and temporary collection of metrics. There are several options for installing Prometheus, including using the [stable/prometheus](https://github.com/kubernetes/charts/tree/master/stable/prometheus) [helm](https://helm.sh/) chart, and CoreOS provides a [prometheus operator](https://github.com/coreos/prometheus-operator) and [kube-prometheus](https://github.com/coreos/prometheus-operator/tree/master/contrib/kube-prometheus), which adds on Grafana dashboards and common configurations. @@ -91,7 +91,7 @@ A common configuration on [Minikube](https://github.com/kubernetes/minikube) and There is a [walkthrough of how to install this configuration in your cluster](https://blog.kublr.com/how-to-utilize-the-heapster-influxdb-grafana-stack-in-kubernetes-for-monitoring-pods-4a553f4d36c9). As of Kubernetes 1.11, Heapster is deprecated, as per [sig-instrumentation](https://github.com/kubernetes/community/tree/master/sig-instrumentation). See [Prometheus vs. Heapster vs. Kubernetes Metrics APIs](https://brancz.com/2018/01/05/prometheus-vs-heapster-vs-kubernetes-metrics-apis/) for more information alternatives. 
-Hosted data analytics services such as [Datadog](https://docs.datadoghq.com/integrations/kubernetes/) also offer Kubernetes integration. +Hosted monitoring, APM, or data analytics services such as [Datadog](https://docs.datadoghq.com/integrations/kubernetes/) or [Instana](https://www.instana.com/supported-integrations/kubernetes-monitoring/) also offer Kubernetes integration. ## Additional resources diff --git a/content/zh/docs/tutorials/stateful-application/basic-stateful-set.md b/content/zh/docs/tutorials/stateful-application/basic-stateful-set.md index 025b91b9bb7d0..1ae7c53847f9b 100644 --- a/content/zh/docs/tutorials/stateful-application/basic-stateful-set.md +++ b/content/zh/docs/tutorials/stateful-application/basic-stateful-set.md @@ -484,7 +484,7 @@ image. Complete the update by deleting the remaining Pods. --> `web-0` 已经更新了它的镜像,但是 `web-1` 和 `web-2` 仍保留了原始镜像。 -​```shell +```shell kubectl delete pod web-1 web-2 pod "web-1" deleted pod "web-2" deleted @@ -493,7 +493,7 @@ pod "web-2" deleted 观察 StatefulSet 的 Pod,等待它们全部变成 Running 和 Ready。 -``` +```shell kubectl get pods -w -l app=nginx NAME READY STATUS RESTARTS AGE web-0 1/1 Running 0 8m diff --git a/data/tasks.yml b/data/tasks.yml index ab03eec6bbd44..5ede25f87e42f 100644 --- a/data/tasks.yml +++ b/data/tasks.yml @@ -89,7 +89,7 @@ toc: - title: Monitor, Log, and Debug landing_page: /docs/tasks/debug-application-cluster/resource-usage-monitoring/ section: - - docs/tasks/debug-application-cluster/core-metrics-pipeline.md + - docs/tasks/debug-application-cluster/resource-metrics-pipeline.md - docs/tasks/debug-application-cluster/resource-usage-monitoring.md - docs/tasks/debug-application-cluster/get-shell-running-container.md - docs/tasks/debug-application-cluster/monitor-node-health.md diff --git a/static/images/docs/perf-test-result-1.png b/static/images/docs/perf-test-result-1.png new file mode 100644 index 0000000000000..ee8ed76b7d59c Binary files /dev/null and b/static/images/docs/perf-test-result-1.png differ diff --git a/static/images/docs/perf-test-result-2.png b/static/images/docs/perf-test-result-2.png new file mode 100644 index 0000000000000..147c9870fc4a2 Binary files /dev/null and b/static/images/docs/perf-test-result-2.png differ diff --git a/static/images/docs/perf-test-result-3.png b/static/images/docs/perf-test-result-3.png new file mode 100644 index 0000000000000..dab493f182004 Binary files /dev/null and b/static/images/docs/perf-test-result-3.png differ diff --git a/static/images/docs/perf-test-result-4.png b/static/images/docs/perf-test-result-4.png new file mode 100644 index 0000000000000..9f5dd83dad6e9 Binary files /dev/null and b/static/images/docs/perf-test-result-4.png differ diff --git a/static/images/docs/perf-test-result-5.png b/static/images/docs/perf-test-result-5.png new file mode 100644 index 0000000000000..114e079177f72 Binary files /dev/null and b/static/images/docs/perf-test-result-5.png differ diff --git a/static/images/docs/perf-test-result-6.png b/static/images/docs/perf-test-result-6.png new file mode 100644 index 0000000000000..48e2a43bf7f1e Binary files /dev/null and b/static/images/docs/perf-test-result-6.png differ