diff --git a/Dockerfile b/Dockerfile index d76317ff7f0dc..3e3335076cda2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -14,7 +14,9 @@ RUN apk add --no-cache \ openssh-client \ rsync \ build-base \ - libc6-compat + libc6-compat \ + npm && \ + npm install -G autoprefixer postcss-cli ARG HUGO_VERSION diff --git a/Makefile b/Makefile index 69f85eb50d8c7..56ec8410f4e3c 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,8 @@ NETLIFY_FUNC = $(NODE_BIN)/netlify-lambda # but this can be overridden when calling make, e.g. # CONTAINER_ENGINE=podman make container-image CONTAINER_ENGINE ?= docker -CONTAINER_IMAGE = kubernetes-hugo +IMAGE_VERSION=$(shell scripts/hash-files.sh Dockerfile Makefile | cut -c 1-12) +CONTAINER_IMAGE = kubernetes-hugo:v$(HUGO_VERSION)-$(IMAGE_VERSION) CONTAINER_RUN = $(CONTAINER_ENGINE) run --rm --interactive --tty --volume $(CURDIR):/src CCRED=\033[0;31m @@ -17,12 +18,15 @@ CCEND=\033[0m help: ## Show this help. @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) +module-check: + @git submodule status --recursive | awk '/^[+-]/ {printf "\033[31mWARNING\033[0m Submodule not initialized: \033[34m%s\033[0m\n",$$2}' 1>&2 + all: build ## Build site with production settings and put deliverables in ./public -build: ## Build site with production settings and put deliverables in ./public +build: module-check ## Build site with production settings and put deliverables in ./public hugo --minify -build-preview: ## Build site with drafts and future posts enabled +build-preview: module-check ## Build site with drafts and future posts enabled hugo --buildDrafts --buildFuture deploy-preview: ## Deploy preview site via netlify @@ -39,7 +43,7 @@ production-build: build check-headers-file ## Build the production site and ensu non-production-build: ## Build the non-production site, which adds noindex headers to prevent indexing hugo --enableGitInfo -serve: ## Boot the development server. +serve: module-check ## Boot the development server. 
hugo server --buildFuture docker-image: @@ -60,10 +64,10 @@ container-image: --tag $(CONTAINER_IMAGE) \ --build-arg HUGO_VERSION=$(HUGO_VERSION) -container-build: - $(CONTAINER_RUN) $(CONTAINER_IMAGE) hugo +container-build: module-check + $(CONTAINER_RUN) $(CONTAINER_IMAGE) hugo --minify -container-serve: +container-serve: module-check $(CONTAINER_RUN) --mount type=tmpfs,destination=/src/resources,tmpfs-mode=0755 -p 1313:1313 $(CONTAINER_IMAGE) hugo server --buildFuture --bind 0.0.0.0 test-examples: @@ -81,4 +85,3 @@ docker-internal-linkcheck: container-internal-linkcheck: link-checker-image-pull $(CONTAINER_RUN) $(CONTAINER_IMAGE) hugo --config config.toml,linkcheck-config.toml --buildFuture $(CONTAINER_ENGINE) run --mount type=bind,source=$(CURDIR),target=/test --rm wjdp/htmltest htmltest - diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index c2b0751dbe0e9..9498e2c228bd9 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -1,21 +1,4 @@ aliases: - sig-cluster-lifecycle-kubeadm-approvers: # Approving changes to kubeadm documentation - - timothysc - - lukemarsden - - luxas - - fabriziopandini - sig-cluster-lifecycle-kubeadm-reviewers: # Reviewing kubeadm documentation - - timothysc - - lukemarsden - - luxas - - fabriziopandini - - kad - - xiangpengzhao - - stealthybox - - liztio - - chuckha - - detiber - - dixudx sig-docs-blog-owners: # Approvers for blog content - castrojo - kbarnard10 @@ -40,30 +23,28 @@ aliases: - rlenferink sig-docs-en-owners: # Admins for English content - bradtopol - - daminisatya + - celestehorgan - jimangel - kbarnard10 - kbhawkey - makoscafee - onlydole - - Rajakavitha1 - savitharaghunathan - sftim - steveperry-53 - tengqm - - vineethreddy02 - xiangpengzhao - zacharysarah - zparnold sig-docs-en-reviews: # PR reviews for English content - bradtopol + - celestehorgan - daminisatya - jimangel - kbarnard10 - kbhawkey - makoscafee - onlydole - - rajakavitha1 - rajeshdeshpande02 - sftim - steveperry-53 @@ -111,12 +92,10 @@ aliases: - avidLearnerInProgress - daminisatya - mittalyashu - - Rajakavitha1 sig-docs-hi-reviews: # PR reviews for Hindi content - avidLearnerInProgress - daminisatya - mittalyashu - - Rajakavitha1 sig-docs-id-owners: # Admins for Indonesian content - girikuncoro - irvifa @@ -125,6 +104,7 @@ aliases: - irvifa - wahyuoi - phanama + - danninov sig-docs-it-owners: # Admins for Italian content - fabriziopandini - mattiaperi @@ -212,6 +192,7 @@ aliases: - potapy4 - dianaabv sig-docs-ru-reviews: # PR reviews for Russian content + - Arhell - msheldyakov - aisonaku - potapy4 @@ -233,4 +214,4 @@ aliases: - butuzov - idvoretskyi - MaxymVlasov - - Potapy4 + - Potapy4 \ No newline at end of file diff --git a/README-ja.md b/README-ja.md index 3bc4fcd27a27e..8fab3900c81ff 100644 --- a/README-ja.md +++ b/README-ja.md @@ -1,7 +1,6 @@ # Kubernetesのドキュメント -[![Build Status](https://api.travis-ci.org/kubernetes/website.svg?branch=master)](https://travis-ci.org/kubernetes/website) -[![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) +[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-master-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) このリポジトリには、[KubernetesのWebサイトとドキュメント](https://kubernetes.io/)をビルドするために必要な全アセットが格納されています。貢献に興味を持っていただきありがとうございます! 
@@ -14,7 +13,20 @@ Hugoがインストールできたら、以下のコマンドを使ってWebサ ```bash git clone https://github.com/kubernetes/website.git cd website -git submodule update --init --recursive +git submodule update --init --recursive --depth 1 +``` + +**注意:** Kubernetesのウェブサイトでは[DocsyというHugoのテーマ](https://github.com/google/docsy#readme)を使用しています。リポジトリを更新していない場合、 `website/themes/docsy`ディレクトリは空です。 このサイトはテーマのローカルコピーなしでは構築できません。 + +テーマをアップデートするには以下のコマンドを実行します: + +```bash +git submodule update --init --recursive --depth 1 +``` + +サイトをローカルでビルドしてテストするには以下のコマンドを実行します: + +```bash hugo server --buildFuture ``` @@ -33,11 +45,11 @@ hugo server --buildFuture GitHubの画面右上にある**Fork**ボタンをクリックすると、お使いのGitHubアカウントに紐付いた本リポジトリのコピーが作成され、このコピーのことを*フォーク*と呼びます。フォークリポジトリの中ではお好きなように変更を加えていただいて構いません。加えた変更をこのリポジトリに追加したい任意のタイミングにて、フォークリポジトリからPull Reqeustを作成してください。 -Pull Requestが作成されると、レビュー担当者が責任を持って明確かつ実用的なフィードバックを返します。 -Pull Requestの所有者は作成者であるため、**ご自身で作成したPull Requestを編集し、フィードバックに対応するのはご自身の役目です。** +Pull Requestが作成されると、レビュー担当者が責任を持って明確かつ実用的なフィードバックを返します。Pull Requestの所有者は作成者であるため、**ご自身で作成したPull Requestを編集し、フィードバックに対応するのはご自身の役目です。** + また、状況によっては2人以上のレビュアーからフィードバックが返されたり、アサインされていないレビュー担当者からのフィードバックが来ることがある点もご注意ください。 -さらに、特定のケースにおいては、レビュー担当者がKubernetesの技術的なレビュアーに対してレビューを依頼することもあります。 -レビュー担当者はタイムリーにフィードバックを提供するために最善を尽くしますが、応答時間は状況に応じて異なる場合があります。 + +さらに、特定のケースにおいては、レビュー担当者がKubernetesの技術的なレビュアーに対してレビューを依頼することもあります。レビュー担当者はタイムリーにフィードバックを提供するために最善を尽くしますが、応答時間は状況に応じて異なる場合があります。 Kubernetesのドキュメントへの貢献に関する詳細については以下のページをご覧ください: diff --git a/README-uk.md b/README-uk.md index 3aad33660a4f2..f43753535303d 100644 --- a/README-uk.md +++ b/README-uk.md @@ -55,7 +55,7 @@ hugo server --buildFuture Більше інформації про внесок у документацію Kubernetes ви знайдете у наступних джерелах: * [Внесок: з чого почати](https://kubernetes.io/docs/contribute/) -* [Використання шаблонів сторінок](http://kubernetes.io/docs/contribute/style/page-templates/) +* [Використання шаблонів сторінок](https://kubernetes.io/docs/contribute/style/page-content-types/) * [Керівництво зі стилю оформлення документації](http://kubernetes.io/docs/contribute/style/style-guide/) * [Переклад документації Kubernetes іншими мовами](https://kubernetes.io/docs/contribute/localization/) diff --git a/README-zh.md b/README-zh.md index 8a7898774a055..5b4353127fde6 100644 --- a/README-zh.md +++ b/README-zh.md @@ -1,182 +1,160 @@ # Kubernetes 文档 + +--> -[![Build Status](https://api.travis-ci.org/kubernetes/website.svg?branch=master)](https://travis-ci.org/kubernetes/website) -[![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) +[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-master-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) -欢迎!本仓库包含了所有用于构建 [Kubernetes 网站和文档](https://kubernetes.io/)的内容。 +This repository contains the assets required to build the [Kubernetes website and documentation](https://kubernetes.io/). We're glad that you want to contribute! +--> +本仓库包含了所有用于构建 [Kubernetes 网站和文档](https://kubernetes.io/) 的软件资产。 我们非常高兴您想要参与贡献! 
-## 贡献文档 +## Running the website locally using Hugo - -您可以点击屏幕右上方的 **Fork** 按钮,在您的 GitHub 账户下创建一份本仓库的副本。这个副本叫做 *fork*。您可以对 fork 副本进行任意修改, -当准备好把修改提交给我们时,您可以通过创建一个 pull request 来告知我们。 +See the [official Hugo documentation](https://gohugo.io/getting-started/installing/) for Hugo installation instructions. Make sure to install the Hugo extended version specified by the `HUGO_VERSION` environment variable in the [`netlify.toml`](netlify.toml#L10) file. +--> +## 在本地使用 Hugo 来运行网站 - -创建 pull request 后,Kubernetes 审核人员将负责提供清晰且可操作的反馈。作为 pull request 的所有者,**您有责任修改 pull request 以解决 Kubernetes 审核者提供给您的反馈。** -另请注意,您最终可能会收到多个 Kubernetes 审核人员为您提供的反馈,也可能出现后面 Kubernetes 审核人员的反馈与前面审核人员的反馈不尽相同的情况。 -此外,在某些情况下,您的某位评审员可能会在需要时要求 [Kubernetes 技术评审员](https://github.com/kubernetes/website/wiki/Tech-reviewers) 进行技术评审。 -审稿人将尽最大努力及时提供反馈,但响应时间可能因情况而异。 +请参考 [Hugo 的官方文档](https://gohugo.io/getting-started/installing/)了解 Hugo 的安装指令。 +请确保安装的是 [`netlify.toml`](netlify.toml#L10) 文件中环境变量 `HUGO_VERSION` 所指定的 +Hugo 扩展版本。 -有关为 Kubernetes 文档做出贡献的更多信息,请参阅: +Before building the site, clone the Kubernetes website repository: +--> +在构造网站之前,先克隆 Kubernetes website 仓库: -* [开始贡献](https://kubernetes.io/docs/contribute/start/) -* [缓存您的文档变更](http://kubernetes.io/docs/contribute/intermediate#view-your-changes-locally) -* [使用页面模版](http://kubernetes.io/docs/contribute/style/page-templates/) -* [文档风格指南](http://kubernetes.io/docs/contribute/style/style-guide/) -* [本地化 Kubernetes 文档](https://kubernetes.io/docs/contribute/localization/) +```bash +git clone https://github.com/kubernetes/website.git +cd website +git submodule update --init --recursive +``` -## `README.md` 的本地化 Kubernetes 文档 +**Note:** The Kubernetes website deploys the [Docsy Hugo theme](https://github.com/google/docsy#readme). +If you have not updated your website repository, the `website/themes/docsy` directory is empty. +The site cannot build without a local copy of the theme. - +**注意:** Kubernetes 网站要部署 [Docsy Hugo 主题](https://github.com/google/docsy#readme). +如果你还没有更新你本地的 website 仓库,目录 `website/themes/docsy` +会是空目录。 +在本地没有主题副本的情况下,网站无法正常构造。 -You can reach the maintainers of Korean localization at: +使用下面的命令更新网站主题: -* June Yi ([GitHub - @gochist](https://github.com/gochist)) -* [Slack channel](https://kubernetes.slack.com/messages/kubernetes-docs-ko) - --> -### 中文 +```bash +git submodule update --init --recursive --depth 1 +``` -可以通过以下方式联系中文本地化的维护人员: + +若要在本地构造和测试网站,请运行: -* Rui Chen ([GitHub - @chenrui333](https://github.com/chenrui333)) -* He Xiaolong ([GitHub - @markthink](https://github.com/markthink)) -* [Slack channel](https://kubernetes.slack.com/messages/kubernetes-docs-zh) +```bash +hugo server --buildFuture +``` -## 在本地使用 docker 运行网站 +This will start the local Hugo server on port 1313. Open up your browser to http://localhost:1313 to view the website. As you make changes to the source files, Hugo updates the website and forces a browser refresh. +--> +上述命令会在端口 1313 上启动本地 Hugo 服务器。 +启动浏览器,打开 http://localhost:1313 来查看网站。 +当你对源文件作出修改时,Hugo 会更新网站并强制浏览器执行刷新操作。 -在本地运行 Kubernetes 网站的推荐方法是运行包含 [Hugo](https://gohugo.io) 静态网站生成器的专用 [Docker](https://docker.com) 镜像。 +## Get involved with SIG Docs - -> 如果您使用的是 Windows,则需要一些工具,可以使用 [Chocolatey](https://chocolatey.org) 进行安装。`choco install make` +Learn more about SIG Docs Kubernetes community and meetings on the [community page](https://github.com/kubernetes/community/tree/master/sig-docs#meetings). 
- -> 如果您更喜欢在没有 Docker 的情况下在本地运行网站,请参阅下面的[使用 Hugo 在本地运行网站](#running-the-site-locally-using-hugo) 章节。 +You can also reach the maintainers of this project at: - -如果您已经[安装运行](https://www.docker.com/get-started)了 Docker,使用以下命令在本地构建 `kubernetes-hugo` Docker 镜像: +- [Slack](https://kubernetes.slack.com/messages/sig-docs) +- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-docs) +--> +## 参与 SIG Docs 工作 -```bash -make docker-image -``` +通过 [社区页面](https://github.com/kubernetes/community/tree/master/sig-docs#meetings) +进一步了解 SIG Docs Kubernetes 社区和会议信息。 - -一旦创建了镜像,您就可以在本地运行网站了: +你也可以通过以下渠道联系本项目的维护人员: -```bash -make docker-serve -``` +- [Slack](https://kubernetes.slack.com/messages/sig-docs) +- [邮件列表](https://groups.google.com/forum/#!forum/kubernetes-sig-docs) -打开浏览器访问 http://localhost:1313 以查看网站。当您对源文件进行更改时,Hugo 会更新网站并强制刷新浏览器。 +## Contributing to the docs - -## 使用 Hugo 在本地运行网站 {#running-the-site-locally-using-hugo} +You can click the **Fork** button in the upper-right area of the screen to create a copy of this repository in your GitHub account. This copy is called a *fork*. Make any changes you want in your fork, and when you are ready to send those changes to us, go to your fork and create a new pull request to let us know about it. - -有关 Hugo 的安装说明,请参阅 [Hugo 官方文档](https://gohugo.io/getting-started/installing/)。 -确保安装对应版本的 Hugo,版本号由 [`netlify.toml`](netlify.toml#L9) 文件中的 `HUGO_VERSION` 环境变量指定。 +Once your pull request is created, a Kubernetes reviewer will take responsibility for providing clear, actionable feedback. As the owner of the pull request, **it is your responsibility to modify your pull request to address the feedback that has been provided to you by the Kubernetes reviewer.** +--> +## 为文档做贡献 - -安装 Hugo 后,在本地运行网站: +你也可以点击屏幕右上方区域的 **Fork** 按钮,在你自己的 GitHub +账号下创建本仓库的拷贝。此拷贝被称作 *fork*。 +你可以在自己的拷贝中任意地修改文档,并在你已准备好将所作修改提交给我们时, +在你自己的拷贝下创建一个拉取请求(Pull Request),以便让我们知道。 -```bash -make serve -``` +一旦你创建了拉取请求,某个 Kubernetes 评审人会负责提供明确的、可执行的反馈意见。 +作为拉取请求的拥有者,*修改拉取请求以解决 Kubernetes +评审人所提出的反馈是你的责任*。 -这将在 1313 端口上启动本地 Hugo 服务器。打开浏览器访问 http://localhost:1313 查看网站。当您对源文件进行更改时,Hugo 会更新网站并强制刷新浏览器。 +Also, note that you may end up having more than one Kubernetes reviewer provide you feedback or you may end up getting feedback from a Kubernetes reviewer that is different than the one initially assigned to provide you feedback. - -## 社区、讨论、贡献和支持 +Furthermore, in some cases, one of your reviewers might ask for a technical review from a Kubernetes tech reviewer when needed. Reviewers will do their best to provide feedback in a timely fashion but response time can vary based on circumstances. 
+--> +还要提醒的一点,有时可能会有不止一个 Kubernetes 评审人为你提供反馈意见。 +有时候,某个评审人的意见和另一个最初被指派的评审人的意见不同。 - -在[社区页面](http://kubernetes.io/community/)了解如何与 Kubernetes 社区互动。 +更进一步,在某些时候,评审人之一可能会在需要的时候请求 Kubernetes +技术评审人来执行技术评审。 +评审人会尽力及时地提供反馈意见,不过具体的响应时间可能会因时而异。 -您可以通过以下方式联系该项目的维护人员: +For more information about contributing to the Kubernetes documentation, see: -- [Slack](https://kubernetes.slack.com/messages/sig-docs) -- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-docs) +* [Contribute to Kubernetes docs](https://kubernetes.io/docs/contribute/) +* [Page Content Types](https://kubernetes.io/docs/contribute/style/page-content-types/) +* [Documentation Style Guide](https://kubernetes.io/docs/contribute/style/style-guide/) +* [Localizing Kubernetes Documentation](https://kubernetes.io/docs/contribute/localization/) +--> +有关为 Kubernetes 文档做出贡献的更多信息,请参阅: + +* [贡献 Kubernetes 文档](https://kubernetes.io/docs/contribute/) +* [页面内容类型](http://kubernetes.io/docs/contribute/style/page-content-types/) +* [文档风格指南](http://kubernetes.io/docs/contribute/style/style-guide/) +* [本地化 Kubernetes 文档](https://kubernetes.io/docs/contribute/localization/) + +## 中文本地化 + +可以通过以下方式联系中文本地化的维护人员: + +* Rui Chen ([GitHub - @chenrui333](https://github.com/chenrui333)) +* He Xiaolong ([GitHub - @markthink](https://github.com/markthink)) +* [Slack channel](https://kubernetes.slack.com/messages/kubernetes-docs-zh) +Participation in the Kubernetes community is governed by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). +--> ### 行为准则 -参与 Kubernetes 社区受 [Kubernetes 行为准则](code-of-conduct.md)的约束。 +参与 Kubernetes 社区受 [CNCF 行为准则](https://github.com/cncf/foundation/blob/master/code-of-conduct.md)约束。 +--> ## 感谢! Kubernetes 因为社区的参与而蓬勃发展,感谢您对我们网站和文档的贡献! diff --git a/README.md b/README.md index 4c631b0dc8ae9..7c4809287f145 100644 --- a/README.md +++ b/README.md @@ -8,12 +8,27 @@ This repository contains the assets required to build the [Kubernetes website an See the [official Hugo documentation](https://gohugo.io/getting-started/installing/) for Hugo installation instructions. Make sure to install the Hugo extended version specified by the `HUGO_VERSION` environment variable in the [`netlify.toml`](netlify.toml#L10) file. -To run the website locally when you have Hugo installed: +Before building the site, clone the Kubernetes website repository: ```bash git clone https://github.com/kubernetes/website.git cd website -git submodule update --init --recursive +git submodule update --init --recursive --depth 1 +``` + +**Note:** The Kubernetes website deploys the [Docsy Hugo theme](https://github.com/google/docsy#readme). +If you have not updated your website repository, the `website/themes/docsy` directory is empty. The site cannot build +without a local copy of the theme. 
+ +Update the website theme: + +```bash +git submodule update --init --recursive --depth 1 +``` + +To build and test the site locally, run: + +```bash hugo server --buildFuture ``` diff --git a/archetypes/concepts.md b/archetypes/concepts.md new file mode 100644 index 0000000000000..33653c911443e --- /dev/null +++ b/archetypes/concepts.md @@ -0,0 +1,12 @@ +--- +title: "{{ replace .Name "-" " " | title }}" +content_type: concept +--- + + + + + + + +## {{% heading "whatsnext" %}} diff --git a/archetypes/tasks.md b/archetypes/tasks.md new file mode 100644 index 0000000000000..9067df39ce918 --- /dev/null +++ b/archetypes/tasks.md @@ -0,0 +1,21 @@ +--- +title: "{{ replace .Name "-" " " | title }}" +content_type: task +--- + + + +## {{% heading "prerequisites" %}} + +{{< include "task-tutorial-prereqs.md" >}} + + + + + + + + +## {{% heading "whatsnext" %}} diff --git a/archetypes/tutorials.md b/archetypes/tutorials.md new file mode 100644 index 0000000000000..46e2017460d8d --- /dev/null +++ b/archetypes/tutorials.md @@ -0,0 +1,19 @@ +--- +title: "{{ replace .Name "-" " " | title }}" +content_type: tutorial +--- + + + +## {{% heading "prerequisites" %}} + +{{< include "task-tutorial-prereqs.md" >}} + +## {{% heading "objectives" %}} + + + +## {{% heading "cleanup" %}} + + +## {{% heading "whatsnext" %}} diff --git a/assets/scss/_base.scss b/assets/scss/_base.scss index fab7dd4e7ec1a..ad462067c6b5e 100644 --- a/assets/scss/_base.scss +++ b/assets/scss/_base.scss @@ -511,7 +511,7 @@ section#cncf { } #desktopKCButton { - position: relative; + position: absolute; font-size: 18px; background-color: $dark-grey; border-radius: 8px; diff --git a/assets/scss/_custom.scss b/assets/scss/_custom.scss index f02d6d908b919..6d353c380ce12 100644 --- a/assets/scss/_custom.scss +++ b/assets/scss/_custom.scss @@ -101,14 +101,6 @@ section { left: 0; background: #fff; } - - .dropdown-menu { - left: -80px; - } - - &.dropdown:hover { - color: $medium-grey; - } } } @@ -118,7 +110,7 @@ section { } @media only screen and (min-width: 1075px) { - margin-top: 1.5rem !important; + margin-top: 1rem !important; } } @@ -264,6 +256,14 @@ footer { } } +main { + .td-content table code, + .td-content>table td { + word-break: break-word; + } +} + + // blockquotes and callouts blockquote { @@ -390,4 +390,4 @@ main.content { } } } -} +} \ No newline at end of file diff --git a/assets/scss/_tablet.scss b/assets/scss/_tablet.scss index 54ead8319c861..299e50eebdac4 100644 --- a/assets/scss/_tablet.scss +++ b/assets/scss/_tablet.scss @@ -91,6 +91,7 @@ $feature-box-div-width: 45%; max-width: 25%; max-height: 100%; transform: translateY(-50%); + width: 100%; } &:nth-child(odd) { @@ -98,6 +99,7 @@ $feature-box-div-width: 45%; .image-wrapper { right: 0; + text-align: right; } } @@ -106,6 +108,7 @@ $feature-box-div-width: 45%; .image-wrapper { left: 0; + text-align: left; } } diff --git a/config.toml b/config.toml index 13961c17c0063..c880baa50449a 100644 --- a/config.toml +++ b/config.toml @@ -153,7 +153,6 @@ css = [ "custom-jekyll/tags" ] js = [ - "custom-jekyll/tags", "script" ] @@ -222,7 +221,7 @@ no = 'Sorry to hear that. Please 88% -- -Test: 67% -\> 72% -- -Pre production: 41% -\> 55% -- -Production: 50% -\> 62% -What is striking in this is that pre-production growth continued, even as workloads were clearly transitioned into true production. Likewise the share of people considering containers for production rose from 78% in January to 82% in February. Again we’ll see if the trend continues into March. 
+- Development: 80% -\> 88% +- Test: 67% -\> 72% +- Pre production: 41% -\> 55% +- Production: 50% -\> 62% + +What is striking in this is that pre-production growth continued, even as workloads were clearly transitioned into true production. Likewise the share of people considering containers for production rose from 78% in January to 82% in February. Again we’ll see if the trend continues into March. ## Container and cluster sizes diff --git a/content/en/blog/_posts/2016-12-00-Container-Runtime-Interface-Cri-In-Kubernetes.md b/content/en/blog/_posts/2016-12-00-Container-Runtime-Interface-Cri-In-Kubernetes.md index 061a39c196a92..721b217c47634 100644 --- a/content/en/blog/_posts/2016-12-00-Container-Runtime-Interface-Cri-In-Kubernetes.md +++ b/content/en/blog/_posts/2016-12-00-Container-Runtime-Interface-Cri-In-Kubernetes.md @@ -215,14 +215,10 @@ CRI is being actively developed and maintained by the Kubernetes [SIG-Node](http -- -Post issues or feature requests on [GitHub](https://github.com/kubernetes/kubernetes) -- -Join the #sig-node channel on [Slack](https://kubernetes.slack.com/) -- -Subscribe to the [SIG-Node mailing list](mailto:kubernetes-sig-node@googlegroups.com) -- -Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for latest updates +- Post issues or feature requests on [GitHub](https://github.com/kubernetes/kubernetes) +- Join the #sig-node channel on [Slack](https://kubernetes.slack.com/) +- Subscribe to the [SIG-Node mailing list](mailto:kubernetes-sig-node@googlegroups.com) +- Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for latest updates diff --git a/content/en/blog/_posts/2016-12-00-Five-Days-Of-Kubernetes-1-5.md b/content/en/blog/_posts/2016-12-00-Five-Days-Of-Kubernetes-1-5.md index 14eae43fc67a5..fa30aba5f784f 100644 --- a/content/en/blog/_posts/2016-12-00-Five-Days-Of-Kubernetes-1-5.md +++ b/content/en/blog/_posts/2016-12-00-Five-Days-Of-Kubernetes-1-5.md @@ -21,13 +21,8 @@ This progress is our commitment in continuing to make Kubernetes best way to man Connect -- -[Download](http://get.k8s.io/) Kubernetes -- -Get involved with the Kubernetes project on [GitHub](https://github.com/kubernetes/kubernetes) -- -Post questions (or answer questions) on [Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes) -- -Connect with the community on [Slack](http://slack.k8s.io/) -- -Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for latest updates +- [Download](http://get.k8s.io/) Kubernetes +- Get involved with the Kubernetes project on [GitHub](https://github.com/kubernetes/kubernetes) +- Post questions (or answer questions) on [Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes) +- Connect with the community on [Slack](http://slack.k8s.io/) +- Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for latest updates diff --git a/content/en/blog/_posts/2016-12-00-Windows-Server-Support-Kubernetes.md b/content/en/blog/_posts/2016-12-00-Windows-Server-Support-Kubernetes.md index 7f58071940467..ba87948d3c940 100644 --- a/content/en/blog/_posts/2016-12-00-Windows-Server-Support-Kubernetes.md +++ b/content/en/blog/_posts/2016-12-00-Windows-Server-Support-Kubernetes.md @@ -36,12 +36,11 @@ Most of the Kubernetes constructs, such as Pods, Services, Labels, etc. work wit | What doesn’t work yet? | -- -Pod abstraction is not same due to networking namespaces. Net result is that Windows containers in a single POD cannot communicate over localhost. 
Linux containers can share networking stack by placing them in the same network namespace. -- -DNS capabilities are not fully implemented -- -UDP is not supported inside a container + +- Pod abstraction is not same due to networking namespaces. Net result is that Windows containers in a single POD cannot communicate over localhost. Linux containers can share networking stack by placing them in the same network namespace. +- DNS capabilities are not fully implemented +- UDP is not supported inside a container + | | When will it be ready for all production workloads (general availability)? diff --git a/content/en/blog/_posts/2017-05-00-Kubernetes-Monitoring-Guide.md b/content/en/blog/_posts/2017-05-00-Kubernetes-Monitoring-Guide.md index 87a26f14b42ff..c5f114707275e 100644 --- a/content/en/blog/_posts/2017-05-00-Kubernetes-Monitoring-Guide.md +++ b/content/en/blog/_posts/2017-05-00-Kubernetes-Monitoring-Guide.md @@ -78,11 +78,7 @@ _--Jean-Mathieu Saponaro, Research & Analytics Engineer, Datadog_ -- -Get involved with the Kubernetes project on [GitHub](https://github.com/kubernetes/kubernetes)  -- -Post questions (or answer questions) on [Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes)  -- -Connect with the community on [Slack](http://slack.k8s.io/) -- -Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for latest updates +- Get involved with the Kubernetes project on [GitHub](https://github.com/kubernetes/kubernetes)  +- Post questions (or answer questions) on [Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes)  +- Connect with the community on [Slack](http://slack.k8s.io/) +- Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for latest updates diff --git a/content/en/blog/_posts/2017-05-00-Kubespray-Ansible-Collaborative-Kubernetes-Ops.md b/content/en/blog/_posts/2017-05-00-Kubespray-Ansible-Collaborative-Kubernetes-Ops.md index 8c635748645af..c6e4007d9a223 100644 --- a/content/en/blog/_posts/2017-05-00-Kubespray-Ansible-Collaborative-Kubernetes-Ops.md +++ b/content/en/blog/_posts/2017-05-00-Kubespray-Ansible-Collaborative-Kubernetes-Ops.md @@ -113,11 +113,7 @@ _-- Rob Hirschfeld, co-founder of RackN and co-chair of the Cluster Ops SIG_ -- -Get involved with the Kubernetes project on [GitHub](https://github.com/kubernetes/kubernetes) -- -Post questions (or answer questions) on [Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes) -- -Connect with the community on [Slack](http://slack.k8s.io/) -- -Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for latest updates +- Get involved with the Kubernetes project on [GitHub](https://github.com/kubernetes/kubernetes) +- Post questions (or answer questions) on [Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes) +- Connect with the community on [Slack](http://slack.k8s.io/) +- Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for latest updates diff --git a/content/en/blog/_posts/2017-07-00-Happy-Second-Birthday-Kubernetes.md b/content/en/blog/_posts/2017-07-00-Happy-Second-Birthday-Kubernetes.md index 774bbffad7ff0..7f3c6ebee9fc2 100644 --- a/content/en/blog/_posts/2017-07-00-Happy-Second-Birthday-Kubernetes.md +++ b/content/en/blog/_posts/2017-07-00-Happy-Second-Birthday-Kubernetes.md @@ -26,87 +26,69 @@ Kubernetes has also earned the trust of many [Fortune 500 companies](https://kub July 2016 -- -Kubernauts celebrated its [first 
anniversary](https://kubernetes.io/blog/2016/07/happy-k8sbday-1) of the Kubernetes 1.0 launch with 20 [#k8sbday](https://twitter.com/search?q=k8sbday&src=typd) parties hosted worldwide -- -Kubernetes [v1.3 release](https://kubernetes.io/blog/2016/07/kubernetes-1-3-bridging-cloud-native-and-enterprise-workloads/) +- Kubernauts celebrated its [first anniversary](https://kubernetes.io/blog/2016/07/happy-k8sbday-1) of the Kubernetes 1.0 launch with 20 [#k8sbday](https://twitter.com/search?q=k8sbday&src=typd) parties hosted worldwide +- Kubernetes [v1.3 release](https://kubernetes.io/blog/2016/07/kubernetes-1-3-bridging-cloud-native-and-enterprise-workloads/) September 2016 -- -Kubernetes [v1.4 release](https://kubernetes.io/blog/2016/09/kubernetes-1-4-making-it-easy-to-run-on-kuberentes-anywhere/) -- -Launch of [kubeadm](https://kubernetes.io/blog/2016/09/how-we-made-kubernetes-easy-to-install), a tool that makes Kubernetes dramatically easier to install -- -[Pokemon Go](https://www.sdxcentral.com/articles/news/google-dealt-pokemon-go-traffic-50-times-beyond-expectations/2016/09/) - one of the largest installs of Kubernetes ever +- Kubernetes [v1.4 release](https://kubernetes.io/blog/2016/09/kubernetes-1-4-making-it-easy-to-run-on-kuberentes-anywhere/) +- Launch of [kubeadm](https://kubernetes.io/blog/2016/09/how-we-made-kubernetes-easy-to-install), a tool that makes Kubernetes dramatically easier to install +- [Pokemon Go](https://www.sdxcentral.com/articles/news/google-dealt-pokemon-go-traffic-50-times-beyond-expectations/2016/09/) - one of the largest installs of Kubernetes ever October 2016 -- -Introduced [Kubernetes service partners program](https://kubernetes.io/blog/2016/10/kubernetes-service-technology-partners-program) and a redesigned [partners page](https://kubernetes.io/partners/) +- Introduced [Kubernetes service partners program](https://kubernetes.io/blog/2016/10/kubernetes-service-technology-partners-program) and a redesigned [partners page](https://kubernetes.io/partners/) November 2016 -- -CloudNativeCon/KubeCon [Seattle](https://www.cncf.io/blog/2016/11/17/cloudnativeconkubecon-2016-wrap/) -- -Cloud Native Computing Foundation partners with The Linux Foundation to launch a [new Kubernetes certification, training and managed service provider program](https://www.cncf.io/blog/2016/11/08/cncf-partners-linux-foundation-launch-new-kubernetes-certification-training-managed-service-provider-program/) +- CloudNativeCon/KubeCon [Seattle](https://www.cncf.io/blog/2016/11/17/cloudnativeconkubecon-2016-wrap/) +- Cloud Native Computing Foundation partners with The Linux Foundation to launch a [new Kubernetes certification, training and managed service provider program](https://www.cncf.io/blog/2016/11/08/cncf-partners-linux-foundation-launch-new-kubernetes-certification-training-managed-service-provider-program/) December 2016 -- -Kubernetes [v1.5 release](https://kubernetes.io/blog/2016/12/kubernetes-1-5-supporting-production-workloads/) +- Kubernetes [v1.5 release](https://kubernetes.io/blog/2016/12/kubernetes-1-5-supporting-production-workloads/) January 2017 -- -[Survey](https://www.cncf.io/blog/2017/01/17/container-management-trends-kubernetes-moves-testing-production/) from CloudNativeCon + KubeCon Seattle showcases the maturation of Kubernetes deployment +- [Survey](https://www.cncf.io/blog/2017/01/17/container-management-trends-kubernetes-moves-testing-production/) from CloudNativeCon + KubeCon Seattle showcases the maturation of Kubernetes deployment March 2017 -- 
-CloudNativeCon/KubeCon [Europe](https://www.cncf.io/blog/2017/04/17/highlights-cloudnativecon-kubecon-europe-2017/) -- -Kubernetes[v1.6 release](https://kubernetes.io/blog/2017/03/kubernetes-1-6-multi-user-multi-workloads-at-scale) +- CloudNativeCon/KubeCon [Europe](https://www.cncf.io/blog/2017/04/17/highlights-cloudnativecon-kubecon-europe-2017/) +- Kubernetes[v1.6 release](https://kubernetes.io/blog/2017/03/kubernetes-1-6-multi-user-multi-workloads-at-scale) April 2017 -- -The [Battery Open Source Software (BOSS) Index](https://www.battery.com/powered/boss-index-tracking-explosive-growth-open-source-software/) lists Kubernetes as #33 in the top 100 popular open-source software projects +- The [Battery Open Source Software (BOSS) Index](https://www.battery.com/powered/boss-index-tracking-explosive-growth-open-source-software/) lists Kubernetes as #33 in the top 100 popular open-source software projects May 2017 -- -[Four Kubernetes projects](https://www.cncf.io/blog/2017/05/04/cncf-brings-kubernetes-coredns-opentracing-prometheus-google-summer-code-2017/) accepted to The [Google Summer of Code](https://developers.google.com/open-source/gsoc/) (GSOC) 2017 program -- -Stutterstock and Kubernetes appear in [The Wall Street Journal](https://blogs.wsj.com/cio/2017/05/26/shutterstock-ceo-says-new-business-plan-hinged-upon-total-overhaul-of-it/): “On average we [Shutterstock] deploy 45 different releases into production a day using that framework. We use Docker, Kubernetes and Jenkins [to build and run containers and automate development,” said CTO Marty Brodbeck on the company’s IT overhaul and adoption of containerization. +- [Four Kubernetes projects](https://www.cncf.io/blog/2017/05/04/cncf-brings-kubernetes-coredns-opentracing-prometheus-google-summer-code-2017/) accepted to The [Google Summer of Code](https://developers.google.com/open-source/gsoc/) (GSOC) 2017 program +- Stutterstock and Kubernetes appear in [The Wall Street Journal](https://blogs.wsj.com/cio/2017/05/26/shutterstock-ceo-says-new-business-plan-hinged-upon-total-overhaul-of-it/): “On average we [Shutterstock] deploy 45 different releases into production a day using that framework. We use Docker, Kubernetes and Jenkins [to build and run containers and automate development,” said CTO Marty Brodbeck on the company’s IT overhaul and adoption of containerization. 
June 2017 -- -Kubernetes [v1.7 release](https://kubernetes.io/blog/2017/06/kubernetes-1-7-security-hardening-stateful-application-extensibility-updates) -- -[Survey](https://www.cncf.io/blog/2017/06/28/survey-shows-kubernetes-leading-orchestration-platform/) from CloudNativeCon + KubeCon Europe shows Kubernetes leading as the orchestration platform of choice -- -Kubernetes ranked [#4](https://github.com/cncf/velocity) in the [30 highest velocity open source projects](https://www.cncf.io/blog/2017/06/05/30-highest-velocity-open-source-projects/) +- Kubernetes [v1.7 release](https://kubernetes.io/blog/2017/06/kubernetes-1-7-security-hardening-stateful-application-extensibility-updates) +- [Survey](https://www.cncf.io/blog/2017/06/28/survey-shows-kubernetes-leading-orchestration-platform/) from CloudNativeCon + KubeCon Europe shows Kubernetes leading as the orchestration platform of choice +- Kubernetes ranked [#4](https://github.com/cncf/velocity) in the [30 highest velocity open source projects](https://www.cncf.io/blog/2017/06/05/30-highest-velocity-open-source-projects/) ![](https://lh5.googleusercontent.com/tN_M9v5pFyr3uzwAXTliSKofTGz9DUSMotLHWgy2vl2VSsfIfysagv7h5VRkMA5L9TsNBTMX4dWr-V3O1S9d3dw9IctSj4bAyzblXCAe4xjAhnNJEA3vjSq4Cw79SfoRWfnW-zYY) @@ -116,8 +98,7 @@ Figure 2: The 30 highest velocity open source projects. Source: [https://github. July 2017 -- -Kubernauts celebrate the second anniversary of the Kubernetes 1.0 launch with [#k8sbday](https://twitter.com/search?q=k8sbday&src=typd) parties worldwide! +- Kubernauts celebrate the second anniversary of the Kubernetes 1.0 launch with [#k8sbday](https://twitter.com/search?q=k8sbday&src=typd) parties worldwide! diff --git a/content/en/blog/_posts/2017-07-00-How-Watson-Health-Cloud-Deploys.md b/content/en/blog/_posts/2017-07-00-How-Watson-Health-Cloud-Deploys.md index de516c17a819d..b931ec336a57b 100644 --- a/content/en/blog/_posts/2017-07-00-How-Watson-Health-Cloud-Deploys.md +++ b/content/en/blog/_posts/2017-07-00-How-Watson-Health-Cloud-Deploys.md @@ -92,14 +92,10 @@ Usage of UCD in the Process Flow: UCD is used for deployment and the end-to end deployment process is automated here. UCD component process involves the following steps: -- -Download the required artifacts for deployment from the Gitlab. -- -Login to Bluemix and set the KUBECONFIG based on the Kubernetes cluster used for creating the pods. -- -Create the application pod in the cluster using kubectl create command. -- -If needed, run a rolling update to update the existing pod. +- Download the required artifacts for deployment from the Gitlab. +- Login to Bluemix and set the KUBECONFIG based on the Kubernetes cluster used for creating the pods. +- Create the application pod in the cluster using kubectl create command. +- If needed, run a rolling update to update the existing pod. @@ -150,13 +146,8 @@ To expose our services to outside the cluster, we used Ingress. 
In IBM Cloud Kub -- -Post questions (or answer questions) on [Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes) -- -Join the community portal for advocates on [K8sPort](http://k8sport.org/) -- -Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for latest updates -- -Connect with the community on [Slack](http://slack.k8s.io/) -- -Get involved with the Kubernetes project on [GitHub](https://github.com/kubernetes/kubernetes) +- Post questions (or answer questions) on [Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes) +- Join the community portal for advocates on [K8sPort](http://k8sport.org/) +- Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for latest updates +- Connect with the community on [Slack](http://slack.k8s.io/) +- Get involved with the Kubernetes project on [GitHub](https://github.com/kubernetes/kubernetes) diff --git a/content/en/blog/_posts/2017-08-00-Kompose-Helps-Developers-Move-Docker.md b/content/en/blog/_posts/2017-08-00-Kompose-Helps-Developers-Move-Docker.md index b266497707823..b94ac8b69320d 100644 --- a/content/en/blog/_posts/2017-08-00-Kompose-Helps-Developers-Move-Docker.md +++ b/content/en/blog/_posts/2017-08-00-Kompose-Helps-Developers-Move-Docker.md @@ -129,14 +129,10 @@ With our graduation, comes the release of Kompose 1.0.0, here’s what’s new: -- -Docker Compose Version 3: Kompose now supports Docker Compose Version 3. New keys such as ‘deploy’ now convert to their Kubernetes equivalent. -- -Docker Push and Build Support: When you supply a ‘build’ key within your `docker-compose.yaml` file, Kompose will automatically build and push the image to the respective Docker repository for Kubernetes to consume. -- -New Keys: With the addition of version 3 support, new keys such as pid and deploy are supported. For full details on what Kompose supports, view our [conversion document](http://kompose.io/conversion/). -- -Bug Fixes: In every release we fix any bugs related to edge-cases when converting. This release fixes issues relating to converting volumes with ‘./’ in the target name. +- Docker Compose Version 3: Kompose now supports Docker Compose Version 3. New keys such as ‘deploy’ now convert to their Kubernetes equivalent. +- Docker Push and Build Support: When you supply a ‘build’ key within your `docker-compose.yaml` file, Kompose will automatically build and push the image to the respective Docker repository for Kubernetes to consume. +- New Keys: With the addition of version 3 support, new keys such as pid and deploy are supported. For full details on what Kompose supports, view our [conversion document](http://kompose.io/conversion/). +- Bug Fixes: In every release we fix any bugs related to edge-cases when converting. This release fixes issues relating to converting volumes with ‘./’ in the target name. @@ -145,28 +141,18 @@ What’s ahead? As we continue development, we will strive to convert as many Docker Compose keys as possible for all future and current Docker Compose releases, converting each one to their Kubernetes equivalent. All future releases will be backwards-compatible. 
-- -[Install Kompose](https://github.com/kubernetes/kompose/blob/master/docs/installation.md) -- -[Kompose Quick Start Guide](https://github.com/kubernetes/kompose/blob/master/docs/installation.md) -- -[Kompose Web Site](http://kompose.io/) -- -[Kompose Documentation](https://github.com/kubernetes/kompose/tree/master/docs) +- [Install Kompose](https://github.com/kubernetes/kompose/blob/master/docs/installation.md) +- [Kompose Quick Start Guide](https://github.com/kubernetes/kompose/blob/master/docs/installation.md) +- [Kompose Web Site](http://kompose.io/) +- [Kompose Documentation](https://github.com/kubernetes/kompose/tree/master/docs) --Charlie Drage, Software Engineer, Red Hat -- -Post questions (or answer questions) on[Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes) -- -Join the community portal for advocates on[K8sPort](http://k8sport.org/) -- -Follow us on Twitter[@Kubernetesio](https://twitter.com/kubernetesio) for latest updates -- -Connect with the community on[Slack](http://slack.k8s.io/) -- -Get involved with the Kubernetes project on[GitHub](https://github.com/kubernetes/kubernetes) -- +- Post questions (or answer questions) on[Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes) +- Join the community portal for advocates on[K8sPort](http://k8sport.org/) +- Follow us on Twitter[@Kubernetesio](https://twitter.com/kubernetesio) for latest updates +- Connect with the community on[Slack](http://slack.k8s.io/) +- Get involved with the Kubernetes project on[GitHub](https://github.com/kubernetes/kubernetes) diff --git a/content/en/blog/_posts/2017-09-00-Kubernetes-Statefulsets-Daemonsets.md b/content/en/blog/_posts/2017-09-00-Kubernetes-Statefulsets-Daemonsets.md index fe156e00dfd52..67f3e084cc957 100644 --- a/content/en/blog/_posts/2017-09-00-Kubernetes-Statefulsets-Daemonsets.md +++ b/content/en/blog/_posts/2017-09-00-Kubernetes-Statefulsets-Daemonsets.md @@ -987,13 +987,8 @@ Rolling updates and roll backs close an important feature gap for DaemonSets and -- -Post questions (or answer questions) on [Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes) -- -Join the community portal for advocates on [K8sPort](http://k8sport.org/) -- -Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for latest updates -- -Connect with the community on [Slack](http://slack.k8s.io/) -- -Get involved with the Kubernetes project on [GitHub](https://github.com/kubernetes/kubernetes) +- Post questions (or answer questions) on [Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes) +- Join the community portal for advocates on [K8sPort](http://k8sport.org/) +- Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for latest updates +- Connect with the community on [Slack](http://slack.k8s.io/) +- Get involved with the Kubernetes project on [GitHub](https://github.com/kubernetes/kubernetes) diff --git a/content/en/blog/_posts/2019-03-28-running-kubernetes-locally-on-linux-with-minikube.md b/content/en/blog/_posts/2019-03-28-running-kubernetes-locally-on-linux-with-minikube.md index c33b805b4dcef..ebbf59177248c 100644 --- a/content/en/blog/_posts/2019-03-28-running-kubernetes-locally-on-linux-with-minikube.md +++ b/content/en/blog/_posts/2019-03-28-running-kubernetes-locally-on-linux-with-minikube.md @@ -18,7 +18,7 @@ This is post #1 in a series about the local deployment options on Linux, and it [Minikube](https://github.com/kubernetes/minikube) is a cross-platform, community-driven 
[Kubernetes](https://kubernetes.io/) distribution, which is targeted to be used primarily in local environments. It deploys a single-node cluster, which is an excellent option for having a simple Kubernetes cluster up and running on localhost. -Minikube is designed to be used as a virtual machine (VM), and the default VM runtime is [VirtualBox](https://www.virtualbox.org/). At the same time, extensibility is one of the critical benefits of Minikube, so it's possible to use it with [drivers](https://github.com/kubernetes/minikube/blob/master/docs/drivers.md) outside of VirtualBox. +Minikube is designed to be used as a virtual machine (VM), and the default VM runtime is [VirtualBox](https://www.virtualbox.org/). At the same time, extensibility is one of the critical benefits of Minikube, so it's possible to use it with [drivers](https://minikube.sigs.k8s.io/docs/drivers/) outside of VirtualBox. By default, Minikube uses Virtualbox as a runtime for running the virtual machine. Virtualbox is a cross-platform solution, which can be used on a variety of operating systems, including GNU/Linux, Windows, and macOS. diff --git a/content/en/blog/_posts/2020-06-29-working-with-terraform-and-kubernetes.md b/content/en/blog/_posts/2020-06-29-working-with-terraform-and-kubernetes.md new file mode 100644 index 0000000000000..1b15ae28d213e --- /dev/null +++ b/content/en/blog/_posts/2020-06-29-working-with-terraform-and-kubernetes.md @@ -0,0 +1,59 @@ +--- +layout: blog +title: "Working with Terraform and Kubernetes" +date: 2020-06-29 +slug: working-with-terraform-and-kubernetes +url: /blog/2020/06/working-with-terraform-and-kubernetes +--- + +**Author:** [Philipp Strube](https://twitter.com/pst418), Kubestack + +Maintaining Kubestack, an open-source [Terraform GitOps Framework](https://www.kubestack.com/lp/terraform-gitops-framework) for Kubernetes, I unsurprisingly spend a lot of time working with Terraform and Kubernetes. Kubestack provisions managed Kubernetes services like AKS, EKS and GKE using Terraform but also integrates cluster services from Kustomize bases into the GitOps workflow. Think of cluster services as everything that's required on your Kubernetes cluster, before you can deploy application workloads. + +Hashicorp recently announced [better integration between Terraform and Kubernetes](https://www.hashicorp.com/blog/deploy-any-resource-with-the-new-kubernetes-provider-for-hashicorp-terraform/). I took this as an opportunity to give an overview of how Terraform can be used with Kubernetes today and what to be aware of. + +In this post I will however focus only on using Terraform to provision Kubernetes API resources, not Kubernetes clusters. + +[Terraform](https://www.terraform.io/intro/index.html) is a popular infrastructure as code solution, so I will only introduce it very briefly here. In a nutshell, Terraform allows declaring a desired state for resources as code, and will determine and execute a plan to take the infrastructure from its current state, to the desired state. + +To be able to support different resources, Terraform requires providers that integrate the respective API. So, to create Kubernetes resources we need a Kubernetes provider. Here are our options: + +## Terraform `kubernetes` provider (official) + +First, the [official Kubernetes provider](https://github.com/hashicorp/terraform-provider-kubernetes). This provider is undoubtedly the most mature of the three. 
However, it comes with a big caveat that's probably the main reason why using Terraform to maintain Kubernetes resources is not a popular choice. + +Terraform requires a schema for each resource and this means the maintainers have to translate the schema of each Kubernetes resource into a Terraform schema. This is a lot of effort and was the reason why for a long time the supported resources were pretty limited. While this has improved over time, still not everything is supported. And especially [custom resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) are not possible to support this way. + +This schema translation also results in some edge cases to be aware of. For example, `metadata` in the Terraform schema is a list of maps, which means you have to refer to the `metadata.name` of a Kubernetes resource like this in Terraform: `kubernetes_secret.example.metadata.0.name`. + +On the plus side however, having a Terraform schema means full integration between Kubernetes and other Terraform resources. Like for [example](https://github.com/kbst/terraform-kubestack/blob/e5caa6d20926d546a045144ebe79c7cc8c0b4c8a/aws/_modules/eks/ingress.tf#L37), using Terraform to create a Kubernetes service of type `LoadBalancer` and then use the returned ELB hostname in a Route53 record to configure DNS. + +The biggest benefit when using Terraform to maintain Kubernetes resources is integration into the Terraform plan/apply life-cycle. So you can review planned changes before applying them. Also, using `kubectl`, purging of resources from the cluster is not trivial without manual intervention. Terraform does this reliably. + +## Terraform `kubernetes-alpha` provider + +Second, the new [alpha Kubernetes provider](https://github.com/hashicorp/terraform-provider-kubernetes-alpha). As a response to the limitations of the current Kubernetes provider, the Hashicorp team recently released an alpha version of a new provider. + +This provider uses dynamic resource types and server-side-apply to support all Kubernetes resources. I personally think this provider has the potential to be a game changer - even if [managing Kubernetes resources in HCL](https://github.com/hashicorp/terraform-provider-kubernetes-alpha#moving-from-yaml-to-hcl) may still not be for everyone. Maybe the Kustomize provider below will help with that. + +The only downside really is that it's explicitly discouraged to use it for anything but testing. But the more people test it, the sooner it should be ready for prime time. So I encourage everyone to give it a try. + +## Terraform `kustomize` provider + +Last, we have the [`kustomize` provider](https://github.com/kbst/terraform-provider-kustomize). Kustomize provides a way to do customizations of Kubernetes resources using inheritance instead of templating. It is designed to output the result to `stdout`, from where you can apply the changes using `kubectl`. This approach means that `kubectl` edge cases like no purging or changes to immutable attributes still make full automation difficult. + +Kustomize is a popular way to handle customizations. But I was looking for a more reliable way to automate applying changes. Since this is exactly what Terraform is great at, the Kustomize provider was born. + +Not going into too much detail here, but from Terraform's perspective, this provider treats every Kubernetes resource as a JSON string. This way it can handle any Kubernetes resource resulting from the Kustomize build.
But it has the big disadvantage that Kubernetes resources can not easily be integrated with other Terraform resources. Remember the load balancer example from above. + +Under the hood, similarly to the new Kubernetes alpha provider, the Kustomize provider also uses the dynamic Kubernetes client and server-side-apply. Going forward, I plan to deprecate this part of the Kustomize provider that overlaps with the new Kubernetes provider and only keep the Kustomize integration. + +## Conclusion + +For teams that are already invested into Terraform, or teams that are looking for ways to replace `kubectl` in automation, Terraform's plan/apply life-cycle has always been a promising option to automate changes to Kubernetes resources. However, the limitations of the official Kubernetes provider resulted in this not seeing significant adoption. + +The new alpha provider removes the limitations and has the potential to make Terraform a prime option to automate changes to Kubernetes resources. + +Teams that have already adopted Kustomize, may find integrating Kustomize and Terraform using the Kustomize provider beneficial over `kubectl` because it avoids common edge cases. Even if in this set up, Terraform can only easily be used to plan and apply the changes, not to adapt the Kubernetes resources. In the future, this issue may be resolved by combining the Kustomize provider with the new Kubernetes provider. + +If you have any questions regarding these three options, feel free to reach out to me on the Kubernetes Slack in either the [#kubestack](https://app.slack.com/client/T09NY5SBT/CMBCT7XRQ) or the [#kustomize](https://app.slack.com/client/T09NY5SBT/C9A5ALABG) channel. If you happen to give any of the providers a try and encounter a problem, please file a GitHub issue to help the maintainers fix it. 
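To make the plan/apply life-cycle discussed above concrete, here is a minimal sketch of the review-then-apply workflow from the command line. It assumes a Terraform working directory that already declares Kubernetes resources using one of the providers covered in the post; the `tfplan` file name is only a placeholder.

```bash
# Fetch the providers referenced by the configuration (kubernetes, kubernetes-alpha, kustomize, ...)
terraform init

# Compute the changes needed to reach the desired state and save the plan for review
terraform plan -out=tfplan

# Apply exactly the reviewed plan
terraform apply tfplan
```

Because deletions show up in the saved plan like any other change, resources removed from the configuration are purged from the cluster on apply, which is the purging behaviour that is hard to automate reliably with plain `kubectl`.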
diff --git a/content/en/blog/_posts/2020-06-30-SIG-Windows-Spotlight/KubernetesComputer_transparent.png b/content/en/blog/_posts/2020-06-30-SIG-Windows-Spotlight/KubernetesComputer_transparent.png new file mode 100644 index 0000000000000..86e4bdff5f23c Binary files /dev/null and b/content/en/blog/_posts/2020-06-30-SIG-Windows-Spotlight/KubernetesComputer_transparent.png differ diff --git a/content/en/blog/_posts/2020-06-30-SIG-Windows-Spotlight/PeopleDoodle_transparent.png b/content/en/blog/_posts/2020-06-30-SIG-Windows-Spotlight/PeopleDoodle_transparent.png new file mode 100644 index 0000000000000..6657c31ec4bd2 Binary files /dev/null and b/content/en/blog/_posts/2020-06-30-SIG-Windows-Spotlight/PeopleDoodle_transparent.png differ diff --git a/content/en/blog/_posts/2020-06-30-SIG-Windows-Spotlight/cgroupsNamespacesComboPic.png b/content/en/blog/_posts/2020-06-30-SIG-Windows-Spotlight/cgroupsNamespacesComboPic.png new file mode 100644 index 0000000000000..4aae049d00291 Binary files /dev/null and b/content/en/blog/_posts/2020-06-30-SIG-Windows-Spotlight/cgroupsNamespacesComboPic.png differ diff --git a/content/en/blog/_posts/2020-06-30-SIG-Windows-Spotlight/index.md b/content/en/blog/_posts/2020-06-30-SIG-Windows-Spotlight/index.md new file mode 100644 index 0000000000000..df5834764e77e --- /dev/null +++ b/content/en/blog/_posts/2020-06-30-SIG-Windows-Spotlight/index.md @@ -0,0 +1,104 @@ +--- +layout: blog +title: "SIG-Windows Spotlight" +date: 2020-06-30 +slug: sig-windows-spotlight-2020 +--- + +# SIG-Windows Spotlight +_This post tells the story of how Kubernetes contributors work together to provide a container orchestrator that works for both Linux and Windows._ + +Image of a computer with Kubernetes logo + +Most people who are familiar with Kubernetes are probably used to associating it with Linux. The connection makes sense, since Kubernetes ran on Linux from its very beginning. However, many teams and organizations working on adopting Kubernetes need the ability to orchestrate containers on Windows. Since the release of Docker and rise to popularity of containers, there have been efforts both from the community and from Microsoft itself to make container technology as accessible in Windows systems as it is in Linux systems. + +Within the Kubernetes community, those who are passionate about making Kubernetes accessible to the Windows community can find a home in the Windows Special Interest Group. To learn more about SIG-Windows and the future of Kubernetes on Windows, I spoke to co-chairs [Mark Rossetti](https://github.com/marosset) and [Michael Michael](https://github.com/michmike) about the SIG's goals and how others can contribute. + +## Intro to Windows Containers & Kubernetes + +Kubernetes is the most popular tool for orchestrating container workloads, so to understand the Windows Special Interest Group (SIG) within the Kubernetes project, it's important to first understand what we mean when we talk about running containers on Windows. + +*** +_"When looking at Windows support in Kubernetes," says SIG (Special Interest Group) Co-chairs Mark Rossetti and Michael Michael, "many start drawing comparisons to Linux containers. Although some of the comparisons that highlight limitations are fair, it is important to distinguish between operational limitations and differences between the Windows and Linux operating systems. 
Windows containers run the Windows operating system and Linux containers run Linux."_ +*** + +In essence, any "container" is simply a process being run on its host operating system, with some key tooling in place to isolate that process and its dependencies from the rest of the environment. The goal is to make that running process safely isolated, while taking up minimal resources from the system to perform that isolation. On Linux, the tooling used to isolate processes to create "containers" commonly boils down to cgroups and namespaces (among a few others), which are themselves tools built in to the Linux Kernel. + +A visual analogy using dogs to explain Linux cgroups and namespaces. + +#### _If dogs were processes: containerization would be like giving each dog their own resources like toys and food using cgroups, and isolating troublesome dogs using namespaces._ + + +Native Windows processes are processes that are or must be run on a Windows operating system. This makes them fundamentally different from a process running on a Linux operating system. Since Linux containers are Linux processes being isolated by the Linux kernel tools known as cgroups and namespaces, containerizing native Windows processes meant implementing similar isolation tools within the Windows kernel itself. Thus, "Windows Containers" and "Linux Containers" are fundamentally different technologies, even though they have the same goals (isolating processes) and in some ways work similarly (using kernel level containerization). + +So when it comes to running containers on Windows, there are actually two very important concepts to consider: + +* Native Windows processes running as native Windows Server style containers, +* and traditional Linux containers running on a Linux Kernel, generally hosted on a lightweight Hyper-V Virtual Machine. + +You can learn more about Linux and Windows containers in this [tutorial](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/linux-containers) from Microsoft. + + + +### Kubernetes on Windows + +Kubernetes was initially designed with Linux containers in mind and was itself designed to run on Linux systems. Because of that, much of the functionality of Kubernetes involves unique Linux functionality. The Linux-specific work is intentional--we all want Kubernetes to run optimally on Linux--but there is a growing demand for similar optimization for Windows servers. For cases where users need container orchestration on Windows, the Kubernetes contributor community of SIG-Windows has incorporated functionality for Windows-specific use cases. + +*** +_"A common question we get is, will I be able to have a Windows-only cluster. The answer is NO. Kubernetes control plane components will continue to be based on Linux, while SIG-Windows is concentrating on the experience of having Windows worker nodes in a Kubernetes cluster."_ +*** + +Rather than separating out the concepts of "Windows Kubernetes," and "Linux Kubernetes," the community of SIG-Windows works toward adding functionality to the main Kubernetes project which allows it to handle use cases for Windows. These Windows capabilities mirror, and in some cases add unique functionality to, the Linux use cases Kubernetes has served since its release in 2014 (want to learn more history? Scroll through this [original design document](https://github.com/kubernetes/kubernetes/blob/e2b948dbfbba62b8cb681189377157deee93bb43/DESIGN.md). + + +## What Does SIG-Windows Do? 
+
+
+## What Does SIG-Windows Do?
+
+***
+_"SIG-Windows is really the center for all things Windows in Kubernetes,"_ SIG chairs Mark and Michael said, _"We mainly focus on the compute side of things, but really anything related to running Kubernetes on Windows is in scope for SIG-Windows."_
+***
+
+In order to best serve users, SIG-Windows works to make the Kubernetes user experience as consistent as possible for users of Windows and Linux. However, some use cases simply apply only to one operating system, and as such, the SIG-Windows group also works to create functionality that is unique to Windows-only workloads.
+
+Many SIGs, or "Special Interest Groups," within Kubernetes have a narrow focus, allowing members to dive deep on a certain facet of the technology. While specific expertise is welcome, those interested in SIG-Windows will find it to be a great community to build broad understanding across many focus areas of Kubernetes. "Members from our SIG interface with storage, network, testing, cluster-lifecycle and other groups in Kubernetes."
+
+### Who are SIG-Windows' Users?
+The best way to understand the technology a group makes is often to understand who their customers or users are.
+
+#### "A majority of the users we've interacted with have business-critical infrastructure running on Windows developed over many years and can't move those workloads to Linux for various reasons (cost, time, compliance, etc.)," the SIG chairs shared. "By transporting those workloads into Windows containers and running them in Kubernetes, they are able to quickly modernize their infrastructure and help migrate it to the cloud."
+
+As anyone in the Kubernetes space can attest, companies around the world, in many different industries, see Kubernetes as their path to modernizing their infrastructure. Often this involves re-architecting or even totally re-inventing many of the ways they've been doing business, with the goal of making their systems more scalable, more robust, and more ready for anything the future may bring. But not every application or workload can or should change the core operating system it runs on, so many teams need the ability to run containers at scale on Windows, or Linux, or both.
+
+"Sometimes the driver to Windows containers is a modernization effort and sometimes it’s because of expiring hardware warranties or end-of-support cycles for the current operating system. Our efforts in SIG-Windows enable Windows developers to take advantage of cloud native tools and Kubernetes to build and deploy distributed applications faster. That’s exciting! In essence, users can retain the benefits of application availability while decreasing costs."
+
+## Who are SIG-Windows?
+
+Who are these contributors working on enabling Windows workloads for Kubernetes? It could be you!
+
+Like with other Kubernetes SIGs, contributors to SIG-Windows can be anyone from independent hobbyists to professionals who work at many different companies. They come from many different parts of the world and bring to the table many different skill sets.
+
+Image of several people chatting pleasantly
+
+_"Like most other Kubernetes SIGs, we are a very welcoming and open community," explained the SIG co-chairs Michael Michael and Mark Rossetti._
+
+
+### Becoming a contributor
+
+For anyone interested in getting started, the co-chairs added, "New contributors can view old community meetings on GitHub (we record every single meeting going back three years), read our documentation, attend new community meetings, ask questions in person or on Slack, and file some issues on GitHub. We also attend all KubeCon conferences and host 1-2 sessions, a contributor session, and meet-the-maintainer office hours."
+
+The co-chairs also shared a glimpse into what the path looks like to becoming a member of the SIG-Windows community:
+
+"We encourage new contributors to initially just join our community and listen, then start asking some questions and get educated on Windows in Kubernetes. As they feel comfortable, they could graduate to improving our documentation, file some bugs/issues, and eventually they can be a code contributor by fixing some bugs. If they have long-term and sustained substantial contributions to Windows, they could become a technical lead or a chair of SIG-Windows. You won't know if you love this area unless you get started :) To get started, [visit this getting-started page](https://github.com/kubernetes/community/tree/master/sig-windows). It's a one-stop shop with links to everything related to SIG-Windows in Kubernetes."
+
+When asked if there were any useful skills for new contributors, the co-chairs said,
+
+"We are always looking for expertise in Go and Networking and Storage, along with a passion for Windows. Those are huge skills to have. However, we don’t require such skills, and we welcome any and all contributors, with varying skill sets. If you don’t know something, we will help you acquire it."
+
+You can get in touch with the folks at SIG-Windows in their [Slack channel](https://kubernetes.slack.com/archives/C0SJ4AFB7) or attend one of their regular meetings - currently 30min long on Tuesdays at 12:30PM EST! You can find links to their regular meetings as well as past meeting notes and recordings from the [SIG-Windows README](https://github.com/kubernetes/community/tree/master/sig-windows#readme) on GitHub.
+
+As a closing message from SIG-Windows:
+
+***
+#### _"We welcome you to get involved and join our community to share feedback and deployment stories, and contribute to code, docs, and improvements of any kind."_
+***
diff --git a/content/en/case-studies/OWNERS deleted file mode 100644 index e4131d339e668..0000000000000 --- a/content/en/case-studies/OWNERS +++ /dev/null @@ -1,10 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -# Owned by Kubernetes Blog reviewers. -options: - no_parent_owners: false -reviewers: - - alexcontini -approvers: - - alexcontini - - sarahkconway diff --git a/content/en/case-studies/adform/index.html index e9a8acc7a22f2..be35a2d8375ee 100644 --- a/content/en/case-studies/adform/index.html +++ b/content/en/case-studies/adform/index.html @@ -12,7 +12,7 @@ Kubernetes enabled the self-healing and immutable infrastructure. We can do faster releases, so our developers are really happy. They can ship our features faster than before, and that makes our clients happier. --- -
+

CASE STUDY:
Improving Performance and Morale with Cloud Native

@@ -66,7 +66,7 @@

Adform made +
"The fact that Cloud Native Computing Foundation incubated Kubernetes was a really big point for us because it was vendor neutral. And we can see that a community really gathers around it. Everyone shares their experiences, their knowledge, and the fact that it’s open source, you can contribute."

— Edgaras Apšega, IT Systems Engineer, Adform
@@ -83,7 +83,7 @@

Adform made +
"Releases are really nice for them, because they just push their code to Git and that’s it. They don’t have to worry about their virtual machines anymore."

— Andrius Cibulskis, IT Systems Engineer, Adform
diff --git a/content/en/case-studies/adidas/index.html b/content/en/case-studies/adidas/index.html index 3f7982765a397..5f9d0da24addf 100644 --- a/content/en/case-studies/adidas/index.html +++ b/content/en/case-studies/adidas/index.html @@ -9,7 +9,7 @@ ​ -
+

CASE STUDY: adidas

Staying True to Its Culture, adidas Got 40% of Its Most Impactful Systems Running on Kubernetes in a Year
@@ -33,7 +33,7 @@

Impact

-
+
"For me, Kubernetes is a platform made by engineers for engineers. It’s relieving the development team from tasks that they don’t want to do, but at the same time giving the visibility of what is behind the curtain, so they can also control it."

- FERNANDO CORNAGO, SENIOR DIRECTOR OF PLATFORM ENGINEERING AT ADIDAS

@@ -74,7 +74,7 @@

In recent years, the adidas team was happy with its software choices from a ​ ​ -
+
“There is no competitive edge over our competitors like Puma or Nike in running and operating a Kubernetes cluster. Our competitive edge is that we teach our internal engineers how to build cool e-comm stores that are fast, that are resilient, that are running perfectly.”

- DANIEL EICHTEN, SENIOR DIRECTOR OF PLATFORM ENGINEERING AT ADIDAS

diff --git a/content/en/case-studies/ant-financial/index.html b/content/en/case-studies/ant-financial/index.html index 92b46526dee48..1711ef97b8157 100644 --- a/content/en/case-studies/ant-financial/index.html +++ b/content/en/case-studies/ant-financial/index.html @@ -7,7 +7,7 @@ featured: false --- -
+ -
+
-
+
"We’re very grateful for CNCF and this amazing technology, which we need as we continue to scale globally. We’re definitely embracing the community and open source more in the future."

- HAOJIE HANG, PRODUCT MANAGEMENT, ANT FINANCIAL
diff --git a/content/en/case-studies/appdirect/index.html b/content/en/case-studies/appdirect/index.html index 16d93cce5cb4e..ca6b0b8fe92a4 100644 --- a/content/en/case-studies/appdirect/index.html +++ b/content/en/case-studies/appdirect/index.html @@ -12,7 +12,7 @@ We made the right decisions at the right time. Kubernetes and the cloud native technologies are now seen as the de facto ecosystem. --- -
+

CASE STUDY:
AppDirect: How AppDirect Supported the 10x Growth of Its Engineering Staff with Kubernetes

@@ -53,7 +53,7 @@

With its end-to-end commerce platform for cloud-based products and services,

-
+
"We made the right decisions at the right time. Kubernetes and the cloud native technologies are now seen as the de facto ecosystem. We know where to focus our efforts in order to tackle the new wave of challenges we face as we scale out. The community is so active and vibrant, which is a great complement to our awesome internal team."

- Alexandre Gervais, Staff Software Developer, AppDirect
@@ -69,7 +69,7 @@

With its end-to-end commerce platform for cloud-based products and services, Lacerte’s strategy ultimately worked because of the very real impact the Kubernetes platform has had on deployment time. Due to less dependency on custom-made, brittle shell scripts with SCP commands, time to deploy a new version has shrunk from 4 hours to a few minutes. Additionally, the company invested a lot of effort to make things self-service for developers. "Onboarding a new service doesn’t require Jira tickets or meeting with three different teams," says Lacerte. Today, the company sees 1,600 deployments per week, compared to 1-30 before.

-
+
"I think our velocity would have slowed down a lot if we didn’t have this new infrastructure."

- Pierre-Alexandre Lacerte, Director of Software Development, AppDirect
diff --git a/content/en/case-studies/babylon/index.html b/content/en/case-studies/babylon/index.html index afdc0054114e7..dce06121750e0 100644 --- a/content/en/case-studies/babylon/index.html +++ b/content/en/case-studies/babylon/index.html @@ -12,7 +12,7 @@ --- -
+

CASE STUDY: Babylon

How Cloud Native Is Enabling Babylon’s Medical AI Innovations
@@ -36,7 +36,7 @@

Impact

Instead of waiting hours or days to be able to compute, teams can get access instantaneously. Clinical validations used to take 10 hours; now they are done in under 20 minutes. The portability of the cloud native platform has also enabled Babylon to expand into other countries.
-
+
“Kubernetes is a great platform for machine learning because it comes with all the scheduling and scalability that you need.”

- JÉRÉMIE VALLÉE, AI INFRASTRUCTURE LEAD AT BABYLON

@@ -84,7 +84,7 @@

Babylon’s mission is to put accessible and affordable healthcare services -
+
“Giving a Kubernetes-based platform to our data scientists has meant increased security, increased innovation through empowerment, and a more affordable health service as our cloud engineers are building an experience that is used by hundreds on a daily basis, rather than supporting specific bespoke use cases.”

- JEAN MARIE FERDEGUE, DIRECTOR OF PLATFORM OPERATIONS AT BABYLON

diff --git a/content/en/case-studies/booking-com/index.html b/content/en/case-studies/booking-com/index.html index ffeb3f27075cf..99369a2bf9d72 100644 --- a/content/en/case-studies/booking-com/index.html +++ b/content/en/case-studies/booking-com/index.html @@ -14,7 +14,7 @@ ​ -
+

CASE STUDY: Booking.com

After Learning the Ropes with a Kubernetes Distribution, Booking.com Built a Platform of Its Own
@@ -40,7 +40,7 @@

Impact

-
+
“As our users learn Kubernetes and become more sophisticated Kubernetes users, they put pressure on us to provide a better, more native Kubernetes experience, which is great. It’s a super healthy dynamic.”

- BEN TYLER, PRINCIPAL DEVELOPER, B PLATFORM TRACK AT BOOKING.COM

@@ -91,7 +91,7 @@

Booking.com has a long history with Kubernetes: In 2015, a team at the trave ​ ​ -
+
“We have a tutorial. You follow the tutorial. Your code is running. Then, it’s business-logic time. The time to gain access to resources is decreased enormously.”

- BEN TYLER, PRINCIPAL DEVELOPER, B PLATFORM TRACK AT BOOKING.COM

diff --git a/content/en/case-studies/booz-allen/index.html b/content/en/case-studies/booz-allen/index.html index 2a48c7f3b75a0..fdda5e976af5b 100644 --- a/content/en/case-studies/booz-allen/index.html +++ b/content/en/case-studies/booz-allen/index.html @@ -13,7 +13,7 @@ ​ -
+

CASE STUDY: Booz Allen Hamilton

How Booz Allen Hamilton Is Helping Modernize the Federal Government with Kubernetes
@@ -38,7 +38,7 @@

Impact

-
+
"When there’s a regulatory change in an agency, or a legislative change in Congress, or an executive order that changes the way you do business, how do I deploy that and get that out to the people who need it rapidly? At the end of the day, that’s the problem we’re trying to help the government solve with tools like Kubernetes."

- JOSH BOYD, CHIEF TECHNOLOGIST AT BOOZ ALLEN HAMILTON

@@ -75,7 +75,7 @@

The White House launched an IT modernization effort in 2017, and in addition ​ ​ -
+
"Kubernetes alone enables a dramatic reduction in cost as resources are prioritized to the day’s event"

- MARTIN FOLKOFF, SENIOR LEAD TECHNOLOGIST AT BOOZ ALLEN HAMILTON

diff --git a/content/en/case-studies/bose/index.html b/content/en/case-studies/bose/index.html index d22de2187af9c..c77f416c13715 100644 --- a/content/en/case-studies/bose/index.html +++ b/content/en/case-studies/bose/index.html @@ -11,7 +11,7 @@ The CNCF Landscape quickly explains what’s going on in all the different areas from storage to cloud providers to automation and so forth. This is our shopping cart to build a cloud infrastructure. We can go choose from the different aisles. --- -
+

CASE STUDY:
Bose: Supporting Rapid Development for Millions of IoT Products With Kubernetes

@@ -56,7 +56,7 @@

A household name in high-quality audio equipment, +
"Everybody on the team thinks in terms of automation, leaning out the processes, getting things done as quickly as possible. When you step back and look at what it means for a 50-plus-year-old speaker company to have that sort of culture, it really is quite incredible, and I think the tools that we use and the foundation that we’ve built with them is a huge piece of that."

- Dylan O’Mahony, Cloud Architecture Manager, Bose
@@ -70,7 +70,7 @@

A household name in high-quality audio equipment, +
"The CNCF Landscape quickly explains what’s going on in all the different areas from storage to cloud providers to automation and so forth. This is our shopping cart to build a cloud infrastructure. We can go choose from the different aisles."

- Josh West, Lead Cloud Engineer, Bose
diff --git a/content/en/case-studies/capital-one/index.html b/content/en/case-studies/capital-one/index.html index 773db4869e4e3..f95fb2acc703b 100644 --- a/content/en/case-studies/capital-one/index.html +++ b/content/en/case-studies/capital-one/index.html @@ -5,7 +5,7 @@ css: /css/style_case_studies.css --- -
+

CASE STUDY:
Supporting Fast Decisioning Applications with Kubernetes

@@ -55,7 +55,7 @@

-
+
"We want to provide the tools in the same ecosystem, in a consistent way, rather than have a large custom snowflake ecosystem where every tool needs its own custom deployment. Kubernetes gives us the ability to bring all of these together, so the richness of the open source and even the license community dealing with big data can be corralled." @@ -69,7 +69,7 @@

-
+
With Kubernetes, "a team can come to us and we can have them up and running with a basic decisioning app in a fortnight, which before would have taken a whole quarter, if not longer. Kubernetes is a manifold productivity multiplier."
diff --git a/content/en/case-studies/cern/index.html b/content/en/case-studies/cern/index.html index 9bd797024595e..48e965d7fb145 100644 --- a/content/en/case-studies/cern/index.html +++ b/content/en/case-studies/cern/index.html @@ -7,7 +7,7 @@ logo: cern_featured_logo.png --- -
+

CASE STUDY: CERN
CERN: Processing Petabytes of Data More Efficiently with Kubernetes

@@ -52,7 +52,7 @@

With a mission of researching fundamental science, and a stable of extremely

-
+
"Before, the tendency was always: ‘I need this, I get a couple of developers, and I implement it.’ Right now it’s ‘I need this, I’m sure other people also need this, so I’ll go and ask around.’ The CNCF is a good source because there’s a very large catalog of applications available. It’s very hard right now to justify developing a new product in-house. There is really no real reason to keep doing that. It’s much easier for us to try it out, and if we see it’s a good solution, we try to reach out to the community and start working with that community."

- Ricardo Rocha, Software Engineer, CERN
@@ -66,7 +66,7 @@

With a mission of researching fundamental science, and a stable of extremely

-
+
"With Kubernetes, there’s a well-established technology and a big community that we can contribute to. It allows us to do our physics analysis without having to focus so much on the lower level software. This is just exciting. We are looking forward to keep contributing to the community and collaborating with everyone."

- Ricardo Rocha, Software Engineer, CERN
diff --git a/content/en/case-studies/chinaunicom/index.html b/content/en/case-studies/chinaunicom/index.html index 296b2ce1fcadb..4479d60e672fa 100644 --- a/content/en/case-studies/chinaunicom/index.html +++ b/content/en/case-studies/chinaunicom/index.html @@ -8,7 +8,7 @@ featured: false --- -
+

CASE STUDY:
China Unicom: How China Unicom Leveraged Kubernetes to Boost Efficiency
and Lower IT Costs

@@ -51,7 +51,7 @@

With more than 300 million users, China Unicom is one of the country’s top

-
+
"We could never imagine we can achieve this scalability in such a short time."

- Chengyu Zhang, Group Leader of Platform Technology R&D, China Unicom
@@ -65,7 +65,7 @@

With more than 300 million users, China Unicom is one of the country’s top

-
+
"This technology is relatively complicated, but as long as developers get used to it, they can enjoy all the benefits."

- Jie Jia, Member of Platform Technology R&D, China Unicom
diff --git a/content/en/case-studies/city-of-montreal/index.html b/content/en/case-studies/city-of-montreal/index.html index 151ce44b21691..55378c649ed01 100644 --- a/content/en/case-studies/city-of-montreal/index.html +++ b/content/en/case-studies/city-of-montreal/index.html @@ -7,7 +7,7 @@ featured: false --- -
+

CASE STUDY:
City of Montréal - How the City of Montréal Is Modernizing Its 30-Year-Old, Siloed Architecture with Kubernetes

@@ -50,7 +50,7 @@

The second biggest municipality in Canada, Montréal has a large number of l The first step to modernize the architecture was containerization. “We based our effort on the new trends; we understood the benefits of immutability and deployments without downtime and such things,” says Solutions Architect Marc Khouzam. The team started with a small Docker farm with four or five servers, with Rancher for providing access to the Docker containers and their logs and Jenkins for deployment.

-
+
"Getting a project running in Kubernetes is entirely dependent on how long you need to program the actual software. It’s no longer dependent on deployment. Deployment is so fast that it’s negligible."

- MARC KHOUZAM, SOLUTIONS ARCHITECT, CITY OF MONTRÉAL
@@ -65,7 +65,7 @@

The second biggest municipality in Canada, Montréal has a large number of l Another important factor in the decision was vendor neutrality. “As a government entity, it is essential for us to be neutral in our selection of products and providers,” says Thibault. “The independence of the Cloud Native Computing Foundation from any company provides this.”

-
+
"Kubernetes has been great. It’s been stable, and it provides us with elasticity, resilience, and robustness. While re-architecting for Kubernetes, we also benefited from the monitoring and logging aspects, with centralized logging, Prometheus logging, and Grafana dashboards. We have enhanced visibility of what’s being deployed."

- MORGAN MARTINET, ENTERPRISE ARCHITECT, CITY OF MONTRÉAL
diff --git a/content/en/case-studies/denso/index.html b/content/en/case-studies/denso/index.html index 3ad0812d24654..27ef1c77ede7e 100644 --- a/content/en/case-studies/denso/index.html +++ b/content/en/case-studies/denso/index.html @@ -12,7 +12,7 @@ --- -
+

CASE STUDY: Denso

How DENSO Is Fueling Development on the Vehicle Edge with Kubernetes
@@ -36,7 +36,7 @@

Impact

Critical layer features can take 2-3 years to implement in the traditional, waterfall model of development at DENSO. With the Kubernetes platform and agile methods, there’s a 2-month development cycle for non-critical software. Now, ten new applications are released a year, and a new prototype is introduced every week. "By utilizing Kubernetes managed services, such as GKE/EKS/AKS, we can unify the environment and simplify our maintenance operation," says Koizumi.
-
+
"Another disruptive innovation is coming, so to survive in this situation, we need to change our culture."

- SEIICHI KOIZUMI, R&D PRODUCT MANAGER, DIGITAL INNOVATION DEPARTMENT AT DENSO

@@ -79,7 +79,7 @@

Spun off from Toyota in 1949, DENSO Corporation is one of the top automotive -
+
"By utilizing Kubernetes managed services, such as GKE/EKS/AKS, we can unify the environment and simplify our maintenance operation."

- SEIICHI KOIZUMI, R&D PRODUCT MANAGER, DIGITAL INNOVATION DEPARTMENT AT DENSO

diff --git a/content/en/case-studies/ibm/index.html b/content/en/case-studies/ibm/index.html index 54e941c9cb3c8..e9a78a944371f 100644 --- a/content/en/case-studies/ibm/index.html +++ b/content/en/case-studies/ibm/index.html @@ -9,7 +9,7 @@ featured: false --- -
+

CASE STUDY:
Building an Image Trust Service on Kubernetes with Notary and TUF

@@ -58,7 +58,7 @@

Docker had already created the Notary project as an implementation of -
+
"Image signing is one key part of our Kubernetes container service offering, and our container registry team saw Notary as the de facto way to implement that capability in the current Docker and container ecosystem"

- Michael Hough, a software developer with the IBM Cloud Container Registry team
@@ -75,7 +75,7 @@

Docker had already created the Notary project as an implementation of -
+
"With our IBM Cloud Kubernetes as-a-service offering and the admission controller we have made available, it allows both IBM services as well as customers of the IBM public cloud to use security policies to control service deployment."

- Michael Hough, a software developer with the IBM Cloud Container Registry team
diff --git a/content/en/case-studies/ing/index.html b/content/en/case-studies/ing/index.html index 6e2648a455139..943daec2dec8f 100644 --- a/content/en/case-studies/ing/index.html +++ b/content/en/case-studies/ing/index.html @@ -11,7 +11,7 @@ --- -
+

CASE STUDY:
Driving Banking Innovation with Cloud Native

@@ -58,7 +58,7 @@

ING has long embraced innovation in banking, launching the internet-based IN

-
+
"We decided to standardize ING on a Kubernetes framework." Everything is run on premise due to banking regulations, he adds, but "we will be building an internal public cloud. We are trying to get on par with what public clouds are doing. That’s one of the reasons we got Kubernetes."

— Thijs Ebbers, Infrastructure Architect, ING
@@ -72,7 +72,7 @@

ING has long embraced innovation in banking, launching the internet-based IN

-
+
"We have to run the complete platform of services we need, many routing from different places. We need this Kubernetes framework for deploying the containers, with all those components, monitoring, logging. It’s complex."

— Onno Van der Voort, Infrastructure Architect, ING
diff --git a/content/en/case-studies/jd-com/index.html b/content/en/case-studies/jd-com/index.html index 636f22633922c..aed12fc54b302 100644 --- a/content/en/case-studies/jd-com/index.html +++ b/content/en/case-studies/jd-com/index.html @@ -7,7 +7,7 @@ featured: false --- -
+

CASE STUDY:
JD.com: How JD.com Pioneered Kubernetes for E-Commerce at Hyperscale

@@ -51,7 +51,7 @@

With more than 300 million active users and $55.7 billion in annual revenues

-
+
"We customized Kubernetes and built a modern system on top of it. This entire ecosystem of Kubernetes plus our own optimizations have helped us save costs and time."

- HAIFENG LIU, CHIEF ARCHITECT, JD.com
@@ -67,7 +67,7 @@

With more than 300 million active users and $55.7 billion in annual revenues

-
+
"My advice is first you need to combine this technology with your own businesses, and the second is you need clear goals. You cannot just use the technology because others are using it. You need to consider your own objectives."

- HAIFENG LIU, CHIEF ARCHITECT, JD.com
diff --git a/content/en/case-studies/naic/index.html b/content/en/case-studies/naic/index.html index d40dd19c774fa..3deb91e4808a0 100644 --- a/content/en/case-studies/naic/index.html +++ b/content/en/case-studies/naic/index.html @@ -9,7 +9,7 @@ featured: false --- -
+

CASE STUDY:
A Culture and Technology Transition Enabled by Kubernetes

@@ -59,7 +59,7 @@

Impact

-
+
"In our experience, vendor lock-in and tooling that is highly specific results in less resilient technology with fewer minds working to solve problems and grow the community."

- Dan Barker, Chief Enterprise Architect, NAIC
@@ -77,7 +77,7 @@

Impact

-
+
"We knew that Kubernetes had become the de facto standard for container orchestration. Two major factors for selecting this were the three major cloud vendors hosting their own versions and having it hosted in a neutral party as fully open source."

- Dan Barker, Chief Enterprise Architect, NAIC
diff --git a/content/en/case-studies/nav/index.html b/content/en/case-studies/nav/index.html index d4cc89590d9f5..bd606e7314244 100644 --- a/content/en/case-studies/nav/index.html +++ b/content/en/case-studies/nav/index.html @@ -8,7 +8,7 @@ featured: false --- -
+

CASE STUDY:
How A Startup Reduced Its Infrastructure Costs by 50% With Kubernetes

@@ -52,7 +52,7 @@

Founded in 2012, Nav provides small busin

-
+
"The community is absolutely vital: being able to pass ideas around, talk about a lot of the similar challenges that we’re all facing, and just get help. I like that we’re able to tackle the same problems for different reasons but help each other along the way."

- Travis Jeppson, Director of Engineering, Nav
@@ -65,7 +65,7 @@

Founded in 2012, Nav provides small busin Jeppson’s four-person Engineering Services team got Kubernetes up and running in six months (they decided to use Kubespray to spin up clusters), and the full migration of Nav’s 25 microservices and one primary monolith was completed in another six months. “We couldn’t rewrite everything; we couldn’t stop,” he says. “We had to stay up, we had to stay available, and we had to have minimal amount of downtime. So we got really comfortable around our building pipeline, our metrics and logging, and then around Kubernetes itself: how to launch it, how to upgrade it, how to service it. And we moved little by little.”

-
+
“Kubernetes has brought so much value to Nav by allowing all of these new freedoms that we had just never had before.”

- Travis Jeppson, Director of Engineering, Nav
diff --git a/content/en/case-studies/nerdalize/index.html b/content/en/case-studies/nerdalize/index.html index 127d95c375b51..2756ce431c2f5 100644 --- a/content/en/case-studies/nerdalize/index.html +++ b/content/en/case-studies/nerdalize/index.html @@ -6,7 +6,7 @@ css: /css/style_case_studies.css featured: false --- -
+

CASE STUDY:
Nerdalize: Providing Affordable and Sustainable Cloud Hosting with Kubernetes

@@ -47,7 +47,7 @@

Nerdalize is a cloud hosting provider that has no data centers. Instead, the After trying to develop its own scheduling system using another open source tool, Nerdalize found Kubernetes. “Kubernetes provided us with more functionality out of the gate,” says van der Veer.

-
+
“We always try to get a working version online first, like minimal viable products, and then move to stabilize that,” says van der Veer. “And I think that these kinds of day-two problems are now immediately solved. The rapid prototyping we saw internally is a very valuable aspect of Kubernetes.”

— AD VAN DER VEER, PRODUCT ENGINEER, NERDALIZE
@@ -62,7 +62,7 @@

Nerdalize is a cloud hosting provider that has no data centers. Instead, the Not to mention the 40% cost savings. “Every euro that we have to invest for licensing of software that’s not open source comes from that 40%,” says van der Veer. If Nerdalize had used a non-open source orchestration platform instead of Kubernetes, “that would reduce our cost savings proposition to like 30%. Kubernetes directly allows us to have this business model and this strategic advantage.”

-
+
“One of our customers used to spend up to a day setting up the virtual machines, network and software every time they wanted to run a project in the cloud. On our platform, with Docker and Kubernetes, customers can have their projects running in a couple of minutes.”

- MAAIKE STOOPS, CUSTOMER EXPERIENCE QUEEN, NERDALIZE
diff --git a/content/en/case-studies/netease/index.html b/content/en/case-studies/netease/index.html index a62ade486ff73..6cba5579abd5b 100644 --- a/content/en/case-studies/netease/index.html +++ b/content/en/case-studies/netease/index.html @@ -9,7 +9,7 @@ --- -
+

CASE STUDY:
How NetEase Leverages Kubernetes to Support Internet Business Worldwide

@@ -47,7 +47,7 @@

Its gaming business is the Kubernetes. The fact that the technology came out of Google gave the team confidence that it could keep up with NetEase’s scale. “After our 2-to-3-month evaluation, we believed it could satisfy our needs,” says Feng.

-
+
"We leveraged the programmability of Kubernetes so that we can build a platform to satisfy the needs of our internal customers for upgrades and deployment."

- Feng Changjian, Architect for NetEase Cloud and Container Service, NetEase
@@ -60,7 +60,7 @@

Its gaming business is the +
"As long as a company has a mature team and enough developers, I think Kubernetes is a very good technology that can help them."

- Li Lanqing, Kubernetes Developer, NetEase
diff --git a/content/en/case-studies/newyorktimes/index.html b/content/en/case-studies/newyorktimes/index.html index c65b5fe88355f..53dbd06a55085 100644 --- a/content/en/case-studies/newyorktimes/index.html +++ b/content/en/case-studies/newyorktimes/index.html @@ -5,7 +5,7 @@ css: /css/style_case_studies.css --- -
+

CASE STUDY:
The New York Times: From Print to the Web to Cloud Native

@@ -64,7 +64,7 @@

Impact

-
+
"We had some internal tooling that attempted to do what Kubernetes does for containers, but for VMs. We asked why are we building and maintaining these tools ourselves?"
@@ -79,7 +79,7 @@

Impact

-
+
"Right now, every team is running a small Kubernetes cluster, but it would be nice if we could all live in a larger ecosystem," says Kapadia. "Then we can harness the power of things like service mesh proxies that can actually do a lot of instrumentation between microservices, or service-to-service orchestration. Those are the new things that we want to experiment with as we go forward." diff --git a/content/en/case-studies/nokia/index.html b/content/en/case-studies/nokia/index.html index d8aaafc7f5e62..f824685327282 100644 --- a/content/en/case-studies/nokia/index.html +++ b/content/en/case-studies/nokia/index.html @@ -8,7 +8,7 @@ --- -
+

CASE STUDY:
Nokia: Enabling 5G and DevOps at a Telecom Company with Kubernetes

@@ -51,7 +51,7 @@

Nokia was the first name in mobile phones when they were becoming ubiquitous

-
+
"Having the community and CNCF around Kubernetes is not only important for having a connection to other companies who are using Kubernetes and a forum where you can ask or discuss features of Kubernetes. But as a company who would like to contribute to Kubernetes, it was very important to have a CLA (Contributors License Agreement) which is connected to the CNCF and not to a particular company. That was a critical step for us to start contributing to Kubernetes and Helm."

- Gergely Csatari, Senior Open Source Engineer, Nokia
@@ -65,7 +65,7 @@

Nokia was the first name in mobile phones when they were becoming ubiquitous

-
+
"Kubernetes opened the window to all of these open source projects instead of implementing everything in house. Our engineers can focus more on the application level, which is actually the thing what we are selling, and not on the infrastructure level. For us, the most important thing about Kubernetes is it allows us to focus on value creation of our business."

- Gergely Csatari, Senior Open Source Engineer, Nokia
diff --git a/content/en/case-studies/nordstrom/index.html b/content/en/case-studies/nordstrom/index.html index 5385c2473d481..788453de35a06 100644 --- a/content/en/case-studies/nordstrom/index.html +++ b/content/en/case-studies/nordstrom/index.html @@ -5,7 +5,7 @@ css: /css/style_case_studies.css --- -
+

CASE STUDY:
Finding Millions in Potential Savings in a Tough Retail Climate
@@ -60,7 +60,7 @@

Impact

-
+
"We made a bet that Kubernetes was going to take off, informed by early indicators of community support and project velocity, so we rebuilt our system with Kubernetes at the core,"
@@ -77,7 +77,7 @@

Impact

-
+
"Teams running on our Kubernetes cluster loved the fact that they had fewer issues to worry about. They didn’t need to manage infrastructure or operating systems," says Grigoriu. "Early adopters loved the declarative nature of Kubernetes. They loved the reduced surface area they had to deal with."
diff --git a/content/en/case-studies/northwestern-mutual/index.html b/content/en/case-studies/northwestern-mutual/index.html index dac0ef0d66d55..47b4bbc7be649 100644 --- a/content/en/case-studies/northwestern-mutual/index.html +++ b/content/en/case-studies/northwestern-mutual/index.html @@ -5,7 +5,7 @@ css: /css/style_case_studies.css --- -
+

CASE STUDY:
Cloud Native at Northwestern Mutual
@@ -22,7 +22,7 @@

CASE STUDY:

Challenge

- In the spring of 2015, Northwestern Mutual acquired a fintech startup, LearnVest, and decided to take "Northwestern Mutual’s leading products and services and meld it with LearnVest’s digital experience and innovative financial planning platform," says Brad Williams, Director of Engineering for Client Experience, Northwestern Mutual. The company’s existing infrastructure had been optimized for batch workflows hosted on on-prem networks; deployments were very traditional, focused on following a process instead of providing deployment agility. "We had to build a platform that was elastically scalable, but also much more responsive, so we could quickly get data to the client website so our end-customers have the experience they expect," says Williams. + In the spring of 2015, Northwestern Mutual acquired a fintech startup, LearnVest, and decided to take "Northwestern Mutual’s leading products and services and meld it with LearnVest’s digital experience and innovative financial planning platform," says Brad Williams, Director of Engineering for Client Experience, Northwestern Mutual. The company’s existing infrastructure had been optimized for batch workflows hosted on on-prem networks; deployments were very traditional, focused on following a process instead of providing deployment agility. "We had to build a platform that was elastically scalable, but also much more responsive, so we could quickly get data to the client website so our end-customers have the experience they expect," says Williams.

Solution

The platform team came up with a plan for using the public cloud (AWS), Docker containers, and Kubernetes for orchestration. "Kubernetes gave us that base framework so teams can be very autonomous in what they’re building and deliver very quickly and frequently," says Northwestern Mutual Cloud Native Engineer Frank Greco Jr. The team also built and open-sourced Kanali, a Kubernetes-native API management tool that uses OpenTracing, Jaeger, and gRPC. @@ -53,7 +53,7 @@

For more than 160 years, Northwestern Mutual has maintained its industry lea

-
+
"Kubernetes has definitely been the right choice for us. It gave us that base framework so teams can be autonomous in what they’re building and deliver very quickly and frequently." @@ -63,12 +63,12 @@

For more than 160 years, Northwestern Mutual has maintained its industry lea
Williams and the rest of the platform team decided that the first step would be to start moving from private data centers to AWS. With a new microservice architecture in mind—and the freedom to implement what was best for the organization—they began using Docker containers. After looking into the various container orchestration options, they went with Kubernetes, even though it was still in beta at the time. "There was some debate whether we should build something ourselves, or just leverage that product and evolve with it," says Northwestern Mutual Cloud Native Engineer Frank Greco Jr. "Kubernetes has definitely been the right choice for us. It gave us that base framework so teams can be autonomous in what they’re building and deliver very quickly and frequently."

As early adopters, the team had to do a lot of work with Ansible scripts to stand up the cluster. "We had a lot of hard security requirements given the nature of our business," explains Bryan Pfremmer, App Platform Teams Manager, Northwestern Mutual. "We found ourselves running a configuration that very few other people ever tried." The client experience group was the first to use the new platform; today, a few hundred of the company’s 1,500 engineers are using it and more are eager to get on board. -The results have been dramatic. Before, infrastructure deployments could take two weeks; now, it is done in a matter of minutes. Now with a focus on Infrastructure automation, and self-service, "You can take an app to production in that same day if you want to," says Pfremmer. +The results have been dramatic. Before, infrastructure deployments could take two weeks; now, it is done in a matter of minutes. Now with a focus on Infrastructure automation, and self-service, "You can take an app to production in that same day if you want to," says Pfremmer.
-
+
"Now, developers have autonomy, they can use this whenever they want, however they want. It becomes more valuable the more instrumentation downstream that happens, as we mature in it."
diff --git a/content/en/case-studies/ocado/index.html b/content/en/case-studies/ocado/index.html index 6a930f945ce6b..79ac9bf3a826a 100644 --- a/content/en/case-studies/ocado/index.html +++ b/content/en/case-studies/ocado/index.html @@ -11,7 +11,7 @@ quote: > People at Ocado Technology have been quite amazed. They ask, ‘Can we do this on a Dev cluster?’ and 10 minutes later we have rolled out something that is deployed across the cluster. The speed from idea to implementation to deployment is amazing. --- -
+

CASE STUDY:
Ocado: Running Grocery Warehouses with a Cloud Native Platform

@@ -32,7 +32,7 @@

Solution

- +

Impact

With Kubernetes, "the speed from idea to implementation to deployment is amazing," says Bryant. "I’ve seen features go from development to production inside of a week now. In the old world, a new application deployment could easily take over a month." And because there are no longer restrictive deployment windows in the warehouses, the rate of deployments has gone from as few as two per week to dozens per week. Ocado has also achieved cost savings because Kubernetes gives the team the ability to have more fine-grained resource allocation. Says DevOps Team Leader Kevin McCormack: "We have more confidence in the resource allocation/separation features of Kubernetes, so we have been able to migrate from around 10 fleet clusters to one Kubernetes cluster." The team also uses Prometheus and Grafana to visualize resource allocation, and makes the data available to developers. "The increased visibility offered by Prometheus means developers are more aware of what they are using and how their use impacts others, especially since we now have one shared cluster," says McCormack. "I’d estimate that we use about 15-25% less hardware resources to host the same applications in Kubernetes in our test environments." @@ -54,7 +54,7 @@

When it was founded in 2000, Ocado was an online-only grocery retailer in th

-
+
"We were looking for a platform with wide adoption, and that was where the momentum was, the two paths converged, and we didn’t even go through any proof-of-concept stage. The Code for Life work served that purpose,"

- Kevin McCormack, DevOps Team Leader, Ocado
@@ -68,7 +68,7 @@

When it was founded in 2000, Ocado was an online-only grocery retailer in th

-
+
"The unified API of Kubernetes means this is all in one place, and it’s one flow for approval and rollout. I’ve seen features go from development to production inside of a week now. In the old world, a new application deployment could easily take over a month."

- Mike Bryant, Platform Engineer, Ocado
diff --git a/content/en/case-studies/openAI/index.html b/content/en/case-studies/openAI/index.html index 040f704efa952..1b95ec5f35758 100644 --- a/content/en/case-studies/openAI/index.html +++ b/content/en/case-studies/openAI/index.html @@ -5,7 +5,7 @@ css: /css/style_case_studies.css --- -
+

CASE STUDY:
Launching and Scaling Up Experiments, Made Simple

@@ -56,7 +56,7 @@

From experiments in robotics to old-school video game play research, OpenAI

-
+
OpenAI’s experiments take advantage of Kubernetes’ benefits, including portability. "Because Kubernetes provides a consistent API, we can move our research experiments very easily between clusters..."
@@ -69,7 +69,7 @@

From experiments in robotics to old-school video game play research, OpenAI

-
+
"One of our researchers who is working on a new distributed training system has been able to get his experiment running in two or three days," says Berner. "In a week or two he scaled it out to hundreds of GPUs. Previously, that would have easily been a couple of months of work."
diff --git a/content/en/case-studies/pearson/index.html b/content/en/case-studies/pearson/index.html index ddb567afb3d09..78f70228e5a30 100644 --- a/content/en/case-studies/pearson/index.html +++ b/content/en/case-studies/pearson/index.html @@ -8,7 +8,7 @@ quote: > We’re already seeing tremendous benefits with Kubernetes—improved engineering productivity, faster delivery of applications and a simplified infrastructure. But this is just the beginning. Kubernetes will help transform the way that educational content is delivered online. --- -
+

CASE STUDY:
Reinventing the World’s Largest Education Company With Kubernetes

@@ -47,7 +47,7 @@

Impact

The team adopted Kubernetes when it was still version 1.2 and are still going strong now on 1.7; they use Terraform and Ansible to deploy it on to basic AWS primitives. "We were trying to understand how we can create value for Pearson from this technology," says Ben Somogyi, Principal Architect for the Cloud Platforms. "It turned out that Kubernetes’ benefits are huge. We’re trying to help our applications development teams that use our platform go faster, so we filled that gap with a CI/CD pipeline that builds their images for them, standardizes them, patches everything up, allows them to deploy their different environments onto the cluster, and obfuscating the details of how difficult the work underneath the covers is."
-
+
"Your internal customers need to feel like they are choosing the very best option for them. We are experiencing this first hand in the growth of adoption. We are seeing triple-digit, year-on-year growth of the service."

— Chris Jackson, Director for Cloud Platforms & SRE at Pearson
@@ -60,7 +60,7 @@

Impact

Jackson estimates they’ve achieved a 15-20% boost in productivity for developer teams who adopt the platform. They also see a reduction in the number of customer-impacting incidents. Plus, says Jackson, "Teams who were previously limited to 1-2 releases per academic year can now ship code multiple times per day!"
-
+
"Teams who were previously limited to 1-2 releases per academic year can now ship code multiple times per day!"

— Chris Jackson, Director for Cloud Platforms & SRE at Pearson
diff --git a/content/en/case-studies/pingcap/index.html b/content/en/case-studies/pingcap/index.html index 637f891b3ebc8..8d032c7a8bdce 100644 --- a/content/en/case-studies/pingcap/index.html +++ b/content/en/case-studies/pingcap/index.html @@ -7,7 +7,7 @@ featured: false --- -
+

CASE STUDY:
PingCAP Bets on Cloud Native for Its TiDB Database Platform

@@ -52,7 +52,7 @@

Since it was introduced in 2015, the open source NewSQL database TiDB has ga Knowing that using a distributed system isn’t easy, the PingCAP team began looking for the right orchestration layer to help reduce some of that complexity for end users. Kubernetes had been on their radar for quite some time. "We knew Kubernetes had the promise of helping us solve our problems," says Xu. "We were just waiting for it to mature."

-
+
-
+
"A cloud native infrastructure will not only save you money and allow you to be more in control of the infrastructure resources you consume, but also empower new product innovation, new experience for your users, and new business possibilities. It’s both a cost reducer and a money maker."

- KEVIN XU, GENERAL MANAGER OF GLOBAL STRATEGY AND OPERATIONS, PINGCAP
diff --git a/content/en/case-studies/pinterest/index.html b/content/en/case-studies/pinterest/index.html index 0aa2381aa12d6..e4be7031bbe32 100644 --- a/content/en/case-studies/pinterest/index.html +++ b/content/en/case-studies/pinterest/index.html @@ -11,7 +11,7 @@ --- -
+

CASE STUDY:
Pinning Its Past, Present, and Future on Cloud Native

@@ -60,7 +60,7 @@

Pinterest was born on the cloud—running on +
"Though Kubernetes lacked certain things we wanted, we realized that by the time we get to productionizing many of those things, we’ll be able to leverage what the community is doing."

— MICHEAL BENEDICT, PRODUCT MANAGER FOR THE CLOUD AND THE DATA INFRASTRUCTURE GROUP AT PINTEREST
@@ -75,7 +75,7 @@

Pinterest was born on the cloud—running on
+
"So far it’s been good, especially the elasticity around how we can configure our Jenkins workloads on Kubernetes shared cluster. That is the win we were pushing for."

— MICHEAL BENEDICT, PRODUCT MANAGER FOR THE CLOUD AND THE DATA INFRASTRUCTURE GROUP AT PINTEREST
diff --git a/content/en/case-studies/prowise/index.html b/content/en/case-studies/prowise/index.html index 03bbc5117337e..2f0beda5ae62f 100644 --- a/content/en/case-studies/prowise/index.html +++ b/content/en/case-studies/prowise/index.html @@ -8,7 +8,7 @@ --- -
+

CASE STUDY:
Prowise: How Kubernetes is Enabling the Edtech Solution’s Global Expansion

@@ -50,7 +50,7 @@

If you haven’t set foot in a school in awhile, you might be surprised by w The company’s existing infrastructure on Microsoft Azure Cloud was all on virtual machines, “a pretty traditional setup,” van den Bosch says. “We decided that we want some features in our software that requires being able to scale quickly, being able to deploy new applications and versions on different versions of different programming languages quickly. And we didn’t really want the hassle of trying to keep those servers in a particular state.”

-
+
"You don’t have to go all-in immediately. You can just take a few projects, a service, run it alongside your more traditional stack, and build it up from there. Kubernetes scales, so as you add applications and services to it, it will scale with you. You don’t have to do it all at once, and that’s really a secret to everything, but especially true to Kubernetes."

— VICTOR VAN DEN BOSCH, SENIOR DEVOPS ENGINEER, PROWISE
@@ -67,7 +67,7 @@

If you haven’t set foot in a school in awhile, you might be surprised by w With its first web-based applications now running in beta on Prowise’s Kubernetes platform, the team is seeing the benefits of rapid and smooth deployments. “The old way of deploying took half an hour of preparations and half an hour deploying it. With Kubernetes, it’s a couple of seconds,” says Senior Developer Bart Haalstra. As a result, adds van den Bosch, “We’ve gone from quarterly releases to a release every month in production. We’re pretty much deploying every hour or just when we find that a feature is ready for production. Before, our releases were mostly done on off-hours, where it couldn’t impact our customers, as our confidence in the process itself was relatively low. With Kubernetes, we dare to deploy in the middle of a busy day with high confidence the deployment will succeed.”

-
+
"Kubernetes allows us to really consider the best tools for a problem. Want to have a full-fledged analytics application developed by a third party that is just right for your use case? Run it. Dabbling in machine learning and AI algorithms but getting tired of waiting days for training to complete? It takes only seconds to scale it. Got a stubborn developer that wants to use a programming language no one has heard of? Let him, if it runs in a container, of course. And all of that while your operations team/DevOps get to sleep at night."

- VICTOR VAN DEN BOSCH, SENIOR DEVOPS ENGINEER, PROWISE
diff --git a/content/en/case-studies/ricardo-ch/index.html b/content/en/case-studies/ricardo-ch/index.html index 62501c4f5b237..2863ceac755e9 100644 --- a/content/en/case-studies/ricardo-ch/index.html +++ b/content/en/case-studies/ricardo-ch/index.html @@ -7,7 +7,7 @@ featured: false --- -
+

CASE STUDY:
ricardo.ch: How Kubernetes Improved Velocity and DevOps Harmony

@@ -48,7 +48,7 @@

When Cedric Meury joined ricardo.ch in 2016, he saw a clear divide between O To address the velocity issue, ricardo.ch CTO Jeremy Seitz established a new software factory called EPD, which consists of 65 engineers, 7 product managers and 2 designers. "We brought these three departments together so that they can kind of streamline this and talk to each other much more closely," says Meury.

-
+
"Being in the End User Community demonstrates that we stand behind these technologies. In Switzerland, if all the companies see that ricardo.ch’s using it, I think that will help adoption. I also like that we’re connected to the other end users, so if there is a really heavy problem, I could go to the Slack channel, and say, ‘Hey, you guys…’ Like Reddit, Github and New York Times or whoever can give a recommendation on what to use here or how to solve that. So that’s kind of a superpower."

— CEDRIC MEURY, HEAD OF PLATFORM ENGINEERING, RICARDO.CH
@@ -64,7 +64,7 @@

When Cedric Meury joined ricardo.ch in 2016, he saw a clear divide between O Meury estimates that half of the application has been migrated to Kubernetes. And the plan is to move everything to the Google Cloud Platform by the end of 2018. "We are still running some servers in our own data centers, but all of the containerization efforts and describing our services as Kubernetes manifests will allow us to quite easily make that shift," says Meury.

-
+
"One of the core moments was when a front-end developer asked me how to do a port forward from his laptop to a front-end application to debug, and I told him the command. And he was like, ‘Wow, that’s all I need to do?’ He was super excited and happy about it. That showed me that this power in the right hands can just accelerate development."

- CEDRIC MEURY, HEAD OF PLATFORM ENGINEERING, RICARDO.CH
diff --git a/content/en/case-studies/slamtec/index.html b/content/en/case-studies/slamtec/index.html index 4a99d28fb3e89..86ebe15f9122f 100644 --- a/content/en/case-studies/slamtec/index.html +++ b/content/en/case-studies/slamtec/index.html @@ -7,7 +7,7 @@ featured: false --- -
+

CASE STUDY:



@@ -47,7 +47,7 @@

Founded in 2013, Slamtec provides service robot autonomous localization and After an evaluation of existing technologies, Ji’s team chose Kubernetes for orchestration. "CNCF brings quality assurance and a complete ecosystem for Kubernetes, which is very important for the wide application of Kubernetes," says Ji. Plus, "avoiding binding to an infrastructure technology or provider can help us ensure that our business is deployed and migrated in cross-regional environments, and can serve users all over the world."

-
+
-
+
"Cloud native is suitable for microservice architecture, it’s suitable for fast iteration and agile development, and it has a relatively perfect ecosystem and active community."

- BENNIU JI, DIRECTOR OF CLOUD COMPUTING BUSINESS DIVISION
diff --git a/content/en/case-studies/slingtv/index.html b/content/en/case-studies/slingtv/index.html index a11527c2d9429..349ed8c2de9c7 100644 --- a/content/en/case-studies/slingtv/index.html +++ b/content/en/case-studies/slingtv/index.html @@ -11,7 +11,7 @@ --- -
+

CASE STUDY:
Sling TV: Marrying Kubernetes and AI to Enable Proper Web Scale

@@ -62,7 +62,7 @@

The beauty of streaming television, like the service offered by +
“We needed the flexibility to enable our use case versus just a simple orchestrator. Enabling our future in a way that did not give us vendor lock-in was also a key part of our strategy. I think that is part of the Rancher value proposition.”

— Brad Linder, Cloud Native & Big Data Evangelist for Sling TV
@@ -75,7 +75,7 @@

The beauty of streaming television, like the service offered by +
“We have to be able to react to changes and hiccups in the matrix. It is the foundation for our ability to deliver a high-quality service for our customers."

— Brad Linder, Cloud Native & Big Data Evangelist for Sling TV
diff --git a/content/en/case-studies/sos/index.html b/content/en/case-studies/sos/index.html index 64708a20f8ceb..becf486413c1c 100644 --- a/content/en/case-studies/sos/index.html +++ b/content/en/case-studies/sos/index.html @@ -8,7 +8,7 @@ --- -
+

CASE STUDY:
SOS International: Using Kubernetes to Provide Emergency Assistance in a Connected World

@@ -56,7 +56,7 @@

For six decades, SOS International has provided reliable emergency medical a

-
+
"We have to deliver new digital services, but we also have to migrate the old stuff, and we have to transform our core systems into new systems built on top of this platform. One of the reasons why we chose this technology is that we could build new digital services while changing the old one."

- Martin Ahrentsen, Head of Enterprise Architecture, SOS International
@@ -70,7 +70,7 @@

For six decades, SOS International has provided reliable emergency medical a

-
+
"During our onboarding, we could see that we were chosen by IT professionals because we provided the new technologies."

- Martin Ahrentsen, Head of Enterprise Architecture, SOS International
diff --git a/content/en/case-studies/spotify/index.html b/content/en/case-studies/spotify/index.html index 85e7fc1e86555..63243b08f6238 100644 --- a/content/en/case-studies/spotify/index.html +++ b/content/en/case-studies/spotify/index.html @@ -7,7 +7,7 @@ featured: false --- -
+

CASE STUDY: Spotify
Spotify: An Early Adopter of Containers, Spotify Is Migrating from Homegrown Orchestration to Kubernetes

@@ -52,7 +52,7 @@

"Our goal is to empower creators and enable a really immersive listening exp

-
+
"The community has been extremely helpful in getting us to work through all the technology much faster and much easier. And it’s helped us validate all the things we’re doing."

- Dave Zolotusky, Software Engineer, Infrastructure and Operations, Spotify
@@ -67,7 +67,7 @@

"Our goal is to empower creators and enable a really immersive listening exp

-
+
"We were able to use a lot of the Kubernetes APIs and extensibility features to support and interface with our legacy infrastructure, so the integration was straightforward and easy."

- James Wen, Site Reliability Engineer, Spotify
diff --git a/content/en/case-studies/squarespace/index.html b/content/en/case-studies/squarespace/index.html index d2b2a18c92cc5..27340835f43bd 100644 --- a/content/en/case-studies/squarespace/index.html +++ b/content/en/case-studies/squarespace/index.html @@ -5,7 +5,7 @@ css: /css/style_case_studies.css --- -
+

CASE STUDY:
Squarespace: Gaining Productivity and Resilience with Kubernetes

@@ -51,7 +51,7 @@

Since it was started in a dorm room in 2003, Squarespace has made it simple

-
+
After experimenting with another container orchestration platform and "breaking it in very painful ways," Lynch says, the team began experimenting with Kubernetes in mid-2016 and found that it "answered all the questions that we had." @@ -68,7 +68,7 @@

Since it was started in a dorm room in 2003, Squarespace has made it simple

-
+
"We switched to Kubernetes, a new world....It allowed us to streamline our process, so we can now easily create an entire microservice project from templates," Lynch says. And the whole process takes only five minutes, an almost 85% reduction in time compared to their VM deployment.
diff --git a/content/en/case-studies/thredup/index.html b/content/en/case-studies/thredup/index.html index 0a35de2b1ae1c..ad990356ff06d 100644 --- a/content/en/case-studies/thredup/index.html +++ b/content/en/case-studies/thredup/index.html @@ -7,7 +7,7 @@ featured: false --- -
+

CASE STUDY:



@@ -49,7 +49,7 @@

The largest online consignment store for women’s and children’s clothes, "We wanted to make sure that our engineers could embrace the DevOps mindset as they built software," Homer says. "It was really important to us that they could own the life cycle from end to end, from conception at design, through shipping it and running it in production, from marketing to ecommerce, the user experience and our internal distribution center operations."

-
+
"Kubernetes enabled auto scaling in a seamless and easily manageable way on days like Black Friday. We no longer have to sit there adding instances, monitoring the traffic, doing a lot of manual work."

- CHRIS HOMER, COFOUNDER/CTO, THREDUP
@@ -62,7 +62,7 @@

The largest online consignment store for women’s and children’s clothes, According to the infrastructure team, the key improvement was the consistent experience Kubernetes enabled for developers. "It lets developers work in the same environment that their application will be running in production," says Infrastructure Engineer Oleksandr Snagovskyi. Plus, "It became easier to test, easier to refine, and easier to deploy, because everything’s done automatically," says Infrastructure Engineer Oleksii Asiutin. "One of the main goals of our team is to make developers’ lives more comfortable, and we are achieving this with Kubernetes. They can experiment with existing applications and create new services, and do it all blazingly fast."

-
+
"One of the main goals of our team is to make developers’ lives more comfortable, and we are achieving this with Kubernetes. They can experiment with existing applications and create new services, and do it all blazingly fast."

- OLEKSII ASIUTIN, INFRASTRUCTURE ENGINEER, THREDUP
diff --git a/content/en/case-studies/vsco/index.html b/content/en/case-studies/vsco/index.html index 4ca7aa1bbcc76..c2ac2a2a7279c 100644 --- a/content/en/case-studies/vsco/index.html +++ b/content/en/case-studies/vsco/index.html @@ -7,7 +7,7 @@ featured: false --- -
+

CASE STUDY:
VSCO: How a Mobile App Saved 70% on Its EC2 Bill with Cloud Native

@@ -48,7 +48,7 @@

A photography app for mobile, VSCO was born in the cloud in 2011. In the beg

-
+
"Kubernetes seemed to have the strongest open source community around it, plus, we had started to standardize on a lot of the Google stack, with Go as a language, and gRPC for almost all communication between our own services inside the data center. So it seemed pretty natural for us to choose Kubernetes."

- MELINDA LU, ENGINEERING MANAGER FOR VSCO'S MACHINE LEARNING TEAM
@@ -64,7 +64,7 @@

A photography app for mobile, VSCO was born in the cloud in 2011. In the beg

-
+
"I've been really impressed seeing how our engineers have come up with really creative solutions to things by just combining a lot of Kubernetes primitives, exposing Kubernetes constructs as a service to our engineers as opposed to exposing higher order constructs has worked well for us. It lets you get familiar with the technology and do more interesting things with it."

- MELINDA LU, ENGINEERING MANAGER FOR VSCO’S MACHINE LEARNING TEAM
diff --git a/content/en/case-studies/woorank/index.html b/content/en/case-studies/woorank/index.html index aa41b7cb44754..fbb86bdd24e39 100644 --- a/content/en/case-studies/woorank/index.html +++ b/content/en/case-studies/woorank/index.html @@ -8,7 +8,7 @@ --- -
+

CASE STUDY:
Woorank: How Kubernetes Helped a Startup Manage 50 Microservices with
12 Engineers—At 30% Less Cost

@@ -50,7 +50,7 @@

Woorank’s core product is a tool that enables digital marketers to improve

-
+
"Cloud native technologies have brought to us a transparency on everything going on in our system, from the code to the server. It has brought huge cost savings and a better way of dealing with those costs and keeping them under control. And performance-wise, it has helped our team understand how we can make our code work better on the cloud native infrastructure."

— NILS DE MOOR, CTO/COFOUNDER, WOORANK
@@ -66,7 +66,7 @@

Woorank’s core product is a tool that enables digital marketers to improve The company’s number one concern was immediately erased: Maintaining Kubernetes is the responsibility of just one person on staff, and it’s not his fulltime job. Updating the old infrastructure “was always a pain,” says De Moor: It used to take two active working days, “and it was always a bit scary when we did that.” With Kubernetes, it’s just a matter of “a few hours of passively following the process.”

-
+
"When things fail and errors pop up, the system tries to heal itself, and that’s really, for us, the key reason to work with Kubernetes. It allowed us to set up certain testing frameworks to just be alerted when things go wrong, instead of having to look at whether everything went right. It’s made people’s lives much easier. It’s quite a big mindset change."

- NILS DE MOOR, CTO/COFOUNDER, WOORANK
diff --git a/content/en/case-studies/workiva/index.html b/content/en/case-studies/workiva/index.html index 95f323d5ae395..1c09503bfb351 100644 --- a/content/en/case-studies/workiva/index.html +++ b/content/en/case-studies/workiva/index.html @@ -11,7 +11,7 @@ With OpenTracing, my team was able to look at a trace and make optimization suggestions to another team without ever looking at their code. --- -
+

CASE STUDY:
Using OpenTracing to Help Pinpoint the Bottlenecks

@@ -30,12 +30,12 @@

Challenge

Workiva offers a cloud-based platform for managing and reporting business data. This SaaS product, Wdesk, is used by more than 70 percent of the Fortune 500 companies. As the company made the shift from a monolith to a more distributed, microservice-based system, "We had a number of people working on this, all on different teams, so we needed to identify what the issues were and where the bottlenecks were," says Senior Software Architect MacLeod Broad. With back-end code running on Google App Engine, Google Compute Engine, as well as Amazon Web Services, Workiva needed a tracing system that was agnostic of platform. While preparing one of the company’s first products utilizing AWS, which involved a "sync and link" feature that linked data from spreadsheets built in the new application with documents created in the old application on Workiva’s existing system, Broad’s team found an ideal use case for tracing: There were circular dependencies, and optimizations often turned out to be micro-optimizations that didn’t impact overall speed.
- +

Solution

- Broad’s team introduced the platform-agnostic distributed tracing system OpenTracing to help them pinpoint the bottlenecks. + Broad’s team introduced the platform-agnostic distributed tracing system OpenTracing to help them pinpoint the bottlenecks.

Impact

Now used throughout the company, OpenTracing produced immediate results. Software Engineer Michael Davis reports: "Tracing has given us immediate, actionable insight into how to improve our service. Through a combination of seeing where each call spends its time, as well as which calls are most often used, we were able to reduce our average response time by 95 percent (from 600ms to 30ms) in a single fix." @@ -61,14 +61,14 @@

Last fall, MacLeod Broad’s platform team at Workiva was prepping one of th

-
+
"A tracing system can at a glance explain an architecture, narrow down a performance bottleneck and zero in on it, and generally just help direct an investigation at a high level. Being able to do that at a glance is much faster than at a meeting or with three days of debugging, and it’s a lot faster than never figuring out the problem and just moving on."
— MACLEOD BROAD, SENIOR SOFTWARE ARCHITECT AT WORKIVA
- + Simply put, it was an ideal use case for tracing. "A tracing system can at a glance explain an architecture, narrow down a performance bottleneck and zero in on it, and generally just help direct an investigation at a high level," says Broad. "Being able to do that at a glance is much faster than at a meeting or with three days of debugging, and it’s a lot faster than never figuring out the problem and just moving on."

With Workiva’s back-end code running on Google Compute Engine as well as App Engine and AWS, Broad knew that he needed a tracing system that was platform agnostic. "We were looking at different tracing solutions," he says, "and we decided that because it seemed to be a very evolving market, we didn’t want to get stuck with one vendor. So OpenTracing seemed like the cleanest way to avoid vendor lock-in on what backend we actually had to use."

Once they introduced OpenTracing into this first use case, Broad says, "The trace made it super obvious where the bottlenecks were." Even though everyone had assumed it was Workiva’s existing code that was slowing things down, that wasn’t exactly the case. "It looked like the existing code was slow only because it was reaching out to our next-generation services, and they were taking a very long time to service all those requests," says Broad. "On the waterfall graph you can see the exact same work being done on every request when it was calling back in. So every service request would look the exact same for every response being paged out. And then it was just a no-brainer of, ‘Why is it doing all this work again?’"

@@ -78,7 +78,7 @@

Last fall, MacLeod Broad’s platform team at Workiva was prepping one of th

-
+
"We were looking at different tracing solutions and we decided that because it seemed to be a very evolving market, we didn’t want to get stuck with one vendor. So OpenTracing seemed like the cleanest way to avoid vendor lock-in on what backend we actually had to use."
— MACLEOD BROAD, SENIOR SOFTWARE ARCHITECT AT WORKIVA
@@ -90,7 +90,7 @@

Last fall, MacLeod Broad’s platform team at Workiva was prepping one of th Some teams were won over quickly. "Tracing has given us immediate, actionable insight into how to improve our [Workspaces] service," says Software Engineer Michael Davis. "Through a combination of seeing where each call spends its time, as well as which calls are most often used, we were able to reduce our average response time by 95 percent (from 600ms to 30ms) in a single fix."

Most of Workiva’s major products are now traced using OpenTracing, with data pushed into Google StackDriver. Even the products that aren’t fully traced have some components and libraries that are.

Broad points out that because some of the engineers were working on App Engine and already had experience with the platform’s Appstats library for profiling performance, it didn’t take much to get them used to using OpenTracing. But others were a little more reluctant. "The biggest hindrance to adoption I think has been the concern about how much latency is introducing tracing [and StackDriver] going to cost," he says. "People are also very concerned about adding middleware to whatever they’re working on. Questions about passing the context around and how that’s done were common. A lot of our Go developers were fine with it, because they were already doing that in one form or another. Our Java developers were not super keen on doing that because they’d used other systems that didn’t require that."

-But the benefits clearly outweighed the concerns, and today, Workiva’s official policy is to use tracing." +But the benefits clearly outweighed the concerns, and today, Workiva’s official policy is to use tracing. In fact, Broad believes that tracing naturally fits in with Workiva’s existing logging and metrics systems. "This was the way we presented it internally, and also the way we designed our use," he says. "Our traces are logged in the exact same mechanism as our app metric and logging data, and they get pushed the exact same way. So we treat all that data exactly the same when it’s being created and when it’s being recorded. We have one internal library that we use for logging, telemetry, analytics and tracing." @@ -98,7 +98,7 @@

Last fall, MacLeod Broad’s platform team at Workiva was prepping one of th
- "Tracing has given us immediate, actionable insight into how to improve our [Workspaces] service. Through a combination of seeing where each call spends its time, as well as which calls are most often used, we were able to reduce our average response time by 95 percent (from 600ms to 30ms) in a single fix."
— Michael Davis, Software Engineer, Workiva
+ "Tracing has given us immediate, actionable insight into how to improve our [Workspaces] service. Through a combination of seeing where each call spends its time, as well as which calls are most often used, we were able to reduce our average response time by 95 percent (from 600ms to 30ms) in a single fix."
— Michael Davis, Software Engineer, Workiva
diff --git a/content/en/case-studies/ygrene/index.html b/content/en/case-studies/ygrene/index.html index 498dc0ec73ac6..c07443249a5f9 100644 --- a/content/en/case-studies/ygrene/index.html +++ b/content/en/case-studies/ygrene/index.html @@ -12,7 +12,7 @@ We had to change some practices and code, and the way things were built, but we were able to get our main systems onto Kubernetes in a month or so, and then into production within two months. That’s very fast for a finance company. --- -
+

CASE STUDY:
Ygrene: Using Cloud Native to Bring Security and Scalability to the Finance Industry

@@ -61,7 +61,7 @@

In less than a decade, +
"CNCF has been an amazing incubator for so many projects. Now we look at its webpage regularly to find out if there are any new, awesome, high-quality projects we can implement into our stack. It’s actually become a hub for us for knowing what software we need to be looking at to make our systems more secure or more scalable."

— Austin Adams, Development Manager, Ygrene Energy Fund
@@ -78,7 +78,7 @@

In less than a decade, +
"We had to change some practices and code, and the way things were built," Adams says, "but we were able to get our main systems onto Kubernetes in a month or so, and then into production within two months. That’s very fast for a finance company."
diff --git a/content/en/docs/concepts/cluster-administration/_index.md b/content/en/docs/concepts/cluster-administration/_index.md index 7ce7126d9dc79..c3b51f3acfcdd 100644 --- a/content/en/docs/concepts/cluster-administration/_index.md +++ b/content/en/docs/concepts/cluster-administration/_index.md @@ -7,6 +7,7 @@ weight: 100 content_type: concept description: > Lower-level detail relevant to creating or administering a Kubernetes cluster. +no_list: true --- @@ -63,7 +64,7 @@ Before choosing a guide, here are some considerations: * [Auditing](/docs/tasks/debug-application-cluster/audit/) describes how to interact with Kubernetes' audit logs. ### Securing the kubelet - * [Master-Node communication](/docs/concepts/architecture/master-node-communication/) + * [Control Plane-Node communication](/docs/concepts/architecture/control-plane-node-communication/) * [TLS bootstrapping](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/) * [Kubelet authentication/authorization](/docs/admin/kubelet-authentication-authorization/) diff --git a/content/en/docs/concepts/cluster-administration/flow-control.md b/content/en/docs/concepts/cluster-administration/flow-control.md index 26fc1194df008..5cdd070e0fe46 100644 --- a/content/en/docs/concepts/cluster-administration/flow-control.md +++ b/content/en/docs/concepts/cluster-administration/flow-control.md @@ -303,6 +303,9 @@ to get a mapping of UIDs to names for both FlowSchemas and PriorityLevelConfigurations. ## Observability + +### Metrics + When you enable the API Priority and Fairness feature, the kube-apiserver exports additional metrics. Monitoring these can help you determine whether your configuration is inappropriately throttling important traffic, or find @@ -365,9 +368,65 @@ poorly-behaved workloads that may be harming system health. long requests took to actually execute, grouped by the FlowSchema that matched the request and the PriorityLevel to which it was assigned. - - - +### Debug endpoints + +When you enable the API Priority and Fairness feature, the kube-apiserver serves the following additional paths at its HTTP[S] ports. + +- `/debug/api_priority_and_fairness/dump_priority_levels` - a listing of all the priority levels and the current state of each. You can fetch like this: + ```shell + kubectl get --raw /debug/api_priority_and_fairness/dump_priority_levels + ``` + The output is similar to this: + ``` + PriorityLevelName, ActiveQueues, IsIdle, IsQuiescing, WaitingRequests, ExecutingRequests, + workload-low, 0, true, false, 0, 0, + global-default, 0, true, false, 0, 0, + exempt, , , , , , + catch-all, 0, true, false, 0, 0, + system, 0, true, false, 0, 0, + leader-election, 0, true, false, 0, 0, + workload-high, 0, true, false, 0, 0, + ``` + +- `/debug/api_priority_and_fairness/dump_queues` - a listing of all the queues and their current state. You can fetch like this: + ```shell + kubectl get --raw /debug/api_priority_and_fairness/dump_queues + ``` + The output is similar to this: + ``` + PriorityLevelName, Index, PendingRequests, ExecutingRequests, VirtualStart, + workload-high, 0, 0, 0, 0.0000, + workload-high, 1, 0, 0, 0.0000, + workload-high, 2, 0, 0, 0.0000, + ... + leader-election, 14, 0, 0, 0.0000, + leader-election, 15, 0, 0, 0.0000, + ``` + +- `/debug/api_priority_and_fairness/dump_requests` - a listing of all the requests that are currently waiting in a queue. 
You can fetch like this: + ```shell + kubectl get --raw /debug/api_priority_and_fairness/dump_requests + ``` + The output is similar to this: + ``` + PriorityLevelName, FlowSchemaName, QueueIndex, RequestIndexInQueue, FlowDistingsher, ArriveTime, + exempt, , , , , , + system, system-nodes, 12, 0, system:node:127.0.0.1, 2020-07-23T15:26:57.179170694Z, + ``` + + In addition to the queued requests, the output includes one phantom line for each priority level that is exempt from limitation. + + You can get a more detailed listing with a command like this: + ```shell + kubectl get --raw '/debug/api_priority_and_fairness/dump_requests?includeRequestDetails=1' + ``` + The output is similar to this: + ``` + PriorityLevelName, FlowSchemaName, QueueIndex, RequestIndexInQueue, FlowDistingsher, ArriveTime, UserName, Verb, APIPath, Namespace, Name, APIVersion, Resource, SubResource, + system, system-nodes, 12, 0, system:node:127.0.0.1, 2020-07-23T15:31:03.583823404Z, system:node:127.0.0.1, create, /api/v1/namespaces/scaletest/configmaps, + system, system-nodes, 12, 1, system:node:127.0.0.1, 2020-07-23T15:31:03.594555947Z, system:node:127.0.0.1, create, /api/v1/namespaces/scaletest/configmaps, + ``` + ## {{% heading "whatsnext" %}} diff --git a/content/en/docs/concepts/cluster-administration/logging.md index 399f8f16ccb8c..0c2299e35c30f 100644 --- a/content/en/docs/concepts/cluster-administration/logging.md +++ b/content/en/docs/concepts/cluster-administration/logging.md @@ -82,7 +82,8 @@ and the former approach is used in any other environment. In both cases, by default rotation is configured to take place when log file exceeds 10MB. As an example, you can find detailed information about how `kube-up.sh` sets -up logging for COS image on GCP in the corresponding [script][cosConfigureHelper]. +up logging for COS image on GCP in the corresponding +[script](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/cluster/gce/gci/configure-helper.sh). When you run [`kubectl logs`](/docs/reference/generated/kubectl/kubectl-commands#logs) as in the basic logging example, the kubelet on the node handles the request and @@ -96,8 +97,6 @@ the rotation and there are two files, one 10MB in size and one empty, `kubectl logs` will return an empty response. {{< /note >}} -[cosConfigureHelper]: https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/cluster/gce/gci/configure-helper.sh - ### System component logs There are two types of system components: those that run in a container and those @@ -109,7 +108,7 @@ that do not run in a container. For example: On machines with systemd, the kubelet and container runtime write to journald. If systemd is not present, they write to `.log` files in the `/var/log` directory. System components inside containers always write to the `/var/log` directory, -bypassing the default logging mechanism. They use the [klog][klog] +bypassing the default logging mechanism. They use the [klog](https://github.com/kubernetes/klog) logging library. You can find the conventions for logging severity for those components in the [development docs on logging](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md). @@ -118,8 +117,6 @@ directory should be rotated. In Kubernetes clusters brought up by the `kube-up.sh` script, those logs are configured to be rotated by the `logrotate` tool daily or once the size exceeds 100MB.
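As a quick, hedged illustration of the log locations described above (not part of the change itself, and assuming a typical systemd-based node with the conventional `kubelet` unit name), you could inspect the kubelet's own logs like this:

```shell
# On nodes running systemd, the kubelet and container runtime log to journald.
journalctl -u kubelet --since "1 hour ago"

# On nodes without systemd, the same components typically write .log files under /var/log.
ls -l /var/log/kubelet.log
```

The unit name and file path are common conventions rather than guarantees; they can differ between distributions and installation methods.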
-[klog]: https://github.com/kubernetes/klog - ## Cluster-level logging architectures While Kubernetes does not provide a native solution for cluster-level logging, there are several common approaches you can consider. Here are some options: diff --git a/content/en/docs/concepts/cluster-administration/manage-deployment.md b/content/en/docs/concepts/cluster-administration/manage-deployment.md index b052dd3a15767..d0485a434287c 100644 --- a/content/en/docs/concepts/cluster-administration/manage-deployment.md +++ b/content/en/docs/concepts/cluster-administration/manage-deployment.md @@ -323,7 +323,7 @@ When load on your application grows or shrinks, it's easy to scale with `kubectl kubectl scale deployment/my-nginx --replicas=1 ``` ```shell -deployment.extensions/my-nginx scaled +deployment.apps/my-nginx scaled ``` Now you only have one pod managed by the deployment. diff --git a/content/en/docs/concepts/cluster-administration/monitoring.md b/content/en/docs/concepts/cluster-administration/monitoring.md index fbea5e69c184b..cd6069d2296f2 100644 --- a/content/en/docs/concepts/cluster-administration/monitoring.md +++ b/content/en/docs/concepts/cluster-administration/monitoring.md @@ -40,14 +40,14 @@ Note that {{< glossary_tooltip term_id="kubelet" text="kubelet" >}} also exposes If your cluster uses {{< glossary_tooltip term_id="rbac" text="RBAC" >}}, reading metrics requires authorization via a user, group or ServiceAccount with a ClusterRole that allows accessing `/metrics`. For example: ``` -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: prometheus -rules: - - nonResourceURLs: - - "/metrics" - verbs: +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: prometheus +rules: + - nonResourceURLs: + - "/metrics" + verbs: - get ``` @@ -130,5 +130,4 @@ cloudprovider_gce_api_request_duration_seconds { request = "list_disk"} * Read about the [Prometheus text format](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-based-format) for metrics * See the list of [stable Kubernetes metrics](https://github.com/kubernetes/kubernetes/blob/master/test/instrumentation/testdata/stable-metrics-list.yaml) -* Read about the [Kubernetes deprecation policy](https://kubernetes.io/docs/reference/using-api/deprecation-policy/#deprecating-a-feature-or-behavior ) - +* Read about the [Kubernetes deprecation policy](/docs/reference/using-api/deprecation-policy/#deprecating-a-feature-or-behavior) diff --git a/content/en/docs/concepts/cluster-administration/networking.md b/content/en/docs/concepts/cluster-administration/networking.md index 29044be250136..6779ee984ad21 100644 --- a/content/en/docs/concepts/cluster-administration/networking.md +++ b/content/en/docs/concepts/cluster-administration/networking.md @@ -12,7 +12,7 @@ understand exactly how it is expected to work. There are 4 distinct networking problems to address: 1. Highly-coupled container-to-container communications: this is solved by - [pods](/docs/concepts/workloads/pods/pod/) and `localhost` communications. + {{< glossary_tooltip text="Pods" term_id="pod" >}} and `localhost` communications. 2. Pod-to-Pod communications: this is the primary focus of this document. 3. Pod-to-Service communications: this is covered by [services](/docs/concepts/services-networking/service/). 4. External-to-Service communications: this is covered by [services](/docs/concepts/services-networking/service/). 
diff --git a/content/en/docs/concepts/configuration/configmap.md b/content/en/docs/concepts/configuration/configmap.md index 1c1a24106ec85..d7d2feb9d5ab1 100644 --- a/content/en/docs/concepts/configuration/configmap.md +++ b/content/en/docs/concepts/configuration/configmap.md @@ -126,25 +126,32 @@ spec: configMap: # Provide the name of the ConfigMap you want to mount. name: game-demo + # An array of keys from the ConfigMap to create as files + items: + - key: "game.properties" + path: "game.properties" + - key: "user-interface.properties" + path: "user-interface.properties" ``` A ConfigMap doesn't differentiate between single line property values and multi-line file-like values. What matters is how Pods and other objects consume those values. + For this example, defining a volume and mounting it inside the `demo` -container as `/config` creates four files: +container as `/config` creates two files, +`/config/game.properties` and `/config/user-interface.properties`, +even though there are four keys in the ConfigMap. This is because the Pod +definition specifies an `items` array in the `volumes` section. +If you omit the `items` array entirely, every key in the ConfigMap becomes +a file with the same name as the key, and you get 4 files. -- `/config/player_initial_lives` -- `/config/ui_properties_file_name` -- `/config/game.properties` -- `/config/user-interface.properties` +## Using ConfigMaps -If you want to make sure that `/config` only contains files with a -`.properties` extension, use two different ConfigMaps, and refer to both -ConfigMaps in the `spec` for a Pod. The first ConfigMap defines -`player_initial_lives` and `ui_properties_file_name`. The second -ConfigMap defines the files that the kubelet places into `/config`. +ConfigMaps can be mounted as data volumes. ConfigMaps can also be used by other +parts of the system, without being directly exposed to the Pod. For example, +ConfigMaps can hold data that other parts of the system should use for configuration. {{< note >}} The most common way to use ConfigMaps is to configure settings for @@ -157,12 +164,6 @@ or {{< glossary_tooltip text="operators" term_id="operator-pattern" >}} that adjust their behavior based on a ConfigMap. {{< /note >}} -## Using ConfigMaps - -ConfigMaps can be mounted as data volumes. ConfigMaps can also be used by other -parts of the system, without being directly exposed to the Pod. For example, -ConfigMaps can hold data that other parts of the system should use for configuration. - ### Using ConfigMaps as files from a Pod To consume a ConfigMap in a volume in a Pod: @@ -223,7 +224,7 @@ data has the following advantages: - improves performance of your cluster by significantly reducing load on kube-apiserver, by closing watches for config maps marked as immutable. -To use this feature, enable the `ImmutableEmphemeralVolumes` +To use this feature, enable the `ImmutableEphemeralVolumes` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) and set your Secret or ConfigMap `immutable` field to `true`. 
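As a rough sketch only (this manifest is illustrative and not taken from the changeset), marking a ConfigMap immutable amounts to adding a top-level `immutable: true` field once the `ImmutableEphemeralVolumes` feature gate is enabled:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: game-demo-immutable   # hypothetical name, for illustration
data:
  game.properties: |
    enemy.types=aliens,monsters
    player.maximum-lives=5
immutable: true               # once set, the data can no longer be updated in place
```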
For example: ```yaml diff --git a/content/en/docs/concepts/configuration/manage-resources-containers.md b/content/en/docs/concepts/configuration/manage-resources-containers.md index f8989c4a5dd81..275b70866a533 100644 --- a/content/en/docs/concepts/configuration/manage-resources-containers.md +++ b/content/en/docs/concepts/configuration/manage-resources-containers.md @@ -132,11 +132,9 @@ metadata: name: frontend spec: containers: - - name: db - image: mysql + - name: app + image: super.mycompany.com/app:v4 env: - - name: MYSQL_ROOT_PASSWORD - value: "password" resources: requests: memory: "64Mi" @@ -144,8 +142,8 @@ spec: limits: memory: "128Mi" cpu: "500m" - - name: wp - image: wordpress + - name: log-aggregator + image: super.mycompany.com/log-aggregator:v6 resources: requests: memory: "64Mi" @@ -227,7 +225,7 @@ locally-attached writeable devices or, sometimes, by RAM. Pods use ephemeral local storage for scratch space, caching, and for logs. The kubelet can provide scratch space to Pods using local ephemeral storage to -mount [`emptyDir`](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) +mount [`emptyDir`](/docs/concepts/storage/volumes/#emptydir) {{< glossary_tooltip term_id="volume" text="volumes" >}} into containers. The kubelet also uses this kind of storage to hold @@ -330,18 +328,15 @@ metadata: name: frontend spec: containers: - - name: db - image: mysql - env: - - name: MYSQL_ROOT_PASSWORD - value: "password" + - name: app + image: super.mycompany.com/app:v4 resources: requests: ephemeral-storage: "2Gi" limits: ephemeral-storage: "4Gi" - - name: wp - image: wordpress + - name: log-aggregator + image: super.mycompany.com/log-aggregator:v6 resources: requests: ephemeral-storage: "2Gi" @@ -657,7 +652,7 @@ Allocated resources: (Total limits may be over 100 percent, i.e., overcommitted.) CPU Requests CPU Limits Memory Requests Memory Limits ------------ ---------- --------------- ------------- - 680m (34%) 400m (20%) 920Mi (12%) 1070Mi (14%) + 680m (34%) 400m (20%) 920Mi (11%) 1070Mi (13%) ``` In the preceding output, you can see that if a Pod requests more than 1120m @@ -758,5 +753,3 @@ You can see that the Container was terminated because of `reason:OOM Killed`, wh * Read the [ResourceRequirements](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#resourcerequirements-v1-core) API reference * Read about [project quotas](http://xfs.org/docs/xfsdocs-xml-dev/XFS_User_Guide/tmp/en-US/html/xfs-quotas.html) in XFS - - diff --git a/content/en/docs/concepts/configuration/pod-overhead.md b/content/en/docs/concepts/configuration/pod-overhead.md index 7057383dacdd4..5eced7954f3cd 100644 --- a/content/en/docs/concepts/configuration/pod-overhead.md +++ b/content/en/docs/concepts/configuration/pod-overhead.md @@ -87,7 +87,7 @@ spec: memory: 100Mi ``` -At admission time the RuntimeClass [admission controller](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/) +At admission time the RuntimeClass [admission controller](/docs/reference/access-authn-authz/admission-controllers/) updates the workload's PodSpec to include the `overhead` as described in the RuntimeClass. If the PodSpec already has this field defined, the Pod will be rejected. In the given example, since only the RuntimeClass name is specified, the admission controller mutates the Pod to include an `overhead`. @@ -195,5 +195,3 @@ from source in the meantime. 
* [RuntimeClass](/docs/concepts/containers/runtime-class/) * [PodOverhead Design](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/20190226-pod-overhead.md) - - diff --git a/content/en/docs/concepts/configuration/pod-priority-preemption.md b/content/en/docs/concepts/configuration/pod-priority-preemption.md index 9bfc514257f40..295a029d9081a 100644 --- a/content/en/docs/concepts/configuration/pod-priority-preemption.md +++ b/content/en/docs/concepts/configuration/pod-priority-preemption.md @@ -255,7 +255,7 @@ makes Pod P eligible to preempt Pods on another Node. #### Graceful termination of preemption victims When Pods are preempted, the victims get their -[graceful termination period](/docs/concepts/workloads/pods/pod/#termination-of-pods). +[graceful termination period](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination). They have that much time to finish their work and exit. If they don't, they are killed. This graceful termination period creates a time gap between the point that the scheduler preempts Pods and the time when the pending Pod (P) can be @@ -268,7 +268,7 @@ priority Pods to zero or a small number. #### PodDisruptionBudget is supported, but not guaranteed -A [Pod Disruption Budget (PDB)](/docs/concepts/workloads/pods/disruptions/) +A [PodDisruptionBudget](/docs/concepts/workloads/pods/disruptions/) (PDB) allows application owners to limit the number of Pods of a replicated application that are down simultaneously from voluntary disruptions. Kubernetes supports PDB when preempting Pods, but respecting PDB is best effort. The scheduler tries diff --git a/content/en/docs/concepts/containers/_index.md b/content/en/docs/concepts/containers/_index.md index 091cea881bc21..edee4eccc4d9e 100644 --- a/content/en/docs/concepts/containers/_index.md +++ b/content/en/docs/concepts/containers/_index.md @@ -6,6 +6,7 @@ reviewers: - erictune - thockin content_type: concept +no_list: true --- diff --git a/content/en/docs/concepts/containers/container-lifecycle-hooks.md b/content/en/docs/concepts/containers/container-lifecycle-hooks.md index 386e4d00bb436..c8e93e93db028 100644 --- a/content/en/docs/concepts/containers/container-lifecycle-hooks.md +++ b/content/en/docs/concepts/containers/container-lifecycle-hooks.md @@ -42,7 +42,7 @@ so it must complete before the call to delete the container can be sent. No parameters are passed to the handler. A more detailed description of the termination behavior can be found in -[Termination of Pods](/docs/concepts/workloads/pods/pod/#termination-of-pods). +[Termination of Pods](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination). ### Hook handler implementations diff --git a/content/en/docs/concepts/containers/images.md b/content/en/docs/concepts/containers/images.md index 496387a43af86..415d920b4042a 100644 --- a/content/en/docs/concepts/containers/images.md +++ b/content/en/docs/concepts/containers/images.md @@ -65,7 +65,7 @@ When `imagePullPolicy` is defined without a specific value, it is also set to `A ## Multi-architecture Images with Manifests -As well as providing binary images, a container registry can also server a [container image manifest](https://github.com/opencontainers/image-spec/blob/master/manifest.md). A manifest can reference image manifests for architecture-specific versions of an container. 
The idea is that you can have a name for an image (for example: `pause`, `example/mycontainer`, `kube-apiserver`) and allow different systems to fetch the right binary image for the machine architecture they are using. +As well as providing binary images, a container registry can also serve a [container image manifest](https://github.com/opencontainers/image-spec/blob/master/manifest.md). A manifest can reference image manifests for architecture-specific versions of an container. The idea is that you can have a name for an image (for example: `pause`, `example/mycontainer`, `kube-apiserver`) and allow different systems to fetch the right binary image for the machine architecture they are using. Kubernetes itself typically names container images with a suffix `-$(ARCH)`. For backward compatibility, please generate the older images with suffixes. The idea is to generate say `pause` image which has the manifest for all the arch(es) and say `pause-amd64` which is backwards compatible for older configurations or YAML files which may have hard coded the images with suffixes. @@ -129,7 +129,7 @@ example, run these on your desktop/laptop: - for example, to test this out: `for n in $nodes; do scp ~/.docker/config.json root@"$n":/var/lib/kubelet/config.json; done` {{< note >}} -For production clusers, use a configuration management tool so that you can apply this +For production clusters, use a configuration management tool so that you can apply this setting to all the nodes where you need it. {{< /note >}} diff --git a/content/en/docs/concepts/containers/runtime-class.md b/content/en/docs/concepts/containers/runtime-class.md index d1857f3807a81..8f685e35f3e3f 100644 --- a/content/en/docs/concepts/containers/runtime-class.md +++ b/content/en/docs/concepts/containers/runtime-class.md @@ -138,9 +138,7 @@ table](https://github.com/cri-o/cri-o/blob/master/docs/crio.conf.5.md#crioruntim runtime_path = "${PATH_TO_BINARY}" ``` -See CRI-O's [config documentation][100] for more details. - -[100]: https://raw.githubusercontent.com/cri-o/cri-o/9f11d1d/docs/crio.conf.5.md +See CRI-O's [config documentation](https://raw.githubusercontent.com/cri-o/cri-o/9f11d1d/docs/crio.conf.5.md) for more details. ## Scheduling @@ -149,7 +147,8 @@ See CRI-O's [config documentation][100] for more details. As of Kubernetes v1.16, RuntimeClass includes support for heterogenous clusters through its `scheduling` fields. Through the use of these fields, you can ensure that pods running with this RuntimeClass are scheduled to nodes that support it. To use the scheduling support, you must have -the [RuntimeClass admission controller][] enabled (the default, as of 1.16). +the [RuntimeClass admission controller](/docs/reference/access-authn-authz/admission-controllers/#runtimeclass) +enabled (the default, as of 1.16). To ensure pods land on nodes supporting a specific RuntimeClass, that set of nodes should have a common label which is then selected by the `runtimeclass.scheduling.nodeSelector` field. The @@ -165,8 +164,6 @@ by each. To learn more about configuring the node selector and tolerations, see [Assigning Pods to Nodes](/docs/concepts/scheduling-eviction/assign-pod-node/). 
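A minimal sketch of how the `scheduling` fields fit together (the class name, handler, node label, and image below are hypothetical, not taken from this change):

```yaml
apiVersion: node.k8s.io/v1beta1
kind: RuntimeClass
metadata:
  name: sandboxed                  # hypothetical RuntimeClass name
handler: runsc                     # hypothetical CRI handler configured on matching nodes
scheduling:
  nodeSelector:
    runtime: sandboxed             # Pods using this class only schedule onto labeled nodes
---
apiVersion: v1
kind: Pod
metadata:
  name: sandboxed-demo
spec:
  runtimeClassName: sandboxed      # the admission controller merges the nodeSelector above
  containers:
  - name: app
    image: registry.example/app:v1 # placeholder image
```

Nodes that actually provide such a runtime would carry the matching label ahead of time, for example with `kubectl label node <node-name> runtime=sandboxed`.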
-[RuntimeClass admission controller]: /docs/reference/access-authn-authz/admission-controllers/#runtimeclass - ### Pod Overhead {{< feature-state for_k8s_version="v1.18" state="beta" >}} diff --git a/content/en/docs/concepts/example-concept-template.md b/content/en/docs/concepts/example-concept-template.md deleted file mode 100644 index adf3741f902bd..0000000000000 --- a/content/en/docs/concepts/example-concept-template.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Example Concept Template -reviewers: -- chenopis -content_type: concept -toc_hide: true ---- - - - -{{< note >}} -Be sure to also [create an entry in the table of contents](/docs/home/contribute/write-new-topic/#creating-an-entry-in-the-table-of-contents) for your new document. -{{< /note >}} - -This page explains ... - - - - - -## Understanding ... - -Kubernetes provides ... - -## Using ... - -To use ... - - - -## {{% heading "whatsnext" %}} - - -**[Optional Section]** - -* Learn more about [Writing a New Topic](/docs/home/contribute/style/write-new-topic/). -* See [Page Content Types - Concept](/docs/home/contribute/style/page-concept-types/#concept). - - - - diff --git a/content/en/docs/concepts/extend-kubernetes/_index.md b/content/en/docs/concepts/extend-kubernetes/_index.md index 934861904fe89..6468ffa410f6e 100644 --- a/content/en/docs/concepts/extend-kubernetes/_index.md +++ b/content/en/docs/concepts/extend-kubernetes/_index.md @@ -8,6 +8,7 @@ reviewers: - cheftako - chenopis content_type: concept +no_list: true --- diff --git a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md index c8388478f51fd..7e6a648669b47 100644 --- a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md +++ b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md @@ -232,6 +232,6 @@ Here are some examples of device plugin implementations: * Learn about [scheduling GPU resources](/docs/tasks/manage-gpus/scheduling-gpus/) using device plugins * Learn about [advertising extended resources](/docs/tasks/administer-cluster/extended-resource-node/) on a node * Read about using [hardware acceleration for TLS ingress](https://kubernetes.io/blog/2019/04/24/hardware-accelerated-ssl/tls-termination-in-ingress-controllers-using-kubernetes-device-plugins-and-runtimeclass/) with Kubernetes -* Learn about the [Topology Manager] (/docs/tasks/adminster-cluster/topology-manager/) +* Learn about the [Topology Manager](/docs/tasks/administer-cluster/topology-manager/) diff --git a/content/en/docs/concepts/extend-kubernetes/poseidon-firmament-alternate-scheduler.md b/content/en/docs/concepts/extend-kubernetes/poseidon-firmament-alternate-scheduler.md deleted file mode 100644 index 7f81439c417b8..0000000000000 --- a/content/en/docs/concepts/extend-kubernetes/poseidon-firmament-alternate-scheduler.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: Poseidon-Firmament Scheduler -content_type: concept -weight: 80 ---- - - - -{{< feature-state for_k8s_version="v1.6" state="alpha" >}} - -The Poseidon-Firmament scheduler is an alternate scheduler that can be deployed alongside the default Kubernetes scheduler. - - - - - - -## Introduction - -Poseidon is a service that acts as the integration glue between the [Firmament scheduler](https://github.com/Huawei-PaaS/firmament) and Kubernetes. Poseidon-Firmament augments the current Kubernetes scheduling capabilities. 
It incorporates novel flow network graph based scheduling capabilities alongside the default Kubernetes scheduler. The Firmament scheduler models workloads and clusters as flow networks and runs min-cost flow optimizations over these networks to make scheduling decisions. - -Firmament models the scheduling problem as a constraint-based optimization over a flow network graph. This is achieved by reducing scheduling to a min-cost max-flow optimization problem. The Poseidon-Firmament scheduler dynamically refines the workload placements. - -Poseidon-Firmament scheduler runs alongside the default Kubernetes scheduler as an alternate scheduler. You can simultaneously run multiple, different schedulers. - -Flow graph scheduling with the Poseidon-Firmament scheduler provides the following advantages: - -- Workloads (Pods) are bulk scheduled to enable scheduling at massive scale. - The Poseidon-Firmament scheduler outperforms the Kubernetes default scheduler by a wide margin when it comes to throughput performance for scenarios where compute resource requirements are somewhat uniform across your workload (Deployments, ReplicaSets, Jobs). -- The Poseidon-Firmament's scheduler's end-to-end throughput performance and bind time improves as the number of nodes in a cluster increases. As you scale out, Poseidon-Firmament scheduler is able to amortize more and more work across workloads. -- Scheduling in Poseidon-Firmament is dynamic; it keeps cluster resources in a global optimal state during every scheduling run. -- The Poseidon-Firmament scheduler supports scheduling complex rule constraints. - -## How the Poseidon-Firmament scheduler works - -Kubernetes supports [using multiple schedulers](/docs/tasks/administer-cluster/configure-multiple-schedulers/). You can specify, for a particular Pod, that it is scheduled by a custom scheduler (“poseidon” for this case), by setting the `schedulerName` field in the PodSpec at the time of pod creation. The default scheduler will ignore that Pod and allow Poseidon-Firmament scheduler to schedule the Pod on a relevant node. - -For example: - -```yaml -apiVersion: v1 -kind: Pod -... -spec: - schedulerName: poseidon -... -``` - -## Batch scheduling - -As mentioned earlier, Poseidon-Firmament scheduler enables an extremely high throughput scheduling environment at scale due to its bulk scheduling approach versus Kubernetes pod-at-a-time approach. In our extensive tests, we have observed substantial throughput benefits as long as resource requirements (CPU/Memory) for incoming Pods are uniform across jobs (Replicasets/Deployments/Jobs), mainly due to efficient amortization of work across jobs. - -Although, Poseidon-Firmament scheduler is capable of scheduling various types of workloads, such as service, batch, etc., the following are a few use cases where it excels the most: - -1. For “Big Data/AI” jobs consisting of large number of tasks, throughput benefits are tremendous. -2. Service or batch jobs where workload resource requirements are uniform across jobs (Replicasets/Deployments/Jobs). - -## Feature state - -Poseidon-Firmament is designed to work with Kubernetes release 1.6 and all subsequent releases. - -{{< caution >}} -Poseidon-Firmament scheduler does not provide support for high availability; its implementation assumes that the scheduler cannot fail. -{{< /caution >}} - -## Feature comparison {#feature-comparison-matrix} - -{{< table caption="Feature comparison of Kubernetes and Poseidon-Firmament schedulers." 
>}} -|Feature|Kubernetes Default Scheduler|Poseidon-Firmament Scheduler|Notes| -|--- |--- |--- |--- | -|Node Affinity/Anti-Affinity|Y|Y|| -|Pod Affinity/Anti-Affinity - including support for pod anti-affinity symmetry|Y|Y|The default scheduler outperforms the Poseidon-Firmament scheduler pod affinity/anti-affinity functionality.| -|Taints & Tolerations|Y|Y|| -|Baseline Scheduling capability in accordance to available compute resources (CPU & Memory) on a node|Y|Y†|**†** Not all Predicates & Priorities are supported with Poseidon-Firmament.| -|Extreme Throughput at scale|Y†|Y|**†** Bulk scheduling approach scales or increases workload placement. Firmament scheduler offers high throughput when resource requirements (CPU/Memory) for incoming Pods are uniform across ReplicaSets/Deployments/Jobs.| -|Colocation Interference Avoidance|N|N|| -|Priority Preemption|Y|N†|**†** Partially exists in Poseidon-Firmament versus extensive support in Kubernetes default scheduler.| -|Inherent Rescheduling|N|Y†|**†** Poseidon-Firmament scheduler supports workload re-scheduling. In each scheduling run, Poseidon-Firmament considers all Pods, including running Pods, and as a result can migrate or evict Pods – a globally optimal scheduling environment.| -|Gang Scheduling|N|Y|| -|Support for Pre-bound Persistence Volume Scheduling|Y|Y|| -|Support for Local Volume & Dynamic Persistence Volume Binding Scheduling|Y|N|| -|High Availability|Y|N|| -|Real-time metrics based scheduling|N|Y†|**†** Partially supported in Poseidon-Firmament using Heapster (now deprecated) for placing Pods using actual cluster utilization statistics rather than reservations.| -|Support for Max-Pod per node|Y|Y|Poseidon-Firmament scheduler seamlessly co-exists with Kubernetes default scheduler.| -|Support for Ephemeral Storage, in addition to CPU/Memory|Y|Y|| -{{< /table >}} - -## Installation - -The [Poseidon-Firmament installation guide](https://github.com/kubernetes-sigs/poseidon/blob/master/docs/install/README.md#Installation) explains how to deploy Poseidon-Firmament to your cluster. - -## Performance comparison - -{{< note >}} - Please refer to the [latest benchmark results](https://github.com/kubernetes-sigs/poseidon/blob/master/docs/benchmark/README.md) for detailed throughput performance comparison test results between Poseidon-Firmament scheduler and the Kubernetes default scheduler. -{{< /note >}} - -Pod-by-pod schedulers, such as the Kubernetes default scheduler, process Pods in small batches (typically one at a time). These schedulers have the following crucial drawbacks: - -1. The scheduler commits to a pod placement early and restricts the choices for other pods that wait to be placed. -2. There is limited opportunities for amortizing work across pods because they are considered for placement individually. - -These downsides of pod-by-pod schedulers are addressed by batching or bulk scheduling in Poseidon-Firmament scheduler. Processing several pods in a batch allows the scheduler to jointly consider their placement, and thus to find the best trade-off for the whole batch instead of one pod. At the same time it amortizes work across pods resulting in much higher throughput. - - -## {{% heading "whatsnext" %}} - -* See [Poseidon-Firmament](https://github.com/kubernetes-sigs/poseidon#readme) on GitHub for more information. -* See the [design document](https://github.com/kubernetes-sigs/poseidon/blob/master/docs/design/README.md) for Poseidon. 
-* Read [Firmament: Fast, Centralized Cluster Scheduling at Scale](https://www.usenix.org/system/files/conference/osdi16/osdi16-gog.pdf), the academic paper on the Firmament scheduling design. -* If you'd like to contribute to Poseidon-Firmament, refer to the [developer setup instructions](https://github.com/kubernetes-sigs/poseidon/blob/master/docs/devel/README.md). - diff --git a/content/en/docs/concepts/overview/kubernetes-api.md b/content/en/docs/concepts/overview/kubernetes-api.md index b3b696035817d..42d12425b7994 100644 --- a/content/en/docs/concepts/overview/kubernetes-api.md +++ b/content/en/docs/concepts/overview/kubernetes-api.md @@ -25,8 +25,6 @@ The Kubernetes API lets you query and manipulate the state of objects in the Kub API endpoints, resource types and samples are described in the [API Reference](/docs/reference/kubernetes-api/). - - ## API changes @@ -87,7 +85,7 @@ Kubernetes implements an alternative Protobuf based serialization format for the To make it easier to eliminate fields or restructure resource representations, Kubernetes supports multiple API versions, each at a different API path, such as `/api/v1` or -`/apis/extensions/v1beta1`. +`/apis/rbac.authorization.k8s.io/v1alpha1`. Versioning is done at the API level rather than at the resource or field level to ensure that the API presents a clear, consistent view of system resources and behavior, and to enable controlling @@ -157,14 +155,6 @@ The flag accepts comma separated set of key=value pairs describing runtime confi {{< note >}}Enabling or disabling groups or resources requires restarting the kube-apiserver and the kube-controller-manager to pick up the `--runtime-config` changes.{{< /note >}} -## Enabling specific resources in the extensions/v1beta1 group - -DaemonSets, Deployments, StatefulSet, NetworkPolicies, PodSecurityPolicies and ReplicaSets in the `extensions/v1beta1` API group are disabled by default. -For example: to enable deployments and daemonsets, set -`--runtime-config=extensions/v1beta1/deployments=true,extensions/v1beta1/daemonsets=true`. - -{{< note >}}Individual resource enablement/disablement is only supported in the `extensions/v1beta1` API group for legacy reasons.{{< /note >}} - ## Persistence Kubernetes stores its serialized state in terms of the API resources by writing them into diff --git a/content/en/docs/concepts/overview/working-with-objects/common-labels.md b/content/en/docs/concepts/overview/working-with-objects/common-labels.md index 11e8944c8aded..a0a68c6dff65b 100644 --- a/content/en/docs/concepts/overview/working-with-objects/common-labels.md +++ b/content/en/docs/concepts/overview/working-with-objects/common-labels.md @@ -35,7 +35,7 @@ on every resource object. | Key | Description | Example | Type | | ----------------------------------- | --------------------- | -------- | ---- | | `app.kubernetes.io/name` | The name of the application | `mysql` | string | -| `app.kubernetes.io/instance` | A unique name identifying the instance of an application | `wordpress-abcxzy` | string | +| `app.kubernetes.io/instance` | A unique name identifying the instance of an application | `mysql-abcxzy` | string | | `app.kubernetes.io/version` | The current version of the application (e.g., a semantic version, revision hash, etc.) 
| `5.7.21` | string | | `app.kubernetes.io/component` | The component within the architecture | `database` | string | | `app.kubernetes.io/part-of` | The name of a higher level application this one is part of | `wordpress` | string | @@ -49,7 +49,7 @@ kind: StatefulSet metadata: labels: app.kubernetes.io/name: mysql - app.kubernetes.io/instance: wordpress-abcxzy + app.kubernetes.io/instance: mysql-abcxzy app.kubernetes.io/version: "5.7.21" app.kubernetes.io/component: database app.kubernetes.io/part-of: wordpress diff --git a/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md b/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md index 1f4f4e7509b5f..ab447cdcd6a58 100644 --- a/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md +++ b/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md @@ -92,7 +92,7 @@ and the `spec` format for a Deployment can be found in ## {{% heading "whatsnext" %}} * [Kubernetes API overview](/docs/reference/using-api/api-overview/) explains some more API concepts -* Learn about the most important basic Kubernetes objects, such as [Pod](/docs/concepts/workloads/pods/pod-overview/). +* Learn about the most important basic Kubernetes objects, such as [Pod](/docs/concepts/workloads/pods/). * Learn about [controllers](/docs/concepts/architecture/controller/) in Kubernetes diff --git a/content/en/docs/concepts/overview/working-with-objects/namespaces.md b/content/en/docs/concepts/overview/working-with-objects/namespaces.md index 07e7dac7266d2..59b1763450aed 100644 --- a/content/en/docs/concepts/overview/working-with-objects/namespaces.md +++ b/content/en/docs/concepts/overview/working-with-objects/namespaces.md @@ -26,7 +26,7 @@ need to create or think about namespaces at all. Start using namespaces when yo need the features they provide. Namespaces provide a scope for names. Names of resources need to be unique within a namespace, -but not across namespaces. Namespaces can not be nested inside one another and each Kubernetes +but not across namespaces. Namespaces cannot be nested inside one another and each Kubernetes resource can only be in one namespace. Namespaces are a way to divide cluster resources between multiple users (via [resource quota](/docs/concepts/policy/resource-quotas/)). diff --git a/content/en/docs/concepts/policy/pod-security-policy.md b/content/en/docs/concepts/policy/pod-security-policy.md index 5a5241c42ecde..835bbc84751eb 100644 --- a/content/en/docs/concepts/policy/pod-security-policy.md +++ b/content/en/docs/concepts/policy/pod-security-policy.md @@ -302,7 +302,7 @@ kubectl-user delete pod pause Let's try that again, slightly differently: ```shell -kubectl-user run pause --image=k8s.gcr.io/pause +kubectl-user create deployment pause --image=k8s.gcr.io/pause deployment "pause" created kubectl-user get pods diff --git a/content/en/docs/concepts/scheduling-eviction/kube-scheduler.md b/content/en/docs/concepts/scheduling-eviction/kube-scheduler.md index 406c3f974baab..d469661849898 100644 --- a/content/en/docs/concepts/scheduling-eviction/kube-scheduler.md +++ b/content/en/docs/concepts/scheduling-eviction/kube-scheduler.md @@ -28,7 +28,7 @@ page will help you learn about scheduling. 
## kube-scheduler -[kube-scheduler](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/) +[kube-scheduler](/docs/reference/command-line-tools-reference/kube-scheduler/) is the default scheduler for Kubernetes and runs as part of the {{< glossary_tooltip text="control plane" term_id="control-plane" >}}. kube-scheduler is designed so that, if you want and need to, you can @@ -95,4 +95,3 @@ of the scheduler: * Learn about [configuring multiple schedulers](/docs/tasks/administer-cluster/configure-multiple-schedulers/) * Learn about [topology management policies](/docs/tasks/administer-cluster/topology-manager/) * Learn about [Pod Overhead](/docs/concepts/configuration/pod-overhead/) - diff --git a/content/en/docs/concepts/security/pod-security-standards.md b/content/en/docs/concepts/security/pod-security-standards.md index 2afd6c73355ab..20574c8f914ee 100644 --- a/content/en/docs/concepts/security/pod-security-standards.md +++ b/content/en/docs/concepts/security/pod-security-standards.md @@ -236,11 +236,7 @@ well as lower-trust users.The following listed controls should be enforced/disal spec.securityContext.supplementalGroups[*]
spec.securityContext.fsGroup
spec.containers[*].securityContext.runAsGroup
- spec.containers[*].securityContext.supplementalGroups[*]
- spec.containers[*].securityContext.fsGroup
spec.initContainers[*].securityContext.runAsGroup
- spec.initContainers[*].securityContext.supplementalGroups[*]
- spec.initContainers[*].securityContext.fsGroup

Allowed Values:
non-zero
undefined / nil (except for `*.runAsGroup`)
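To make the group controls listed above concrete, here is a rough sketch (the Pod name, image, and group IDs are illustrative, not part of the page being edited) of a Pod spec that satisfies them by setting explicit non-zero group IDs:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: restricted-groups-example   # hypothetical name
spec:
  securityContext:
    fsGroup: 2000                   # non-zero, satisfies the fsGroup control
    supplementalGroups: [3000]      # non-zero supplemental group IDs
  containers:
  - name: app
    image: busybox                  # placeholder image
    command: ["sleep", "3600"]
    securityContext:
      runAsGroup: 3000              # non-zero; per the allowed values, runAsGroup may not be left undefined
```

Per the allowed values above, leaving these fields undefined would also be acceptable for everything except `*.runAsGroup`.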
diff --git a/content/en/docs/concepts/services-networking/ingress-controllers.md b/content/en/docs/concepts/services-networking/ingress-controllers.md index 2c363ce7dc4b8..33875e3637396 100644 --- a/content/en/docs/concepts/services-networking/ingress-controllers.md +++ b/content/en/docs/concepts/services-networking/ingress-controllers.md @@ -32,7 +32,7 @@ Kubernetes as a project currently supports and maintains [GCE](https://git.k8s.i provided and supported by VMware. * Citrix provides an [Ingress Controller](https://github.com/citrix/citrix-k8s-ingress-controller) for its hardware (MPX), virtualized (VPX) and [free containerized (CPX) ADC](https://www.citrix.com/products/citrix-adc/cpx-express.html) for [baremetal](https://github.com/citrix/citrix-k8s-ingress-controller/tree/master/deployment/baremetal) and [cloud](https://github.com/citrix/citrix-k8s-ingress-controller/tree/master/deployment) deployments. * F5 Networks provides [support and maintenance](https://support.f5.com/csp/article/K86859508) - for the [F5 BIG-IP Controller for Kubernetes](http://clouddocs.f5.com/products/connectors/k8s-bigip-ctlr/latest). + for the [F5 BIG-IP Container Ingress Services for Kubernetes](https://clouddocs.f5.com/containers/latest/userguide/kubernetes/). * [Gloo](https://gloo.solo.io) is an open-source ingress controller based on [Envoy](https://www.envoyproxy.io) which offers API Gateway functionality with enterprise support from [solo.io](https://www.solo.io). * [HAProxy Ingress](https://haproxy-ingress.github.io) is a highly customizable community-driven ingress controller for HAProxy. * [HAProxy Technologies](https://www.haproxy.com/) offers support and maintenance for the [HAProxy Ingress Controller for Kubernetes](https://github.com/haproxytech/kubernetes-ingress). See the [official documentation](https://www.haproxy.com/documentation/hapee/1-9r1/traffic-management/kubernetes-ingress-controller/). diff --git a/content/en/docs/concepts/services-networking/ingress.md b/content/en/docs/concepts/services-networking/ingress.md index 430ee3c72d551..fc069a593c2d6 100644 --- a/content/en/docs/concepts/services-networking/ingress.md +++ b/content/en/docs/concepts/services-networking/ingress.md @@ -91,7 +91,7 @@ Different [Ingress controller](/docs/concepts/services-networking/ingress-contro The Ingress [spec](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status) has all the information needed to configure a load balancer or proxy server. Most importantly, it contains a list of rules matched against all incoming requests. Ingress resource only supports rules -for directing HTTP traffic. +for directing HTTP(S) traffic. ### Ingress rules @@ -192,7 +192,7 @@ IngressClass resource will ensure that new Ingresses without an If you have more than one IngressClass marked as the default for your cluster, the admission controller prevents creating new Ingress objects that don't have an `ingressClassName` specified. You can resolve this by ensuring that at most 1 -IngressClasess are marked as default in your cluster. +IngressClasses are marked as default in your cluster. 
{{< /caution >}} ## Types of Ingress diff --git a/content/en/docs/concepts/storage/persistent-volumes.md b/content/en/docs/concepts/storage/persistent-volumes.md index 2c3140de83021..f60b90cb30ee5 100644 --- a/content/en/docs/concepts/storage/persistent-volumes.md +++ b/content/en/docs/concepts/storage/persistent-volumes.md @@ -30,7 +30,7 @@ Managing storage is a distinct problem from managing compute instances. The Pers A _PersistentVolume_ (PV) is a piece of storage in the cluster that has been provisioned by an administrator or dynamically provisioned using [Storage Classes](/docs/concepts/storage/storage-classes/). It is a resource in the cluster just like a node is a cluster resource. PVs are volume plugins like Volumes, but have a lifecycle independent of any individual Pod that uses the PV. This API object captures the details of the implementation of the storage, be that NFS, iSCSI, or a cloud-provider-specific storage system. -A _PersistentVolumeClaim_ (PVC) is a request for storage by a user. It is similar to a Pod. Pods consume node resources and PVCs consume PV resources. Pods can request specific levels of resources (CPU and Memory). Claims can request specific size and access modes (e.g., they can be mounted once read/write or many times read-only). +A _PersistentVolumeClaim_ (PVC) is a request for storage by a user. It is similar to a Pod. Pods consume node resources and PVCs consume PV resources. Pods can request specific levels of resources (CPU and Memory). Claims can request specific size and access modes (e.g., they can be mounted ReadWriteOnce, ReadOnlyMany or ReadWriteMany, see [AccessModes](#access-modes)). While PersistentVolumeClaims allow a user to consume abstract storage resources, it is common that users need PersistentVolumes with varying properties, such as performance, for different problems. Cluster administrators need to be able to offer a variety of PersistentVolumes that differ in more ways than just size and access modes, without exposing users to the details of how those volumes are implemented. For these needs, there is the _StorageClass_ resource. diff --git a/content/en/docs/concepts/storage/storage-classes.md b/content/en/docs/concepts/storage/storage-classes.md index a16baf46b4efa..1f12303eb7f6f 100644 --- a/content/en/docs/concepts/storage/storage-classes.md +++ b/content/en/docs/concepts/storage/storage-classes.md @@ -686,7 +686,7 @@ provisioner: kubernetes.io/portworx-volume parameters: repl: "1" snap_interval: "70" - io_priority: "high" + priority_io: "high" ``` @@ -695,7 +695,7 @@ parameters: * `repl`: number of synchronous replicas to be provided in the form of replication factor `1..3` (default: `1`) A string is expected here i.e. `"1"` and not `1`. -* `io_priority`: determines whether the volume will be created from higher +* `priority_io`: determines whether the volume will be created from higher performance or a lower priority storage `high/medium/low` (default: `low`). * `snap_interval`: clock/time interval in minutes for when to trigger snapshots. Snapshots are incremental based on difference with the prior snapshot, 0 diff --git a/content/en/docs/concepts/storage/volumes.md b/content/en/docs/concepts/storage/volumes.md index c3f43f0fa5d90..acb48349ae9f3 100644 --- a/content/en/docs/concepts/storage/volumes.md +++ b/content/en/docs/concepts/storage/volumes.md @@ -1323,7 +1323,7 @@ persistent volume: of a volume. 
This map must correspond to the map returned in the `volume.attributes` field of the `CreateVolumeResponse` by the CSI driver as defined in the [CSI spec](https://github.com/container-storage-interface/spec/blob/master/spec.md#createvolume).
-   The map is passed to the CSI driver via the `volume_attributes` field in the
+   The map is passed to the CSI driver via the `volume_context` field in the
    `ControllerPublishVolumeRequest`, `NodeStageVolumeRequest`, and `NodePublishVolumeRequest`.
- `controllerPublishSecretRef`: A reference to the secret object containing
diff --git a/content/en/docs/concepts/workloads/controllers/daemonset.md b/content/en/docs/concepts/workloads/controllers/daemonset.md
index 7f1b5c46303e2..c3d8cf36d8e62 100644
--- a/content/en/docs/concepts/workloads/controllers/daemonset.md
+++ b/content/en/docs/concepts/workloads/controllers/daemonset.md
@@ -60,7 +60,7 @@ A DaemonSet also needs a [`.spec`](https://git.k8s.io/community/contributors/dev
 The `.spec.template` is one of the required fields in `.spec`.
-The `.spec.template` is a [pod template](/docs/concepts/workloads/pods/pod-overview/#pod-templates). It has exactly the same schema as a [Pod](/docs/concepts/workloads/pods/pod/), except it is nested and does not have an `apiVersion` or `kind`.
+The `.spec.template` is a [pod template](/docs/concepts/workloads/pods/#pod-templates). It has exactly the same schema as a {{< glossary_tooltip text="Pod" term_id="pod" >}}, except it is nested and does not have an `apiVersion` or `kind`.
 In addition to required fields for a Pod, a Pod template in a DaemonSet has to specify appropriate labels (see [pod selector](#pod-selector)).
diff --git a/content/en/docs/concepts/workloads/controllers/deployment.md b/content/en/docs/concepts/workloads/controllers/deployment.md
index 6b117cdc4449f..2c2fd8c7c2d44 100644
--- a/content/en/docs/concepts/workloads/controllers/deployment.md
+++ b/content/en/docs/concepts/workloads/controllers/deployment.md
@@ -13,8 +13,8 @@ weight: 30
-A _Deployment_ provides declarative updates for [Pods](/docs/concepts/workloads/pods/pod/) and
-[ReplicaSets](/docs/concepts/workloads/controllers/replicaset/).
+A _Deployment_ provides declarative updates for {{< glossary_tooltip text="Pods" term_id="pod" >}} and
+{{< glossary_tooltip term_id="replica-set" text="ReplicaSets" >}}.
 You describe a _desired state_ in a Deployment, and the Deployment {{< glossary_tooltip term_id="controller" >}} changes the actual state to the desired state at a controlled rate. You can define Deployments to create new ReplicaSets, or to remove existing Deployments and adopt all their resources with new Deployments.
@@ -23,8 +23,6 @@ Do not manage ReplicaSets owned by a Deployment. Consider opening an issue in th
 {{< /note >}}
-
-
 ## Use Case
@@ -1053,8 +1051,7 @@ A Deployment also needs a [`.spec` section](https://git.k8s.io/community/contrib
 The `.spec.template` and `.spec.selector` are the only required fields of the `.spec`.
-The `.spec.template` is a [Pod template](/docs/concepts/workloads/pods/pod-overview/#pod-templates). It has exactly the same schema as a [Pod](/docs/concepts/workloads/pods/pod/), except it is nested and does not have an
-`apiVersion` or `kind`.
+The `.spec.template` is a [Pod template](/docs/concepts/workloads/pods/#pod-templates). It has exactly the same schema as a {{< glossary_tooltip text="Pod" term_id="pod" >}}, except it is nested and does not have an `apiVersion` or `kind`.
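For illustration only (the Deployment name, labels, and image below are placeholders rather than content from the page), the Pod template sits under `.spec.template` and its labels must line up with the Deployment's selector:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-deployment      # hypothetical name
spec:
  replicas: 2
  selector:
    matchLabels:
      app: example              # must match the template labels below
  template:
    metadata:
      labels:
        app: example
    spec:                       # this nested spec is an ordinary Pod spec
      containers:
      - name: web
        image: nginx:1.14.2     # placeholder image
```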
In addition to required fields for a Pod, a Pod template in a Deployment must specify appropriate labels and an appropriate restart policy. For labels, make sure not to overlap with other controllers. See [selector](#selector)). @@ -1068,7 +1065,7 @@ allowed, which is the default if not specified. ### Selector -`.spec.selector` is an required field that specifies a [label selector](/docs/concepts/overview/working-with-objects/labels/) +`.spec.selector` is a required field that specifies a [label selector](/docs/concepts/overview/working-with-objects/labels/) for the Pods targeted by this Deployment. `.spec.selector` must match `.spec.template.metadata.labels`, or it will be rejected by the API. @@ -1155,10 +1152,6 @@ created Pod should be ready without any of its containers crashing, for it to be This defaults to 0 (the Pod will be considered available as soon as it is ready). To learn more about when a Pod is considered ready, see [Container Probes](/docs/concepts/workloads/pods/pod-lifecycle/#container-probes). -### Rollback To - -Field `.spec.rollbackTo` has been deprecated in API versions `extensions/v1beta1` and `apps/v1beta1`, and is no longer supported in API versions starting `apps/v1beta2`. Instead, `kubectl rollout undo` as introduced in [Rolling Back to a Previous Revision](#rolling-back-to-a-previous-revision) should be used. - ### Revision History Limit A Deployment's revision history is stored in the ReplicaSets it controls. diff --git a/content/en/docs/concepts/workloads/controllers/garbage-collection.md b/content/en/docs/concepts/workloads/controllers/garbage-collection.md index a20951a35ef1a..79cc905f5819f 100644 --- a/content/en/docs/concepts/workloads/controllers/garbage-collection.md +++ b/content/en/docs/concepts/workloads/controllers/garbage-collection.md @@ -111,12 +111,6 @@ To control the cascading deletion policy, set the `propagationPolicy` field on the `deleteOptions` argument when deleting an Object. Possible values include "Orphan", "Foreground", or "Background". -Prior to Kubernetes 1.9, the default garbage collection policy for many controller resources was `orphan`. -This included ReplicationController, ReplicaSet, StatefulSet, DaemonSet, and -Deployment. For kinds in the `extensions/v1beta1`, `apps/v1beta1`, and `apps/v1beta2` group versions, unless you -specify otherwise, dependent objects are orphaned by default. In Kubernetes 1.9, for all kinds in the `apps/v1` -group version, dependent objects are deleted by default. - Here's an example that deletes dependents in background: ```shell diff --git a/content/en/docs/concepts/workloads/controllers/job.md b/content/en/docs/concepts/workloads/controllers/job.md index a55759c3007a4..81c1280943e2a 100644 --- a/content/en/docs/concepts/workloads/controllers/job.md +++ b/content/en/docs/concepts/workloads/controllers/job.md @@ -122,7 +122,7 @@ A Job also needs a [`.spec` section](https://git.k8s.io/community/contributors/d The `.spec.template` is the only required field of the `.spec`. -The `.spec.template` is a [pod template](/docs/concepts/workloads/pods/pod-overview/#pod-templates). It has exactly the same schema as a [pod](/docs/user-guide/pods), except it is nested and does not have an `apiVersion` or `kind`. +The `.spec.template` is a [pod template](/docs/concepts/workloads/pods/#pod-templates). It has exactly the same schema as a {{< glossary_tooltip text="Pod" term_id="pod" >}}, except it is nested and does not have an `apiVersion` or `kind`. 
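As a hedged sketch (the names, labels, and command are hypothetical), a Job's pod template follows the same pattern; note that a Job's Pods must use a restart policy of `Never` or `OnFailure`, as discussed below:

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: example-job             # hypothetical name
spec:
  backoffLimit: 4               # optional: retries before the Job is marked failed
  template:
    metadata:
      labels:
        app: batch-worker       # labels for the Pods this Job creates
    spec:
      containers:
      - name: worker
        image: busybox          # placeholder image
        command: ["sh", "-c", "echo processing && sleep 10"]
      restartPolicy: Never      # Jobs only allow Never or OnFailure
```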
In addition to required fields for a Pod, a pod template in a Job must specify appropriate labels (see [pod selector](#pod-selector)) and an appropriate restart policy. @@ -215,8 +215,8 @@ To do so, set `.spec.backoffLimit` to specify the number of retries before considering a Job as failed. The back-off limit is set by default to 6. Failed Pods associated with the Job are recreated by the Job controller with an exponential back-off delay (10s, 20s, 40s ...) capped at six minutes. The -back-off count is reset if no new failed Pods appear before the Job's next -status check. +back-off count is reset when a Job's Pod is deleted or successful without any +other Pods for the Job failing around that time. {{< note >}} If your job has `restartPolicy = "OnFailure"`, keep in mind that your container running the Job @@ -474,4 +474,3 @@ object, but maintains complete control over what Pods are created and how work i ## Cron Jobs {#cron-jobs} You can use a [`CronJob`](/docs/concepts/workloads/controllers/cron-jobs/) to create a Job that will run at specified times/dates, similar to the Unix tool `cron`. - diff --git a/content/en/docs/concepts/workloads/controllers/replicationcontroller.md b/content/en/docs/concepts/workloads/controllers/replicationcontroller.md index 2cc828494075f..d59c09fc6b57c 100644 --- a/content/en/docs/concepts/workloads/controllers/replicationcontroller.md +++ b/content/en/docs/concepts/workloads/controllers/replicationcontroller.md @@ -126,7 +126,7 @@ A ReplicationController also needs a [`.spec` section](https://git.k8s.io/commun The `.spec.template` is the only required field of the `.spec`. -The `.spec.template` is a [pod template](/docs/concepts/workloads/pods/pod-overview/#pod-templates). It has exactly the same schema as a [pod](/docs/concepts/workloads/pods/pod/), except it is nested and does not have an `apiVersion` or `kind`. +The `.spec.template` is a [pod template](/docs/concepts/workloads/pods/#pod-templates). It has exactly the same schema as a {{< glossary_tooltip text="Pod" term_id="pod" >}}, except it is nested and does not have an `apiVersion` or `kind`. In addition to required fields for a Pod, a pod template in a ReplicationController must specify appropriate labels and an appropriate restart policy. For labels, make sure not to overlap with other controllers. See [pod selector](#pod-selector). diff --git a/content/en/docs/concepts/workloads/controllers/statefulset.md b/content/en/docs/concepts/workloads/controllers/statefulset.md index 4f8429d668b9a..aff9e7b9f915e 100644 --- a/content/en/docs/concepts/workloads/controllers/statefulset.md +++ b/content/en/docs/concepts/workloads/controllers/statefulset.md @@ -141,6 +141,18 @@ As each Pod is created, it gets a matching DNS subdomain, taking the form: `$(podname).$(governing service domain)`, where the governing service is defined by the `serviceName` field on the StatefulSet. +Depending on how DNS is configured in your cluster, you may not be able to look up the DNS +name for a newly-run Pod immediately. This behavior can occur when other clients in the +cluster have already sent queries for the hostname of the Pod before it was created. +Negative caching (normal in DNS) means that the results of previous failed lookups are +remembered and reused, even after the Pod is running, for at least a few seconds. + +If you need to discover Pods promptly after they are created, you have a few options: + +- Query the Kubernetes API directly (for example, using a watch) rather than relying on DNS lookups. 
+- Decrease the time of caching in your Kubernetes DNS provider (typically this means editing the config map for CoreDNS, which currently caches for 30 seconds).
+
+
 As mentioned in the [limitations](#limitations) section, you are responsible for creating the [Headless Service](/docs/concepts/services-networking/service/#headless-services) responsible for the network identity of the pods.
@@ -278,5 +290,3 @@ StatefulSet will then begin to recreate the Pods using the reverted template.
 * Follow an example of [deploying Cassandra with Stateful Sets](/docs/tutorials/stateful-application/cassandra/).
 * Follow an example of [running a replicated stateful application](/docs/tasks/run-application/run-replicated-stateful-application/).
-
-
diff --git a/content/en/docs/concepts/workloads/pods/_index.md b/content/en/docs/concepts/workloads/pods/_index.md
old mode 100755
new mode 100644
index a105f18fb3327..c7408721b7ae2
--- a/content/en/docs/concepts/workloads/pods/_index.md
+++ b/content/en/docs/concepts/workloads/pods/_index.md
@@ -1,5 +1,271 @@
 ---
-title: "Pods"
+reviewers:
+- erictune
+title: Pods
+content_type: concept
 weight: 10
+no_list: true
+card:
+  name: concepts
+  weight: 60
 ---
+
+
+_Pods_ are the smallest deployable units of computing that you can create and manage in Kubernetes.
+
+A _Pod_ (as in a pod of whales or pea pod) is a group of one or more
+{{< glossary_tooltip text="containers" term_id="container" >}}, with shared storage/network resources, and a specification
+for how to run the containers. A Pod's contents are always co-located and
+co-scheduled, and run in a shared context. A Pod models an
+application-specific "logical host": it contains one or more application
+containers which are relatively tightly coupled.
+In non-cloud contexts, applications executed on the same physical or virtual machine are analogous to cloud applications executed on the same logical host.
+
+As well as application containers, a Pod can contain
+[init containers](/docs/concepts/workloads/pods/init-containers/) that run
+during Pod startup. You can also inject
+[ephemeral containers](/docs/concepts/workloads/pods/ephemeral-containers/)
+for debugging if your cluster offers this.
+
+
+## What is a Pod?
+
+{{< note >}}
+While Kubernetes supports more
+{{< glossary_tooltip text="container runtimes" term_id="container-runtime" >}}
+than just Docker, [Docker](https://www.docker.com/) is the most commonly known
+runtime, and it helps to describe Pods using some terminology from Docker.
+{{< /note >}}
+
+The shared context of a Pod is a set of Linux namespaces, cgroups, and
+potentially other facets of isolation - the same things that isolate a Docker
+container. Within a Pod's context, the individual applications may have
+further sub-isolations applied.
+
+In terms of Docker concepts, a Pod is similar to a group of Docker containers
+with shared namespaces and shared filesystem volumes.
+
+## Using Pods
+
+Usually you don't need to create Pods directly, even singleton Pods. Instead, create them using workload resources such as {{< glossary_tooltip text="Deployment" term_id="deployment" >}} or {{< glossary_tooltip text="Job" term_id="job" >}}.
+If your Pods need to track state, consider the
+{{< glossary_tooltip text="StatefulSet" term_id="statefulset" >}} resource.
+
+Pods in a Kubernetes cluster are used in two main ways:
+
+* **Pods that run a single container**.
The "one-container-per-Pod" model is the + most common Kubernetes use case; in this case, you can think of a Pod as a + wrapper around a single container; Kubernetes manages Pods rather than managing + the containers directly. +* **Pods that run multiple containers that need to work together**. A Pod can + encapsulate an application composed of multiple co-located containers that are + tightly coupled and need to share resources. These co-located containers + form a single cohesive unit of service—for example, one container serving data + stored in a shared volume to the public, while a separate _sidecar_ container + refreshes or updates those files. + The Pod wraps these containers, storage resources, and an ephemeral network + identity together as a single unit. + + {{< note >}} + Grouping multiple co-located and co-managed containers in a single Pod is a + relatively advanced use case. You should use this pattern only in specific + instances in which your containers are tightly coupled. + {{< /note >}} + +Each Pod is meant to run a single instance of a given application. If you want to +scale your application horizontally (to provide more overall resources by running +more instances), you should use multiple Pods, one for each instance. In +Kubernetes, this is typically referred to as _replication_. +Replicated Pods are usually created and managed as a group by a workload resource +and its {{< glossary_tooltip text="controller" term_id="controller" >}}. + +See [Pods and controllers](#pods-and-controllers) for more information on how +Kubernetes uses workload resources, and their controllers, to implement application +scaling and auto-healing. + +### How Pods manage multiple containers + +Pods are designed to support multiple cooperating processes (as containers) that form +a cohesive unit of service. The containers in a Pod are automatically co-located and +co-scheduled on the same physical or virtual machine in the cluster. The containers +can share resources and dependencies, communicate with one another, and coordinate +when and how they are terminated. + +For example, you might have a container that +acts as a web server for files in a shared volume, and a separate "sidecar" container +that updates those files from a remote source, as in the following diagram: + +{{< figure src="/images/docs/pod.svg" alt="example pod diagram" width="50%" >}} + +Some Pods have {{< glossary_tooltip text="init containers" term_id="init-container" >}} as well as {{< glossary_tooltip text="app containers" term_id="app-container" >}}. Init containers run and complete before the app containers are started. + +Pods natively provide two kinds of shared resources for their constituent containers: +[networking](#pod-networking) and [storage](#pod-storage). + +## Working with Pods + +You'll rarely create individual Pods directly in Kubernetes—even singleton Pods. This +is because Pods are designed as relatively ephemeral, disposable entities. When +a Pod gets created (directly by you, or indirectly by a +{{< glossary_tooltip text="controller" term_id="controller" >}}), the new Pod is +scheduled to run on a {{< glossary_tooltip term_id="node" >}} in your cluster. +The Pod remains on that node until the Pod finishes execution, the Pod object is deleted, +the Pod is *evicted* for lack of resources, or the node fails. + +{{< note >}} +Restarting a container in a Pod should not be confused with restarting a Pod. A Pod +is not a process, but an environment for running container(s). 
A Pod persists until +it is deleted. +{{< /note >}} + +When you create the manifest for a Pod object, make sure the name specified is a valid +[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + +### Pods and controllers + +You can use workload resources to create and manage multiple Pods for you. A controller +for the resource handles replication and rollout and automatic healing in case of +Pod failure. For example, if a Node fails, a controller notices that Pods on that +Node have stopped working and creates a replacement Pod. The scheduler places the +replacement Pod onto a healthy Node. + +Here are some examples of workload resources that manage one or more Pods: + +* {{< glossary_tooltip text="Deployment" term_id="deployment" >}} +* {{< glossary_tooltip text="StatefulSet" term_id="statefulset" >}} +* {{< glossary_tooltip text="DaemonSet" term_id="daemonset" >}} + +### Pod templates + +Controllers for {{< glossary_tooltip text="workload" term_id="workload" >}} resources create Pods +from a _pod template_ and manage those Pods on your behalf. + +PodTemplates are specifications for creating Pods, and are included in workload resources such as +[Deployments](/docs/concepts/workloads/controllers/deployment/), +[Jobs](/docs/concepts/jobs/run-to-completion-finite-workloads/), and +[DaemonSets](/docs/concepts/workloads/controllers/daemonset/). + +Each controller for a workload resource uses the `PodTemplate` inside the workload +object to make actual Pods. The `PodTemplate` is part of the desired state of whatever +workload resource you used to run your app. + +The sample below is a manifest for a simple Job with a `template` that starts one +container. The container in that Pod prints a message then pauses. + +```yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: hello +spec: + template: + # This is the pod template + spec: + containers: + - name: hello + image: busybox + command: ['sh', '-c', 'echo "Hello, Kubernetes!" && sleep 3600'] + restartPolicy: OnFailure + # The pod template ends here +``` + +Modifying the pod template or switching to a new pod template has no effect on the +Pods that already exist. Pods do not receive template updates directly. Instead, +a new Pod is created to match the revised pod template. + +For example, the deployment controller ensures that the running Pods match the current +pod template for each Deployment object. If the template is updated, the Deployment has +to remove the existing Pods and create new Pods based on the updated template. Each workload +resource implements its own rules for handling changes to the Pod template. + +On Nodes, the {{< glossary_tooltip term_id="kubelet" text="kubelet" >}} does not +directly observe or manage any of the details around pod templates and updates; those +details are abstracted away. That abstraction and separation of concerns simplifies +system semantics, and makes it feasible to extend the cluster's behavior without +changing existing code. + +## Resource sharing and communication + +Pods enable data sharing and communication among their constituent +containters. + +### Storage in Pods {#pod-storage} + +A Pod can specify a set of shared storage +{{< glossary_tooltip text="volumes" term_id="volume" >}}. All containers +in the Pod can access the shared volumes, allowing those containers to +share data. Volumes also allow persistent data in a Pod to survive +in case one of the containers within needs to be restarted. 
See
+[Storage](/docs/concepts/storage/) for more information on how
+Kubernetes implements shared storage and makes it available to Pods.
+
+### Pod networking
+
+Each Pod is assigned a unique IP address for each address family. Every
+container in a Pod shares the network namespace, including the IP address and
+network ports. Inside a Pod (and **only** then), the containers that belong to the Pod
+can communicate with one another using `localhost`. When containers in a Pod communicate
+with entities *outside the Pod*,
+they must coordinate how they use the shared network resources (such as ports).
+Within a Pod, containers share an IP address and port space, and
+can find each other via `localhost`. The containers in a Pod can also communicate
+with each other using standard inter-process communications like SystemV semaphores
+or POSIX shared memory. Containers in different Pods have distinct IP addresses
+and cannot communicate by IPC without
+[special configuration](/docs/concepts/policy/pod-security-policy/).
+Containers that want to interact with a container running in a different Pod can
+use IP networking to communicate.
+
+Containers within the Pod see the system hostname as being the same as the configured
+`name` for the Pod. There's more about this in the [networking](/docs/concepts/cluster-administration/networking/)
+section.
+
+## Privileged mode for containers
+
+Any container in a Pod can enable privileged mode, using the `privileged` flag on the [security context](/docs/tasks/configure-pod-container/security-context/) of the container spec. This is useful for containers that want to use operating system administrative capabilities such as manipulating the network stack or accessing hardware devices.
+Processes within a privileged container get almost the same privileges that are available to processes outside a container.
+
+{{< note >}}
+Your {{< glossary_tooltip text="container runtime" term_id="container-runtime" >}} must support the concept of a privileged container for this setting to be relevant.
+{{< /note >}}
+
+## Static Pods
+
+_Static Pods_ are managed directly by the kubelet daemon on a specific node,
+without the {{< glossary_tooltip text="API server" term_id="kube-apiserver" >}}
+observing them.
+Whereas most Pods are managed by the control plane (for example, a
+{{< glossary_tooltip text="Deployment" term_id="deployment" >}}), for static
+Pods, the kubelet directly supervises each static Pod (and restarts it if it fails).
+
+Static Pods are always bound to one {{< glossary_tooltip term_id="kubelet" >}} on a specific node.
+The main use for static Pods is to run a self-hosted control plane: in other words,
+using the kubelet to supervise the individual [control plane components](/docs/concepts/overview/components/#control-plane-components).
+
+The kubelet automatically tries to create a {{< glossary_tooltip text="mirror Pod" term_id="mirror-pod" >}}
+on the Kubernetes API server for each static Pod.
+This means that the Pods running on a node are visible on the API server,
+but cannot be controlled from there.
+
+## {{% heading "whatsnext" %}}
+
+* Learn about the [lifecycle of a Pod](/docs/concepts/workloads/pods/pod-lifecycle/).
+* Learn about [PodPresets](/docs/concepts/workloads/pods/podpreset/).
+* Learn about [RuntimeClass](/docs/concepts/containers/runtime-class/) and how you can use it to
+  configure different Pods with different container runtime configurations.
+* Read about [Pod topology spread constraints](/docs/concepts/workloads/pods/pod-topology-spread-constraints/). +* Read about [PodDisruptionBudget](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/) and how you can use it to manage application availability during disruptions. +* Pod is a top-level resource in the Kubernetes REST API. + The [Pod](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#pod-v1-core) + object definition describes the object in detail. +* [The Distributed System Toolkit: Patterns for Composite Containers](https://kubernetes.io/blog/2015/06/the-distributed-system-toolkit-patterns) explains common layouts for Pods with more than one container. + +To understand the context for why Kubernetes wraps a common Pod API in other resources (such as {{< glossary_tooltip text="StatefulSets" term_id="statefulset" >}} or {{< glossary_tooltip text="Deployments" term_id="deployment" >}}, you can read about the prior art, including: + * [Aurora](http://aurora.apache.org/documentation/latest/reference/configuration/#job-schema) + * [Borg](https://research.google.com/pubs/pub43438.html) + * [Marathon](https://mesosphere.github.io/marathon/docs/rest-api.html) + * [Omega](https://research.google/pubs/pub41684/) + * [Tupperware](https://engineering.fb.com/data-center-engineering/tupperware/). diff --git a/content/en/docs/concepts/workloads/pods/disruptions.md b/content/en/docs/concepts/workloads/pods/disruptions.md index 589bde56685e6..0810b6fec74bb 100644 --- a/content/en/docs/concepts/workloads/pods/disruptions.md +++ b/content/en/docs/concepts/workloads/pods/disruptions.md @@ -11,17 +11,15 @@ weight: 60 This guide is for application owners who want to build highly available applications, and thus need to understand -what types of Disruptions can happen to Pods. +what types of disruptions can happen to Pods. -It is also for Cluster Administrators who want to perform automated +It is also for cluster administrators who want to perform automated cluster actions, like upgrading and autoscaling clusters. - - -## Voluntary and Involuntary Disruptions +## Voluntary and involuntary disruptions Pods do not disappear until someone (a person or a controller) destroys them, or there is an unavoidable hardware or system software error. @@ -48,7 +46,7 @@ Administrator. Typical application owner actions include: - updating a deployment's pod template causing a restart - directly deleting a pod (e.g. by accident) -Cluster Administrator actions include: +Cluster administrator actions include: - [Draining a node](/docs/tasks/administer-cluster/safely-drain-node/) for repair or upgrade. - Draining a node from a cluster to scale the cluster down (learn about @@ -68,7 +66,7 @@ Not all voluntary disruptions are constrained by Pod Disruption Budgets. For exa deleting deployments or pods bypasses Pod Disruption Budgets. {{< /caution >}} -## Dealing with Disruptions +## Dealing with disruptions Here are some ways to mitigate involuntary disruptions: @@ -90,58 +88,58 @@ of cluster (node) autoscaling may cause voluntary disruptions to defragment and Your cluster administrator or hosting provider should have documented what level of voluntary disruptions, if any, to expect. -Kubernetes offers features to help run highly available applications at the same -time as frequent voluntary disruptions. We call this set of features -*Disruption Budgets*. 
- -## How Disruption Budgets Work +## Pod disruption budgets {{< feature-state for_k8s_version="v1.5" state="beta" >}} -An Application Owner can create a `PodDisruptionBudget` object (PDB) for each application. -A PDB limits the number of pods of a replicated application that are down simultaneously from -voluntary disruptions. For example, a quorum-based application would +Kubernetes offers features to help you run highly available applications even when you +introduce frequent voluntary disruptions. + +As an application owner, you can create a PodDisruptionBudget (PDB) for each application. +A PDB limits the number of Pods of a replicated application that are down simultaneously from +voluntary disruptions. For example, a quorum-based application would like to ensure that the number of replicas running is never brought below the number needed for a quorum. A web front end might want to ensure that the number of replicas serving load never falls below a certain percentage of the total. Cluster managers and hosting providers should use tools which -respect Pod Disruption Budgets by calling the [Eviction API](/docs/tasks/administer-cluster/safely-drain-node/#the-eviction-api) -instead of directly deleting pods or deployments. Examples are the `kubectl drain` command -and the Kubernetes-on-GCE cluster upgrade script (`cluster/gce/upgrade.sh`). +respect PodDisruptionBudgets by calling the [Eviction API](/docs/tasks/administer-cluster/safely-drain-node/#the-eviction-api) +instead of directly deleting pods or deployments. -When a cluster administrator wants to drain a node -they use the `kubectl drain` command. That tool tries to evict all -the pods on the machine. The eviction request may be temporarily rejected, -and the tool periodically retries all failed requests until all pods -are terminated, or until a configurable timeout is reached. +For example, the `kubectl drain` subcommand lets you mark a node as going out of +service. When you run `kubectl drain`, the tool tries to evict all of the Pods on +the Node you're taking out of service. The eviction request that `kubectl` submits on +your behalf may be temporarily rejected, so the tool periodically retries all failed +requests until all Pods on the target node are terminated, or until a configurable timeout +is reached. A PDB specifies the number of replicas that an application can tolerate having, relative to how many it is intended to have. For example, a Deployment which has a `.spec.replicas: 5` is supposed to have 5 pods at any given time. If its PDB allows for there to be 4 at a time, -then the Eviction API will allow voluntary disruption of one, but not two pods, at a time. +then the Eviction API will allow voluntary disruption of one (but not two) pods at a time. The group of pods that comprise the application is specified using a label selector, the same as the one used by the application's controller (deployment, stateful-set, etc). -The "intended" number of pods is computed from the `.spec.replicas` of the pods controller. -The controller is discovered from the pods using the `.metadata.ownerReferences` of the object. +The "intended" number of pods is computed from the `.spec.replicas` of the workload resource +that is managing those pods. The control plane discovers the owning workload resource by +examining the `.metadata.ownerReferences` of the Pod. PDBs cannot prevent [involuntary disruptions](#voluntary-and-involuntary-disruptions) from occurring, but they do count against the budget. 
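As an illustrative sketch (the names and numbers are placeholders; the `policy/v1beta1` API group matches the beta feature state noted above), a PDB that keeps at least two Pods of an application available during voluntary disruptions could look like this:

```yaml
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: example-pdb             # hypothetical name
spec:
  minAvailable: 2               # never allow voluntary evictions below 2 available Pods
  selector:
    matchLabels:
      app: example              # same labels used by the owning workload resource
```

With a Deployment of `.spec.replicas: 3` selecting the same labels, the Eviction API would allow one Pod to be disrupted voluntarily at a time, but not two.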
Pods which are deleted or unavailable due to a rolling upgrade to an application do count -against the disruption budget, but controllers (like deployment and stateful-set) -are not limited by PDBs when doing rolling upgrades -- the handling of failures -during application updates is configured in the controller spec. -(Learn about [updating a deployment](/docs/concepts/workloads/controllers/deployment/#updating-a-deployment).) +against the disruption budget, but workload resources (such as Deployment and StatefulSet) +are not limited by PDBs when doing rolling upgrades. Instead, the handling of failures +during application updates is configured in the spec for the specific workload resource. -When a pod is evicted using the eviction API, it is gracefully terminated (see -`terminationGracePeriodSeconds` in [PodSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core).) +When a pod is evicted using the eviction API, it is gracefully +[terminated](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination), honoring the +`terminationGracePeriodSeconds` setting in its [PodSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core).) -## PDB Example +## PodDisruptionBudget example {#pdb-example} Consider a cluster with 3 nodes, `node-1` through `node-3`. The cluster is running several applications. One of them has 3 replicas initially called @@ -272,4 +270,6 @@ the nodes in your cluster, such as a node or system software upgrade, here are s * Learn more about [draining nodes](/docs/tasks/administer-cluster/safely-drain-node/) +* Learn about [updating a deployment](/docs/concepts/workloads/controllers/deployment/#updating-a-deployment) + including steps to maintain its availability during the rollout. diff --git a/content/en/docs/concepts/workloads/pods/pod-lifecycle.md b/content/en/docs/concepts/workloads/pods/pod-lifecycle.md index d72265faf5fa1..9075bf1a8bc07 100644 --- a/content/en/docs/concepts/workloads/pods/pod-lifecycle.md +++ b/content/en/docs/concepts/workloads/pods/pod-lifecycle.md @@ -6,16 +6,60 @@ weight: 30 -{{< comment >}}Updated: 4/14/2015{{< /comment >}} -{{< comment >}}Edited and moved to Concepts section: 2/2/17{{< /comment >}} - -This page describes the lifecycle of a Pod. +This page describes the lifecycle of a Pod. Pods follow a defined lifecycle, starting +in the `Pending` [phase](#pod-phase), moving through `Running` if at least one +of its primary containers starts OK, and then through either the `Succeeded` or +`Failed` phases depending on whether any container in the Pod terminated in failure. +Whilst a Pod is running, the kubelet is able to restart containers to handle some +kind of faults. Within a Pod, Kubernetes tracks different container +[states](#container-states) and handles +In the Kubernetes API, Pods have both a specification and an actual status. The +status for a Pod object consists of a set of [Pod conditions](#pod-conditions). +You can also inject [custom readiness information](#pod-readiness-gate) into the +condition data for a Pod, if that is useful to your application. +Pods are only [scheduled](/docs/concepts/scheduling-eviction/) once in their lifetime. +Once a Pod is scheduled (assigned) to a Node, the Pod runs on that Node until it stops +or is [terminated](#pod-termination). +## Pod lifetime + +Like individual application containers, Pods are considered to be relatively +ephemeral (rather than durable) entities. 
Pods are created, assigned a unique
+ID ([UID](/docs/concepts/overview/working-with-objects/names/#uids)), and scheduled
+to nodes where they remain until termination (according to restart policy) or
+deletion.
+If a {{< glossary_tooltip term_id="node" >}} dies, the Pods scheduled to that node
+are [scheduled for deletion](#pod-garbage-collection) after a timeout period.
+
+Pods do not, by themselves, self-heal. If a Pod is scheduled to a
+{{< glossary_tooltip text="node" term_id="node" >}} that then fails,
+or if the scheduling operation itself fails, the Pod is deleted; likewise, a Pod won't
+survive an eviction due to a lack of resources or Node maintenance. Kubernetes uses a
+higher-level abstraction, called a
+{{< glossary_tooltip term_id="controller" text="controller" >}}, that handles the work of
+managing the relatively disposable Pod instances.
+
+A given Pod (as defined by a UID) is never "rescheduled" to a different node; instead,
+that Pod can be replaced by a new, near-identical Pod, with even the same name if
+desired, but with a different UID.
+
+When something is said to have the same lifetime as a Pod, such as a
+{{< glossary_tooltip term_id="volume" text="volume" >}},
+that means that the thing exists as long as that specific Pod (with that exact UID)
+exists. If that Pod is deleted for any reason, and even if an identical replacement
+is created, the related thing (a volume, in this example) is also destroyed and
+created anew.
+
+{{< figure src="/images/docs/pod.svg" title="Pod diagram" width="50%" >}}
+
+*A multi-container Pod that contains a file puller and a
+web server that uses a persistent volume for shared storage between the containers.*
+
 ## Pod phase
 A Pod's `status` field is a
@@ -24,7 +68,7 @@ object, which has a `phase` field.
 The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle.
 The phase is not intended to be a comprehensive rollup of observations
-of Container or Pod state, nor is it intended to be a comprehensive state machine.
+of container or Pod state, nor is it intended to be a comprehensive state machine.
 The number and meanings of Pod phase values are tightly guarded.
 Other than what is documented here, nothing should be assumed about Pods that
@@ -34,188 +78,106 @@ Here are the possible values for `phase`:
 Value | Description
 :-----|:-----------
-`Pending` | The Pod has been accepted by the Kubernetes system, but one or more of the Container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while.
-`Running` | The Pod has been bound to a node, and all of the Containers have been created. At least one Container is still running, or is in the process of starting or restarting.
-`Succeeded` | All Containers in the Pod have terminated in success, and will not be restarted.
-`Failed` | All Containers in the Pod have terminated, and at least one Container has terminated in failure. That is, the Container either exited with non-zero status or was terminated by the system.
-`Unknown` | For some reason the state of the Pod could not be obtained, typically due to an error in communicating with the host of the Pod.
-
-## Pod conditions
-
-A Pod has a PodStatus, which has an array of
-[PodConditions](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podcondition-v1-core)
-through which the Pod has or has not passed.
Each element of the PodCondition -array has six possible fields: - -* The `lastProbeTime` field provides a timestamp for when the Pod condition - was last probed. - -* The `lastTransitionTime` field provides a timestamp for when the Pod - last transitioned from one status to another. - -* The `message` field is a human-readable message indicating details - about the transition. - -* The `reason` field is a unique, one-word, CamelCase reason for the condition's last transition. - -* The `status` field is a string, with possible values "`True`", "`False`", and "`Unknown`". - -* The `type` field is a string with the following possible values: - - * `PodScheduled`: the Pod has been scheduled to a node; - * `Ready`: the Pod is able to serve requests and should be added to the load - balancing pools of all matching Services; - * `Initialized`: all [init containers](/docs/concepts/workloads/pods/init-containers) - have started successfully; - * `ContainersReady`: all containers in the Pod are ready. - - - -## Container probes - -A [Probe](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#probe-v1-core) is a diagnostic -performed periodically by the [kubelet](/docs/admin/kubelet/) -on a Container. To perform a diagnostic, -the kubelet calls a -[Handler](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#handler-v1-core) implemented by -the Container. There are three types of handlers: - -* [ExecAction](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#execaction-v1-core): - Executes a specified command inside the Container. The diagnostic - is considered successful if the command exits with a status code of 0. - -* [TCPSocketAction](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#tcpsocketaction-v1-core): - Performs a TCP check against the Container's IP address on - a specified port. The diagnostic is considered successful if the port is open. - -* [HTTPGetAction](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#httpgetaction-v1-core): - Performs an HTTP Get request against the Container's IP - address on a specified port and path. The diagnostic is considered successful - if the response has a status code greater than or equal to 200 and less than 400. - -Each probe has one of three results: - -* Success: The Container passed the diagnostic. -* Failure: The Container failed the diagnostic. -* Unknown: The diagnostic failed, so no action should be taken. +`Pending` | The Pod has been accepted by the Kubernetes cluster, but one or more of the containers has not been set up and made ready to run. This includes time a Pod spends waiting to bescheduled as well as the time spent downloading container images over the network. +`Running` | The Pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. +`Succeeded` | All containers in the Pod have terminated in success, and will not be restarted. +`Failed` | All containers in the Pod have terminated, and at least one container has terminated in failure. That is, the container either exited with non-zero status or was terminated by the system. +`Unknown` | For some reason the state of the Pod could not be obtained. This phase typically occurs due to an error in communicating with the node where the Pod should be running. 
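As a rough sketch of how the phase surfaces on a live object (all values below are illustrative, not taken from the page), the `status` stanza returned by the API server reports `phase` alongside other status fields:

```yaml
# Abbreviated status of a running Pod (illustrative values only)
status:
  phase: Running
  conditions:
  - type: Ready
    status: "True"
  hostIP: 10.0.0.5
  podIP: 172.17.0.4
  startTime: "2020-05-30T10:00:00Z"
```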
-The kubelet can optionally perform and react to three kinds of probes on running
-Containers:
-
-* `livenessProbe`: Indicates whether the Container is running. If
-  the liveness probe fails, the kubelet kills the Container, and the Container
-  is subjected to its [restart policy](#restart-policy). If a Container does not
-  provide a liveness probe, the default state is `Success`.
-
-* `readinessProbe`: Indicates whether the Container is ready to service requests.
-  If the readiness probe fails, the endpoints controller removes the Pod's IP
-  address from the endpoints of all Services that match the Pod. The default
-  state of readiness before the initial delay is `Failure`. If a Container does
-  not provide a readiness probe, the default state is `Success`.
-
-* `startupProbe`: Indicates whether the application within the Container is started.
-  All other probes are disabled if a startup probe is provided, until it succeeds.
-  If the startup probe fails, the kubelet kills the Container, and the Container
-  is subjected to its [restart policy](#restart-policy). If a Container does not
-  provide a startup probe, the default state is `Success`.
-
-### When should you use a liveness probe?
-
-{{< feature-state for_k8s_version="v1.0" state="stable" >}}
-
-If the process in your Container is able to crash on its own whenever it
-encounters an issue or becomes unhealthy, you do not necessarily need a liveness
-probe; the kubelet will automatically perform the correct action in accordance
-with the Pod's `restartPolicy`.
+If a node dies or is disconnected from the rest of the cluster, Kubernetes
+applies a policy for setting the `phase` of all Pods on the lost node to Failed.
-If you'd like your Container to be killed and restarted if a probe fails, then
-specify a liveness probe, and specify a `restartPolicy` of Always or OnFailure.
+## Container states
-### When should you use a readiness probe?
+As well as the [phase](#pod-phase) of the Pod overall, Kubernetes tracks the state of
+each container inside a Pod. You can use
+[container lifecycle hooks](/docs/concepts/containers/container-lifecycle-hooks/) to
+trigger events to run at certain points in a container's lifecycle.
-{{< feature-state for_k8s_version="v1.0" state="stable" >}}
+Once the {{< glossary_tooltip text="scheduler" term_id="kube-scheduler" >}}
+assigns a Pod to a Node, the kubelet starts creating containers for that Pod
+using a {{< glossary_tooltip text="container runtime" term_id="container-runtime" >}}.
+There are three possible container states: `Waiting`, `Running`, and `Terminated`.
-If you'd like to start sending traffic to a Pod only when a probe succeeds,
-specify a readiness probe. In this case, the readiness probe might be the same
-as the liveness probe, but the existence of the readiness probe in the spec means
-that the Pod will start without receiving any traffic and only start receiving
-traffic after the probe starts succeeding.
-If your Container needs to work on loading large data, configuration files, or migrations during startup, specify a readiness probe.
+To check the state of a Pod's containers, you can use
+`kubectl describe pod <name-of-pod>`. The output shows the state for each container
+within that Pod.
-If you want your Container to be able to take itself down for maintenance, you
-can specify a readiness probe that checks an endpoint specific to readiness that
-is different from the liveness probe.
+
Each state has a specific meaning:
-Note that if you just want to be able to drain requests when the Pod is deleted,
-you do not necessarily need a readiness probe; on deletion, the Pod automatically
-puts itself into an unready state regardless of whether the readiness probe exists.
-The Pod remains in the unready state while it waits for the Containers in the Pod
-to stop.
+### `Waiting` {#container-state-waiting}
-### When should you use a startup probe?
+If a container is not in either the `Running` or `Terminated` state, it is `Waiting`.
+A container in the `Waiting` state is still running the operations it requires in
+order to complete start up: for example, pulling the container image from a container
+image registry, or applying {{< glossary_tooltip text="Secret" term_id="secret" >}}
+data.
+When you use `kubectl` to query a Pod with a container that is `Waiting`, you also see
+a Reason field to summarize why the container is in that state.
-{{< feature-state for_k8s_version="v1.16" state="alpha" >}}
+### `Running` {#container-state-running}
-If your Container usually starts in more than `initialDelaySeconds + failureThreshold × periodSeconds`, you should specify a startup probe that checks the same endpoint as the liveness probe. The default for `periodSeconds` is 30s.
-You should then set its `failureThreshold` high enough to allow the Container to start, without changing the default values of the liveness probe. This helps to protect against deadlocks.
+The `Running` status indicates that a container is executing without issues. If there
+was a `postStart` hook configured, it has already executed and finished. When you use
+`kubectl` to query a Pod with a container that is `Running`, you also see information
+about when the container entered the `Running` state.
-For more information about how to set up a liveness, readiness, startup probe, see
-[Configure Liveness, Readiness and Startup Probes](/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/).
+### `Terminated` {#container-state-terminated}
-## Pod and Container status
+A container in the `Terminated` state has begun execution and has then either run to
+completion or has failed for some reason. When you use `kubectl` to query a Pod with
+a container that is `Terminated`, you see a reason, an exit code, and the start and
+finish time for that container's period of execution.
-For detailed information about Pod Container status, see
-[PodStatus](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podstatus-v1-core)
-and
-[ContainerStatus](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#containerstatus-v1-core).
-Note that the information reported as Pod status depends on the current
-[ContainerState](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#containerstatus-v1-core).
+If a container has a `preStop` hook configured, that runs before the container enters
+the `Terminated` state.
-## Container States
+## Container restart policy {#restart-policy}
-Once Pod is assigned to a node by scheduler, kubelet starts creating containers using container runtime.There are three possible states of containers: Waiting, Running and Terminated. To check state of container, you can use `kubectl describe pod [POD_NAME]`. State is displayed for each container within that Pod.
+The `spec` of a Pod has a `restartPolicy` field with possible values Always, OnFailure,
+and Never. The default value is Always.
-* `Waiting`: Default state of container. If container is not in either Running or Terminated state, it is in Waiting state. A container in Waiting state still runs its required operations, like pulling images, applying Secrets, etc. Along with this state, a message and reason about the state are displayed to provide more information.
+The `restartPolicy` applies to all containers in the Pod. `restartPolicy` only
+refers to restarts of the containers by the kubelet on the same node. After containers
+in a Pod exit, the kubelet restarts them with an exponential back-off delay (10s, 20s,
+40s, …) that is capped at five minutes. Once a container has executed for 10 minutes
+without any problems, the kubelet resets the restart backoff timer for that container.
-  ```yaml
-  ...
-  State: Waiting
-  Reason: ErrImagePull
-  ...
-  ```
+## Pod conditions
-* `Running`: Indicates that the container is executing without issues. The `postStart` hook (if any) is executed prior to the container entering a Running state. This state also displays the time when the container entered Running state.
+A Pod has a PodStatus, which has an array of
+[PodConditions](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podcondition-v1-core)
+through which the Pod has or has not passed:
-  ```yaml
-  ...
-  State: Running
-  Started: Wed, 30 Jan 2019 16:46:38 +0530
-  ...
-  ```
+* `PodScheduled`: the Pod has been scheduled to a node.
+* `ContainersReady`: all containers in the Pod are ready.
+* `Initialized`: all [init containers](/docs/concepts/workloads/pods/init-containers/)
+  have started successfully.
+* `Ready`: the Pod is able to serve requests and should be added to the load
+  balancing pools of all matching Services.
-* `Terminated`: Indicates that the container completed its execution and has stopped running. A container enters into this when it has successfully completed execution or when it has failed for some reason. Regardless, a reason and exit code is displayed, as well as the container's start and finish time. Before a container enters into Terminated, `preStop` hook (if any) is executed.
+Field name | Description
+:--------------------|:-----------
+`type` | Name of this Pod condition.
+`status` | Indicates whether that condition is applicable, with possible values "`True`", "`False`", or "`Unknown`".
+`lastProbeTime` | Timestamp of when the Pod condition was last probed.
+`lastTransitionTime` | Timestamp for when the Pod last transitioned from one status to another.
+`reason` | Machine-readable, UpperCamelCase text indicating the reason for the condition's last transition.
+`message` | Human-readable message indicating details about the last status transition.
-  ```yaml
-  ...
-  State: Terminated
-  Reason: Completed
-  Exit Code: 0
-  Started: Wed, 30 Jan 2019 11:45:26 +0530
-  Finished: Wed, 30 Jan 2019 11:45:26 +0530
-  ...
-  ```
-## Pod readiness {#pod-readiness-gate}
+### Pod readiness {#pod-readiness-gate}
 {{< feature-state for_k8s_version="v1.14" state="stable" >}}
 Your application can inject extra feedback or signals into PodStatus:
-_Pod readiness_. To use this, set `readinessGates` in the PodSpec to specify
-a list of additional conditions that the kubelet evaluates for Pod readiness.
+_Pod readiness_. To use this, set `readinessGates` in the Pod's `spec` to
+specify a list of additional conditions that the kubelet evaluates for Pod readiness.
 Readiness gates are determined by the current state of `status.condition`
-fields for the Pod.
If Kubernetes cannot find such a -condition in the `status.conditions` field of a Pod, the status of the condition +fields for the Pod. If Kubernetes cannot find such a condition in the +`status.conditions` field of a Pod, the status of the condition is defaulted to "`False`". Here is an example: @@ -258,152 +220,226 @@ For a Pod that uses custom conditions, that Pod is evaluated to be ready **only* when both the following statements apply: * All containers in the Pod are ready. -* All conditions specified in `ReadinessGates` are `True`. +* All conditions specified in `readinessGates` are `True`. When a Pod's containers are Ready but at least one custom condition is missing or -`False`, the kubelet sets the Pod's condition to `ContainersReady`. +`False`, the kubelet sets the Pod's [condition](#pod-condition) to `ContainersReady`. -## Restart policy +## Container probes -A PodSpec has a `restartPolicy` field with possible values Always, OnFailure, -and Never. The default value is Always. -`restartPolicy` applies to all Containers in the Pod. `restartPolicy` only -refers to restarts of the Containers by the kubelet on the same node. Exited -Containers that are restarted by the kubelet are restarted with an exponential -back-off delay (10s, 20s, 40s ...) capped at five minutes, and is reset after ten -minutes of successful execution. As discussed in the -[Pods document](/docs/user-guide/pods/#durability-of-pods-or-lack-thereof), -once bound to a node, a Pod will never be rebound to another node. +A [Probe](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#probe-v1-core) is a diagnostic +performed periodically by the [kubelet](/docs/admin/kubelet/) +on a Container. To perform a diagnostic, +the kubelet calls a +[Handler](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#handler-v1-core) implemented by +the container. There are three types of handlers: +* [ExecAction](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#execaction-v1-core): + Executes a specified command inside the container. The diagnostic + is considered successful if the command exits with a status code of 0. -## Pod lifetime +* [TCPSocketAction](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#tcpsocketaction-v1-core): + Performs a TCP check against the Pod's IP address on + a specified port. The diagnostic is considered successful if the port is open. -In general, Pods remain until a human or -{{< glossary_tooltip term_id="controller" text="controller" >}} process -explicitly removes them. -The control plane cleans up terminated Pods (with a phase of `Succeeded` or -`Failed`), when the number of Pods exceeds the configured threshold -(determined by `terminated-pod-gc-threshold` in the kube-controller-manager). -This avoids a resource leak as Pods are created and terminated over time. +* [HTTPGetAction](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#httpgetaction-v1-core): + Performs an HTTP `GET` request against the Pod's IP + address on a specified port and path. The diagnostic is considered successful + if the response has a status code greater than or equal to 200 and less than 400. -There are different kinds of resources for creating Pods: +Each probe has one of three results: -- Use a {{< glossary_tooltip term_id="deployment" >}}, - {{< glossary_tooltip term_id="replica-set" >}} or {{< glossary_tooltip term_id="statefulset" >}} - for Pods that are not expected to terminate, for example, web servers. 
+* `Success`: The container passed the diagnostic. +* `Failure`: The container failed the diagnostic. +* `Unknown`: The diagnostic failed, so no action should be taken. -- Use a {{< glossary_tooltip term_id="job" >}} - for Pods that are expected to terminate once their work is complete; - for example, batch computations. Jobs are appropriate only for Pods with - `restartPolicy` equal to OnFailure or Never. +The kubelet can optionally perform and react to three kinds of probes on running +containers: -- Use a {{< glossary_tooltip term_id="daemonset" >}} - for Pods that need to run one per eligible node. +* `livenessProbe`: Indicates whether the container is running. If + the liveness probe fails, the kubelet kills the container, and the container + is subjected to its [restart policy](#restart-policy). If a Container does not + provide a liveness probe, the default state is `Success`. -All workload resources contain a PodSpec. It is recommended to create the -appropriate workload resource and let the resource's controller create Pods -for you, rather than directly create Pods yourself. +* `readinessProbe`: Indicates whether the container is ready to respond to requests. + If the readiness probe fails, the endpoints controller removes the Pod's IP + address from the endpoints of all Services that match the Pod. The default + state of readiness before the initial delay is `Failure`. If a Container does + not provide a readiness probe, the default state is `Success`. -If a node dies or is disconnected from the rest of the cluster, Kubernetes -applies a policy for setting the `phase` of all Pods on the lost node to Failed. +* `startupProbe`: Indicates whether the application within the container is started. + All other probes are disabled if a startup probe is provided, until it succeeds. + If the startup probe fails, the kubelet kills the container, and the container + is subjected to its [restart policy](#restart-policy). If a Container does not + provide a startup probe, the default state is `Success`. + +For more information about how to set up a liveness, readiness, or startup probe, +see [Configure Liveness, Readiness and Startup Probes](/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/). -## Examples +### When should you use a liveness probe? -### Advanced liveness probe example +{{< feature-state for_k8s_version="v1.0" state="stable" >}} -Liveness probes are executed by the kubelet, so all requests are made in the -kubelet network namespace. +If the process in your container is able to crash on its own whenever it +encounters an issue or becomes unhealthy, you do not necessarily need a liveness +probe; the kubelet will automatically perform the correct action in accordance +with the Pod's `restartPolicy`. -```yaml -apiVersion: v1 -kind: Pod -metadata: - labels: - test: liveness - name: liveness-http -spec: - containers: - - args: - - /server - image: k8s.gcr.io/liveness - livenessProbe: - httpGet: - # when "host" is not defined, "PodIP" will be used - # host: my-host - # when "scheme" is not defined, "HTTP" scheme will be used. Only "HTTP" and "HTTPS" are allowed - # scheme: HTTPS - path: /healthz - port: 8080 - httpHeaders: - - name: X-Custom-Header - value: Awesome - initialDelaySeconds: 15 - timeoutSeconds: 1 - name: liveness -``` +If you'd like your container to be killed and restarted if a probe fails, then +specify a liveness probe, and specify a `restartPolicy` of Always or OnFailure. -### Example states - - * Pod is running and has one Container. 
Container exits with success. - * Log completion event. - * If `restartPolicy` is: - * Always: Restart Container; Pod `phase` stays Running. - * OnFailure: Pod `phase` becomes Succeeded. - * Never: Pod `phase` becomes Succeeded. - - * Pod is running and has one Container. Container exits with failure. - * Log failure event. - * If `restartPolicy` is: - * Always: Restart Container; Pod `phase` stays Running. - * OnFailure: Restart Container; Pod `phase` stays Running. - * Never: Pod `phase` becomes Failed. - - * Pod is running and has two Containers. Container 1 exits with failure. - * Log failure event. - * If `restartPolicy` is: - * Always: Restart Container; Pod `phase` stays Running. - * OnFailure: Restart Container; Pod `phase` stays Running. - * Never: Do not restart Container; Pod `phase` stays Running. - * If Container 1 is not running, and Container 2 exits: - * Log failure event. - * If `restartPolicy` is: - * Always: Restart Container; Pod `phase` stays Running. - * OnFailure: Restart Container; Pod `phase` stays Running. - * Never: Pod `phase` becomes Failed. - - * Pod is running and has one Container. Container runs out of memory. - * Container terminates in failure. - * Log OOM event. - * If `restartPolicy` is: - * Always: Restart Container; Pod `phase` stays Running. - * OnFailure: Restart Container; Pod `phase` stays Running. - * Never: Log failure event; Pod `phase` becomes Failed. - - * Pod is running, and a disk dies. - * Kill all Containers. - * Log appropriate event. - * Pod `phase` becomes Failed. - * If running under a controller, Pod is recreated elsewhere. - - * Pod is running, and its node is segmented out. - * Node controller waits for timeout. - * Node controller sets Pod `phase` to Failed. - * If running under a controller, Pod is recreated elsewhere. +### When should you use a readiness probe? +{{< feature-state for_k8s_version="v1.0" state="stable" >}} +If you'd like to start sending traffic to a Pod only when a probe succeeds, +specify a readiness probe. In this case, the readiness probe might be the same +as the liveness probe, but the existence of the readiness probe in the spec means +that the Pod will start without receiving any traffic and only start receiving +traffic after the probe starts succeeding. +If your container needs to work on loading large data, configuration files, or +migrations during startup, specify a readiness probe. +If you want your container to be able to take itself down for maintenance, you +can specify a readiness probe that checks an endpoint specific to readiness that +is different from the liveness probe. -## {{% heading "whatsnext" %}} +{{< note >}} +If you just want to be able to drain requests when the Pod is deleted, you do not +necessarily need a readiness probe; on deletion, the Pod automatically puts itself +into an unready state regardless of whether the readiness probe exists. +The Pod remains in the unready state while it waits for the containers in the Pod +to stop. +{{< /note >}} + +### When should you use a startup probe? + +{{< feature-state for_k8s_version="v1.16" state="alpha" >}} +Startup probes are useful for Pods that have containers that take a long time to +come into service. Rather than set a long liveness interval, you can configure +a separate configuration for probing the container as it starts up, allowing +a time longer than the liveness interval would allow. 
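+
+Assuming the startup probe feature is enabled in your cluster, a container might pair a
+startup probe with a liveness probe that checks the same endpoint. The following is a
+minimal sketch; the endpoint, port, image, and threshold values are illustrative
+assumptions, not recommendations:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: slow-starting-app             # hypothetical name
+spec:
+  containers:
+  - name: app
+    image: registry.example/app:1.0   # placeholder image
+    ports:
+    - containerPort: 8080
+    livenessProbe:
+      httpGet:
+        path: /healthz                # assumed health endpoint
+        port: 8080
+      periodSeconds: 10
+      failureThreshold: 3
+    startupProbe:
+      httpGet:
+        path: /healthz                # same endpoint as the liveness probe
+        port: 8080
+      periodSeconds: 10
+      failureThreshold: 30            # allows up to 30 × 10s = 300s for startup
+```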
+
+If your container usually starts in more than
+`initialDelaySeconds + failureThreshold × periodSeconds`, you should specify a
+startup probe that checks the same endpoint as the liveness probe. The default for
+`periodSeconds` is 30s. You should then set its `failureThreshold` high enough to
+allow the container to start, without changing the default values of the liveness
+probe. This helps to protect against deadlocks.
+
+## Termination of Pods {#pod-termination}
+
+Because Pods represent processes running on nodes in the cluster, it is important to
+allow those processes to gracefully terminate when they are no longer needed (rather
+than being abruptly stopped with a `KILL` signal and having no chance to clean up).
+
+The design aim is for you to be able to request deletion and know when processes
+terminate, but also be able to ensure that deletes eventually complete.
+When you request deletion of a Pod, the cluster records and tracks the intended grace period
+before the Pod is allowed to be forcefully killed. With that forceful shutdown tracking in
+place, the {{< glossary_tooltip text="kubelet" term_id="kubelet" >}} attempts graceful
+shutdown.
+
+Typically, the container runtime sends a TERM signal to the main process in each
+container. Once the grace period has expired, the KILL signal is sent to any remaining
+processes, and the Pod is then deleted from the
+{{< glossary_tooltip text="API Server" term_id="kube-apiserver" >}}. If the kubelet or the
+container runtime's management service is restarted while waiting for processes to terminate, the
+cluster retries from the start including the full original grace period.
+
+An example flow:
+
+1. You use the `kubectl` tool to manually delete a specific Pod, with the default grace period
+   (30 seconds).
+1. The Pod in the API server is updated with the time beyond which the Pod is considered "dead"
+   along with the grace period.
+   If you use `kubectl describe` to check on the Pod you're deleting, that Pod shows up as
+   "Terminating".
+1. On the node where the Pod is running: as soon as the kubelet sees that a Pod has been marked
+   as terminating (a graceful shutdown duration has been set), the kubelet begins the local Pod
+   shutdown process.
+   1. If one of the Pod's containers has defined a `preStop`
+      [hook](/docs/concepts/containers/container-lifecycle-hooks/#hook-details), the kubelet
+      runs that hook inside of the container. If the `preStop` hook is still running after the
+      grace period expires, the kubelet requests a small, one-off grace period extension of 2
+      seconds.
+      {{< note >}}
+      If the `preStop` hook needs longer to complete than the default grace period allows,
+      you must modify `terminationGracePeriodSeconds` to suit this (see the sketch after this
+      example flow).
+      {{< /note >}}
+   1. The kubelet triggers the container runtime to send a TERM signal to process 1 inside each
+      container.
+      {{< note >}}
+      The containers in the Pod receive the TERM signal at different times and in an arbitrary
+      order. If the order of shutdowns matters, consider using a `preStop` hook to synchronize.
+      {{< /note >}}
+1. At the same time as the kubelet is starting graceful shutdown, the control plane removes that
+   shutting-down Pod from Endpoints (and, if enabled, EndpointSlice) objects where these represent
+   a {{< glossary_tooltip term_id="service" text="Service" >}} with a configured
+   {{< glossary_tooltip text="selector" term_id="selector" >}}.
+   {{< glossary_tooltip text="ReplicaSets" term_id="replica-set" >}} and other workload resources
+   no longer treat the shutting-down Pod as a valid, in-service replica. Pods that shut down slowly
+   cannot continue to serve traffic as load balancers (like the service proxy) remove the Pod from
+   the list of endpoints as soon as the termination grace period _begins_.
+1. When the grace period expires, the kubelet triggers forcible shutdown. The container runtime sends
+   `SIGKILL` to any processes still running in any container in the Pod.
+   The kubelet also cleans up a hidden `pause` container if that container runtime uses one.
+1. The kubelet triggers forcible removal of the Pod object from the API server, by setting grace period
+   to 0 (immediate deletion).
+1. The API server deletes the Pod's API object, which is then no longer visible from any client.
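+
+As an illustrative sketch (the name, image, hook command, and grace period below are
+placeholders rather than recommendations), a Pod that extends its grace period and runs a
+`preStop` hook before its main process receives the TERM signal might be configured like this:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: graceful-shutdown-demo       # hypothetical name
+spec:
+  terminationGracePeriodSeconds: 60  # overrides the 30 second default described above
+  containers:
+  - name: app
+    image: nginx                     # placeholder image
+    lifecycle:
+      preStop:
+        exec:
+          # runs before the container runtime sends TERM to this container's main process
+          command: ["sh", "-c", "sleep 5"]
+```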
+
+### Forced Pod termination {#pod-termination-forced}
+
+{{< caution >}}
+Forced deletions can be potentially disruptive for some workloads and their Pods.
+{{< /caution >}}
+
+By default, all deletes are graceful within 30 seconds. The `kubectl delete` command supports
+the `--grace-period=` option which allows you to override the default and specify your
+own value.
+
+Setting the grace period to `0` forcibly and immediately deletes the Pod from the API
+server. If the Pod was still running on a node, that forcible deletion triggers the kubelet to
+begin immediate cleanup.
+
+{{< note >}}
+You must specify an additional flag `--force` along with `--grace-period=0` in order to perform force deletions.
+{{< /note >}}
+
+When a force deletion is performed, the API server does not wait for confirmation
+from the kubelet that the Pod has been terminated on the node it was running on. It
+removes the Pod in the API immediately so a new Pod can be created with the same
+name. On the node, Pods that are set to terminate immediately will still be given
+a small grace period before being force killed.
+
+If you need to force-delete Pods that are part of a StatefulSet, refer to the task
+documentation for
+[deleting Pods from a StatefulSet](/docs/tasks/run-application/force-delete-stateful-set-pod/).
+
+### Garbage collection of failed Pods {#pod-garbage-collection}
+
+For failed Pods, the API objects remain in the cluster's API until a human or
+{{< glossary_tooltip term_id="controller" text="controller" >}} process
+explicitly removes them.
+
+The control plane cleans up terminated Pods (with a phase of `Succeeded` or
+`Failed`), when the number of Pods exceeds the configured threshold
+(determined by `terminated-pod-gc-threshold` in the kube-controller-manager).
+This avoids a resource leak as Pods are created and terminated over time.
+
+
+## {{% heading "whatsnext" %}}
* Get hands-on experience
  [attaching handlers to Container lifecycle events](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/).
* Get hands-on experience
-  [Configure Liveness, Readiness and Startup Probes](/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/).
-
-* Learn more about [Container lifecycle hooks](/docs/concepts/containers/container-lifecycle-hooks/).
-
+  [configuring Liveness, Readiness and Startup Probes](/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/).
+* Learn more about [container lifecycle hooks](/docs/concepts/containers/container-lifecycle-hooks/).
+* For detailed information about Pod / Container status in the API, see [PodStatus](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podstatus-v1-core) +and +[ContainerStatus](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#containerstatus-v1-core). diff --git a/content/en/docs/concepts/workloads/pods/pod-overview.md b/content/en/docs/concepts/workloads/pods/pod-overview.md deleted file mode 100644 index e963b7ace67c6..0000000000000 --- a/content/en/docs/concepts/workloads/pods/pod-overview.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -reviewers: -- erictune -title: Pod Overview -content_type: concept -weight: 10 -card: - name: concepts - weight: 60 ---- - - -This page provides an overview of `Pod`, the smallest deployable object in the Kubernetes object model. - - - - -## Understanding Pods - -A *Pod* is the basic execution unit of a Kubernetes application--the smallest and simplest unit in the Kubernetes object model that you create or deploy. A Pod represents processes running on your {{< glossary_tooltip term_id="cluster" text="cluster" >}}. - -A Pod encapsulates an application's container (or, in some cases, multiple containers), storage resources, a unique network identity (IP address), as well as options that govern how the container(s) should run. A Pod represents a unit of deployment: *a single instance of an application in Kubernetes*, which might consist of either a single {{< glossary_tooltip text="container" term_id="container" >}} or a small number of containers that are tightly coupled and that share resources. - -[Docker](https://www.docker.com) is the most common container runtime used in a Kubernetes Pod, but Pods support other [container runtimes](/docs/setup/production-environment/container-runtimes/) as well. - - -Pods in a Kubernetes cluster can be used in two main ways: - -* **Pods that run a single container**. The "one-container-per-Pod" model is the most common Kubernetes use case; in this case, you can think of a Pod as a wrapper around a single container, and Kubernetes manages the Pods rather than the containers directly. -* **Pods that run multiple containers that need to work together**. A Pod might encapsulate an application composed of multiple co-located containers that are tightly coupled and need to share resources. These co-located containers might form a single cohesive unit of service--one container serving files from a shared volume to the public, while a separate "sidecar" container refreshes or updates those files. The Pod wraps these containers and storage resources together as a single manageable entity. - -Each Pod is meant to run a single instance of a given application. If you want to scale your application horizontally (to provide more overall resources by running more instances), you should use multiple Pods, one for each instance. In Kubernetes, this is typically referred to as _replication_. -Replicated Pods are usually created and managed as a group by a workload resource and its {{< glossary_tooltip text="_controller_" term_id="controller" >}}. -See [Pods and controllers](#pods-and-controllers) for more information on how Kubernetes uses controllers to implement workload scaling and healing. - -### How Pods manage multiple containers - -Pods are designed to support multiple cooperating processes (as containers) that form a cohesive unit of service. The containers in a Pod are automatically co-located and co-scheduled on the same physical or virtual machine in the cluster. 
The containers can share resources and dependencies, communicate with one another, and coordinate when and how they are terminated. - -Note that grouping multiple co-located and co-managed containers in a single Pod is a relatively advanced use case. You should use this pattern only in specific instances in which your containers are tightly coupled. For example, you might have a container that acts as a web server for files in a shared volume, and a separate "sidecar" container that updates those files from a remote source, as in the following diagram: - -{{< figure src="/images/docs/pod.svg" alt="example pod diagram" width="50%" >}} - -Some Pods have {{< glossary_tooltip text="init containers" term_id="init-container" >}} as well as {{< glossary_tooltip text="app containers" term_id="app-container" >}}. Init containers run and complete before the app containers are started. - -Pods provide two kinds of shared resources for their constituent containers: *networking* and *storage*. - -#### Networking - -Each Pod is assigned a unique IP address for each address family. Every container in a Pod shares the network namespace, including the IP address and network ports. Containers *inside a Pod* can communicate with one another using `localhost`. When containers in a Pod communicate with entities *outside the Pod*, they must coordinate how they use the shared network resources (such as ports). - -#### Storage - -A Pod can specify a set of shared storage {{< glossary_tooltip text="volumes" term_id="volume" >}}. All containers in the Pod can access the shared volumes, allowing those containers to share data. Volumes also allow persistent data in a Pod to survive in case one of the containers within needs to be restarted. See [Volumes](/docs/concepts/storage/volumes/) for more information on how Kubernetes implements shared storage in a Pod. - -## Working with Pods - -You'll rarely create individual Pods directly in Kubernetes--even singleton Pods. This is because Pods are designed as relatively ephemeral, disposable entities. When a Pod gets created (directly by you, or indirectly by a {{< glossary_tooltip text="_controller_" term_id="controller" >}}), it is scheduled to run on a {{< glossary_tooltip term_id="node" >}} in your cluster. The Pod remains on that node until the process is terminated, the pod object is deleted, the Pod is *evicted* for lack of resources, or the node fails. - -{{< note >}} -Restarting a container in a Pod should not be confused with restarting a Pod. A Pod is not a process, but an environment for running a container. A Pod persists until it is deleted. -{{< /note >}} - -Pods do not, by themselves, self-heal. If a Pod is scheduled to a Node that fails, or if the scheduling operation itself fails, the Pod is deleted; likewise, a Pod won't survive an eviction due to a lack of resources or Node maintenance. Kubernetes uses a higher-level abstraction, called a controller, that handles the work of managing the relatively disposable Pod instances. Thus, while it is possible to use Pod directly, it's far more common in Kubernetes to manage your pods using a controller. - -### Pods and controllers - -You can use workload resources to create and manage multiple Pods for you. A controller for the resource handles replication and rollout and automatic healing in case of Pod failure. For example, if a Node fails, a controller notices that Pods on that Node have stopped working and creates a replacement Pod. The scheduler places the replacement Pod onto a healthy Node. 
- -Here are some examples of workload resources that manage one or more Pods: - -* {{< glossary_tooltip text="Deployment" term_id="deployment" >}} -* {{< glossary_tooltip text="StatefulSet" term_id="statefulset" >}} -* {{< glossary_tooltip text="DaemonSet" term_id="daemonset" >}} - - -## Pod templates - -Controllers for {{< glossary_tooltip text="workload" term_id="workload" >}} resources create Pods -from a pod template and manage those Pods on your behalf. - -PodTemplates are specifications for creating Pods, and are included in workload resources such as -[Deployments](/docs/concepts/workloads/controllers/deployment/), -[Jobs](/docs/concepts/jobs/run-to-completion-finite-workloads/), and -[DaemonSets](/docs/concepts/workloads/controllers/daemonset/). - -Each controller for a workload resource uses the PodTemplate inside the workload object to make actual Pods. The PodTemplate is part of the desired state of whatever workload resource you used to run your app. - -The sample below is a manifest for a simple Job with a `template` that starts one container. The container in that Pod prints a message then pauses. - -```yaml -apiVersion: batch/v1 -kind: Job -metadata: - name: hello -spec: - template: - # This is the pod template - spec: - containers: - - name: hello - image: busybox - command: ['sh', '-c', 'echo "Hello, Kubernetes!" && sleep 3600'] - restartPolicy: OnFailure - # The pod template ends here -``` - -Modifying the pod template or switching to a new pod template has no effect on the Pods that already exist. Pods do not receive template updates directly; instead, a new Pod is created to match the revised pod template. - -For example, a Deployment controller ensures that the running Pods match the current pod template. If the template is updated, the controller has to remove the existing Pods and create new Pods based on the updated template. Each workload controller implements its own rules for handling changes to the Pod template. - -On Nodes, the {{< glossary_tooltip term_id="kubelet" text="kubelet" >}} does not directly observe or manage any of the details around pod templates and updates; those details are abstracted away. That abstraction and separation of concerns simplifies system semantics, and makes it feasible to extend the cluster's behavior without changing existing code. 
- - - -## {{% heading "whatsnext" %}} - -* Learn more about [Pods](/docs/concepts/workloads/pods/pod/) -* [The Distributed System Toolkit: Patterns for Composite Containers](https://kubernetes.io/blog/2015/06/the-distributed-system-toolkit-patterns) explains common layouts for Pods with more than one container -* Learn more about Pod behavior: - * [Pod Termination](/docs/concepts/workloads/pods/pod/#termination-of-pods) - * [Pod Lifecycle](/docs/concepts/workloads/pods/pod-lifecycle/) - diff --git a/content/en/docs/concepts/workloads/pods/pod-topology-spread-constraints.md b/content/en/docs/concepts/workloads/pods/pod-topology-spread-constraints.md index 2b16894e6ba40..c48b2aa5d0eb9 100644 --- a/content/en/docs/concepts/workloads/pods/pod-topology-spread-constraints.md +++ b/content/en/docs/concepts/workloads/pods/pod-topology-spread-constraints.md @@ -1,7 +1,7 @@ --- title: Pod Topology Spread Constraints content_type: concept -weight: 50 +weight: 40 --- diff --git a/content/en/docs/concepts/workloads/pods/pod.md b/content/en/docs/concepts/workloads/pods/pod.md deleted file mode 100644 index d87dc92cb2c65..0000000000000 --- a/content/en/docs/concepts/workloads/pods/pod.md +++ /dev/null @@ -1,209 +0,0 @@ ---- -reviewers: -title: Pods -content_type: concept -weight: 20 ---- - - - -_Pods_ are the smallest deployable units of computing that can be created and -managed in Kubernetes. - - - - - - -## What is a Pod? - -A _Pod_ (as in a pod of whales or pea pod) is a group of one or more -{{< glossary_tooltip text="containers" term_id="container" >}} (such as -Docker containers), with shared storage/network, and a specification -for how to run the containers. A Pod's contents are always co-located and -co-scheduled, and run in a shared context. A Pod models an -application-specific "logical host" - it contains one or more application -containers which are relatively tightly coupled — in a pre-container -world, being executed on the same physical or virtual machine would mean being -executed on the same logical host. - -While Kubernetes supports more container runtimes than just Docker, Docker is -the most commonly known runtime, and it helps to describe Pods in Docker terms. - -The shared context of a Pod is a set of Linux namespaces, cgroups, and -potentially other facets of isolation - the same things that isolate a Docker -container. Within a Pod's context, the individual applications may have -further sub-isolations applied. - -Containers within a Pod share an IP address and port space, and -can find each other via `localhost`. They can also communicate with each -other using standard inter-process communications like SystemV semaphores or -POSIX shared memory. Containers in different Pods have distinct IP addresses -and can not communicate by IPC without -[special configuration](/docs/concepts/policy/pod-security-policy/). -These containers usually communicate with each other via Pod IP addresses. - -Applications within a Pod also have access to shared {{< glossary_tooltip text="volumes" term_id="volume" >}}, which are defined -as part of a Pod and are made available to be mounted into each application's -filesystem. - -In terms of [Docker](https://www.docker.com/) constructs, a Pod is modelled as -a group of Docker containers with shared namespaces and shared filesystem -volumes. - -Like individual application containers, Pods are considered to be relatively -ephemeral (rather than durable) entities. 
As discussed in -[pod lifecycle](/docs/concepts/workloads/pods/pod-lifecycle/), Pods are created, assigned a unique ID (UID), and -scheduled to nodes where they remain until termination (according to restart -policy) or deletion. If a {{< glossary_tooltip term_id="node" >}} dies, the Pods scheduled to that node are -scheduled for deletion, after a timeout period. A given Pod (as defined by a UID) is not -"rescheduled" to a new node; instead, it can be replaced by an identical Pod, -with even the same name if desired, but with a new UID (see [replication -controller](/docs/concepts/workloads/controllers/replicationcontroller/) for more details). - -When something is said to have the same lifetime as a Pod, such as a volume, -that means that it exists as long as that Pod (with that UID) exists. If that -Pod is deleted for any reason, even if an identical replacement is created, the -related thing (e.g. volume) is also destroyed and created anew. - -{{< figure src="/images/docs/pod.svg" title="Pod diagram" width="50%" >}} - -*A multi-container Pod that contains a file puller and a -web server that uses a persistent volume for shared storage between the containers.* - -## Motivation for Pods - -### Management - -Pods are a model of the pattern of multiple cooperating processes which form a -cohesive unit of service. They simplify application deployment and management -by providing a higher-level abstraction than the set of their constituent -applications. Pods serve as unit of deployment, horizontal scaling, and -replication. Colocation (co-scheduling), shared fate (e.g. termination), -coordinated replication, resource sharing, and dependency management are -handled automatically for containers in a Pod. - -### Resource sharing and communication - -Pods enable data sharing and communication among their constituents. - -The applications in a Pod all use the same network namespace (same IP and port -space), and can thus "find" each other and communicate using `localhost`. -Because of this, applications in a Pod must coordinate their usage of ports. -Each Pod has an IP address in a flat shared networking space that has full -communication with other physical computers and Pods across the network. - -Containers within the Pod see the system hostname as being the same as the configured -`name` for the Pod. There's more about this in the [networking](/docs/concepts/cluster-administration/networking/) -section. - -In addition to defining the application containers that run in the Pod, the Pod -specifies a set of shared storage volumes. Volumes enable data to survive -container restarts and to be shared among the applications within the Pod. - -## Uses of pods - -Pods can be used to host vertically integrated application stacks (e.g. LAMP), -but their primary motivation is to support co-located, co-managed helper -programs, such as: - -* content management systems, file and data loaders, local cache managers, etc. -* log and checkpoint backup, compression, rotation, snapshotting, etc. -* data change watchers, log tailers, logging and monitoring adapters, event publishers, etc. -* proxies, bridges, and adapters -* controllers, managers, configurators, and updaters - -Individual Pods are not intended to run multiple instances of the same -application, in general. - -For a longer explanation, see [The Distributed System ToolKit: Patterns for -Composite -Containers](https://kubernetes.io/blog/2015/06/the-distributed-system-toolkit-patterns). 
- -## Alternatives considered - -_Why not just run multiple programs in a single (Docker) container?_ - -1. Transparency. Making the containers within the Pod visible to the - infrastructure enables the infrastructure to provide services to those - containers, such as process management and resource monitoring. This - facilitates a number of conveniences for users. -1. Decoupling software dependencies. The individual containers may be - versioned, rebuilt and redeployed independently. Kubernetes may even support - live updates of individual containers someday. -1. Ease of use. Users don't need to run their own process managers, worry about - signal and exit-code propagation, etc. -1. Efficiency. Because the infrastructure takes on more responsibility, - containers can be lighter weight. - -_Why not support affinity-based co-scheduling of containers?_ - -That approach would provide co-location, but would not provide most of the -benefits of Pods, such as resource sharing, IPC, guaranteed fate sharing, and -simplified management. - -## Durability of pods (or lack thereof) - -Pods aren't intended to be treated as durable entities. They won't survive scheduling failures, node failures, or other evictions, such as due to lack of resources, or in the case of node maintenance. - -In general, users shouldn't need to create Pods directly. They should almost -always use controllers even for singletons, for example, -[Deployments](/docs/concepts/workloads/controllers/deployment/). -Controllers provide self-healing with a cluster scope, as well as replication -and rollout management. -Controllers like [StatefulSet](/docs/concepts/workloads/controllers/statefulset.md) -can also provide support to stateful Pods. - -The use of collective APIs as the primary user-facing primitive is relatively common among cluster scheduling systems, including [Borg](https://research.google.com/pubs/pub43438.html), [Marathon](https://mesosphere.github.io/marathon/docs/rest-api.html), [Aurora](http://aurora.apache.org/documentation/latest/reference/configuration/#job-schema), and [Tupperware](https://www.slideshare.net/Docker/aravindnarayanan-facebook140613153626phpapp02-37588997). - -Pod is exposed as a primitive in order to facilitate: - -* scheduler and controller pluggability -* support for pod-level operations without the need to "proxy" them via controller APIs -* decoupling of Pod lifetime from controller lifetime, such as for bootstrapping -* decoupling of controllers and services — the endpoint controller just watches Pods -* clean composition of Kubelet-level functionality with cluster-level functionality — Kubelet is effectively the "pod controller" -* high-availability applications, which will expect Pods to be replaced in advance of their termination and certainly in advance of deletion, such as in the case of planned evictions or image prefetching. - -## Termination of Pods - -Because Pods represent running processes on nodes in the cluster, it is important to allow those processes to gracefully terminate when they are no longer needed (vs being violently killed with a KILL signal and having no chance to clean up). Users should be able to request deletion and know when processes terminate, but also be able to ensure that deletes eventually complete. When a user requests deletion of a Pod, the system records the intended grace period before the Pod is allowed to be forcefully killed, and a TERM signal is sent to the main process in each container. 
Once the grace period has expired, the KILL signal is sent to those processes, and the Pod is then deleted from the API server. If the Kubelet or the container manager is restarted while waiting for processes to terminate, the termination will be retried with the full grace period. - -An example flow: - -1. User sends command to delete Pod, with default grace period (30s) -1. The Pod in the API server is updated with the time beyond which the Pod is considered "dead" along with the grace period. -1. Pod shows up as "Terminating" when listed in client commands -1. (simultaneous with 3) When the Kubelet sees that a Pod has been marked as terminating because the time in 2 has been set, it begins the Pod shutdown process. - 1. If one of the Pod's containers has defined a [preStop hook](/docs/concepts/containers/container-lifecycle-hooks/#hook-details), it is invoked inside of the container. If the `preStop` hook is still running after the grace period expires, step 2 is then invoked with a small (2 second) one-time extended grace period. You must modify `terminationGracePeriodSeconds` if the `preStop` hook needs longer to complete. - 1. The container is sent the TERM signal. Note that not all containers in the Pod will receive the TERM signal at the same time and may each require a `preStop` hook if the order in which they shut down matters. -1. (simultaneous with 3) Pod is removed from endpoints list for service, and are no longer considered part of the set of running Pods for replication controllers. Pods that shutdown slowly cannot continue to serve traffic as load balancers (like the service proxy) remove them from their rotations. -1. When the grace period expires, any processes still running in the Pod are killed with SIGKILL. -1. The Kubelet will finish deleting the Pod on the API server by setting grace period 0 (immediate deletion). The Pod disappears from the API and is no longer visible from the client. - -By default, all deletes are graceful within 30 seconds. The `kubectl delete` command supports the `--grace-period=` option which allows a user to override the default and specify their own value. The value `0` [force deletes](/docs/concepts/workloads/pods/pod/#force-deletion-of-pods) the Pod. -You must specify an additional flag `--force` along with `--grace-period=0` in order to perform force deletions. - -### Force deletion of pods - -Force deletion of a Pod is defined as deletion of a Pod from the cluster state and etcd immediately. When a force deletion is performed, the API server does not wait for confirmation from the kubelet that the Pod has been terminated on the node it was running on. It removes the Pod in the API immediately so a new Pod can be created with the same name. On the node, Pods that are set to terminate immediately will still be given a small grace period before being force killed. - -Force deletions can be potentially dangerous for some Pods and should be performed with caution. In case of StatefulSet Pods, please refer to the task documentation for [deleting Pods from a StatefulSet](/docs/tasks/run-application/force-delete-stateful-set-pod/). - -## Privileged mode for pod containers - -Any container in a Pod can enable privileged mode, using the `privileged` flag on the [security context](/docs/tasks/configure-pod-container/security-context/) of the container spec. This is useful for containers that want to use Linux capabilities like manipulating the network stack and accessing devices. 
Processes within the container get almost the same privileges that are available to processes outside a container. With privileged mode, it should be easier to write network and volume plugins as separate Pods that don't need to be compiled into the kubelet.
-
-{{< note >}}
-Your container runtime must support the concept of a privileged container for this setting to be relevant.
-{{< /note >}}
-
-## API Object
-
-Pod is a top-level resource in the Kubernetes REST API.
-The [Pod API object](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#pod-v1-core) definition
-describes the object in detail.
-When creating the manifest for a Pod object, make sure the name specified is a valid
-[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names).
-
-
diff --git a/content/en/docs/concepts/workloads/pods/podpreset.md b/content/en/docs/concepts/workloads/pods/podpreset.md
index f77e34a3f9115..9cbb7bdff8c4e 100644
--- a/content/en/docs/concepts/workloads/pods/podpreset.md
+++ b/content/en/docs/concepts/workloads/pods/podpreset.md
@@ -1,7 +1,7 @@
---
reviewers:
- jessfraz
-title: Pod Preset
+title: Pod Presets
content_type: concept
weight: 50
---
@@ -32,20 +32,20 @@ specific service do not need to know all the details about that service.
In order to use Pod presets in your cluster you must ensure the following:
-1. You have enabled the API type `settings.k8s.io/v1alpha1/podpreset`. For
-   example, this can be done by including `settings.k8s.io/v1alpha1=true` in
-   the `--runtime-config` option for the API server. In minikube add this flag
-   `--extra-config=apiserver.runtime-config=settings.k8s.io/v1alpha1=true` while
-   starting the cluster.
-1. You have enabled the admission controller `PodPreset`. One way to doing this
-   is to include `PodPreset` in the `--enable-admission-plugins` option value specified
-   for the API server. In minikube, add this flag
-
-   ```shell
-   --extra-config=apiserver.enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,PodPreset
-   ```
-
-   while starting the cluster.
+1. You have enabled the API type `settings.k8s.io/v1alpha1/podpreset`. For
+   example, this can be done by including `settings.k8s.io/v1alpha1=true` in
+   the `--runtime-config` option for the API server. In minikube add this flag
+   `--extra-config=apiserver.runtime-config=settings.k8s.io/v1alpha1=true` while
+   starting the cluster.
+1. You have enabled the admission controller named `PodPreset`. One way to do this
+   is to include `PodPreset` in the `--enable-admission-plugins` option value specified
+   for the API server. For example, if you use Minikube, add this flag:
+
+   ```shell
+   --extra-config=apiserver.enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,PodPreset
+   ```
+
+   while starting your cluster.
## How it works
@@ -64,31 +64,28 @@ When a pod creation request occurs, the system does the following:
modified by a `PodPreset`. The annotation is of the form
`podpreset.admission.kubernetes.io/podpreset-: ""`.
-Each Pod can be matched by zero or more Pod Presets; and each `PodPreset` can be
-applied to zero or more pods. When a `PodPreset` is applied to one or more
-Pods, Kubernetes modifies the Pod Spec.
For changes to `Env`, `EnvFrom`, and -`VolumeMounts`, Kubernetes modifies the container spec for all containers in -the Pod; for changes to `Volume`, Kubernetes modifies the Pod Spec. +Each Pod can be matched by zero or more PodPresets; and each PodPreset can be +applied to zero or more Pods. When a PodPreset is applied to one or more +Pods, Kubernetes modifies the Pod Spec. For changes to `env`, `envFrom`, and +`volumeMounts`, Kubernetes modifies the container spec for all containers in +the Pod; for changes to `volumes`, Kubernetes modifies the Pod Spec. {{< note >}} A Pod Preset is capable of modifying the following fields in a Pod spec when appropriate: -- The `.spec.containers` field. -- The `initContainers` field (requires Kubernetes version 1.14.0 or later). +- The `.spec.containers` field +- The `.spec.initContainers` field {{< /note >}} -### Disable Pod Preset for a Specific Pod +### Disable Pod Preset for a specific pod There may be instances where you wish for a Pod to not be altered by any Pod -Preset mutations. In these cases, you can add an annotation in the Pod Spec +preset mutations. In these cases, you can add an annotation in the Pod's `.spec` of the form: `podpreset.admission.kubernetes.io/exclude: "true"`. ## {{% heading "whatsnext" %}} - See [Injecting data into a Pod using PodPreset](/docs/tasks/inject-data-application/podpreset/) For more information about the background, see the [design proposal for PodPreset](https://git.k8s.io/community/contributors/design-proposals/service-catalog/pod-preset.md). - - diff --git a/content/en/docs/contribute/_index.md b/content/en/docs/contribute/_index.md index 2f93af4a35e71..cd1b03efd46ce 100644 --- a/content/en/docs/contribute/_index.md +++ b/content/en/docs/contribute/_index.md @@ -3,6 +3,7 @@ content_type: concept title: Contribute to Kubernetes docs linktitle: Contribute main_menu: true +no_list: true weight: 80 card: name: contribute @@ -23,47 +24,66 @@ Kubernetes documentation contributors: Kubernetes documentation welcomes improvements from all contributors, new and experienced! - - ## Getting started -Anyone can open an issue about documentation, or contribute a change with a pull request (PR) to the [`kubernetes/website` GitHub repository](https://github.com/kubernetes/website). You need to be comfortable with [git](https://git-scm.com/) and [GitHub](https://lab.github.com/) to operate effectively in the Kubernetes community. +Anyone can open an issue about documentation, or contribute a change with a +pull request (PR) to the +[`kubernetes/website` GitHub repository](https://github.com/kubernetes/website). +You need to be comfortable with +[git](https://git-scm.com/) and +[GitHub](https://lab.github.com/) +to work effectively in the Kubernetes community. To get involved with documentation: 1. Sign the CNCF [Contributor License Agreement](https://github.com/kubernetes/community/blob/master/CLA.md). -2. Familiarize yourself with the [documentation repository](https://github.com/kubernetes/website) and the website's [static site generator](https://gohugo.io). -3. Make sure you understand the basic processes for [opening a pull request](/docs/contribute/new-content/new-content/) and [reviewing changes](/docs/contribute/review/reviewing-prs/). +1. Familiarize yourself with the [documentation repository](https://github.com/kubernetes/website) + and the website's [static site generator](https://gohugo.io). +1. 
Make sure you understand the basic processes for
+   [opening a pull request](/docs/contribute/new-content/open-a-pr/) and
+   [reviewing changes](/docs/contribute/review/reviewing-prs/).
Some tasks require more trust and more access in the Kubernetes organization.
-See [Participating in SIG Docs](/docs/contribute/participating/) for more details about
+See [Participating in SIG Docs](/docs/contribute/participate/) for more details about
roles and permissions.
## Your first contribution
-- Read the [Contribution overview](/docs/contribute/new-content/overview/) to learn about the different ways you can contribute.
-- See [Contribute to kubernetes/website](https://github.com/kubernetes/website/contribute) to find issues that make good entry points.
-- [Open a pull request using GitHub](/docs/contribute/new-content/new-content/#changes-using-github) to existing documentation and learn more about filing issues in GitHub.
-- [Review pull requests](/docs/contribute/review/reviewing-prs/) from other Kubernetes community members for accuracy and language.
-- Read the Kubernetes [content](/docs/contribute/style/content-guide/) and [style guides](/docs/contribute/style/style-guide/) so you can leave informed comments.
-- Learn about [page content types](/docs/contribute/style/page-content-types/) and [Hugo shortcodes](/docs/contribute/style/hugo-shortcodes/).
+- Read the [Contribution overview](/docs/contribute/new-content/overview/) to
+  learn about the different ways you can contribute.
+- Check [kubernetes/website issues list](https://github.com/kubernetes/website/issues/)
+  for issues that make good entry points.
+- [Open a pull request using GitHub](/docs/contribute/new-content/open-a-pr/#changes-using-github)
+  to existing documentation and learn more about filing issues in GitHub.
+- [Review pull requests](/docs/contribute/review/reviewing-prs/) from other
+  Kubernetes community members for accuracy and language.
+- Read the Kubernetes [content](/docs/contribute/style/content-guide/) and
+  [style guides](/docs/contribute/style/style-guide/) so you can leave informed comments.
+- Learn about [page content types](/docs/contribute/style/page-content-types/)
+  and [Hugo shortcodes](/docs/contribute/style/hugo-shortcodes/).
## Next steps
-- Learn to [work from a local clone](/docs/contribute/new-content/new-content/#fork-the-repo) of the repository.
+- Learn to [work from a local clone](/docs/contribute/new-content/open-a-pr/#fork-the-repo)
+  of the repository.
- Document [features in a release](/docs/contribute/new-content/new-features/).
-- Participate in [SIG Docs](/docs/contribute/participating/), and become a [member or reviewer](/docs/contribute/participating/#roles-and-responsibilities).
+- Participate in [SIG Docs](/docs/contribute/participate/), and become a
+  [member or reviewer](/docs/contribute/participate/roles-and-responsibilities/).
+
- Start or help with a [localization](/docs/contribute/localization/).
## Get involved with SIG Docs
-[SIG Docs](/docs/contribute/participating/) is the group of contributors who publish and maintain Kubernetes documentation and the website. Getting involved with SIG Docs is a great way for Kubernetes contributors (feature development or otherwise) to have a large impact on the Kubernetes project.
+[SIG Docs](/docs/contribute/participate/) is the group of contributors who
+publish and maintain Kubernetes documentation and the website.
Getting +involved with SIG Docs is a great way for Kubernetes contributors (feature +development or otherwise) to have a large impact on the Kubernetes project. SIG Docs communicates with different methods: -- [Join `#sig-docs` on the Kubernetes Slack instance](http://slack.k8s.io/). Make sure to +- [Join `#sig-docs` on the Kubernetes Slack instance](https://slack.k8s.io/). Make sure to introduce yourself! - [Join the `kubernetes-sig-docs` mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-docs), where broader discussions take place and official decisions are recorded. @@ -74,5 +94,3 @@ SIG Docs communicates with different methods: - Visit the [Kubernetes community site](/community/). Participate on Twitter or Stack Overflow, learn about local Kubernetes meetups and events, and more. - Read the [contributor cheatsheet](https://github.com/kubernetes/community/tree/master/contributors/guide/contributor-cheatsheet) to get involved with Kubernetes feature development. - Submit a [blog post or case study](/docs/contribute/new-content/blogs-case-studies/). - - diff --git a/content/en/docs/contribute/advanced.md b/content/en/docs/contribute/advanced.md index 594816a2b56e8..52ae7b0efd6fa 100644 --- a/content/en/docs/contribute/advanced.md +++ b/content/en/docs/contribute/advanced.md @@ -13,74 +13,12 @@ This page assumes that you understand how to to learn about more ways to contribute. You need to use the Git command line client and other tools for some of these tasks. - - -## Be the PR Wrangler for a week - -SIG Docs [approvers](/docs/contribute/participating/#approvers) take week-long turns [wrangling PRs](https://github.com/kubernetes/website/wiki/PR-Wranglers) for the repository. - -The PR wrangler’s duties include: - -- Review [open pull requests](https://github.com/kubernetes/website/pulls) daily for quality and adherence to the [Style](/docs/contribute/style/style-guide/) and [Content](/docs/contribute/style/content-guide/) guides. - - Review the smallest PRs (`size/XS`) first, then iterate towards the largest (`size/XXL`). - - Review as many PRs as you can. -- Ensure that the CLA is signed by each contributor. - - Help new contributors sign the [CLA](https://github.com/kubernetes/community/blob/master/CLA.md). - - Use [this](https://github.com/zparnold/k8s-docs-pr-botherer) script to automatically remind contributors that haven’t signed the CLA to sign the CLA. -- Provide feedback on proposed changes and help facilitate technical reviews from members of other SIGs. - - Provide inline suggestions on the PR for the proposed content changes. - - If you need to verify content, comment on the PR and request more details. - - Assign relevant `sig/` label(s). - - If needed, assign reviewers from the `reviewers:` block in the file's front matter. - - Assign `Docs Review` and `Tech Review` labels to indicate the PR's review status. - - Assign `Needs Doc Review` or `Needs Tech Review` for PRs that haven't yet been reviewed. - - Assign `Doc Review: Open Issues` or `Tech Review: Open Issues` for PRs that have been reviewed and require further input or action before merging. - - Assign `/lgtm` and `/approve` labels to PRs that can be merged. -- Merge PRs when they are ready, or close PRs that shouldn’t be accepted. - - Consider accepting accurate technical content even if the content meets only some of the docs' [style guidelines](/docs/contribute/style/style-guide/). Open a new issue with the label `good first issue` to address style concerns. -- Triage and tag incoming issues daily. 
See [Triage and categorize issues](/docs/contribute/review/for-approvers/#triage-and-categorize-issues) for guidelines on how SIG Docs uses metadata. - -### Helpful GitHub queries for wranglers - -The following queries are helpful when wrangling. After working through these queries, the remaining list of PRs to be -reviewed is usually small. These queries specifically exclude localization PRs, and only include the `master` branch (except for the last one). - -- [No CLA, not eligible to merge](https://github.com/kubernetes/website/pulls?q=is%3Aopen+is%3Apr+label%3A%22cncf-cla%3A+no%22+-label%3Ado-not-merge+label%3Alanguage%2Fen): - Remind the contributor to sign the CLA. If they have already been reminded by both the bot and a human, close - the PR and remind them that they can open it after signing the CLA. - **Do not review PRs whose authors have not signed the CLA!** -- [Needs LGTM](https://github.com/kubernetes/website/pulls?utf8=%E2%9C%93&q=is%3Aopen+is%3Apr+-label%3Ado-not-merge+label%3Alanguage%2Fen+-label%3Algtm+): - If it needs technical review, loop in one of the reviewers suggested by the bot. If it needs docs review - or copy-editing, either suggest changes or add a copyedit commit to the PR to move it along. -- [Has LGTM, needs docs approval](https://github.com/kubernetes/website/pulls?q=is%3Aopen+is%3Apr+-label%3Ado-not-merge+label%3Alanguage%2Fen+label%3Algtm): - Determine whether any additional changes or updates need to be made for the PR to be merged. If you think the PR is ready to be merged, comment `/approve`. -- [Quick Wins](https://github.com/kubernetes/website/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aopen+base%3Amaster+-label%3A%22do-not-merge%2Fwork-in-progress%22+-label%3A%22do-not-merge%2Fhold%22+label%3A%22cncf-cla%3A+yes%22+label%3A%22size%2FXS%22+label%3A%22language%2Fen%22+): If it’s a small PR against master with no clear blockers. (change "XS" in the size label as you work through the PRs [XS, S, M, L, XL, XXL]). -- [Not against master](https://github.com/kubernetes/website/pulls?utf8=%E2%9C%93&q=is%3Aopen+is%3Apr+-label%3Ado-not-merge+label%3Alanguage%2Fen+-base%3Amaster): If it's against a `dev-` branch, it's for an upcoming release. Make sure the [release meister](https://github.com/kubernetes/sig-release/tree/master/release-team) knows about it by adding a comment with `/assign @`. If it's against an old branch, help the PR author figure out whether it's targeted against the best branch. - -### When to close Pull Requests - -Reviews and approvals are one tool to keep our PR queue short and current. Another tool is closure. - -- Close any PR where the CLA hasn’t been signed for two weeks. -PR authors can reopen the PR after signing the CLA, so this is a low-risk way to make sure nothing gets merged without a signed CLA. - -- Close any PR where the author has not responded to comments or feedback in 2 or more weeks. - -Don't be afraid to close pull requests. Contributors can easily reopen and resume works in progress. Oftentimes a closure notice is what spurs an author to resume and finish their contribution. - -To close a pull request, leave a `/close` comment on the PR. - -{{< note >}} - -An automated service, [`fejta-bot`](https://github.com/fejta-bot) automatically marks issues as stale after 90 days of inactivity, then closes them after an additional 30 days of inactivity when they become rotten. PR wranglers should close issues after 14-30 days of inactivity. 
- -{{< /note >}} - ## Propose improvements -SIG Docs [members](/docs/contribute/participating/#members) can propose improvements. +SIG Docs [members](/docs/contribute/participate/roles-and-responsibilities/#members) +can propose improvements. After you've been contributing to the Kubernetes documentation for a while, you may have ideas for improving the [Style Guide](/docs/contribute/style/style-guide/) @@ -103,13 +41,13 @@ documentation testing might involve working with sig-testing. ## Coordinate docs for a Kubernetes release -SIG Docs [approvers](/docs/contribute/participating/#approvers) can coordinate -docs for a Kubernetes release. +SIG Docs [approvers](/docs/contribute/participate/roles-and-responsibilities/#approvers) +can coordinate docs for a Kubernetes release. Each Kubernetes release is coordinated by a team of people participating in the sig-release Special Interest Group (SIG). Others on the release team for a given -release include an overall release lead, as well as representatives from sig-pm, -sig-testing, and others. To find out more about Kubernetes release processes, +release include an overall release lead, as well as representatives from +sig-testing and others. To find out more about Kubernetes release processes, refer to [https://github.com/kubernetes/sig-release](https://github.com/kubernetes/sig-release). @@ -134,8 +72,8 @@ rotated among SIG Docs approvers. ## Serve as a New Contributor Ambassador -SIG Docs [approvers](/docs/contribute/participating/#approvers) can serve as -New Contributor Ambassadors. +SIG Docs [approvers](/docs/contribute/participate/roles-and-responsibilities/#approvers) +can serve as New Contributor Ambassadors. New Contributor Ambassadors welcome new contributors to SIG-Docs, suggest PRs to new contributors, and mentor new contributors through their first @@ -153,14 +91,14 @@ Current New Contributor Ambassadors are announced at each SIG-Docs meeting, and ## Sponsor a new contributor -SIG Docs [reviewers](/docs/contribute/participating/#reviewers) can sponsor -new contributors. +SIG Docs [reviewers](/docs/contribute/participate/roles-and-responsibilities/#reviewers) +can sponsor new contributors. After a new contributor has successfully submitted 5 substantive pull requests to one or more Kubernetes repositories, they are eligible to apply for -[membership](/docs/contribute/participating#members) in the Kubernetes -organization. The contributor's membership needs to be backed by two sponsors -who are already reviewers. +[membership](/docs/contribute/participate/roles-and-responsibilities/#members) +in the Kubernetes organization. The contributor's membership needs to be +backed by two sponsors who are already reviewers. New docs contributors can request sponsors by asking in the #sig-docs channel on the [Kubernetes Slack instance](https://kubernetes.slack.com) or on the @@ -172,7 +110,8 @@ membership in the Kubernetes organization. ## Serve as a SIG Co-chair -SIG Docs [approvers](/docs/contribute/participating/#approvers) can serve a term as a co-chair of SIG Docs. +SIG Docs [approvers](/docs/contribute/participate/roles-and-responsibilities/#approvers) +can serve a term as a co-chair of SIG Docs. 
### Prerequisites @@ -181,7 +120,12 @@ Approvers must meet the following requirements to be a co-chair: - Have been a SIG Docs approver for at least 6 months - Have [led a Kubernetes docs release](/docs/contribute/advanced/#coordinate-docs-for-a-kubernetes-release) or shadowed two releases - Understand SIG Docs workflows and tooling: git, Hugo, localization, blog subproject -- Understand how other Kubernetes SIGs and repositories affect the SIG Docs workflow, including: [teams in k/org](https://github.com/kubernetes/org/blob/master/config/kubernetes/sig-docs/teams.yaml), [process in k/community](https://github.com/kubernetes/community/tree/master/sig-docs), plugins in [k/test-infra](https://github.com/kubernetes/test-infra/), and the role of [SIG Architecture](https://github.com/kubernetes/community/tree/master/sig-architecture). +- Understand how other Kubernetes SIGs and repositories affect the SIG Docs + workflow, including: + [teams in k/org](https://github.com/kubernetes/org/blob/master/config/kubernetes/sig-docs/teams.yaml), + [process in k/community](https://github.com/kubernetes/community/tree/master/sig-docs), + plugins in [k/test-infra](https://github.com/kubernetes/test-infra/), and the role of + [SIG Architecture](https://github.com/kubernetes/community/tree/master/sig-architecture). - Commit at least 5 hours per week (and often more) to the role for a minimum of 6 months ### Responsibilities @@ -245,5 +189,3 @@ When you’re ready to start the recording, click Record to Cloud. When you’re ready to stop recording, click Stop. The video uploads automatically to YouTube. - - diff --git a/content/en/docs/contribute/generate-ref-docs/kubectl.md b/content/en/docs/contribute/generate-ref-docs/kubectl.md index f057ce6800aea..ea6065472e4c3 100644 --- a/content/en/docs/contribute/generate-ref-docs/kubectl.md +++ b/content/en/docs/contribute/generate-ref-docs/kubectl.md @@ -15,21 +15,16 @@ like [kubectl apply](/docs/reference/generated/kubectl/kubectl-commands#apply) and [kubectl taint](/docs/reference/generated/kubectl/kubectl-commands#taint). This topic does not show how to generate the -[kubectl](/docs/reference/generated/kubectl/kubectl/) +[kubectl](/docs/reference/generated/kubectl/kubectl-commands/) options reference page. For instructions on how to generate the kubectl options reference page, see -[Generating Reference Pages for Kubernetes Components and Tools](/docs/home/contribute/generated-reference/kubernetes-components/). +[Generating Reference Pages for Kubernetes Components and Tools](/docs/contribute/generate-ref-docs/kubernetes-components/). {{< /note >}} - - ## {{% heading "prerequisites" %}} - {{< include "prerequisites-ref-docs.md" >}} - - ## Setting up the local repositories @@ -237,6 +232,9 @@ Build the Kubernetes documentation in your local ``. cd make docker-serve ``` +{{< note >}} +The use of `make docker-serve` is deprecated. Please use `make container-serve` instead. +{{< /note >}} View the [local preview](https://localhost:1313/docs/reference/generated/kubectl/kubectl-commands/). diff --git a/content/en/docs/contribute/generate-ref-docs/kubernetes-api.md b/content/en/docs/contribute/generate-ref-docs/kubernetes-api.md index 10482eda9759d..f2ec01d8e84c2 100644 --- a/content/en/docs/contribute/generate-ref-docs/kubernetes-api.md +++ b/content/en/docs/contribute/generate-ref-docs/kubernetes-api.md @@ -185,21 +185,23 @@ cd make docker-serve ``` +{{< note >}} +The use of `make docker-serve` is deprecated. Please use `make container-serve` instead. 
+{{< /note >}} + ## Commit the changes In `` run `git add` and `git commit` to commit the change. Submit your changes as a -[pull request](/docs/contribute/start/) to the +[pull request](/docs/contribute/new-content/open-a-pr/) to the [kubernetes/website](https://github.com/kubernetes/website) repository. Monitor your pull request, and respond to reviewer comments as needed. Continue to monitor your pull request until it has been merged. - ## {{% heading "whatsnext" %}} - * [Generating Reference Documentation Quickstart](/docs/contribute/generate-ref-docs/quickstart/) * [Generating Reference Docs for Kubernetes Components and Tools](/docs/contribute/generate-ref-docs/kubernetes-components/) * [Generating Reference Documentation for kubectl Commands](/docs/contribute/generate-ref-docs/kubectl/) diff --git a/content/en/docs/contribute/generate-ref-docs/prerequisites-ref-docs.md b/content/en/docs/contribute/generate-ref-docs/prerequisites-ref-docs.md index a777fb77e5250..c7199208130be 100644 --- a/content/en/docs/contribute/generate-ref-docs/prerequisites-ref-docs.md +++ b/content/en/docs/contribute/generate-ref-docs/prerequisites-ref-docs.md @@ -18,4 +18,5 @@ - You need to know how to create a pull request to a GitHub repository. This involves creating your own fork of the repository. For more - information, see [Work from a local clone](/docs/contribute/intermediate/#work_from_a_local_clone). + information, see [Work from a local clone](/docs/contribute/new-content/open-a-pr/#fork-the-repo). + diff --git a/content/en/docs/contribute/generate-ref-docs/quickstart.md b/content/en/docs/contribute/generate-ref-docs/quickstart.md index df5cdbb95f55d..0790f7925a7a9 100644 --- a/content/en/docs/contribute/generate-ref-docs/quickstart.md +++ b/content/en/docs/contribute/generate-ref-docs/quickstart.md @@ -10,15 +10,10 @@ This page shows how to use the `update-imported-docs` script to generate the Kubernetes reference documentation. The script automates the build setup and generates the reference documentation for a release. - - ## {{% heading "prerequisites" %}} - {{< include "prerequisites-ref-docs.md" >}} - - ## Getting the docs repository @@ -87,7 +82,7 @@ The `update-imported-docs` script performs the following steps: the sections in the `kubectl` command reference. When the generated files are in your local clone of the `` -repository, you can submit them in a [pull request](/docs/contribute/start/) +repository, you can submit them in a [pull request](/docs/contribute/new-content/open-a-pr/) to ``. ## Configuration file format diff --git a/content/en/docs/contribute/localization.md b/content/en/docs/contribute/localization.md index 0c698305b95a3..1ae4796522a65 100644 --- a/content/en/docs/contribute/localization.md +++ b/content/en/docs/contribute/localization.md @@ -183,7 +183,7 @@ Description | URLs -----|----- Home | [All heading and subheading URLs](/docs/home/) Setup | [All heading and subheading URLs](/docs/setup/) -Tutorials | [Kubernetes Basics](/docs/tutorials/kubernetes-basics/), [Hello Minikube](/docs/tutorials/stateless-application/hello-minikube/) +Tutorials | [Kubernetes Basics](/docs/tutorials/kubernetes-basics/), [Hello Minikube](/docs/tutorials/hello-minikube/) Site strings | [All site strings in a new localized TOML file](https://github.com/kubernetes/website/tree/master/i18n) Translated documents must reside in their own `content/**/` subdirectory, but otherwise follow the same URL path as the English source. 
For example, to prepare the [Kubernetes Basics](/docs/tutorials/kubernetes-basics/) tutorial for translation into German, create a subfolder under the `content/de/` folder and copy the English source: diff --git a/content/en/docs/contribute/new-content/blogs-case-studies.md b/content/en/docs/contribute/new-content/blogs-case-studies.md index 76acbd2d410e4..2ec9f35ac0789 100644 --- a/content/en/docs/contribute/new-content/blogs-case-studies.md +++ b/content/en/docs/contribute/new-content/blogs-case-studies.md @@ -12,35 +12,77 @@ weight: 30 Anyone can write a blog post and submit it for review. Case studies require extensive review before they're approved. - - -## Write a blog post - -Blog posts should not be -vendor pitches. They must contain content that applies broadly to -the Kubernetes community. The SIG Docs [blog subproject](https://github.com/kubernetes/community/tree/master/sig-docs/blog-subproject) manages the review process for blog posts. For more information, see [Submit a post](https://github.com/kubernetes/community/tree/master/sig-docs/blog-subproject#submit-a-post). - -To submit a blog post, you can either: +## The Kubernetes Blog -- Use the -[Kubernetes blog submission form](https://docs.google.com/forms/d/e/1FAIpQLSdMpMoSIrhte5omZbTE7nB84qcGBy8XnnXhDFoW0h7p2zwXrw/viewform) -- [Open a pull request](/docs/contribute/new-content/new-content/#fork-the-repo) with a new blog post. Create new blog posts in the [`content/en/blog/_posts`](https://github.com/kubernetes/website/tree/master/content/en/blog/_posts) directory. +The Kubernetes blog is used by the project to communicate new features, community reports, and any news that might be relevant to the Kubernetes community. +This includes end users and developers. +Most of the blog's content is about things happening in the core project, but we encourage you to submit about things happening elsewhere in the ecosystem too! -If you open a pull request, ensure that your blog post follows the correct naming conventions and frontmatter information: +Anyone can write a blog post and submit it for review. -- The markdown file name must follow the format `YYY-MM-DD-Your-Title-Here.md`. For example, `2020-02-07-Deploying-External-OpenStack-Cloud-Provider-With-Kubeadm.md`. -- The front matter must include the following: +### Guidelines and expectations + +- Blog posts should not be vendor pitches. + - Articles must contain content that applies broadly to the Kubernetes community. For example, a submission should focus on upstream Kubernetes as opposed to vendor-specific configurations. Check the [Documentation style guide](https://kubernetes.io/docs/contribute/style/content-guide/#what-s-allowed) for what is typically allowed on Kubernetes properties. + - Links should primarily be to the official Kubernetes documentation. When using external references, links should be diverse - For example a submission shouldn't contain only links back to a single company's blog. + - Sometimes this is a delicate balance. The [blog team](https://kubernetes.slack.com/messages/sig-docs-blog/) is there to give guidance on whether a post is appropriate for the Kubernetes blog, so don't hesitate to reach out. +- Blog posts are not published on specific dates. + - Articles are reviewed by community volunteers. We'll try our best to accommodate specific timing, but we make no guarantees. + - Many core parts of the Kubernetes projects submit blog posts during release windows, delaying publication times. 
Consider submitting during a quieter period of the release cycle. + - If you are looking for greater coordination on post release dates, coordinating with [CNCF marketing](https://www.cncf.io/about/contact/) is a more appropriate choice than submitting a blog post. + - Sometimes reviews can get backed up. If you feel your review isn't getting the attention it needs, you can reach out to the blog team via [this slack channel](https://kubernetes.slack.com/messages/sig-docs-blog/) to ask in real time. +- Blog posts should be relevant to Kubernetes users. + - Topics related to participation in or results of Kubernetes SIGs activities are always on topic (see the work in the [Upstream Marketing Team](https://github.com/kubernetes/community/blob/master/communication/marketing-team/blog-guidelines.md#upstream-marketing-blog-guidelines) for support on these posts). + - The components of Kubernetes are purposely modular, so tools that use existing integration points like CNI and CSI are on topic. + - Posts about other CNCF projects may or may not be on topic. We recommend asking the blog team before submitting a draft. + - Many CNCF projects have their own blog. These are often a better choice for posts. There are times of major feature or milestone for a CNCF project that users would be interested in reading on the Kubernetes blog. +- Blog posts should be original content + - The official blog is not for repurposing existing content from a third party as new content. + - The [license](https://github.com/kubernetes/website/blob/master/LICENSE) for the blog does allow commercial use of the content for commercial purposes, just not the other way around. +- Blog posts should aim to be future proof + - Given the development velocity of the project, we want evergreen content that won't require updates to stay accurate for the reader. + - It can be a better choice to add a tutorial or update official documentation than to write a high level overview as a blog post. + - Consider concentrating the long technical content as a call to action of the blog post, and focus on the problem space or why readers should care. + +### Technical Considerations for submitting a blog post + +Submissions need to be in Markdown format to be used by the [Hugo](https://gohugo.io/) generator for the blog. There are [many resources available](https://gohugo.io/documentation/) on how to use this technology stack. + +We recognize that this requirement makes the process more difficult for less-familiar folks to submit, and we're constantly looking at solutions to lower this bar. If you have ideas on how to lower the barrier, please volunteer to help out. + +The SIG Docs [blog subproject](https://github.com/kubernetes/community/tree/master/sig-docs/blog-subproject) manages the review process for blog posts. For more information, see [Submit a post](https://github.com/kubernetes/community/tree/master/sig-docs/blog-subproject#submit-a-post). + +To submit a blog post follow these directions: + +- [Open a pull request](/docs/contribute/new-content/new-content/#fork-the-repo) with a new blog post. New blog posts go under the [`content/en/blog/_posts`](https://github.com/kubernetes/website/tree/master/content/en/blog/_posts) directory. + +- Ensure that your blog post follows the correct naming conventions and the following frontmatter (metadata) information: + + - The Markdown file name must follow the format `YYYY-MM-DD-Your-Title-Here.md`. For example, `2020-02-07-Deploying-External-OpenStack-Cloud-Provider-With-Kubeadm.md`. 
+ - Do **not** include dots in the filename. A name like `2020-01-01-whats-new-in-1.19.md` causes failures during a build. + - The front matter must include the following: + + ```yaml + --- + layout: blog + title: "Your Title Here" + date: YYYY-MM-DD + slug: text-for-URL-link-here-no-spaces + --- + ``` + - The first or initial commit message should be a short summary of the work being done and should stand alone as a description of the blog post. Please note that subsequent edits to your blog will be squashed into this main commit, so it should be as useful as possible. + - Examples of a good commit message: + - _Add blog post on the foo kubernetes feature_ + - _blog: foobar announcement_ + - Examples of bad commit message: + - _Add blog post_ + - _._ + - _initial commit_ + - _draft post_ + - The blog team will then review your PR and give you comments on things you might need to fix. After that the bot will merge your PR and your blog post will be published. -```yaml ---- -layout: blog -title: "Your Title Here" -date: YYYY-MM-DD -slug: text-for-URL-link-here-no-spaces ---- -``` ## Submit a case study @@ -50,11 +92,4 @@ real-world problems. The Kubernetes marketing team and members of the {{< glossa Have a look at the source for the [existing case studies](https://github.com/kubernetes/website/tree/master/content/en/case-studies). -Refer to the [case study guidelines](https://github.com/cncf/foundation/blob/master/case-study-guidelines.md) and submit your request as outlined in the guidelines. - - - -## {{% heading "whatsnext" %}} - - - +Refer to the [case study guidelines](https://github.com/cncf/foundation/blob/master/case-study-guidelines.md) and submit your request as outlined in the guidelines. diff --git a/content/en/docs/contribute/new-content/open-a-pr.md b/content/en/docs/contribute/new-content/open-a-pr.md index 5b2642dd39c44..d511360e2205f 100644 --- a/content/en/docs/contribute/new-content/open-a-pr.md +++ b/content/en/docs/contribute/new-content/open-a-pr.md @@ -1,6 +1,5 @@ --- title: Opening a pull request -slug: new-content content_type: concept weight: 10 card: @@ -97,10 +96,12 @@ Make sure you have [git](https://git-scm.com/book/en/v2/Getting-Started-Installi ### Create a local clone and set the upstream -3. In a terminal window, clone your fork: +3. In a terminal window, clone your fork and update the [Docsy Hugo theme](https://github.com/google/docsy#readme): ```bash git clone git@github.com//website + cd website + git submodule update --init --recursive --depth 1 ``` 4. Navigate to the new `website` directory. Set the `kubernetes/website` repository as the `upstream` remote: @@ -261,18 +262,26 @@ The commands below use Docker as default container engine. Set the `CONTAINER_EN Alternately, install and use the `hugo` command on your computer: -5. Install the [Hugo](https://gohugo.io/getting-started/installing/) version specified in [`website/netlify.toml`](https://raw.githubusercontent.com/kubernetes/website/master/netlify.toml). +1. Install the [Hugo](https://gohugo.io/getting-started/installing/) version specified in [`website/netlify.toml`](https://raw.githubusercontent.com/kubernetes/website/master/netlify.toml). -6. In a terminal, go to your Kubernetes website repository and start the Hugo server: +2. If you have not updated your website repository, the `website/themes/docsy` directory is empty. + The site cannot build without a local copy of the theme. To update the website theme, run: + + ```bash + git submodule update --init --recursive --depth 1 + ``` + +3. 
In a terminal, go to your Kubernetes website repository and start the Hugo server: ```bash cd /website - hugo server + hugo server --buildFuture ``` -7. In your browser’s address bar, enter `https://localhost:1313`. +4. In a web browser, navigate to `https://localhost:1313`. Hugo watches the + changes and rebuilds the site as needed. -8. To stop the local Hugo instance, go back to the terminal and type `Ctrl+C`, +5. To stop the local Hugo instance, go back to the terminal and type `Ctrl+C`, or close the terminal window. {{% /tab %}} @@ -496,6 +505,6 @@ the templates with as much detail as possible when you file issues or PRs. ## {{% heading "whatsnext" %}} -- Read [Reviewing](/docs/contribute/reviewing/revewing-prs) to learn more about the review process. +- Read [Reviewing](/docs/contribute/review/reviewing-prs) to learn more about the review process. diff --git a/content/en/docs/contribute/new-content/overview.md b/content/en/docs/contribute/new-content/overview.md index e9ef332430d36..b1f7e4f20a32d 100644 --- a/content/en/docs/contribute/new-content/overview.md +++ b/content/en/docs/contribute/new-content/overview.md @@ -20,8 +20,12 @@ This section contains information you should know before contributing new conten - Write Kubernetes documentation in Markdown and build the Kubernetes site using [Hugo](https://gohugo.io/). - The source is in [GitHub](https://github.com/kubernetes/website). You can find Kubernetes documentation at `/content/en/docs/`. Some of the reference documentation is automatically generated from scripts in the `update-imported-docs/` directory. - [Page content types](/docs/contribute/style/page-content-types/) describe the presentation of documentation content in Hugo. -- In addition to the standard Hugo shortcodes, we use a number of [custom Hugo shortcodes](/docs/contribute/style/hugo-shortcodes/) in our documentation to control the presentation of content. -- Documentation source is available in multiple languages in `/content/`. Each language has its own folder with a two-letter code determined by the [ISO 639-1 standard](https://www.loc.gov/standards/iso639-2/php/code_list.php). For example, English documentation source is stored in `/content/en/docs/`. +- In addition to the standard Hugo shortcodes, we use a number of + [custom Hugo shortcodes](/docs/contribute/style/hugo-shortcodes/) in our documentation to control the presentation of content. +- Documentation source is available in multiple languages in `/content/`. Each + language has its own folder with a two-letter code determined by the + [ISO 639-1 standard](https://www.loc.gov/standards/iso639-2/php/code_list.php). For + example, English documentation source is stored in `/content/en/docs/`. - For more information about contributing to documentation in multiple languages or starting a new translation, see [localization](/docs/contribute/localization). ## Before you begin {#before-you-begin} diff --git a/content/en/docs/contribute/participate/_index.md b/content/en/docs/contribute/participate/_index.md new file mode 100644 index 0000000000000..a5c0f2880a0de --- /dev/null +++ b/content/en/docs/contribute/participate/_index.md @@ -0,0 +1,120 @@ +--- +title: Participating in SIG Docs +content_type: concept +weight: 60 +card: + name: contribute + weight: 60 +--- + + + +SIG Docs is one of the +[special interest groups](https://github.com/kubernetes/community/blob/master/sig-list.md) +within the Kubernetes project, focused on writing, updating, and maintaining +the documentation for Kubernetes as a whole. 
See +[SIG Docs from the community github repo](https://github.com/kubernetes/community/tree/master/sig-docs) +for more information about the SIG. + +SIG Docs welcomes content and reviews from all contributors. Anyone can open a +pull request (PR), and anyone is welcome to file issues about content or comment +on pull requests in progress. + +You can also become a [member](/docs/contribute/participate/roles-and-responsibilities/#members), +[reviewer](/docs/contribute/participate/roles-and-responsibilities/#reviewers), or +[approver](/docs/contribute/participate/roles-and-responsibilities/#approvers). +These roles require greater access and entail certain responsibilities for +approving and committing changes. See +[community-membership](https://github.com/kubernetes/community/blob/master/community-membership.md) +for more information on how membership works within the Kubernetes community. + +The rest of this document outlines some unique ways these roles function within +SIG Docs, which is responsible for maintaining one of the most public-facing +aspects of Kubernetes -- the Kubernetes website and documentation. + + + +## SIG Docs chairperson + +Each SIG, including SIG Docs, selects one or more SIG members to act as +chairpersons. These are points of contact between SIG Docs and other parts of +the Kubernetes organization. They require extensive knowledge of the structure +of the Kubernetes project as a whole and how SIG Docs works within it. See +[Leadership](https://github.com/kubernetes/community/tree/master/sig-docs#leadership) +for the current list of chairpersons. + +## SIG Docs teams and automation + +Automation in SIG Docs relies on two different mechanisms: +GitHub teams and OWNERS files. + +### GitHub teams + +There are two categories of SIG Docs [teams](https://github.com/orgs/kubernetes/teams?query=sig-docs) on GitHub: + +- `@sig-docs-{language}-owners` are approvers and leads +- `@sig-docs-{language}-reviewers` are reviewers + +Each can be referenced with their `@name` in GitHub comments to communicate with +everyone in that group. + +Sometimes Prow and GitHub teams overlap without matching exactly. For +assignment of issues, pull requests, and to support PR approvals, the +automation uses information from `OWNERS` files. + +### OWNERS files and front-matter + +The Kubernetes project uses an automation tool called prow for automation +related to GitHub issues and pull requests. The +[Kubernetes website repository](https://github.com/kubernetes/website) uses +two [prow plugins](https://github.com/kubernetes/test-infra/tree/master/prow/plugins): + +- blunderbuss +- approve + +These two plugins use the +[OWNERS](https://github.com/kubernetes/website/blob/master/OWNERS) and +[OWNERS_ALIASES](https://github.com/kubernetes/website/blob/master/OWNERS_ALIASES) +files in the top level of the `kubernetes/website` GitHub repository to control +how prow works within the repository. + +An OWNERS file contains a list of people who are SIG Docs reviewers and +approvers. OWNERS files can also exist in subdirectories, and can override who +can act as a reviewer or approver of files in that subdirectory and its +descendants. For more information about OWNERS files in general, see +[OWNERS](https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md). + +In addition, an individual Markdown file can list reviewers and approvers in its +front-matter, either by listing individual GitHub usernames or GitHub groups. 
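+
+As a minimal sketch (the page title and usernames below are placeholders, not
+real assignments), front-matter that names reviewers and approvers for a page
+might look like this:
+
+```yaml
+---
+title: Example page            # placeholder page title
+reviewers:
+- a-github-username            # placeholder; a GitHub group can be listed instead
+approvers:
+- another-github-username
+---
+```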
+
+The combination of OWNERS files and front-matter in Markdown files determines
+the advice PR owners get from automated systems about who to ask for technical
+and editorial review of their PR.
+
+## How merging works
+
+When a pull request is merged to the branch used to publish content, that content is published to https://kubernetes.io. To ensure that
+the quality of our published content is high, we limit merging pull requests to
+SIG Docs approvers. Here's how it works.
+
+- When a pull request has both the `lgtm` and `approve` labels, has no `hold`
+  labels, and all tests are passing, the pull request merges automatically.
+- Kubernetes organization members and SIG Docs approvers can add comments to
+  prevent automatic merging of a given pull request (by adding a `/hold` comment
+  or withholding a `/lgtm` comment).
+- Any Kubernetes member can add the `lgtm` label by adding a `/lgtm` comment.
+- Only SIG Docs approvers can merge a pull request
+  by adding an `/approve` comment. Some approvers also perform additional
+  specific roles, such as [PR Wrangler](/docs/contribute/advanced#be-the-pr-wrangler-for-a-week) or
+  [SIG Docs chairperson](#sig-docs-chairperson).
+
+
+
+## {{% heading "whatsnext" %}}
+
+
+For more information about contributing to the Kubernetes documentation, see:
+
+- [Contributing new content](/docs/contribute/new-content/overview/)
+- [Reviewing content](/docs/contribute/review/reviewing-prs)
+- [Documentation style guide](/docs/contribute/style/)
diff --git a/content/en/docs/contribute/participate/pr-wranglers.md b/content/en/docs/contribute/participate/pr-wranglers.md
new file mode 100644
index 0000000000000..c2ab60a811a8f
--- /dev/null
+++ b/content/en/docs/contribute/participate/pr-wranglers.md
@@ -0,0 +1,69 @@
+---
+title: PR wranglers
+content_type: concept
+weight: 20
+---
+
+
+
+SIG Docs [approvers](/docs/contribute/participate/roles-and-responsibilities/#approvers) take week-long shifts [managing pull requests](https://github.com/kubernetes/website/wiki/PR-Wranglers) for the repository.
+
+This section covers the duties of a PR wrangler. For more information on giving good reviews, see [Reviewing changes](/docs/contribute/review/).
+
+
+
+## Duties
+
+Each day in a week-long shift as PR Wrangler:
+
+- Triage and tag incoming issues daily. See [Triage and categorize issues](/docs/contribute/review/for-approvers/#triage-and-categorize-issues) for guidelines on how SIG Docs uses metadata.
+- Review [open pull requests](https://github.com/kubernetes/website/pulls) for quality and adherence to the [Style](/docs/contribute/style/style-guide/) and [Content](/docs/contribute/style/content-guide/) guides.
+  - Start with the smallest PRs (`size/XS`) first, and end with the largest (`size/XXL`). Review as many PRs as you can.
+- Make sure PR contributors sign the [CLA](https://github.com/kubernetes/community/blob/master/CLA.md).
+  - Use [this](https://github.com/zparnold/k8s-docs-pr-botherer) script to remind contributors that haven’t signed the CLA to do so.
+- Provide feedback on changes and ask for technical reviews from members of other SIGs.
+  - Provide inline suggestions on the PR for the proposed content changes.
+  - If you need to verify content, comment on the PR and request more details.
+  - Assign relevant `sig/` label(s).
+  - If needed, assign reviewers from the `reviewers:` block in the file's front matter.
+- Use the `/approve` comment to approve a PR for merging. Merge the PR when ready.
+ - PRs should have a `/lgtm` comment from another member before merging. + - Consider accepting technically accurate content that doesn't meet the [style guidelines](/docs/contribute/style/style-guide/). Open a new issue with the label `good first issue` to address style concerns. + +### Helpful GitHub queries for wranglers + +The following queries are helpful when wrangling. +After working through these queries, the remaining list of PRs to review is usually small. +These queries exclude localization PRs. All queries are against the main branch except the last one. + +- [No CLA, not eligible to merge](https://github.com/kubernetes/website/pulls?q=is%3Aopen+is%3Apr+label%3A%22cncf-cla%3A+no%22+-label%3A%22do-not-merge%2Fwork-in-progress%22+-label%3A%22do-not-merge%2Fhold%22+label%3Alanguage%2Fen): + Remind the contributor to sign the CLA. If both the bot and a human have reminded them, close + the PR and remind them that they can open it after signing the CLA. + **Do not review PRs whose authors have not signed the CLA!** +- [Needs LGTM](https://github.com/kubernetes/website/pulls?q=is%3Aopen+is%3Apr+-label%3A%22cncf-cla%3A+no%22+-label%3Ado-not-merge%2Fwork-in-progress+-label%3Ado-not-merge%2Fhold+label%3Alanguage%2Fen+-label%3Algtm): + Lists PRs that need an LGTM from a member. If the PR needs technical review, loop in one of the reviewers suggested by the bot. If the content needs work, add suggestions and feedback in-line. +- [Has LGTM, needs docs approval](https://github.com/kubernetes/website/pulls?q=is%3Aopen+is%3Apr+-label%3Ado-not-merge%2Fwork-in-progress+-label%3Ado-not-merge%2Fhold+label%3Alanguage%2Fen+label%3Algtm+): + Lists PRs that need an `/approve` comment to merge. +- [Quick Wins](https://github.com/kubernetes/website/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aopen+base%3Amaster+-label%3A%22do-not-merge%2Fwork-in-progress%22+-label%3A%22do-not-merge%2Fhold%22+label%3A%22cncf-cla%3A+yes%22+label%3A%22size%2FXS%22+label%3A%22language%2Fen%22): Lists PRs against the main branch with no clear blockers. (change "XS" in the size label as you work through the PRs [XS, S, M, L, XL, XXL]). +- [Not against the main branch](https://github.com/kubernetes/website/pulls?q=is%3Aopen+is%3Apr+label%3Alanguage%2Fen+-base%3Amaster): If the PR is against a `dev-` branch, it's for an upcoming release. Assign the [docs release manager](https://github.com/kubernetes/sig-release/tree/master/release-team#kubernetes-release-team-roles) using: `/assign @`. If the PR is against an old branch, help the author figure out whether it's targeted against the best branch. + +### When to close Pull Requests + +Reviews and approvals are one tool to keep our PR queue short and current. Another tool is closure. + +Close PRs where: +- The author hasn't signed the CLA for two weeks. + + Authors can reopen the PR after signing the CLA. This is a low-risk way to make sure nothing gets merged without a signed CLA. + +- The author has not responded to comments or feedback in 2 or more weeks. + +Don't be afraid to close pull requests. Contributors can easily reopen and resume works in progress. Often a closure notice is what spurs an author to resume and finish their contribution. + +To close a pull request, leave a `/close` comment on the PR. + +{{< note >}} + +The [`fejta-bot`](https://github.com/fejta-bot) bot marks issues as stale after 90 days of inactivity. After 30 more days it marks issues as rotten and closes them. PR wranglers should close issues after 14-30 days of inactivity. 
+ +{{< /note >}} \ No newline at end of file diff --git a/content/en/docs/contribute/participate/roles-and-responsibilities.md b/content/en/docs/contribute/participate/roles-and-responsibilities.md new file mode 100644 index 0000000000000..8ebe7a1303c98 --- /dev/null +++ b/content/en/docs/contribute/participate/roles-and-responsibilities.md @@ -0,0 +1,237 @@ +--- +title: Roles and responsibilities +content_type: concept +weight: 10 +--- + + + +Anyone can contribute to Kubernetes. As your contributions to SIG Docs grow, +you can apply for different levels of membership in the community. +These roles allow you to take on more responsibility within the community. +Each role requires more time and commitment. The roles are: + +- Anyone: regular contributors to the Kubernetes documentation +- Members: can assign and triage issues and provide non-binding review on pull requests +- Reviewers: can lead reviews on documentation pull requests and can vouch for a change's quality +- Approvers: can lead reviews on documentation and merge changes + + + +## Anyone + +Anyone with a GitHub account can contribute to Kubernetes. SIG Docs welcomes all new contributors! + +Anyone can: + +- Open an issue in any [Kubernetes](https://github.com/kubernetes/) + repository, including + [`kubernetes/website`](https://github.com/kubernetes/website) +- Give non-binding feedback on a pull request +- Contribute to a localization +- Suggest improvements on [Slack](https://slack.k8s.io/) or the + [SIG docs mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-docs). + +After [signing the CLA](/docs/contribute/new-content/overview/#sign-the-cla), anyone can also: + +- Open a pull request to improve existing content, add new content, or write a blog post or case study +- Create diagrams, graphics assets, and embeddable screencasts and videos + +For more information, see [contributing new content](/docs/contribute/new-content/). + +## Members + +A member is someone who has submitted multiple pull requests to +`kubernetes/website`. Members are a part of the +[Kubernetes GitHub organization](https://github.com/kubernetes). + +Members can: + +- Do everything listed under [Anyone](#anyone) +- Use the `/lgtm` comment to add the LGTM (looks good to me) label to a pull request + + {{< note >}} + Using `/lgtm` triggers automation. If you want to provide non-binding + approval, simply commenting "LGTM" works too! + {{< /note >}} + +- Use the `/hold` comment to block merging for a pull request +- Use the `/assign` comment to assign a reviewer to a pull request +- Provide non-binding review on pull requests +- Use automation to triage and categorize issues +- Document new features + +### Becoming a member + +After submitting at least 5 substantial pull requests and meeting the other +[requirements](https://github.com/kubernetes/community/blob/master/community-membership.md#member): + +1. Find two [reviewers](#reviewers) or [approvers](#approvers) to + [sponsor](/docs/contribute/advanced#sponsor-a-new-contributor) your + membership. + + Ask for sponsorship in the [#sig-docs channel on Slack](https://kubernetes.slack.com) or on the + [SIG Docs mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-docs). + + {{< note >}} + Don't send a direct email or Slack direct message to an individual + SIG Docs member. You must request sponsorship before submitting your application. + {{< /note >}} + +1. Open a GitHub issue in the + [`kubernetes/org`](https://github.com/kubernetes/org/) repository. 
Use the
+   **Organization Membership Request** issue template.
+
+1. Let your sponsors know about the GitHub issue. You can either:
+   - Mention their GitHub username in an issue (`@`)
+   - Send them the issue link using Slack or email.
+
+   Sponsors will approve your request with a `+1` vote. Once your sponsors
+   approve the request, a Kubernetes GitHub admin adds you as a member.
+   Congratulations!
+
+   If your membership request is not accepted, you will receive feedback.
+   After addressing the feedback, apply again.
+
+1. Accept the invitation to the Kubernetes GitHub organization in your email account.
+
+   {{< note >}}
+   GitHub sends the invitation to the default email address in your account.
+   {{< /note >}}
+
+## Reviewers
+
+Reviewers are responsible for reviewing open pull requests. Unlike member
+feedback, you must address reviewer feedback. Reviewers are members of the
+[@kubernetes/sig-docs-{language}-reviews](https://github.com/orgs/kubernetes/teams?query=sig-docs)
+GitHub team.
+
+Reviewers can:
+
+- Do everything listed under [Anyone](#anyone) and [Members](#members)
+- Review pull requests and provide binding feedback
+
+  {{< note >}}
+  To provide non-binding feedback, prefix your comments with a phrase like "Optionally: ".
+  {{< /note >}}
+
+- Edit user-facing strings in code
+- Improve code comments
+
+You can be a SIG Docs reviewer, or a reviewer for docs in a specific subject area.
+
+### Assigning reviewers to pull requests
+
+Automation assigns reviewers to all pull requests. You can request a
+review from a specific person by commenting: `/assign
+[@_github_handle]`.
+
+If the assigned reviewer has not commented on the PR, another reviewer can
+step in. You can also assign technical reviewers as needed.
+
+### Using `/lgtm`
+
+LGTM stands for "Looks good to me" and indicates that a pull request is
+technically accurate and ready to merge. All PRs need a `/lgtm` comment from a
+reviewer and an `/approve` comment from an approver to merge.
+
+A `/lgtm` comment from a reviewer is binding and triggers automation that adds the `lgtm` label.
+
+### Becoming a reviewer
+
+When you meet the
+[requirements](https://github.com/kubernetes/community/blob/master/community-membership.md#reviewer),
+you can become a SIG Docs reviewer. Reviewers in other SIGs must apply
+separately for reviewer status in SIG Docs.
+
+To apply:
+
+1. Open a pull request that adds your GitHub user name to a section of the
+   [OWNERS_ALIASES](https://github.com/kubernetes/website/blob/master/OWNERS_ALIASES) file
+   in the `kubernetes/website` repository. A short sample of this file appears below.
+
+   {{< note >}}
+   If you aren't sure where to add yourself, add yourself to `sig-docs-en-reviews`.
+   {{< /note >}}
+
+1. Assign the PR to one or more SIG Docs approvers (user names listed under
+   `sig-docs-{language}-owners`).
+
+If approved, a SIG Docs lead adds you to the appropriate GitHub team. Once added,
+[K8s-ci-robot](https://github.com/kubernetes/test-infra/tree/master/prow#bots-home)
+assigns and suggests you as a reviewer on new pull requests.
+
+## Approvers
+
+Approvers review and approve pull requests for merging. Approvers are members of the
+[@kubernetes/sig-docs-{language}-owners](https://github.com/orgs/kubernetes/teams/?query=sig-docs)
+GitHub teams.
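+
+Both reviewer and approver status are recorded in the `OWNERS_ALIASES` file
+mentioned above. As a minimal sketch (the username below is a placeholder and
+only the English-language aliases are shown), the sections you add yourself to
+look roughly like this:
+
+```yaml
+aliases:
+  sig-docs-en-owners:      # approvers for English content
+  - a-github-username      # placeholder entry
+  sig-docs-en-reviews:     # reviewers for English content
+  - a-github-username
+```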
+ +Approvers can do the following: + +- Everything listed under [Anyone](#anyone), [Members](#members) and [Reviewers](#reviewers) +- Publish contributor content by approving and merging pull requests using the `/approve` comment +- Propose improvements to the style guide +- Propose improvements to docs tests +- Propose improvements to the Kubernetes website or other tooling + +If the PR already has a `/lgtm`, or if the approver also comments with +`/lgtm`, the PR merges automatically. A SIG Docs approver should only leave a +`/lgtm` on a change that doesn't need additional technical review. + + +### Approving pull requests + +Approvers and SIG Docs leads are the only ones who can merge pull requests +into the website repository. This comes with certain responsibilities. + +- Approvers can use the `/approve` command, which merges PRs into the repo. + + {{< warning >}} + A careless merge can break the site, so be sure that when you merge something, you mean it. + {{< /warning >}} + +- Make sure that proposed changes meet the + [contribution guidelines](/docs/contribute/style/content-guide/#contributing-content). + + If you ever have a question, or you're not sure about something, feel free + to call for additional review. + +- Verify that Netlify tests pass before you `/approve` a PR. + + Netlify tests must pass before approving + +- Visit the Netlify page preview for a PR to make sure things look good before approving. + +- Participate in the + [PR Wrangler rotation schedule](https://github.com/kubernetes/website/wiki/PR-Wranglers) + for weekly rotations. SIG Docs expects all approvers to participate in this + rotation. See [PR wranglers](/docs/contribute/participate/pr-wranglers/). + for more details. + +### Becoming an approver + +When you meet the +[requirements](https://github.com/kubernetes/community/blob/master/community-membership.md#approver), +you can become a SIG Docs approver. Approvers in other SIGs must apply +separately for approver status in SIG Docs. + +To apply: + +1. Open a pull request adding yourself to a section of the + [OWNERS_ALIASES](https://github.com/kubernetes/website/blob/master/OWNERS) + file in the `kubernetes/website` repository. + + {{< note >}} + If you aren't sure where to add yourself, add yourself to `sig-docs-en-owners`. + {{< /note >}} + +2. Assign the PR to one or more current SIG Docs approvers. + +If approved, a SIG Docs lead adds you to the appropriate GitHub team. Once added, +[@k8s-ci-robot](https://github.com/kubernetes/test-infra/tree/master/prow#bots-home) +assigns and suggests you as a reviewer on new pull requests. + +## {{% heading "whatsnext" %}} + +- Read about [PR wrangling](/docs/contribute/participate/pr-wranglers/), a role all approvers take on rotation. diff --git a/content/en/docs/contribute/participating.md b/content/en/docs/contribute/participating.md deleted file mode 100644 index 681c53f9940c9..0000000000000 --- a/content/en/docs/contribute/participating.md +++ /dev/null @@ -1,316 +0,0 @@ ---- -title: Participating in SIG Docs -content_type: concept -weight: 60 -card: - name: contribute - weight: 60 ---- - - - -SIG Docs is one of the -[special interest groups](https://github.com/kubernetes/community/blob/master/sig-list.md) -within the Kubernetes project, focused on writing, updating, and maintaining -the documentation for Kubernetes as a whole. See -[SIG Docs from the community github repo](https://github.com/kubernetes/community/tree/master/sig-docs) -for more information about the SIG. 
- -SIG Docs welcomes content and reviews from all contributors. Anyone can open a -pull request (PR), and anyone is welcome to file issues about content or comment -on pull requests in progress. - -You can also become a [member](#members), -[reviewer](#reviewers), or [approver](#approvers). These roles require greater -access and entail certain responsibilities for approving and committing changes. -See [community-membership](https://github.com/kubernetes/community/blob/master/community-membership.md) -for more information on how membership works within the Kubernetes community. - -The rest of this document outlines some unique ways these roles function within -SIG Docs, which is responsible for maintaining one of the most public-facing -aspects of Kubernetes -- the Kubernetes website and documentation. - - - - - -## Roles and responsibilities - -- **Anyone** can contribute to Kubernetes documentation. To contribute, you must [sign the CLA](/docs/contribute/new-content/overview/#sign-the-cla) and have a GitHub account. -- **Members** of the Kubernetes organization are contributors who have spent time and effort on the Kubernetes project, usually by opening pull requests with accepted changes. See [Community membership](https://github.com/kubernetes/community/blob/master/community-membership.md) for membership criteria. -- A SIG Docs **Reviewer** is a member of the Kubernetes organization who has - expressed interest in reviewing documentation pull requests, and has been - added to the appropriate GitHub group and `OWNERS` files in the GitHub - repository by a SIG Docs Approver. -- A SIG Docs **Approver** is a member in good standing who has shown a continued - commitment to the project. An approver can merge pull requests - and publish content on behalf of the Kubernetes organization. - Approvers can also represent SIG Docs in the larger Kubernetes community. - Some duties of a SIG Docs approver, such as coordinating a release, - require a significant time commitment. - -## Anyone - -Anyone can do the following: - -- Open a GitHub issue against any part of Kubernetes, including documentation. -- Provide non-binding feedback on a pull request. -- Help to localize existing content -- Bring up ideas for improvement on [Slack](http://slack.k8s.io/) or the [SIG docs mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-docs). -- Use the `/lgtm` Prow command (short for "looks good to me") to recommend the changes in a pull request for merging. - {{< note >}} - If you are not a member of the Kubernetes organization, using `/lgtm` has no effect on automated systems. - {{< /note >}} - -After [signing the CLA](/docs/contribute/new-content/overview/#sign-the-cla), anyone can also: -- Open a pull request to improve existing content, add new content, or write a blog post or case study. - -## Members - -Members are contributors to the Kubernetes project who meet the [membership criteria](https://github.com/kubernetes/community/blob/master/community-membership.md#member). SIG Docs welcomes contributions from all members of the Kubernetes community, -and frequently requests reviews from members of other SIGs for technical accuracy. - -Any member of the [Kubernetes organization](https://github.com/kubernetes) can do the following: - -- Everything listed under [Anyone](#anyone) -- Use the `/lgtm` comment to add the LGTM (looks good to me) label to a pull request. 
-- Use the `/hold` command to prevent a pull request from being merged, if the pull request already has the LGTM and approve labels. -- Use the `/assign` comment to assign a reviewer to a pull request. - -### Becoming a member - -After you have successfully submitted at least 5 substantive pull requests, you -can request [membership](https://github.com/kubernetes/community/blob/master/community-membership.md#member) -in the Kubernetes organization. Follow these steps: - -1. Find two reviewers or approvers to [sponsor](/docs/contribute/advanced#sponsor-a-new-contributor) - your membership. - - Ask for sponsorship in the [#sig-docs channel on the - Kubernetes Slack instance](https://kubernetes.slack.com) or on the - [SIG Docs mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-docs). - - {{< note >}} - Don't send a direct email or Slack direct message to an individual - SIG Docs member. - {{< /note >}} - -2. Open a GitHub issue in the `kubernetes/org` repository to request membership. - Fill out the template using the guidelines at - [Community membership](https://github.com/kubernetes/community/blob/master/community-membership.md). - -3. Let your sponsors know about the GitHub issue, either by at-mentioning them - in the GitHub issue (adding a comment with `@`) or by sending them the link directly, - so that they can add a `+1` vote. - -4. When your membership is approved, the github admin team member assigned to your request updates the - GitHub issue to show approval and then closes the GitHub issue. - Congratulations, you are now a member! - -If your membership request is not accepted, the -membership committee provides information or steps to take before applying -again. - -## Reviewers - -Reviewers are members of the -[@kubernetes/sig-docs-pr-reviews](https://github.com/orgs/kubernetes/teams/sig-docs-pr-reviews) -GitHub group. Reviewers review documentation pull requests and provide feedback on proposed -changes. Reviewers can: - -- Do everything listed under [Anyone](#anyone) and [Members](#members) -- Document new features -- Triage and categorize issues -- Review pull requests and provide binding feedback -- Create diagrams, graphics assets, and embeddable screencasts and videos -- Edit user-facing strings in code -- Improve code comments - -### Assigning reviewers to pull requests - -Automation assigns reviewers to all pull requests. You can request a -review from a specific reviewer with a comment on the pull request: `/assign -[@_github_handle]`. To indicate that a pull request is technically accurate and -requires no further changes, a reviewer adds a `/lgtm` comment to the pull -request. - -If the assigned reviewer has not yet reviewed the content, another reviewer can -step in. In addition, you can assign technical reviewers and wait for them to -provide a `/lgtm` comment. - -For a trivial change or one that needs no technical review, SIG Docs -[approvers](#approvers) can provide the `/lgtm` as well. - -An `/approve` comment from a reviewer is ignored by automation. - -### Becoming a reviewer - -When you meet the -[requirements](https://github.com/kubernetes/community/blob/master/community-membership.md#reviewer), -you can become a SIG Docs reviewer. Reviewers in other SIGs must apply -separately for reviewer status in SIG Docs. - -To apply, open a pull request to add yourself to the `reviewers` section of the -[top-level OWNERS file](https://github.com/kubernetes/website/blob/master/OWNERS) -in the `kubernetes/website` repository. 
Assign the PR to one or more current SIG -Docs approvers. - -If your pull request is approved, you are now a SIG Docs reviewer. -[K8s-ci-robot](https://github.com/kubernetes/test-infra/tree/master/prow#bots-home) -will assign and suggest you as a reviewer on new pull requests. - -If you are approved, request that a current SIG Docs approver add you to the -[@kubernetes/sig-docs-pr-reviews](https://github.com/orgs/kubernetes/teams/sig-docs-pr-reviews) -GitHub group. Only members of the `kubernetes-website-admins` GitHub group can -add new members to a GitHub group. - -## Approvers - -Approvers are members of the -[@kubernetes/sig-docs-maintainers](https://github.com/orgs/kubernetes/teams/sig-docs-maintainers) -GitHub group. See [SIG Docs teams and automation](#sig-docs-teams-and-automation) for details. - -Approvers can do the following: - -- Everything listed under [Anyone](#anyone), [Members](#members) and [Reviewers](#reviewers) -- Publish contributor content by approving and merging pull requests using the `/approve` comment. - If someone who is not an approver leaves the approval comment, automation ignores it. -- Participate in a Kubernetes release team as a docs representative -- Propose improvements to the style guide -- Propose improvements to docs tests -- Propose improvements to the Kubernetes website or other tooling - -If the PR already has a `/lgtm`, or if the approver also comments with `/lgtm`, -the PR merges automatically. A SIG Docs approver should only leave a `/lgtm` on -a change that doesn't need additional technical review. - -### Becoming an approver - -When you meet the -[requirements](https://github.com/kubernetes/community/blob/master/community-membership.md#approver), -you can become a SIG Docs approver. Approvers in other SIGs must apply -separately for approver status in SIG Docs. - -To apply, open a pull request to add yourself to the `approvers` section of the -[top-level OWNERS file](https://github.com/kubernetes/website/blob/master/OWNERS) -in the `kubernetes/website` repository. Assign the PR to one or more current SIG -Docs approvers. - -If your pull request is approved, you are now a SIG Docs approver. -[K8s-ci-robot](https://github.com/kubernetes/test-infra/tree/master/prow#bots-home) -will assign and suggest you as a reviewer on new pull requests. - -If you are approved, request that a current SIG Docs approver add you to the -[@kubernetes/sig-docs-maintainers](https://github.com/orgs/kubernetes/teams/sig-docs-maintainers) -GitHub group. Only members of the `kubernetes-website-admins` GitHub group can -add new members to a GitHub group. - -### Approver responsibilities - -Approvers improve the documentation by reviewing and merging pull requests into the website repository. Because this role carries additional privileges, approvers have additional responsibilities: - -- Approvers can use the `/approve` command, which merges PRs into the repo. - - A careless merge can break the site, so be sure that when you merge something, you mean it. - -- Make sure that proposed changes meet the [contribution guidelines](/docs/contribute/style/content-guide/#contributing-content). - - If you ever have a question, or you're not sure about something, feel free to call for additional review. - -- Verify that Netlify tests pass before you `/approve` a PR. - - Netlify tests must pass before approving - -- Visit the Netlify page preview for a PR to make sure things look good before approving. 
- -- Participate in the [PR Wrangler rotation schedule](https://github.com/kubernetes/website/wiki/PR-Wranglers) for weekly rotations. SIG Docs expects all approvers to participate in this -rotation. See [Be the PR Wrangler for a week](/docs/contribute/advanced#be-the-pr-wrangler-for-a-week) -for more details. - -## SIG Docs chairperson - -Each SIG, including SIG Docs, selects one or more SIG members to act as -chairpersons. These are points of contact between SIG Docs and other parts of -the Kubernetes organization. They require extensive knowledge of the structure -of the Kubernetes project as a whole and how SIG Docs works within it. See -[Leadership](https://github.com/kubernetes/community/tree/master/sig-docs#leadership) -for the current list of chairpersons. - -## SIG Docs teams and automation - -Automation in SIG Docs relies on two different mechanisms for automation: -GitHub groups and OWNERS files. - -### GitHub groups - -The SIG Docs group defines two teams on GitHub: - - - [@kubernetes/sig-docs-maintainers](https://github.com/orgs/kubernetes/teams/sig-docs-maintainers) - - [@kubernetes/sig-docs-pr-reviews](https://github.com/orgs/kubernetes/teams/sig-docs-pr-reviews) - -Each can be referenced with their `@name` in GitHub comments to communicate with -everyone in that group. - -These teams overlap, but do not exactly match, the groups used by the automation -tooling. For assignment of issues, pull requests, and to support PR approvals, -the automation uses information from OWNERS files. - -### OWNERS files and front-matter - -The Kubernetes project uses an automation tool called prow for automation -related to GitHub issues and pull requests. The -[Kubernetes website repository](https://github.com/kubernetes/website) uses -two [prow plugins](https://github.com/kubernetes/test-infra/tree/master/prow/plugins): - -- blunderbuss -- approve - -These two plugins use the -[OWNERS](https://github.com/kubernetes/website/blob/master/OWNERS) and -[OWNERS_ALIASES](https://github.com/kubernetes/website/blob/master/OWNERS_ALIASES) -files in the top level of the `kubernetes/website` GitHub repository to control -how prow works within the repository. - -An OWNERS file contains a list of people who are SIG Docs reviewers and -approvers. OWNERS files can also exist in subdirectories, and can override who -can act as a reviewer or approver of files in that subdirectory and its -descendents. For more information about OWNERS files in general, see -[OWNERS](https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md). - -In addition, an individual Markdown file can list reviewers and approvers in its -front-matter, either by listing individual GitHub usernames or GitHub groups. - -The combination of OWNERS files and front-matter in Markdown files determines -the advice PR owners get from automated systems about who to ask for technical -and editorial review of their PR. - -## How merging works - -When a pull request is merged to the branch used to publish content (currently -`master`), that content is published and available to the world. To ensure that -the quality of our published content is high, we limit merging pull requests to -SIG Docs approvers. Here's how it works. - -- When a pull request has both the `lgtm` and `approve` labels, has no `hold` - labels, and all tests are passing, the pull request merges automatically. 
-- Kubernetes organization members and SIG Docs approvers can add comments to - prevent automatic merging of a given pull request (by adding a `/hold` comment - or withholding a `/lgtm` comment). -- Any Kubernetes member can add the `lgtm` label by adding a `/lgtm` comment. -- Only SIG Docs approvers can merge a pull request - by adding an `/approve` comment. Some approvers also perform additional - specific roles, such as [PR Wrangler](/docs/contribute/advanced#be-the-pr-wrangler-for-a-week) or - [SIG Docs chairperson](#sig-docs-chairperson). - - - -## {{% heading "whatsnext" %}} - - -For more information about contributing to the Kubernetes documentation, see: - -- [Contributing new content](/docs/contribute/overview/) -- [Reviewing content](/docs/contribute/review/reviewing-prs) -- [Documentation style guide](/docs/contribute/style/) - - diff --git a/content/en/docs/contribute/review/for-approvers.md b/content/en/docs/contribute/review/for-approvers.md index 0cddbcba6aa5e..82a05bdb86ad0 100644 --- a/content/en/docs/contribute/review/for-approvers.md +++ b/content/en/docs/contribute/review/for-approvers.md @@ -8,7 +8,9 @@ weight: 20 -SIG Docs [Reviewers](/docs/contribute/participating/#reviewers) and [Approvers](/docs/contribute/participating/#approvers) do a few extra things when reviewing a change. +SIG Docs [Reviewers](/docs/contribute/participate/#reviewers) and +[Approvers](/docs/contribute/participate/#approvers) do a few extra things +when reviewing a change. Every week a specific docs approver volunteers to triage and review pull requests. This @@ -19,9 +21,6 @@ requests (PRs) that are not already under active review. In addition to the rotation, a bot assigns reviewers and approvers for the PR based on the owners for the affected files. - - - ## Reviewing a PR @@ -202,9 +201,9 @@ Sample response to a request for support: This issue sounds more like a request for support and less like an issue specifically for docs. I encourage you to bring your question to the `#kubernetes-users` channel in -[Kubernetes slack](http://slack.k8s.io/). You can also search +[Kubernetes slack](https://slack.k8s.io/). You can also search resources like -[Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes) +[Stack Overflow](https://stackoverflow.com/questions/tagged/kubernetes) for answers to similar questions. You can also open issues for Kubernetes functionality in diff --git a/content/en/docs/contribute/review/reviewing-prs.md b/content/en/docs/contribute/review/reviewing-prs.md index 3c271aa44f1f7..ff6ef9d709094 100644 --- a/content/en/docs/contribute/review/reviewing-prs.md +++ b/content/en/docs/contribute/review/reviewing-prs.md @@ -16,10 +16,10 @@ It helps you learn the code base and build trust with other contributors. Before reviewing, it's a good idea to: - Read the [content guide](/docs/contribute/style/content-guide/) and -[style guide](/docs/contribute/style/style-guide/) so you can leave informed comments. -- Understand the different [roles and responsibilities](/docs/contribute/participating/#roles-and-responsibilities) in the Kubernetes documentation community. - - + [style guide](/docs/contribute/style/style-guide/) so you can leave informed comments. +- Understand the different + [roles and responsibilities](/docs/contribute/participate/roles-and-responsibilities/) + in the Kubernetes documentation community. 
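As a reference for the OWNERS and front-matter mechanics discussed in the participation content above, here is a minimal, hypothetical sketch of a per-directory OWNERS file and of per-page reviewers in Markdown front matter. The directory path, filenames, and reviewer names are placeholders, not assignments made by this PR; the aliases shown are existing SIG Docs aliases.

```shell
# Hypothetical sketch only: a per-directory OWNERS file that delegates review and
# approval of files under this directory to existing SIG Docs aliases.
cat <<'EOF' > content/en/docs/contribute/OWNERS
# See https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md
reviewers:
  - sig-docs-en-reviews
approvers:
  - sig-docs-en-owners
EOF

# Hypothetical sketch only: per-page reviewers declared in front matter, which prow
# combines with OWNERS data when suggesting reviewers for a PR.
cat <<'EOF' > content/en/docs/contribute/example-topic.md
---
title: Example topic
reviewers:
  - reviewer-a   # placeholder GitHub username
  - reviewer-b   # placeholder GitHub username
---
EOF
```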
diff --git a/content/en/docs/contribute/style/content-guide.md b/content/en/docs/contribute/style/content-guide.md index 2f367c9a8102d..0de4a381a3f4d 100644 --- a/content/en/docs/contribute/style/content-guide.md +++ b/content/en/docs/contribute/style/content-guide.md @@ -9,10 +9,10 @@ weight: 10 This page contains guidelines for Kubernetes documentation. -If you have questions about what's allowed, join the #sig-docs channel in -[Kubernetes Slack](http://slack.k8s.io/) and ask! +If you have questions about what's allowed, join the #sig-docs channel in +[Kubernetes Slack](https://slack.k8s.io/) and ask! -You can register for Kubernetes Slack at http://slack.k8s.io/. +You can register for Kubernetes Slack at https://slack.k8s.io/. For information on creating new content for the Kubernetes docs, follow the [style guide](/docs/contribute/style/style-guide). @@ -28,7 +28,7 @@ Source for the Kubernetes website, including the docs, resides in the Located in the `kubernetes/website/content//docs` folder, the majority of Kubernetes documentation is specific to the [Kubernetes -project](https://github.com/kubernetes/kubernetes). +project](https://github.com/kubernetes/kubernetes). ## What's allowed @@ -41,12 +41,12 @@ Kubernetes docs allow content for third-party projects only when: ### Third party content Kubernetes documentation includes applied examples of projects in the Kubernetes project—projects that live in the [kubernetes](https://github.com/kubernetes) and -[kubernetes-sigs](https://github.com/kubernetes-sigs) GitHub organizations. +[kubernetes-sigs](https://github.com/kubernetes-sigs) GitHub organizations. -Links to active content in the Kubernetes project are always allowed. +Links to active content in the Kubernetes project are always allowed. -Kubernetes requires some third party content to function. Examples include container runtimes (containerd, CRI-O, Docker), -[networking policy](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) (CNI plugins), [Ingress controllers](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/), and [logging](https://kubernetes.io/docs/concepts/cluster-administration/logging/). +Kubernetes requires some third party content to function. Examples include container runtimes (containerd, CRI-O, Docker), +[networking policy](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) (CNI plugins), [Ingress controllers](/docs/concepts/services-networking/ingress-controllers/), and [logging](/docs/concepts/cluster-administration/logging/). Docs can link to third-party open source software (OSS) outside the Kubernetes project only if it's necessary for Kubernetes to function. @@ -60,14 +60,14 @@ and grows stale more quickly. {{< note >}} -If you're a maintainer for a Kubernetes project and need help hosting your own docs, +If you're a maintainer for a Kubernetes project and need help hosting your own docs, ask for help in [#sig-docs on Kubernetes Slack](https://kubernetes.slack.com/messages/C1J0BPD2M/). {{< /note >}} ### More information -If you have questions about allowed content, join the [Kubernetes Slack](http://slack.k8s.io/) #sig-docs channel and ask! +If you have questions about allowed content, join the [Kubernetes Slack](https://slack.k8s.io/) #sig-docs channel and ask! @@ -75,5 +75,3 @@ If you have questions about allowed content, join the [Kubernetes Slack](http:// * Read the [Style guide](/docs/contribute/style/style-guide). 
- - diff --git a/content/en/docs/contribute/style/hugo-shortcodes/index.md b/content/en/docs/contribute/style/hugo-shortcodes/index.md index e4a6d703adec8..ab949be7fce71 100644 --- a/content/en/docs/contribute/style/hugo-shortcodes/index.md +++ b/content/en/docs/contribute/style/hugo-shortcodes/index.md @@ -232,7 +232,7 @@ Renders to: {{< tabs name="tab_with_file_include" >}} {{< tab name="Content File #1" include="example1" />}} {{< tab name="Content File #2" include="example2" />}} -{{< tab name="JSON File" include="podtemplate" />}} +{{< tab name="JSON File" include="podtemplate.json" />}} {{< /tabs >}} @@ -242,6 +242,6 @@ Renders to: * Learn about [Hugo](https://gohugo.io/). * Learn about [writing a new topic](/docs/contribute/style/write-new-topic/). * Learn about [page content types](/docs/contribute/style/page-content-types/). -* Learn about [creating a pull request](/docs/contribute/new-content/new-content/). +* Learn about [opening a pull request](/docs/contribute/new-content/open-a-pr/). * Learn about [advanced contributing](/docs/contribute/advanced/). diff --git a/content/en/docs/contribute/style/page-content-types.md b/content/en/docs/contribute/style/page-content-types.md index 2a3325d397b48..5d3b519bc0bc2 100644 --- a/content/en/docs/contribute/style/page-content-types.md +++ b/content/en/docs/contribute/style/page-content-types.md @@ -191,7 +191,7 @@ Within each section, write your content. Use the following guidelines: interested in reading next. An example of a published tutorial topic is -[Running a Stateless Application Using a Deployment](/docs/tutorials/stateless-application/run-stateless-application-deployment/). +[Running a Stateless Application Using a Deployment](/docs/tasks/run-application/run-stateless-application-deployment/). ### Reference diff --git a/content/en/docs/contribute/style/style-guide.md b/content/en/docs/contribute/style/style-guide.md index 55aa30d66cf6f..44653708ec429 100644 --- a/content/en/docs/contribute/style/style-guide.md +++ b/content/en/docs/contribute/style/style-guide.md @@ -22,8 +22,11 @@ discussion. {{< note >}} -Kubernetes documentation uses [Blackfriday Markdown Renderer](https://github.com/russross/blackfriday) along with a few [Hugo Shortcodes](/docs/home/contribute/includes/) to support glossary entries, tabs, -and representing feature state. +Kubernetes documentation uses +[Goldmark Markdown Renderer](https://github.com/yuin/goldmark) +with some adjustments along with a few +[Hugo Shortcodes](/docs/contribute/style/hugo-shortcodes/) to support +glossary entries, tabs, and representing feature state. {{< /note >}} ## Language @@ -584,12 +587,8 @@ The Federation feature provides ... | The new Federation feature provides ... {{< /table >}} - - ## {{% heading "whatsnext" %}} - * Learn about [writing a new topic](/docs/contribute/style/write-new-topic/). * Learn about [using page templates](/docs/contribute/style/page-content-types/). -* Learn about [staging your changes](/docs/contribute/stage-documentation-changes/) * Learn about [creating a pull request](/docs/contribute/new-content/open-a-pr/). diff --git a/content/en/docs/contribute/style/write-new-topic.md b/content/en/docs/contribute/style/write-new-topic.md index 8bd4b8fbe25be..7cac1aa6b7fd5 100644 --- a/content/en/docs/contribute/style/write-new-topic.md +++ b/content/en/docs/contribute/style/write-new-topic.md @@ -11,7 +11,7 @@ This page shows how to create a new topic for the Kubernetes docs. 
## {{% heading "prerequisites" %}} Create a fork of the Kubernetes documentation repository as described in -[Open a PR](/docs/new-content/open-a-pr/). +[Open a PR](/docs/contribute/new-content/open-a-pr/). @@ -28,9 +28,17 @@ Task | A task page shows how to do a single thing. The idea is to give readers a Tutorial | A tutorial page shows how to accomplish a goal that ties together several Kubernetes features. A tutorial might provide several sequences of steps that readers can actually do as they read the page. Or it might provide explanations of related pieces of code. For example, a tutorial could provide a walkthrough of a code sample. A tutorial can include brief explanations of the Kubernetes features that are being tied together, but should link to related concept topics for deep explanations of individual features. {{< /table >}} +### Creating a new page + Use a [content type](/docs/contribute/style/page-content-types/) for each new page -that you write. Using page type helps ensure -consistency among topics of a given type. +that you write. The docs site provides templates or +[Hugo archetypes](https://gohugo.io/content-management/archetypes/) to create +new content pages. To create a new type of page, run `hugo new` with the path to the file +you want to create. For example: + +``` +hugo new docs/concepts/my-first-concept.md +``` ## Choosing a title and filename @@ -152,7 +160,7 @@ submitted to ensure all examples pass the tests. {{< /note >}} For an example of a topic that uses this technique, see -[Running a Single-Instance Stateful Application](/docs/tutorials/stateful-application/run-stateful-application/). +[Running a Single-Instance Stateful Application](/docs/tasks/run-application/run-single-instance-stateful-application/). ## Adding images to a topic diff --git a/content/en/docs/reference/access-authn-authz/admission-controllers.md b/content/en/docs/reference/access-authn-authz/admission-controllers.md index 874bcbeb1c00e..7e1f8ced6649f 100644 --- a/content/en/docs/reference/access-authn-authz/admission-controllers.md +++ b/content/en/docs/reference/access-authn-authz/admission-controllers.md @@ -677,9 +677,6 @@ for more information. This admission controller acts on creation and modification of the pod and determines if it should be admitted based on the requested security context and the available Pod Security Policies. -For Kubernetes < 1.6.0, the API Server must enable the extensions/v1beta1/podsecuritypolicy API -extensions group (`--runtime-config=extensions/v1beta1/podsecuritypolicy=true`). - See also [Pod Security Policy documentation](/docs/concepts/policy/pod-security-policy/) for more information. @@ -706,8 +703,8 @@ kind: Namespace metadata: name: apps-that-need-nodes-exclusively annotations: - scheduler.alpha.kubernetes.io/defaultTolerations: '{"operator": "Exists", "effect": "NoSchedule", "key": "dedicated-node"}' - scheduler.alpha.kubernetes.io/tolerationsWhitelist: '{"operator": "Exists", "effect": "NoSchedule", "key": "dedicated-node"}' + scheduler.alpha.kubernetes.io/defaultTolerations: '[{"operator": "Exists", "effect": "NoSchedule", "key": "dedicated-node"}]' + scheduler.alpha.kubernetes.io/tolerationsWhitelist: '[{"operator": "Exists", "effect": "NoSchedule", "key": "dedicated-node"}]' ``` ### Priority {#priority} @@ -793,4 +790,4 @@ phase, and therefore is the last admission controller to run. in the mutating phase. For earlier versions, there was no concept of validating versus mutating and the -admission controllers ran in the exact order specified. 
\ No newline at end of file +admission controllers ran in the exact order specified. diff --git a/content/en/docs/reference/access-authn-authz/certificate-signing-requests.md b/content/en/docs/reference/access-authn-authz/certificate-signing-requests.md index 576cde3d88d00..f208bfb770252 100644 --- a/content/en/docs/reference/access-authn-authz/certificate-signing-requests.md +++ b/content/en/docs/reference/access-authn-authz/certificate-signing-requests.md @@ -416,10 +416,8 @@ signed certificate. ## {{% heading "whatsnext" %}} -* Read [Manage TLS Certificates in a Cluster](https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/) +* Read [Manage TLS Certificates in a Cluster](/docs/tasks/tls/managing-tls-in-a-cluster/) * View the source code for the kube-controller-manager built in [signer](https://github.com/kubernetes/kubernetes/blob/32ec6c212ec9415f604ffc1f4c1f29b782968ff1/pkg/controller/certificates/signer/cfssl_signer.go) * View the source code for the kube-controller-manager built in [approver](https://github.com/kubernetes/kubernetes/blob/32ec6c212ec9415f604ffc1f4c1f29b782968ff1/pkg/controller/certificates/approver/sarapprove.go) * For details of X.509 itself, refer to [RFC 5280](https://tools.ietf.org/html/rfc5280#section-3.1) section 3.1 * For information on the syntax of PKCS#10 certificate signing requests, refer to [RFC 2986](https://tools.ietf.org/html/rfc2986) - - diff --git a/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md b/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md index 718c9d11477ab..8b0794a6b7277 100644 --- a/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md +++ b/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md @@ -949,7 +949,7 @@ See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels for ### Matching requests: matchPolicy API servers can make objects available via multiple API groups or versions. -For example, the Kubernetes API server allows creating and modifying `Deployment` objects +For example, the Kubernetes API server may allow creating and modifying `Deployment` objects via `extensions/v1beta1`, `apps/v1beta1`, `apps/v1beta2`, and `apps/v1` APIs. For example, if a webhook only specified a rule for some API groups/versions (like `apiGroups:["apps"], apiVersions:["v1","v1beta1"]`), diff --git a/content/en/docs/reference/access-authn-authz/rbac.md b/content/en/docs/reference/access-authn-authz/rbac.md index 20b1224e59e9f..2be833826c513 100644 --- a/content/en/docs/reference/access-authn-authz/rbac.md +++ b/content/en/docs/reference/access-authn-authz/rbac.md @@ -606,12 +606,15 @@ either do not manually edit the role, or disable auto-reconciliation. - + + + + @@ -627,6 +630,7 @@ either do not manually edit the role, or disable auto-reconciliation. +
Kubernetes RBAC API discovery roles
Default ClusterRole Default ClusterRoleBinding Description
system:basic-user system:authenticated groupsystem:authenticated and system:unauthenticated groups Allows read-only access to non-sensitive information about the cluster. Introduced in Kubernetes v1.14.
### User-facing roles @@ -649,12 +653,15 @@ metadata: ``` - + + + + @@ -691,17 +698,21 @@ the contents of Secrets enables access to ServiceAccount credentials in the namespace, which would allow API access as any ServiceAccount in the namespace (a form of privilege escalation). +
Default ClusterRole Default ClusterRoleBinding Description
cluster-admin system:masters group
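To make the user-facing roles above concrete, here is a hedged sketch of binding them with `kubectl`; the user, binding, and namespace names are illustrative and not part of this PR.

```shell
# Hedged sketch (illustrative names): grant the default user-facing "view"
# ClusterRole within a single namespace via a RoleBinding.
kubectl create rolebinding dev-viewer \
  --clusterrole=view \
  --user=jane \
  --namespace=dev

# Grant cluster-admin cluster-wide via a ClusterRoleBinding.
kubectl create clusterrolebinding ops-admin \
  --clusterrole=cluster-admin \
  --user=ops-lead
```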
### Core component roles - + + + + @@ -733,17 +744,21 @@ The system:node role only exists for compatibility with Kubernetes clus +
Default ClusterRole Default ClusterRoleBinding Description
system:kube-scheduler system:kube-scheduler usersystem:kube-proxy user Allows access to the resources required by the {{< glossary_tooltip term_id="kube-proxy" text="kube-proxy" >}} component.
### Other component roles - + + + + @@ -786,6 +801,7 @@ This is commonly used by add-on API servers for unified authentication and autho +
Default ClusterRole Default ClusterRoleBinding Description
system:auth-delegator NoneNone Allows access to the resources required by most dynamic volume provisioners.
### Roles for built-in controllers {#controller-roles} diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates.md b/content/en/docs/reference/command-line-tools-reference/feature-gates.md index 6a9b6c1ced4ea..9fc130f157451 100644 --- a/content/en/docs/reference/command-line-tools-reference/feature-gates.md +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates.md @@ -129,9 +129,10 @@ different Kubernetes components. | `RuntimeClass` | `false` | Alpha | 1.12 | 1.13 | | `RuntimeClass` | `true` | Beta | 1.14 | | | `SCTPSupport` | `false` | Alpha | 1.12 | | -| `ServiceAppProtocol` | `false` | Alpha | 1.18 | | | `ServerSideApply` | `false` | Alpha | 1.14 | 1.15 | | `ServerSideApply` | `true` | Beta | 1.16 | | +| `ServiceAccountIssuerDiscovery` | `false` | Alpha | 1.18 | | +| `ServiceAppProtocol` | `false` | Alpha | 1.18 | | | `ServiceNodeExclusion` | `false` | Alpha | 1.8 | | | `ServiceTopology` | `false` | Alpha | 1.17 | | | `StartupProbe` | `false` | Alpha | 1.16 | 1.17 | @@ -433,7 +434,7 @@ Each feature gate is designed for enabling/disabling a specific feature: - `KubeletPluginsWatcher`: Enable probe-based plugin watcher utility to enable kubelet to discover plugins such as [CSI volume drivers](/docs/concepts/storage/volumes/#csi). - `KubeletPodResources`: Enable the kubelet's pod resources grpc endpoint. - See [Support Device Monitoring](https://git.k8s.io/community/keps/sig-node/compute-device-assignment.md) for more details. + See [Support Device Monitoring](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/compute-device-assignment.md) for more details. - `LegacyNodeRoleBehavior`: When disabled, legacy behavior in service load balancers and node disruption will ignore the `node-role.kubernetes.io/master` label in favor of the feature-specific labels. - `LocalStorageCapacityIsolation`: Enable the consumption of [local ephemeral storage](/docs/concepts/configuration/manage-compute-resources-container/) and also the `sizeLimit` property of an [emptyDir volume](/docs/concepts/storage/volumes/#emptydir). - `LocalStorageCapacityIsolationFSQuotaMonitoring`: When `LocalStorageCapacityIsolation` is enabled for [local ephemeral storage](/docs/concepts/configuration/manage-compute-resources-container/) and the backing filesystem for [emptyDir volumes](/docs/concepts/storage/volumes/#emptydir) supports project quotas and they are enabled, use project quotas to monitor [emptyDir volume](/docs/concepts/storage/volumes/#emptydir) storage consumption rather than filesystem walk for better performance and accuracy. @@ -473,11 +474,12 @@ Each feature gate is designed for enabling/disabling a specific feature: - `ScheduleDaemonSetPods`: Enable DaemonSet Pods to be scheduled by the default scheduler instead of the DaemonSet controller. - `SCTPSupport`: Enables the usage of SCTP as `protocol` value in `Service`, `Endpoint`, `NetworkPolicy` and `Pod` definitions - `ServerSideApply`: Enables the [Sever Side Apply (SSA)](/docs/reference/using-api/api-concepts/#server-side-apply) path at the API Server. +- `ServiceAccountIssuerDiscovery`: Enable OIDC discovery endpoints (issuer and JWKS URLs) for the service account issuer in the API server. See [Configure Service Accounts for Pods](/docs/tasks/configure-pod-container/configure-service-account/#service-account-issuer-discovery) for more details. - `ServiceAppProtocol`: Enables the `AppProtocol` field on Services and Endpoints. 
- `ServiceLoadBalancerFinalizer`: Enable finalizer protection for Service load balancers. - `ServiceNodeExclusion`: Enable the exclusion of nodes from load balancers created by a cloud provider. A node is eligible for exclusion if labelled with "`alpha.service-controller.kubernetes.io/exclude-balancer`" key or `node.kubernetes.io/exclude-from-external-load-balancers`. -- `ServiceTopology`: Enable service to route traffic based upon the Node topology of the cluster. See [ServiceTopology](https://kubernetes.io/docs/concepts/services-networking/service-topology/) for more details. +- `ServiceTopology`: Enable service to route traffic based upon the Node topology of the cluster. See [ServiceTopology](/docs/concepts/services-networking/service-topology/) for more details. - `StartupProbe`: Enable the [startup](/docs/concepts/workloads/pods/pod-lifecycle/#when-should-you-use-a-startup-probe) probe in the kubelet. - `StorageObjectInUseProtection`: Postpone the deletion of PersistentVolume or PersistentVolumeClaim objects if they are still being used. @@ -517,4 +519,3 @@ Each feature gate is designed for enabling/disabling a specific feature: * The [deprecation policy](/docs/reference/using-api/deprecation-policy/) for Kubernetes explains the project's approach to removing features and components. - diff --git a/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md b/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md index d51061014020e..4a788b1ab975f 100644 --- a/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md +++ b/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md @@ -14,7 +14,7 @@ and capacity. The scheduler needs to take into account individual and collective resource requirements, quality of service requirements, hardware/software/policy constraints, affinity and anti-affinity specifications, data locality, inter-workload interference, deadlines, and so on. Workload-specific requirements will be exposed -through the API as necessary. See [scheduling](https://kubernetes.io/docs/concepts/scheduling-eviction/) +through the API as necessary. See [scheduling](/docs/concepts/scheduling-eviction/) for more information about scheduling and the kube-scheduler component. ``` @@ -511,8 +511,3 @@ kube-scheduler [flags] - - - - - diff --git a/content/en/docs/reference/glossary/endpoint.md b/content/en/docs/reference/glossary/endpoint.md new file mode 100644 index 0000000000000..3934faa18af4d --- /dev/null +++ b/content/en/docs/reference/glossary/endpoint.md @@ -0,0 +1,17 @@ +--- +title: Endpoints +id: endpoints +date: 2020-04-23 +full_link: +short_description: > + Endpoints track the IP addresses of Pods with matching Service selectors. + +aka: +tags: +- networking +--- + Endpoints track the IP addresses of Pods with matching {{< glossary_tooltip text="selectors" term_id="selector" >}}. + + +Endpoints can be configured manually for {{< glossary_tooltip text="Services" term_id="service" >}} without selectors specified. +The {{< glossary_tooltip text="EndpointSlice" term_id="endpoint-slice" >}} resource provides a scalable and extensible alternative to Endpoints. 
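As a hedged illustration of the Endpoints glossary entry above (a Service without a selector paired with a manually managed Endpoints object), the sketch below uses illustrative names and an example IP; it is not content from this PR. The Endpoints object must share the Service's name and namespace.

```shell
# Hedged sketch: a selector-less Service plus a manually maintained Endpoints
# object pointing at an external backend. Names and the IP are illustrative.
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Service
metadata:
  name: external-db
spec:
  ports:
    - protocol: TCP
      port: 5432
      targetPort: 5432
---
apiVersion: v1
kind: Endpoints
metadata:
  name: external-db
subsets:
  - addresses:
      - ip: 192.0.2.10
    ports:
      - port: 5432
EOF
```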
diff --git a/content/en/docs/reference/glossary/pod.md b/content/en/docs/reference/glossary/pod.md index f14393072c559..b551dead19cb2 100755 --- a/content/en/docs/reference/glossary/pod.md +++ b/content/en/docs/reference/glossary/pod.md @@ -2,7 +2,7 @@ title: Pod id: pod date: 2018-04-12 -full_link: /docs/concepts/workloads/pods/pod-overview/ +full_link: /docs/concepts/workloads/pods/ short_description: > A Pod represents a set of running containers in your cluster. diff --git a/content/en/docs/reference/glossary/volume.md b/content/en/docs/reference/glossary/volume.md index 2076378bb33a4..22cebca917f5a 100755 --- a/content/en/docs/reference/glossary/volume.md +++ b/content/en/docs/reference/glossary/volume.md @@ -6,15 +6,15 @@ full_link: /docs/concepts/storage/volumes/ short_description: > A directory containing data, accessible to the containers in a pod. -aka: +aka: tags: - core-object - fundamental --- A directory containing data, accessible to the {{< glossary_tooltip text="containers" term_id="container" >}} in a {{< glossary_tooltip term_id="pod" >}}. - + A Kubernetes volume lives as long as the Pod that encloses it. Consequently, a volume outlives any containers that run within the Pod, and data in the volume is preserved across container restarts. -See [storage](https://kubernetes.io/docs/concepts/storage/) for more information. +See [storage](/docs/concepts/storage/) for more information. diff --git a/content/en/docs/reference/issues-security/security.md b/content/en/docs/reference/issues-security/security.md index b9b1ce7c3719f..2d16e37662d4b 100644 --- a/content/en/docs/reference/issues-security/security.md +++ b/content/en/docs/reference/issues-security/security.md @@ -19,7 +19,7 @@ This page describes Kubernetes security and disclosure information. Join the [kubernetes-security-announce](https://groups.google.com/forum/#!forum/kubernetes-security-announce) group for emails about security and major API announcements. -You can also subscribe to an RSS feed of the above using [this link](https://groups.google.com/forum/feed/kubernetes-announce/msgs/rss_v2_0.xml?num=50). +You can also subscribe to an RSS feed of the above using [this link](https://groups.google.com/forum/feed/kubernetes-security-announce/msgs/rss_v2_0.xml?num=50). ## Report a Vulnerability diff --git a/content/en/docs/reference/kubectl/cheatsheet.md b/content/en/docs/reference/kubectl/cheatsheet.md index 36629d2c29821..dda75574a98c8 100644 --- a/content/en/docs/reference/kubectl/cheatsheet.md +++ b/content/en/docs/reference/kubectl/cheatsheet.md @@ -166,6 +166,10 @@ kubectl get pv --sort-by=.spec.capacity.storage kubectl get pods --selector=app=cassandra -o \ jsonpath='{.items[*].metadata.labels.version}' +# Retrieve the value of a key with dots, e.g. 'ca.crt' +kubectl get configmap myconfig \ + -o jsonpath='{.data.ca\.crt}' + # Get all worker nodes (use a selector to exclude results that have a label # named 'node-role.kubernetes.io/master') kubectl get node --selector='!node-role.kubernetes.io/master' diff --git a/content/en/docs/reference/scheduling/profiles.md b/content/en/docs/reference/scheduling/profiles.md index fe28d10bd1dab..3cb4eb71b3f57 100644 --- a/content/en/docs/reference/scheduling/profiles.md +++ b/content/en/docs/reference/scheduling/profiles.md @@ -91,7 +91,7 @@ extension points: - `NodeResourcesFit`: Checks if the node has all the resources that the Pod is requesting. Extension points: `PreFilter`, `Filter`. 
-- `NodeResourcesBallancedAllocation`: Favors nodes that would obtain a more +- `NodeResourcesBalancedAllocation`: Favors nodes that would obtain a more balanced resource usage if the Pod is scheduled there. Extension points: `Score`. - `NodeResourcesLeastAllocated`: Favors nodes that have a low allocation of diff --git a/content/en/docs/reference/using-api/api-concepts.md b/content/en/docs/reference/using-api/api-concepts.md index f83c43c00f9f3..f2109071a18cd 100644 --- a/content/en/docs/reference/using-api/api-concepts.md +++ b/content/en/docs/reference/using-api/api-concepts.md @@ -706,9 +706,9 @@ Resource versions are strings that identify the server's internal version of an Clients find resource versions in resources, including the resources in watch events, and list responses returned from the server: -[v1.meta/ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#objectmeta-v1-meta) - The `metadata.resourceVersion` of a resource instance identifies the resource version the instance was last modified at. +[v1.meta/ObjectMeta](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#objectmeta-v1-meta) - The `metadata.resourceVersion` of a resource instance identifies the resource version the instance was last modified at. -[v1.meta/ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#listmeta-v1-meta) - The `metadata.resourceVersion` of a resource collection (i.e. a list response) identifies the resource version at which the list response was constructed. +[v1.meta/ListMeta](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#listmeta-v1-meta) - The `metadata.resourceVersion` of a resource collection (i.e. a list response) identifies the resource version at which the list response was constructed. ### The ResourceVersion Parameter @@ -726,11 +726,11 @@ For get and list, the semantics of resource version are: **List:** -| paging | resourceVersion unset | resourceVersion="0" | resourceVersion="{value other than 0}" | -|-------------------------------|-----------------------|------------------------------------------------|----------------------------------------| -| limit unset | Most Recent | Any | Not older than | -| limit="n", continue unset | Most Recent | Any | Exact | -| limit="n", continue="" | Continue Token, Exact | Invalid, but treated as Continue Token, Exact | Invalid, HTTP `400 Bad Request` | +| paging | resourceVersion unset | resourceVersion="0" | resourceVersion="{value other than 0}" | +|---------------------------------|-----------------------|------------------------------------------------|----------------------------------------| +| limit unset | Most Recent | Any | Not older than | +| limit="n", continue unset | Most Recent | Any | Exact | +| limit="n", continue="\" | Continue Token, Exact | Invalid, but treated as Continue Token, Exact | Invalid, HTTP `400 Bad Request` | The meaning of the get and list semantics are: diff --git a/content/en/docs/reference/using-api/api-overview.md b/content/en/docs/reference/using-api/api-overview.md index 25b7d46af9e4f..c0adee3bdb4b0 100644 --- a/content/en/docs/reference/using-api/api-overview.md +++ b/content/en/docs/reference/using-api/api-overview.md @@ -33,7 +33,7 @@ if you are writing an application using the Kubernetes API. To eliminate fields or restructure resource representations, Kubernetes supports multiple API versions, each at a different API path. For example: `/api/v1` or -`/apis/extensions/v1beta1`. 
+`/apis/rbac.authorization.k8s.io/v1alpha1`. The version is set at the API level rather than at the resource or field level to: @@ -106,10 +106,3 @@ When you enable or disable groups or resources, you need to restart the apiserve to pick up the `--runtime-config` changes. {{< /note >}} -## Enabling specific resources in the extensions/v1beta1 group - -DaemonSets, Deployments, StatefulSet, NetworkPolicies, PodSecurityPolicies and ReplicaSets in the `extensions/v1beta1` API group are disabled by default. -For example: to enable deployments and daemonsets, set -`--runtime-config=extensions/v1beta1/deployments=true,extensions/v1beta1/daemonsets=true`. - -{{< note >}}Individual resource enablement/disablement is only supported in the `extensions/v1beta1` API group for legacy reasons.{{< /note >}} diff --git a/content/en/docs/reference/using-api/health-checks.md b/content/en/docs/reference/using-api/health-checks.md new file mode 100644 index 0000000000000..a7be3b267f46a --- /dev/null +++ b/content/en/docs/reference/using-api/health-checks.md @@ -0,0 +1,103 @@ +--- +title: Kubernetes API health endpoints +reviewers: +- logicalhan +content_type: concept +weight: 50 +--- + + +The Kubernetes {{< glossary_tooltip term_id="kube-apiserver" text="API server" >}} provides API endpoints to indicate the current status of the API server. +This page describes these API endpoints and explains how you can use them. + + + +## API endpoints for health + +The Kubernetes API server provides 3 API endpoints (`healthz`, `livez` and `readyz`) to indicate the current status of the API server. +The `healthz` endpoint is deprecated (since Kubernetes v1.16), and you should use the more specific `livez` and `readyz` endpoints instead. +The `livez` endpoint can be used with the `--livez-grace-period` [flag](/docs/reference/command-line-tools-reference/kube-apiserver) to specify the startup duration. +For a graceful shutdown you can specify the `--shutdown-delay-duration` [flag](/docs/reference/command-line-tools-reference/kube-apiserver) with the `/readyz` endpoint. +Machines that check the `healthz`/`livez`/`readyz` endpoints of the API server should rely on the HTTP status code. +A status code `200` indicates the API server is `healthy`/`live`/`ready`, depending on the called endpoint. +The more verbose options shown below are intended to be used by human operators to debug their cluster or specifically the state of the API server. + +The following examples will show how you can interact with the health API endpoints. + +For all endpoints you can use the `verbose` parameter to print out the checks and their status.
+This can be useful for a human operator to debug the current status of the API server; it is not intended to be consumed by a machine: + + ```shell + curl -k https://localhost:6443/livez?verbose + ``` + +or from a remote host with authentication: + + ```shell + kubectl get --raw='/readyz?verbose' + ``` + +The output will look like this: + + [+]ping ok + [+]log ok + [+]etcd ok + [+]poststarthook/start-kube-apiserver-admission-initializer ok + [+]poststarthook/generic-apiserver-start-informers ok + [+]poststarthook/start-apiextensions-informers ok + [+]poststarthook/start-apiextensions-controllers ok + [+]poststarthook/crd-informer-synced ok + [+]poststarthook/bootstrap-controller ok + [+]poststarthook/rbac/bootstrap-roles ok + [+]poststarthook/scheduling/bootstrap-system-priority-classes ok + [+]poststarthook/start-cluster-authentication-info-controller ok + [+]poststarthook/start-kube-aggregator-informers ok + [+]poststarthook/apiservice-registration-controller ok + [+]poststarthook/apiservice-status-available-controller ok + [+]poststarthook/kube-apiserver-autoregistration ok + [+]autoregister-completion ok + [+]poststarthook/apiservice-openapi-controller ok + healthz check passed + +The Kubernetes API server also supports excluding specific checks. +The query parameters can also be combined like in this example: + + ```shell + curl -k 'https://localhost:6443/readyz?verbose&exclude=etcd' + ``` + +The output shows that the `etcd` check is excluded: + + [+]ping ok + [+]log ok + [+]etcd excluded: ok + [+]poststarthook/start-kube-apiserver-admission-initializer ok + [+]poststarthook/generic-apiserver-start-informers ok + [+]poststarthook/start-apiextensions-informers ok + [+]poststarthook/start-apiextensions-controllers ok + [+]poststarthook/crd-informer-synced ok + [+]poststarthook/bootstrap-controller ok + [+]poststarthook/rbac/bootstrap-roles ok + [+]poststarthook/scheduling/bootstrap-system-priority-classes ok + [+]poststarthook/start-cluster-authentication-info-controller ok + [+]poststarthook/start-kube-aggregator-informers ok + [+]poststarthook/apiservice-registration-controller ok + [+]poststarthook/apiservice-status-available-controller ok + [+]poststarthook/kube-apiserver-autoregistration ok + [+]autoregister-completion ok + [+]poststarthook/apiservice-openapi-controller ok + [+]shutdown ok + healthz check passed + +## Individual health checks + +{{< feature-state state="alpha" >}} + +Each individual health check exposes an HTTP endpoint and can be checked individually. +The schema for the individual health checks is `/livez/<healthcheck-name>` where `livez` and `readyz` can be used to indicate if you want to check the liveness or the readiness of the API server. +The `<healthcheck-name>` path can be discovered using the `verbose` flag from above and corresponds to the path between `[+]` and `ok`. +These individual health checks should not be consumed by machines but can be helpful for a human operator to debug a system: + + ```shell + curl -k https://localhost:6443/livez/etcd + ``` diff --git a/content/en/docs/setup/_index.md b/content/en/docs/setup/_index.md index 91b734953c31d..59db384258a5b 100644 --- a/content/en/docs/setup/_index.md +++ b/content/en/docs/setup/_index.md @@ -20,35 +20,20 @@ card: -This section covers different options to set up and run Kubernetes. - -Different Kubernetes solutions meet different requirements: ease of maintenance, security, control, available resources, and expertise required to operate and manage a cluster.
- -You can deploy a Kubernetes cluster on a local machine, cloud, on-prem datacenter, or choose a managed Kubernetes cluster. You can also create custom solutions across a wide range of cloud providers, or bare metal environments. - -More simply, you can create a Kubernetes cluster in learning and production environments. - +This section lists the different ways to set up and run Kubernetes. +When you install Kubernetes, choose an installation type based on: ease of maintenance, security, +control, available resources, and expertise required to operate and manage a cluster. +You can deploy a Kubernetes cluster on a local machine, cloud, on-prem datacenter, or choose a managed Kubernetes cluster. There are also custom solutions across a wide range of cloud providers, or bare metal environments. ## Learning environment -If you're learning Kubernetes, use the Docker-based solutions: tools supported by the Kubernetes community, or tools in the ecosystem to set up a Kubernetes cluster on a local machine. - -{{< table caption="Local machine solutions table that lists the tools supported by the community and the ecosystem to deploy Kubernetes." >}} - -|Community |Ecosystem | -| ------------ | -------- | -| [Minikube](/docs/setup/learning-environment/minikube/) | [Docker Desktop](https://www.docker.com/products/docker-desktop)| -| [kind (Kubernetes IN Docker)](/docs/setup/learning-environment/kind/) | [Minishift](https://docs.okd.io/latest/minishift/)| -| | [MicroK8s](https://microk8s.io/)| - +If you're learning Kubernetes, use the tools supported by the Kubernetes community, or tools in the ecosystem to set up a Kubernetes cluster on a local machine. ## Production environment When evaluating a solution for a production environment, consider which aspects of operating a Kubernetes cluster (or _abstractions_) you want to manage yourself or offload to a provider. [Kubernetes Partners](https://kubernetes.io/partners/#conformance) includes a list of [Certified Kubernetes](https://github.com/cncf/k8s-conformance/#certified-kubernetes) providers. - - diff --git a/content/en/docs/setup/best-practices/certificates.md b/content/en/docs/setup/best-practices/certificates.md index a85d44e0f4ce4..9e27b4094301d 100644 --- a/content/en/docs/setup/best-practices/certificates.md +++ b/content/en/docs/setup/best-practices/certificates.md @@ -28,7 +28,7 @@ Kubernetes requires PKI for the following operations: * Client certificate for the API server to talk to etcd * Client certificate/kubeconfig for the controller manager to talk to the API server * Client certificate/kubeconfig for the scheduler to talk to the API server. -* Client and server certificates for the [front-proxy][proxy] +* Client and server certificates for the [front-proxy](/docs/tasks/extend-kubernetes/configure-aggregation-layer/) {{< note >}} `front-proxy` certificates are required only if you run kube-proxy to support [an extension API server](/docs/tasks/extend-kubernetes/setup-extension-api-server/). 
@@ -54,7 +54,7 @@ Required CAs: |------------------------|---------------------------|----------------------------------| | ca.crt,key | kubernetes-ca | Kubernetes general CA | | etcd/ca.crt,key | etcd-ca | For all etcd-related functions | -| front-proxy-ca.crt,key | kubernetes-front-proxy-ca | For the [front-end proxy][proxy] | +| front-proxy-ca.crt,key | kubernetes-front-proxy-ca | For the [front-end proxy](/docs/tasks/extend-kubernetes/configure-aggregation-layer/) | On top of the above CAs, it is also necessary to get a public/private key pair for service account management, `sa.key` and `sa.pub`. @@ -74,10 +74,11 @@ Required certificates: | kube-apiserver-kubelet-client | kubernetes-ca | system:masters | client | | | front-proxy-client | kubernetes-front-proxy-ca | | client | | -[1]: any other IP or DNS name you contact your cluster on (as used by [kubeadm][kubeadm] the load balancer stable IP and/or DNS name, `kubernetes`, `kubernetes.default`, `kubernetes.default.svc`, +[1]: any other IP or DNS name you contact your cluster on (as used by [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/) +the load balancer stable IP and/or DNS name, `kubernetes`, `kubernetes.default`, `kubernetes.default.svc`, `kubernetes.default.svc.cluster`, `kubernetes.default.svc.cluster.local`) -where `kind` maps to one or more of the [x509 key usage][usage] types: +where `kind` maps to one or more of the [x509 key usage](https://godoc.org/k8s.io/api/certificates/v1beta1#KeyUsage) types: | kind | Key usage | |--------|---------------------------------------------------------------------------------| @@ -99,7 +100,8 @@ For kubeadm users only: ### Certificate paths -Certificates should be placed in a recommended path (as used by [kubeadm][kubeadm]). Paths should be specified using the given argument regardless of location. +Certificates should be placed in a recommended path (as used by [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/)). +Paths should be specified using the given argument regardless of location. | Default CN | recommended key path | recommended cert path | command | key argument | cert argument | |------------------------------|------------------------------|-----------------------------|----------------|------------------------------|-------------------------------------------| @@ -160,8 +162,4 @@ These files are used as follows: | controller-manager.conf | kube-controller-manager | Must be added to manifest in `manifests/kube-controller-manager.yaml` | | scheduler.conf | kube-scheduler | Must be added to manifest in `manifests/kube-scheduler.yaml` | -[usage]: https://godoc.org/k8s.io/api/certificates/v1beta1#KeyUsage -[kubeadm]: /docs/reference/setup-tools/kubeadm/kubeadm/ -[proxy]: /docs/tasks/extend-kubernetes/configure-aggregation-layer/ - diff --git a/content/en/docs/setup/best-practices/cluster-large.md b/content/en/docs/setup/best-practices/cluster-large.md index c8692c88728cf..2b8f7b487fb00 100644 --- a/content/en/docs/setup/best-practices/cluster-large.md +++ b/content/en/docs/setup/best-practices/cluster-large.md @@ -20,7 +20,7 @@ At {{< param "version" >}}, Kubernetes supports clusters with up to 5000 nodes. A cluster is a set of nodes (physical or virtual machines) running Kubernetes agents, managed by a "master" (the cluster-level control plane). 
-Normally the number of nodes in a cluster is controlled by the value `NUM_NODES` in the platform-specific `config-default.sh` file (for example, see [GCE's `config-default.sh`](http://releases.k8s.io/{{< param "githubbranch" >}}/cluster/gce/config-default.sh)). +Normally the number of nodes in a cluster is controlled by the value `NUM_NODES` in the platform-specific `config-default.sh` file (for example, see [GCE's `config-default.sh`](https://releases.k8s.io/{{< param "githubbranch" >}}/cluster/gce/config-default.sh)). Simply changing that value to something very large, however, may cause the setup script to fail for many cloud providers. A GCE deployment, for example, will run in to quota issues and fail to bring the cluster up. @@ -80,7 +80,7 @@ On AWS, master node sizes are currently set at cluster startup time and do not c ### Addon Resources -To prevent memory leaks or other resource issues in [cluster addons](https://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons) from consuming all the resources available on a node, Kubernetes sets resource limits on addon containers to limit the CPU and Memory resources they can consume (See PR [#10653](http://pr.k8s.io/10653/files) and [#10778](http://pr.k8s.io/10778/files)). +To prevent memory leaks or other resource issues in [cluster addons](https://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons) from consuming all the resources available on a node, Kubernetes sets resource limits on addon containers to limit the CPU and Memory resources they can consume (See PR [#10653](https://pr.k8s.io/10653/files) and [#10778](https://pr.k8s.io/10778/files)). For example: @@ -94,28 +94,26 @@ For example: memory: 200Mi ``` -Except for Heapster, these limits are static and are based on data we collected from addons running on 4-node clusters (see [#10335](http://issue.k8s.io/10335#issuecomment-117861225)). The addons consume a lot more resources when running on large deployment clusters (see [#5880](http://issue.k8s.io/5880#issuecomment-113984085)). So, if a large cluster is deployed without adjusting these values, the addons may continuously get killed because they keep hitting the limits. +Except for Heapster, these limits are static and are based on data we collected from addons running on 4-node clusters (see [#10335](https://issue.k8s.io/10335#issuecomment-117861225)). The addons consume a lot more resources when running on large deployment clusters (see [#5880](http://issue.k8s.io/5880#issuecomment-113984085)). So, if a large cluster is deployed without adjusting these values, the addons may continuously get killed because they keep hitting the limits. 
To avoid running into cluster addon resource issues, when creating a cluster with many nodes, consider the following: * Scale memory and CPU limits for each of the following addons, if used, as you scale up the size of cluster (there is one replica of each handling the entire cluster so memory and CPU usage tends to grow proportionally with size/load on cluster): - * [InfluxDB and Grafana](http://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml) - * [kubedns, dnsmasq, and sidecar](http://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/dns/kube-dns/kube-dns.yaml.in) - * [Kibana](http://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml) + * [InfluxDB and Grafana](https://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml) + * [kubedns, dnsmasq, and sidecar](https://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/dns/kube-dns/kube-dns.yaml.in) + * [Kibana](https://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml) * Scale number of replicas for the following addons, if used, along with the size of cluster (there are multiple replicas of each so increasing replicas should help handle increased load, but, since load per replica also increases slightly, also consider increasing CPU/memory limits): - * [elasticsearch](http://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml) + * [elasticsearch](https://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml) * Increase memory and CPU limits slightly for each of the following addons, if used, along with the size of cluster (there is one replica per node but CPU/memory usage increases slightly along with cluster load/size as well): - * [FluentD with ElasticSearch Plugin](http://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml) - * [FluentD with GCP Plugin](http://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml) + * [FluentD with ElasticSearch Plugin](https://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml) + * [FluentD with GCP Plugin](https://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml) Heapster's resource limits are set dynamically based on the initial size of your cluster (see [#16185](http://issue.k8s.io/16185) and [#22940](http://issue.k8s.io/22940)). If you find that Heapster is running out of resources, you should adjust the formulas that compute heapster memory request (see those PRs for details). -For directions on how to detect if addon containers are hitting resource limits, see the [Troubleshooting section of Compute Resources](/docs/concepts/configuration/manage-compute-resources-container/#troubleshooting). - -In the [future](http://issue.k8s.io/13048), we anticipate to set all cluster addon resource limits based on cluster size, and to dynamically adjust them if you grow or shrink your cluster. -We welcome PRs that implement those features. +For directions on how to detect if addon containers are hitting resource limits, see the +[Troubleshooting section of Compute Resources](/docs/concepts/configuration/manage-resources-containers/#troubleshooting). 
### Allowing minor node failure at startup @@ -126,3 +124,4 @@ running `kube-up.sh` set the environment variable `ALLOWED_NOTREADY_NODES` to wh with. This will allow `kube-up.sh` to succeed with fewer than `NUM_NODES` coming up. Depending on the reason for the failure, those additional nodes may join later or the cluster may remain at a size of `NUM_NODES - ALLOWED_NOTREADY_NODES`. + diff --git a/content/en/docs/setup/best-practices/multiple-zones.md b/content/en/docs/setup/best-practices/multiple-zones.md index ab61c839a94ae..7c2622641b865 100644 --- a/content/en/docs/setup/best-practices/multiple-zones.md +++ b/content/en/docs/setup/best-practices/multiple-zones.md @@ -78,7 +78,7 @@ federation support). a single master node by default. While services are highly available and can tolerate the loss of a zone, the control plane is located in a single zone. Users that want a highly available control -plane should follow the [high availability](/docs/admin/high-availability) instructions. +plane should follow the [high availability](/docs/setup/production-environment/tools/kubeadm/high-availability/) instructions. ### Volume limitations The following limitations are addressed with [topology-aware volume binding](/docs/concepts/storage/storage-classes/#volume-binding-mode). diff --git a/content/en/docs/setup/learning-environment/minikube.md b/content/en/docs/setup/learning-environment/minikube.md index a794141f2d479..009be9adc8985 100644 --- a/content/en/docs/setup/learning-environment/minikube.md +++ b/content/en/docs/setup/learning-environment/minikube.md @@ -198,7 +198,7 @@ This brief demo guides you on how to start, use, and delete Minikube locally. Fo The `minikube start` command can be used to start your cluster. This command creates and configures a Virtual Machine that runs a single-node Kubernetes cluster. -This command also configures your [kubectl](/docs/user-guide/kubectl-overview/) installation to communicate with this cluster. +This command also configures your [kubectl](/docs/reference/kubectl/overview/) installation to communicate with this cluster. {{< note >}} If you are behind a web proxy, you need to pass this information to the `minikube start` command: @@ -514,6 +514,6 @@ For more information about Minikube, see the [proposal](https://git.k8s.io/commu ## Community -Contributions, questions, and comments are all welcomed and encouraged! Minikube developers hang out on [Slack](https://kubernetes.slack.com) in the #minikube channel (get an invitation [here](http://slack.kubernetes.io/)). We also have the [kubernetes-dev Google Groups mailing list](https://groups.google.com/forum/#!forum/kubernetes-dev). If you are posting to the list please prefix your subject with "minikube: ". +Contributions, questions, and comments are all welcomed and encouraged! Minikube developers hang out on [Slack](https://kubernetes.slack.com) in the `#minikube` channel (get an invitation [here](https://slack.kubernetes.io/)). We also have the [kubernetes-dev Google Groups mailing list](https://groups.google.com/forum/#!forum/kubernetes-dev). If you are posting to the list please prefix your subject with "minikube: ". 
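A minimal, hedged sketch of the local workflow that the Minikube section above describes (`minikube start` configuring `kubectl` for a single-node cluster); the deployment name, image, and port are illustrative choices, not taken from this PR.

```shell
# Hedged sketch: start a local single-node cluster, run one Deployment, expose it,
# print its URL, then stop the cluster. Names and image are illustrative.
minikube start
kubectl create deployment hello-minikube --image=k8s.gcr.io/echoserver:1.4
kubectl expose deployment hello-minikube --type=NodePort --port=8080
minikube service hello-minikube --url
minikube stop
```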
diff --git a/content/en/docs/setup/production-environment/on-premises-vm/cloudstack.md b/content/en/docs/setup/production-environment/on-premises-vm/cloudstack.md index 1f7d1fd81fbb4..c440f14b31611 100644 --- a/content/en/docs/setup/production-environment/on-premises-vm/cloudstack.md +++ b/content/en/docs/setup/production-environment/on-premises-vm/cloudstack.md @@ -9,12 +9,10 @@ content_type: concept [CloudStack](https://cloudstack.apache.org/) is a software to build public and private clouds based on hardware virtualization principles (traditional IaaS). To deploy Kubernetes on CloudStack there are several possibilities depending on the Cloud being used and what images are made available. CloudStack also has a vagrant plugin available, hence Vagrant could be used to deploy Kubernetes either using the existing shell provisioner or using new Salt based recipes. -[CoreOS](http://coreos.com) templates for CloudStack are built [nightly](http://stable.release.core-os.net/amd64-usr/current/). CloudStack operators need to [register](http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/latest/templates.html) this template in their cloud before proceeding with these Kubernetes deployment instructions. +[CoreOS](https://coreos.com) templates for CloudStack are built [nightly](https://stable.release.core-os.net/amd64-usr/current/). CloudStack operators need to [register](https://docs.cloudstack.apache.org/projects/cloudstack-administration/en/latest/templates.html) this template in their cloud before proceeding with these Kubernetes deployment instructions. This guide uses a single [Ansible playbook](https://github.com/apachecloudstack/k8s), which is completely automated and can deploy Kubernetes on a CloudStack based Cloud using CoreOS images. The playbook, creates an ssh key pair, creates a security group and associated rules and finally starts coreOS instances configured via cloud-init. - - ## Prerequisites @@ -112,10 +110,7 @@ e9af8293... role=node ## Support Level - IaaS Provider | Config. Mgmt | OS | Networking | Docs | Conforms | Support Level -------------------- | ------------ | ------ | ---------- | --------------------------------------------- | ---------| ---------------------------- CloudStack | Ansible | CoreOS | flannel | [docs](/docs/setup/production-environment/on-premises-vm/cloudstack/) | | Community ([@Guiques](https://github.com/ltupin/)) - - diff --git a/content/en/docs/setup/production-environment/tools/kops.md b/content/en/docs/setup/production-environment/tools/kops.md index 338dbee0e5c30..8394c28fafec5 100644 --- a/content/en/docs/setup/production-environment/tools/kops.md +++ b/content/en/docs/setup/production-environment/tools/kops.md @@ -27,7 +27,7 @@ kops is an automated provisioning system: * You must [install](https://github.com/kubernetes/kops#installing) `kops` on a 64-bit (AMD64 and Intel 64) device architecture. -* You must have an [AWS account](https://docs.aws.amazon.com/polly/latest/dg/setting-up.html), generate [IAM keys](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys) and [configure](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html#cli-quick-configuration) them. 
+* You must have an [AWS account](https://docs.aws.amazon.com/polly/latest/dg/setting-up.html), generate [IAM keys](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys) and [configure](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html#cli-quick-configuration) them. The IAM user will need [adequate permissions](https://github.com/kubernetes/kops/blob/master/docs/getting_started/aws.md#setup-iam-user). @@ -140,7 +140,7 @@ you choose for organization reasons (e.g. you are allowed to create records unde but not under `example.com`). Let's assume you're using `dev.example.com` as your hosted zone. You create that hosted zone using -the [normal process](http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingNewSubdomain.html), or +the [normal process](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingNewSubdomain.html), or with a command such as `aws route53 create-hosted-zone --name dev.example.com --caller-reference 1`. You must then set up your NS records in the parent domain, so that records in the domain will resolve. Here, @@ -231,9 +231,8 @@ See the [list of add-ons](/docs/concepts/cluster-administration/addons/) to expl ## {{% heading "whatsnext" %}} -* Learn more about Kubernetes [concepts](/docs/concepts/) and [`kubectl`](/docs/user-guide/kubectl-overview/). +* Learn more about Kubernetes [concepts](/docs/concepts/) and [`kubectl`](/docs/reference/kubectl/overview/). * Learn more about `kops` [advanced usage](https://kops.sigs.k8s.io/) for tutorials, best practices and advanced configuration options. * Follow `kops` community discussions on Slack: [community discussions](https://github.com/kubernetes/kops#other-ways-to-communicate-with-the-contributors) * Contribute to `kops` by addressing or raising an issue [GitHub Issues](https://github.com/kubernetes/kops/issues) - diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md index 1afd13c4e7585..82184f7784562 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md @@ -8,7 +8,7 @@ weight: 30 -The `kubeadm` tool helps you bootstrap a minimum viable Kubernetes cluster that conforms to best practices. In fact, you can use `kubeadm` to set up a cluster that will pass the [Kubernetes Conformance tests](https://kubernetes.io/blog/2017/10/software-conformance-certification). +The `kubeadm` tool helps you bootstrap a minimum viable Kubernetes cluster that conforms to best practices. In fact, you can use `kubeadm` to set up a cluster that will pass the [Kubernetes Conformance tests](https://kubernetes.io/blog/2017/10/software-conformance-certification). `kubeadm` also supports other cluster lifecycle functions, such as [bootstrap tokens](/docs/reference/access-authn-authz/bootstrap-tokens/) and cluster upgrades. @@ -42,7 +42,7 @@ To follow this guide, you need: You also need to use a version of `kubeadm` that can deploy the version of Kubernetes that you want to use in your new cluster. -[Kubernetes' version and version skew support policy](https://kubernetes.io/docs/setup/release/version-skew-policy/#supported-versions) applies to `kubeadm` as well as to Kubernetes overall. 
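To sanity-check the NS delegation step described above for the example `dev.example.com` hosted zone, a quick DNS lookup can confirm that records in the subdomain will resolve; `dig` is assumed to be available on your workstation.

```shell
# The answer should list the NS records Route 53 assigned to dev.example.com;
# an empty answer usually means the parent-domain NS records are missing.
dig ns dev.example.com +short
```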
+[Kubernetes' version and version skew support policy](/docs/setup/release/version-skew-policy/#supported-versions) applies to `kubeadm` as well as to Kubernetes overall. Check that policy to learn about what versions of Kubernetes and `kubeadm` are supported. This page is written for Kubernetes {{< param "version" >}}. @@ -254,11 +254,11 @@ Read all of this advice carefully before proceeding. **You must deploy a {{< glossary_tooltip text="Container Network Interface" term_id="cni" >}} -(CNI) based Pod network add-on so that your Pods can communicate with each other. +(CNI) based Pod network add-on so that your Pods can communicate with each other. Cluster DNS (CoreDNS) will not start up before a network is installed.** - Take care that your Pod network must not overlap with any of the host - networks: you are likely to see problems if there is any overlap. + networks: you are likely to see problems if there is any overlap. (If you find a collision between your network plugin’s preferred Pod network and some of your host networks, you should think of a suitable CIDR block to use instead, then use that during `kubeadm init` with @@ -266,13 +266,13 @@ Cluster DNS (CoreDNS) will not start up before a network is installed.** - By default, `kubeadm` sets up your cluster to use and enforce use of [RBAC](/docs/reference/access-authn-authz/rbac/) (role based access - control). + control). Make sure that your Pod network plugin supports RBAC, and so do any manifests that you use to deploy it. - If you want to use IPv6--either dual-stack, or single-stack IPv6 only networking--for your cluster, make sure that your Pod network plugin - supports IPv6. + supports IPv6. IPv6 support was added to CNI in [v0.6.0](https://github.com/containernetworking/cni/releases/tag/v0.6.0). {{< /caution >}} @@ -284,10 +284,10 @@ tracker instead of the kubeadm or kubernetes issue trackers. {{< /note >}} Several external projects provide Kubernetes Pod networks using CNI, some of which also -support [Network Policy](/docs/concepts/services-networking/networkpolicies/). +support [Network Policy](/docs/concepts/services-networking/network-policies/). -See the list of available -[networking and network policy add-ons](https://kubernetes.io/docs/concepts/cluster-administration/addons/#networking-and-network-policy). +See a list of add-ons that implement the +[Kubernetes networking model](/docs/concepts/cluster-administration/networking/#how-to-implement-the-kubernetes-networking-model). You can install a Pod network add-on with the following command on the control-plane node or a node that has the kubeconfig credentials: @@ -297,79 +297,6 @@ kubectl apply -f ``` You can install only one Pod network per cluster. -Below you can find installation instructions for some popular Pod network plugins: - -{{< tabs name="tabs-pod-install" >}} - -{{% tab name="Calico" %}} -[Calico](https://docs.projectcalico.org/latest/introduction/) is a networking and network policy provider. Calico supports a flexible set of networking options so you can choose the most efficient option for your situation, including non-overlay and overlay networks, with or without BGP. Calico uses the same engine to enforce network policy for hosts, pods, and (if using Istio & Envoy) applications at the service mesh layer. Calico works on several architectures, including `amd64`, `arm64`, and `ppc64le`. 
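As a hedged sketch of the Pod network guidance above: the CIDR below is only an example and must not overlap any host network, and `<add-on.yaml>` stands for the manifest of whichever CNI plugin you chose, exactly as in the generic command shown in the page.

```shell
# Initialize the control plane with an explicit, non-overlapping Pod CIDR
# (10.244.0.0/16 is illustrative; pick one that fits your environment).
sudo kubeadm init --pod-network-cidr=10.244.0.0/16

# Install exactly one Pod network add-on; <add-on.yaml> is your chosen plugin's manifest.
kubectl apply -f <add-on.yaml>
```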
- -Calico will automatically detect which IP address range to use for pod IPs based on the value provided via the `--pod-network-cidr` flag or via kubeadm's configuration. - -```shell -kubectl apply -f https://docs.projectcalico.org/v3.14/manifests/calico.yaml -``` - -{{% /tab %}} - -{{% tab name="Cilium" %}} - -To deploy Cilium you just need to run: - -```shell -kubectl create -f https://raw.githubusercontent.com/cilium/cilium/v1.8/install/kubernetes/quick-install.yaml -``` - -Once all Cilium Pods are marked as `READY`, you start using your cluster. - -```shell -kubectl get pods -n kube-system --selector=k8s-app=cilium -``` -The output is similar to this: -``` -NAME READY STATUS RESTARTS AGE -cilium-drxkl 1/1 Running 0 18m -``` - -Cilium can be used as a replacement for kube-proxy, see [Kubernetes without kube-proxy](https://docs.cilium.io/en/stable/gettingstarted/kubeproxy-free). - -For more information about using Cilium with Kubernetes, see [Kubernetes Install guide for Cilium](https://docs.cilium.io/en/stable/kubernetes/). - -{{% /tab %}} - -{{% tab name="Contiv-VPP" %}} -[Contiv-VPP](https://contivpp.io/) employs a programmable CNF vSwitch based on [FD.io VPP](https://fd.io/), -offering feature-rich & high-performance cloud-native networking and services. - -It implements k8s services and network policies in the user space (on VPP). - -Please refer to this installation guide: [Contiv-VPP Manual Installation](https://github.com/contiv/vpp/blob/master/docs/setup/MANUAL_INSTALL.md) -{{% /tab %}} - -{{% tab name="Kube-router" %}} - -Kube-router relies on kube-controller-manager to allocate Pod CIDR for the nodes. Therefore, use `kubeadm init` with the `--pod-network-cidr` flag. - -Kube-router provides Pod networking, network policy, and high-performing IP Virtual Server(IPVS)/Linux Virtual Server(LVS) based service proxy. - -For information on using the `kubeadm` tool to set up a Kubernetes cluster with Kube-router, please see the official [setup guide](https://github.com/cloudnativelabs/kube-router/blob/master/docs/kubeadm.md). -{{% /tab %}} - -{{% tab name="Weave Net" %}} - -For more information on setting up your Kubernetes cluster with Weave Net, please see [Integrating Kubernetes via the Addon](https://www.weave.works/docs/net/latest/kube-addon/). - -Weave Net works on `amd64`, `arm`, `arm64` and `ppc64le` platforms without any extra action required. -Weave Net sets hairpin mode by default. This allows Pods to access themselves via their Service IP address -if they don't know their PodIP. - -```shell -kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" -``` -{{% /tab %}} - -{{< /tabs >}} - Once a Pod network has been installed, you can confirm that it is working by checking that the CoreDNS Pod is `Running` in the output of `kubectl get pods --all-namespaces`. @@ -578,9 +505,9 @@ options. *
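A small sketch of the CoreDNS check mentioned above; the `k8s-app=kube-dns` label is what kubeadm-created clusters use for the CoreDNS Pods, so adjust the selector if your deployment labels them differently.

```shell
# Broad view of everything, as suggested in the text above.
kubectl get pods --all-namespaces

# Narrower check: only the CoreDNS Pods should show STATUS Running.
kubectl -n kube-system get pods -l k8s-app=kube-dns
```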
See [Upgrading kubeadm clusters](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) for details about upgrading your cluster using `kubeadm`. * Learn about advanced `kubeadm` usage in the [kubeadm reference documentation](/docs/reference/setup-tools/kubeadm/kubeadm) -* Learn more about Kubernetes [concepts](/docs/concepts/) and [`kubectl`](/docs/user-guide/kubectl-overview/). +* Learn more about Kubernetes [concepts](/docs/concepts/) and [`kubectl`](/docs/reference/kubectl/overview/). * See the [Cluster Networking](/docs/concepts/cluster-administration/networking/) page for a bigger list -of Pod network add-ons. + of Pod network add-ons. * See the [list of add-ons](/docs/concepts/cluster-administration/addons/) to explore other add-ons, including tools for logging, monitoring, network policy, visualization & control of your Kubernetes cluster. @@ -644,5 +571,3 @@ supports your chosen platform. ## Troubleshooting {#troubleshooting} If you are running into difficulties with kubeadm, please consult our [troubleshooting docs](/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/). - - diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/high-availability.md b/content/en/docs/setup/production-environment/tools/kubeadm/high-availability.md index 5584309406358..e91e9f7a600dc 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/high-availability.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/high-availability.md @@ -22,7 +22,7 @@ and environment. [This comparison topic](/docs/setup/production-environment/tool If you encounter issues with setting up the HA cluster, please provide us with feedback in the kubeadm [issue tracker](https://github.com/kubernetes/kubeadm/issues/new). -See also [The upgrade documentation](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-15). +See also [The upgrade documentation](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/). {{< caution >}} This page does not address running your cluster on a cloud provider. In a cloud @@ -30,8 +30,6 @@ environment, neither approach documented here works with Service objects of type LoadBalancer, or with dynamic PersistentVolumes. {{< /caution >}} - - ## {{% heading "prerequisites" %}} @@ -51,8 +49,6 @@ For the external etcd cluster only, you also need: - Three additional machines for etcd members - - ## First steps for both methods diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md index c3e5f57c1c0a3..2996568369dfa 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md @@ -54,6 +54,8 @@ route, we recommend you add IP route(s) so Kubernetes cluster addresses go via t ## Letting iptables see bridged traffic +Make sure that the `br_netfilter` module is loaded. This can be done by running `lsmod | grep br_netfilter`. To load it explicitly call `sudo modprobe br_netfilter`. + As a requirement for your Linux Node's iptables to correctly see bridged traffic, you should ensure `net.bridge.bridge-nf-call-iptables` is set to 1 in your `sysctl` config, e.g. ```bash @@ -64,9 +66,7 @@ EOF sudo sysctl --system ``` -Make sure that the `br_netfilter` module is loaded before this step. This can be done by running `lsmod | grep br_netfilter`. To load it explicitly call `sudo modprobe br_netfilter`. 
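Pulling the `br_netfilter` and sysctl steps above into one consolidated sketch; the file name under `/etc/sysctl.d/` is an arbitrary choice.

```shell
# Check whether the module is already loaded, and load it explicitly if not.
lsmod | grep br_netfilter || sudo modprobe br_netfilter

# Persist the bridge-netfilter settings so bridged traffic is visible to iptables.
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sudo sysctl --system
```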
- -For more details please see the [Network Plugin Requirements](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/#network-plugin-requirements) page. +For more details please see the [Network Plugin Requirements](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/#network-plugin-requirements) page. ## Check required ports @@ -218,7 +218,7 @@ sudo systemctl enable --now kubelet You have to do this until SELinux support is improved in the kubelet. - You can leave SELinux enabled if you know how to configure it but it may require settings that are not supported by kubeadm. - + {{% /tab %}} {{% tab name="Fedora CoreOS" %}} Install CNI plugins (required for most pod network): @@ -310,4 +310,3 @@ If you are running into difficulties with kubeadm, please consult our [troublesh * [Using kubeadm to Create a Cluster](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) - diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/self-hosting.md b/content/en/docs/setup/production-environment/tools/kubeadm/self-hosting.md index 334e2266f25aa..d860a88bddc37 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/self-hosting.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/self-hosting.md @@ -13,14 +13,12 @@ weight: 100 kubeadm allows you to experimentally create a _self-hosted_ Kubernetes control plane. This means that key components such as the API server, controller manager, and scheduler run as [DaemonSet pods](/docs/concepts/workloads/controllers/daemonset/) -configured via the Kubernetes API instead of [static pods](/docs/tasks/administer-cluster/static-pod/) +configured via the Kubernetes API instead of [static pods](/docs/tasks/configure-pod-container/static-pod/) configured in the kubelet via static files. To create a self-hosted cluster see the [kubeadm alpha selfhosting pivot](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/#cmd-selfhosting) command. - - #### Caveats diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md index 739b405d14267..11ddaaf8f8721 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md @@ -23,22 +23,15 @@ becoming unavailable. This task walks through the process of creating a high availability etcd cluster of three members that can be used as an external etcd when using kubeadm to set up a kubernetes cluster. - - ## {{% heading "prerequisites" %}} - * Three hosts that can talk to each other over ports 2379 and 2380. This document assumes these default ports. However, they are configurable through the kubeadm config file. -* Each host must [have docker, kubelet, and kubeadm installed][toolbox]. +* Each host must [have docker, kubelet, and kubeadm installed](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/). * Some infrastructure to copy files between hosts. For example `ssh` and `scp` can satisfy this requirement. 
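A rough pre-flight check for the HA etcd prerequisites above (hosts reachable on ports 2379 and 2380, plus a way to copy files). The peer address and file are placeholders, and the port checks only succeed once something is actually listening, but they are a quick way to spot firewall or routing problems.

```shell
PEER=10.0.0.11   # example address of another etcd host; replace with your own

# Verify the etcd client and peer ports are reachable from this host.
nc -zv "$PEER" 2379
nc -zv "$PEER" 2380

# scp is one way to satisfy the "copy files between hosts" prerequisite.
scp ./some-config.yaml "root@${PEER}:/tmp/"
```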
-[toolbox]: /docs/setup/production-environment/tools/kubeadm/install-kubeadm/ - - - ## Setting up the cluster diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md index a4d6d54cc2a9e..696778f974f5d 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md @@ -15,11 +15,10 @@ If your problem is not listed below, please follow the following steps: - Go to [github.com/kubernetes/kubeadm](https://github.com/kubernetes/kubeadm/issues) and search for existing issues. - If no issue exists, please [open one](https://github.com/kubernetes/kubeadm/issues/new) and follow the issue template. -- If you are unsure about how kubeadm works, you can ask on [Slack](http://slack.k8s.io/) in #kubeadm, or open a question on [StackOverflow](https://stackoverflow.com/questions/tagged/kubernetes). Please include +- If you are unsure about how kubeadm works, you can ask on [Slack](https://slack.k8s.io/) in `#kubeadm`, + or open a question on [StackOverflow](https://stackoverflow.com/questions/tagged/kubernetes). Please include relevant tags like `#kubernetes` and `#kubeadm` so folks can help you. - - ## Not possible to join a v1.18 Node to a v1.17 cluster due to missing RBAC diff --git a/content/en/docs/setup/production-environment/tools/kubespray.md b/content/en/docs/setup/production-environment/tools/kubespray.md index 64ad3f4b1aa09..02d99d926a83c 100644 --- a/content/en/docs/setup/production-environment/tools/kubespray.md +++ b/content/en/docs/setup/production-environment/tools/kubespray.md @@ -8,7 +8,7 @@ weight: 30 This quickstart helps to install a Kubernetes cluster hosted on GCE, Azure, OpenStack, AWS, vSphere, Packet (bare metal), Oracle Cloud Infrastructure (Experimental) or Baremetal with [Kubespray](https://github.com/kubernetes-sigs/kubespray). -Kubespray is a composition of [Ansible](http://docs.ansible.com/) playbooks, [inventory](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/ansible.md), provisioning tools, and domain knowledge for generic OS/Kubernetes clusters configuration management tasks. Kubespray provides: +Kubespray is a composition of [Ansible](https://docs.ansible.com/) playbooks, [inventory](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/ansible.md), provisioning tools, and domain knowledge for generic OS/Kubernetes clusters configuration management tasks. Kubespray provides: * a highly available cluster * composable attributes @@ -21,9 +21,8 @@ Kubespray is a composition of [Ansible](http://docs.ansible.com/) playbooks, [in * openSUSE Leap 15 * continuous integration tests -To choose a tool which best fits your use case, read [this comparison](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/comparisons.md) to [kubeadm](/docs/admin/kubeadm/) and [kops](/docs/setup/production-environment/tools/kops/). - - +To choose a tool which best fits your use case, read [this comparison](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/comparisons.md) to +[kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/) and [kops](/docs/setup/production-environment/tools/kops/). 
@@ -50,7 +49,7 @@ Kubespray provides the following utilities to help provision your environment: ### (2/5) Compose an inventory file -After you provision your servers, create an [inventory file for Ansible](http://docs.ansible.com/ansible/intro_inventory.html). You can do this manually or via a dynamic inventory script. For more information, see "[Building your own inventory](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/getting-started.md#building-your-own-inventory)". +After you provision your servers, create an [inventory file for Ansible](https://docs.ansible.com/ansible/intro_inventory.html). You can do this manually or via a dynamic inventory script. For more information, see "[Building your own inventory](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/getting-started.md#building-your-own-inventory)". ### (3/5) Plan your cluster deployment @@ -68,7 +67,7 @@ Kubespray provides the ability to customize many aspects of the deployment: * {{< glossary_tooltip term_id="cri-o" >}} * Certificate generation methods -Kubespray customizations can be made to a [variable file](http://docs.ansible.com/ansible/playbooks_variables.html). If you are just getting started with Kubespray, consider using the Kubespray defaults to deploy your cluster and explore Kubernetes. +Kubespray customizations can be made to a [variable file](https://docs.ansible.com/ansible/playbooks_variables.html). If you are just getting started with Kubespray, consider using the Kubespray defaults to deploy your cluster and explore Kubernetes. ### (4/5) Deploy a Cluster @@ -110,11 +109,9 @@ When running the reset playbook, be sure not to accidentally target your product ## Feedback -* Slack Channel: [#kubespray](https://kubernetes.slack.com/messages/kubespray/) (You can get your invite [here](http://slack.k8s.io/)) +* Slack Channel: [#kubespray](https://kubernetes.slack.com/messages/kubespray/) (You can get your invite [here](https://slack.k8s.io/)) * [GitHub Issues](https://github.com/kubernetes-sigs/kubespray/issues) - - ## {{% heading "whatsnext" %}} diff --git a/content/en/docs/setup/production-environment/turnkey/aws.md b/content/en/docs/setup/production-environment/turnkey/aws.md index cbfccd7a56b07..be75623158516 100644 --- a/content/en/docs/setup/production-environment/turnkey/aws.md +++ b/content/en/docs/setup/production-environment/turnkey/aws.md @@ -48,7 +48,7 @@ export PATH=/platforms/darwin/amd64:$PATH export PATH=/platforms/linux/amd64:$PATH ``` -An up-to-date documentation page for this tool is available here: [kubectl manual](/docs/user-guide/kubectl/) +An up-to-date documentation page for this tool is available here: [kubectl manual](/docs/reference/kubectl/kubectl/) By default, `kubectl` will use the `kubeconfig` file generated during the cluster startup for authenticating against the API. For more information, please read [kubeconfig files](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) @@ -63,7 +63,8 @@ For more complete applications, please look in the [examples directory](https:// ## Scaling the cluster -Adding and removing nodes through `kubectl` is not supported. You can still scale the amount of nodes manually through adjustments of the 'Desired' and 'Max' properties within the [Auto Scaling Group](http://docs.aws.amazon.com/autoscaling/latest/userguide/as-manual-scaling.html), which was created during the installation. +Adding and removing nodes through `kubectl` is not supported. 
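A hedged sketch of the inventory and deployment steps described above, loosely following the upstream Kubespray getting-started flow; the inventory file name, the example node IPs, and the inventory-builder helper may differ between Kubespray versions, so treat this as an outline rather than exact commands.

```shell
# Copy the sample inventory and generate hosts from a list of example node IPs.
cp -rfp inventory/sample inventory/mycluster
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
CONFIG_FILE=inventory/mycluster/hosts.yaml \
  python3 contrib/inventory_builder/inventory.py "${IPS[@]}"

# Run the cluster playbook against that inventory with privilege escalation.
ansible-playbook -i inventory/mycluster/hosts.yaml \
  --become --become-user=root cluster.yml
```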
You can still scale the amount of nodes manually through adjustments of the 'Desired' and 'Max' properties within the +[Auto Scaling Group](https://docs.aws.amazon.com/autoscaling/latest/userguide/as-manual-scaling.html), which was created during the installation. ## Tearing down the cluster @@ -80,13 +81,8 @@ cluster/kube-down.sh IaaS Provider | Config. Mgmt | OS | Networking | Docs | Conforms | Support Level -------------------- | ------------ | ------------- | ---------- | --------------------------------------------- | ---------| ---------------------------- AWS | kops | Debian | k8s (VPC) | [docs](https://github.com/kubernetes/kops) | | Community ([@justinsb](https://github.com/justinsb)) -AWS | CoreOS | CoreOS | flannel | [docs](/docs/getting-started-guides/aws) | | Community -AWS | Juju | Ubuntu | flannel, calico, canal | [docs](/docs/getting-started-guides/ubuntu) | 100% | Commercial, Community +AWS | CoreOS | CoreOS | flannel | - | | Community +AWS | Juju | Ubuntu | flannel, calico, canal | - | 100% | Commercial, Community AWS | KubeOne | Ubuntu, CoreOS, CentOS | canal, weavenet | [docs](https://github.com/kubermatic/kubeone) | 100% | Commercial, Community -## Further reading - -Please see the [Kubernetes docs](/docs/) for more details on administering -and using a Kubernetes cluster. - diff --git a/content/en/docs/setup/production-environment/turnkey/gce.md b/content/en/docs/setup/production-environment/turnkey/gce.md index 60c4e690d962b..3ea666eb7c5b7 100644 --- a/content/en/docs/setup/production-environment/turnkey/gce.md +++ b/content/en/docs/setup/production-environment/turnkey/gce.md @@ -72,7 +72,7 @@ cluster/kube-up.sh If you want more than one cluster running in your project, want to use a different name, or want a different number of worker nodes, see the `/cluster/gce/config-default.sh` file for more fine-grained configuration before you start up your cluster. If you run into trouble, please see the section on [troubleshooting](/docs/setup/production-environment/turnkey/gce/#troubleshooting), post to the -[Kubernetes Forum](https://discuss.kubernetes.io), or come ask questions on [Slack](/docs/troubleshooting/#slack). +[Kubernetes Forum](https://discuss.kubernetes.io), or come ask questions on `#gke` Slack channel. The next few steps will show you: @@ -85,7 +85,7 @@ The next few steps will show you: The cluster startup script will leave you with a running cluster and a `kubernetes` directory on your workstation. -The [kubectl](/docs/user-guide/kubectl/) tool controls the Kubernetes cluster +The [kubectl](/docs/reference/kubectl/kubectl/) tool controls the Kubernetes cluster manager. It lets you inspect your cluster resources, create, delete, and update components, and much more. You will use it to look at your new cluster and bring up example apps. @@ -98,7 +98,7 @@ gcloud components install kubectl {{< note >}} The kubectl version bundled with `gcloud` may be older than the one -downloaded by the get.k8s.io install script. See [Installing kubectl](/docs/tasks/kubectl/install/) +downloaded by the get.k8s.io install script. See [Installing kubectl](/docs/tasks/tools/install-kubectl/) document to see how you can set up the latest `kubectl` on your workstation. {{< /note >}} @@ -112,7 +112,7 @@ Once `kubectl` is in your path, you can use it to look at your cluster. 
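The 'Desired' and 'Max' adjustments described above can also be made from the AWS CLI rather than the console; the Auto Scaling Group name is a placeholder, so look it up first.

```shell
# Find the group that kube-up created for the worker nodes.
aws autoscaling describe-auto-scaling-groups \
  --query 'AutoScalingGroups[].AutoScalingGroupName'

# Example sizes only: raise the desired node count to 5 with a ceiling of 10.
aws autoscaling update-auto-scaling-group \
  --auto-scaling-group-name <your-nodes-asg-name> \
  --desired-capacity 5 --max-size 10
```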
E.g., ru kubectl get --all-namespaces services ``` -should show a set of [services](/docs/user-guide/services) that look something like this: +should show a set of [services](/docs/concepts/services-networking/service/) that look something like this: ```shell NAMESPACE NAME TYPE CLUSTER_IP EXTERNAL_IP PORT(S) AGE @@ -122,7 +122,7 @@ kube-system kube-ui ClusterIP 10.0.0.3 ... ``` -Similarly, you can take a look at the set of [pods](/docs/user-guide/pods) that were created during cluster startup. +Similarly, you can take a look at the set of [pods](/docs/concepts/workloads/pods/pod/) that were created during cluster startup. You can do this via the ```shell @@ -149,7 +149,7 @@ Some of the pods may take a few seconds to start up (during this time they'll sh ### Run some examples -Then, see [a simple nginx example](/docs/user-guide/simple-nginx) to try out your new cluster. +Then, see [a simple nginx example](/docs/tasks/run-application/run-stateless-application-deployment/) to try out your new cluster. For more complete applications, please look in the [examples directory](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/). The [guestbook example](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/guestbook/) is a good "getting started" walkthrough. @@ -221,9 +221,3 @@ IaaS Provider | Config. Mgmt | OS | Networking | Docs GCE | Saltstack | Debian | GCE | [docs](/docs/setup/production-environment/turnkey/gce/) | | Project -## Further reading - -Please see the [Kubernetes docs](/docs/) for more details on administering -and using a Kubernetes cluster. - - diff --git a/content/en/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md b/content/en/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md index 09a74d14505f9..0192cfeb5e08c 100644 --- a/content/en/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md +++ b/content/en/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md @@ -17,7 +17,7 @@ Windows applications constitute a large portion of the services and applications ## Windows containers in Kubernetes -To enable the orchestration of Windows containers in Kubernetes, simply include Windows nodes in your existing Linux cluster. Scheduling Windows containers in [Pods](/docs/concepts/workloads/pods/pod-overview/) on Kubernetes is as simple and easy as scheduling Linux-based containers. +To enable the orchestration of Windows containers in Kubernetes, simply include Windows nodes in your existing Linux cluster. Scheduling Windows containers in {{< glossary_tooltip text="Pods" term_id="pod" >}} on Kubernetes is as simple and easy as scheduling Linux-based containers. In order to run Windows containers, your Kubernetes cluster must include multiple operating systems, with control plane nodes running Linux and workers running either Windows or Linux depending on your workload needs. Windows Server 2019 is the only Windows operating system supported, enabling [Kubernetes Node](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/architecture.md#the-kubernetes-node) on Windows (including kubelet, [container runtime](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/containerd), and kube-proxy). For a detailed explanation of Windows distribution channels see the [Microsoft documentation](https://docs.microsoft.com/en-us/windows-server/get-started-19/servicing-channels-19). 
@@ -56,7 +56,7 @@ Windows containers with process isolation have strict compatibility rules, [wher Key Kubernetes elements work the same way in Windows as they do in Linux. In this section, we talk about some of the key workload enablers and how they map to Windows. -* [Pods](/docs/concepts/workloads/pods/pod-overview/) +* [Pods](/docs/concepts/workloads/pods/) A Pod is the basic building block of Kubernetes–the smallest and simplest unit in the Kubernetes object model that you create or deploy. You may not deploy Windows and Linux containers in the same Pod. All containers in a Pod are scheduled onto a single Node where each Node represents a specific platform and architecture. The following Pod capabilities, properties and events are supported with Windows containers: diff --git a/content/en/docs/setup/release/notes.md b/content/en/docs/setup/release/notes.md index d80d6c0ffd4cc..8bc87867bc85f 100644 --- a/content/en/docs/setup/release/notes.md +++ b/content/en/docs/setup/release/notes.md @@ -63,11 +63,9 @@ filename | sha512 hash ## Changelog since v1.17.0 A complete changelog for the release notes is now hosted in a customizable -format at [https://relnotes.k8s.io][1]. Check it out and please give us your +format at [https://relnotes.k8s.io](https://relnotes.k8s.io/?releaseVersions=1.18.0). Check it out and please give us your feedback! -[1]: https://relnotes.k8s.io/?releaseVersions=1.18.0 - ## What’s New (Major Themes) ### Kubernetes Topology Manager Moves to Beta - Align Up! @@ -80,13 +78,13 @@ Server-side Apply was promoted to Beta in 1.16, but is now introducing a second ### Extending Ingress with and replacing a deprecated annotation with IngressClass -In Kubernetes 1.18, there are two significant additions to Ingress: A new `pathType` field and a new `IngressClass` resource. The `pathType` field allows specifying how paths should be matched. In addition to the default `ImplementationSpecific` type, there are new `Exact` and `Prefix` path types. +In Kubernetes 1.18, there are two significant additions to Ingress: A new `pathType` field and a new `IngressClass` resource. The `pathType` field allows specifying how paths should be matched. In addition to the default `ImplementationSpecific` type, there are new `Exact` and `Prefix` path types. The `IngressClass` resource is used to describe a type of Ingress within a Kubernetes cluster. Ingresses can specify the class they are associated with by using a new `ingressClassName` field on Ingresses. This new resource and field replace the deprecated `kubernetes.io/ingress.class` annotation. ### SIG CLI introduces kubectl debug -SIG CLI was debating the need for a debug utility for quite some time already. With the development of [ephemeral containers](https://kubernetes.io/docs/concepts/workloads/pods/ephemeral-containers/), it became more obvious how we can support developers with tooling built on top of `kubectl exec`. The addition of the `kubectl debug` [command](https://github.com/kubernetes/enhancements/blob/master/keps/sig-cli/20190805-kubectl-debug.md) (it is alpha but your feedback is more than welcome), allows developers to easily debug their Pods inside the cluster. We think this addition is invaluable. This command allows one to create a temporary container which runs next to the Pod one is trying to examine, but also attaches to the console for interactive troubleshooting. +SIG CLI was debating the need for a debug utility for quite some time already. 
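A hedged illustration of the new `pathType` and `ingressClassName` fields and the `IngressClass` resource described in the release notes above; the resource names, controller value, backend Service, and path are all invented examples, and the manifests assume the 1.18-era `networking.k8s.io/v1beta1` API.

```shell
cat <<EOF | kubectl apply -f -
apiVersion: networking.k8s.io/v1beta1
kind: IngressClass
metadata:
  name: example-class
spec:
  controller: example.com/ingress-controller
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: example-ingress
spec:
  ingressClassName: example-class   # replaces the kubernetes.io/ingress.class annotation
  rules:
  - http:
      paths:
      - path: /app
        pathType: Prefix            # other options: Exact, ImplementationSpecific
        backend:
          serviceName: example-service
          servicePort: 80
EOF
```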
With the development of [ephemeral containers](/docs/concepts/workloads/pods/ephemeral-containers/), it became more obvious how we can support developers with tooling built on top of `kubectl exec`. The addition of the `kubectl debug` [command](https://github.com/kubernetes/enhancements/blob/master/keps/sig-cli/20190805-kubectl-debug.md) (it is alpha but your feedback is more than welcome), allows developers to easily debug their Pods inside the cluster. We think this addition is invaluable. This command allows one to create a temporary container which runs next to the Pod one is trying to examine, but also attaches to the console for interactive troubleshooting. ### Introducing Windows CSI support alpha for Kubernetes @@ -126,7 +124,7 @@ No Known Issues Reported #### kubectl: - `kubectl` and k8s.io/client-go no longer default to a server address of `http://localhost:8080`. If you own one of these legacy clusters, you are *strongly* encouraged to secure your server. If you cannot secure your server, you can set the `$KUBERNETES_MASTER` environment variable to `http://localhost:8080` to continue defaulting the server address. `kubectl` users can also set the server address using the `--server` flag, or in a kubeconfig file specified via `--kubeconfig` or `$KUBECONFIG`. ([#86173](https://github.com/kubernetes/kubernetes/pull/86173), [@soltysh](https://github.com/soltysh)) [SIG API Machinery, CLI and Testing] -- `kubectl run` has removed the previously deprecated generators, along with flags unrelated to creating pods. `kubectl run` now only creates pods. See specific `kubectl create` subcommands to create objects other than pods. +- `kubectl run` has removed the previously deprecated generators, along with flags unrelated to creating pods. `kubectl run` now only creates pods. See specific `kubectl create` subcommands to create objects other than pods. ([#87077](https://github.com/kubernetes/kubernetes/pull/87077), [@soltysh](https://github.com/soltysh)) [SIG Architecture, CLI and Testing] - The deprecated command `kubectl rolling-update` has been removed ([#88057](https://github.com/kubernetes/kubernetes/pull/88057), [@julianvmodesto](https://github.com/julianvmodesto)) [SIG Architecture, CLI and Testing] @@ -193,13 +191,13 @@ No Known Issues Reported - node_memory_working_set_bytes --> node_memory_working_set_bytes - container_cpu_usage_seconds_total --> container_cpu_usage_seconds - container_memory_working_set_bytes --> container_memory_working_set_bytes - - scrape_error --> scrape_error + - scrape_error --> scrape_error ([#86282](https://github.com/kubernetes/kubernetes/pull/86282), [@RainbowMango](https://github.com/RainbowMango)) [SIG Node] - In a future release, kubelet will no longer create the CSI NodePublishVolume target directory, in accordance with the CSI specification. CSI drivers may need to be updated accordingly to properly create and process the target path. ([#75535](https://github.com/kubernetes/kubernetes/issues/75535)) [SIG Storage] #### kube-proxy: - `--healthz-port` and `--metrics-port` flags are deprecated, please use `--healthz-bind-address` and `--metrics-bind-address` instead ([#88512](https://github.com/kubernetes/kubernetes/pull/88512), [@SataQiu](https://github.com/SataQiu)) [SIG Network] -- a new `EndpointSliceProxying` feature gate has been added to control the use of EndpointSlices in kube-proxy. The EndpointSlice feature gate that used to control this behavior no longer affects kube-proxy. This feature has been disabled by default. 
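A short sketch of the `kubectl run` change noted above: the command now creates only a Pod, and objects the removed generators used to produce are created with dedicated `kubectl create` subcommands instead. The names and image are examples.

```shell
# kubectl run is now limited to creating a single Pod.
kubectl run nginx --image=nginx

# A Deployment (formerly produced by a kubectl run generator) is created explicitly.
kubectl create deployment nginx --image=nginx
```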
([#86137](https://github.com/kubernetes/kubernetes/pull/86137), [@robscott](https://github.com/robscott)) +- a new `EndpointSliceProxying` feature gate has been added to control the use of EndpointSlices in kube-proxy. The EndpointSlice feature gate that used to control this behavior no longer affects kube-proxy. This feature has been disabled by default. ([#86137](https://github.com/kubernetes/kubernetes/pull/86137), [@robscott](https://github.com/robscott)) #### kubeadm: - command line option "kubelet-version" for `kubeadm upgrade node` has been deprecated and will be removed in a future release. ([#87942](https://github.com/kubernetes/kubernetes/pull/87942), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] @@ -245,7 +243,7 @@ No Known Issues Reported - The alpha feature `ServiceAccountIssuerDiscovery` enables publishing OIDC discovery information and service account token verification keys at `/.well-known/openid-configuration` and `/openid/v1/jwks` endpoints by API servers configured to issue service account tokens. ([#80724](https://github.com/kubernetes/kubernetes/pull/80724), [@cceckman](https://github.com/cceckman)) [SIG API Machinery, Auth, Cluster Lifecycle and Testing] - CustomResourceDefinition schemas that use `x-kubernetes-list-map-keys` to specify properties that uniquely identify list items must make those properties required or have a default value, to ensure those properties are present for all list items. See https://kubernetes.io/docs/reference/using-api/api-concepts/#merge-strategy for details. ([#88076](https://github.com/kubernetes/kubernetes/pull/88076), [@eloyekunle](https://github.com/eloyekunle)) [SIG API Machinery and Testing] - CustomResourceDefinition schemas that use `x-kubernetes-list-type: map` or `x-kubernetes-list-type: set` now enable validation that the list items in the corresponding custom resources are unique. ([#84920](https://github.com/kubernetes/kubernetes/pull/84920), [@sttts](https://github.com/sttts)) [SIG API Machinery] - + #### Configuration file changes: #### kube-apiserver: @@ -257,7 +255,7 @@ No Known Issues Reported - Kube-scheduler can run more than one scheduling profile. Given a pod, the profile is selected by using its `.spec.schedulerName`. ([#88285](https://github.com/kubernetes/kubernetes/pull/88285), [@alculquicondor](https://github.com/alculquicondor)) [SIG Apps, Scheduling and Testing] - Scheduler Extenders can now be configured in the v1alpha2 component config ([#88768](https://github.com/kubernetes/kubernetes/pull/88768), [@damemi](https://github.com/damemi)) [SIG Release, Scheduling and Testing] - The PostFilter of scheduler framework is renamed to PreScore in kubescheduler.config.k8s.io/v1alpha2. ([#87751](https://github.com/kubernetes/kubernetes/pull/87751), [@skilxn-go](https://github.com/skilxn-go)) [SIG Scheduling and Testing] - + #### kube-proxy: - Added kube-proxy flags `--ipvs-tcp-timeout`, `--ipvs-tcpfin-timeout`, `--ipvs-udp-timeout` to configure IPVS connection timeouts. ([#85517](https://github.com/kubernetes/kubernetes/pull/85517), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cluster Lifecycle and Network] - Added optional `--detect-local-mode` flag to kube-proxy. 
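To make the new kube-proxy IPVS timeout flags above concrete, here is a minimal sketch; the timeout values are purely illustrative, and in a real cluster these flags would normally be set through the kube-proxy DaemonSet or configuration rather than by invoking the binary by hand.

```shell
# Example durations only; the flags are the ones introduced in the notes above.
kube-proxy --proxy-mode=ipvs \
  --ipvs-tcp-timeout=900s \
  --ipvs-tcpfin-timeout=120s \
  --ipvs-udp-timeout=300s
```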
Valid values are "ClusterCIDR" (default matching previous behavior) and "NodeCIDR" ([#87748](https://github.com/kubernetes/kubernetes/pull/87748), [@satyasm](https://github.com/satyasm)) [SIG Cluster Lifecycle, Network and Scheduling] @@ -689,8 +687,8 @@ filename | sha512 hash - Add `rest_client_rate_limiter_duration_seconds` metric to component-base to track client side rate limiter latency in seconds. Broken down by verb and URL. ([#88134](https://github.com/kubernetes/kubernetes/pull/88134), [@jennybuckley](https://github.com/jennybuckley)) [SIG API Machinery, Cluster Lifecycle and Instrumentation] - Allow user to specify resource using --filename flag when invoking kubectl exec ([#88460](https://github.com/kubernetes/kubernetes/pull/88460), [@soltysh](https://github.com/soltysh)) [SIG CLI and Testing] -- Apiserver add a new flag --goaway-chance which is the fraction of requests that will be closed gracefully(GOAWAY) to prevent HTTP/2 clients from getting stuck on a single apiserver. - After the connection closed(received GOAWAY), the client's other in-flight requests won't be affected, and the client will reconnect. +- Apiserver add a new flag --goaway-chance which is the fraction of requests that will be closed gracefully(GOAWAY) to prevent HTTP/2 clients from getting stuck on a single apiserver. + After the connection closed(received GOAWAY), the client's other in-flight requests won't be affected, and the client will reconnect. The flag min value is 0 (off), max is .02 (1/50 requests); .001 (1/1000) is a recommended starting point. Clusters with single apiservers, or which don't use a load balancer, should NOT enable this. ([#88567](https://github.com/kubernetes/kubernetes/pull/88567), [@answer1991](https://github.com/answer1991)) [SIG API Machinery] - Azure: add support for single stack IPv6 ([#88448](https://github.com/kubernetes/kubernetes/pull/88448), [@aramase](https://github.com/aramase)) [SIG Cloud Provider] @@ -739,7 +737,7 @@ filename | sha512 hash - Kubelets perform fewer unnecessary pod status update operations on the API server. ([#88591](https://github.com/kubernetes/kubernetes/pull/88591), [@smarterclayton](https://github.com/smarterclayton)) [SIG Node and Scalability] - Plugin/PluginConfig and Policy APIs are mutually exclusive when running the scheduler ([#88864](https://github.com/kubernetes/kubernetes/pull/88864), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] - Specifying PluginConfig for the same plugin more than once fails scheduler startup. - + Specifying extenders and configuring .ignoredResources for the NodeResourcesFit plugin fails ([#88870](https://github.com/kubernetes/kubernetes/pull/88870), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] - Support TLS Server Name overrides in kubeconfig file and via --tls-server-name in kubectl ([#88769](https://github.com/kubernetes/kubernetes/pull/88769), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Auth and CLI] - Terminating a restartPolicy=Never pod no longer has a chance to report the pod succeeded when it actually failed. ([#88440](https://github.com/kubernetes/kubernetes/pull/88440), [@smarterclayton](https://github.com/smarterclayton)) [SIG Node and Testing] @@ -806,18 +804,18 @@ filename | sha512 hash If you are setting `--redirect-container-streaming=true`, then you must migrate off this configuration. The flag will no longer be able to be enabled starting in v1.20. If you are not setting the flag, no action is necessary. 
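A one-line sketch of the `--goaway-chance` flag described above; 0.001 is the recommended starting point quoted in the notes, and the flag is only meaningful for clusters with multiple API servers behind a load balancer.

```shell
# Remaining kube-apiserver flags omitted for brevity.
kube-apiserver --goaway-chance=0.001
```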
([#88290](https://github.com/kubernetes/kubernetes/pull/88290), [@tallclair](https://github.com/tallclair)) [SIG API Machinery and Node] - Yes. - + Feature Name: Support using network resources (VNet, LB, IP, etc.) in different AAD Tenant and Subscription than those for the cluster. - + Changes in Pull Request: - + 1. Add properties `networkResourceTenantID` and `networkResourceSubscriptionID` in cloud provider auth config section, which indicates the location of network resources. 2. Add function `GetMultiTenantServicePrincipalToken` to fetch multi-tenant service principal token, which will be used by Azure VM/VMSS Clients in this feature. 3. Add function `GetNetworkResourceServicePrincipalToken` to fetch network resource service principal token, which will be used by Azure Network Resource (Load Balancer, Public IP, Route Table, Network Security Group and their sub level resources) Clients in this feature. 4. Related unit tests. - + None. - + User Documentation: In PR https://github.com/kubernetes-sigs/cloud-provider-azure/pull/301 ([#88384](https://github.com/kubernetes/kubernetes/pull/88384), [@bowen5](https://github.com/bowen5)) [SIG Cloud Provider] ## Changes by Kind @@ -833,8 +831,8 @@ filename | sha512 hash - Added support for multiple sizes huge pages on a container level ([#84051](https://github.com/kubernetes/kubernetes/pull/84051), [@bart0sh](https://github.com/bart0sh)) [SIG Apps, Node and Storage] - AppProtocol is a new field on Service and Endpoints resources, enabled with the ServiceAppProtocol feature gate. ([#88503](https://github.com/kubernetes/kubernetes/pull/88503), [@robscott](https://github.com/robscott)) [SIG Apps and Network] - Fixed missing validation of uniqueness of list items in lists with `x-kubernetes-list-type: map` or x-kubernetes-list-type: set` in CustomResources. ([#84920](https://github.com/kubernetes/kubernetes/pull/84920), [@sttts](https://github.com/sttts)) [SIG API Machinery] -- Introduces optional --detect-local flag to kube-proxy. - Currently the only supported value is "cluster-cidr", +- Introduces optional --detect-local flag to kube-proxy. + Currently the only supported value is "cluster-cidr", which is the default if not specified. ([#87748](https://github.com/kubernetes/kubernetes/pull/87748), [@satyasm](https://github.com/satyasm)) [SIG Cluster Lifecycle, Network and Scheduling] - Kube-scheduler can run more than one scheduling profile. Given a pod, the profile is selected by using its `.spec.SchedulerName`. ([#88285](https://github.com/kubernetes/kubernetes/pull/88285), [@alculquicondor](https://github.com/alculquicondor)) [SIG Apps, Scheduling and Testing] - Moving Windows RunAsUserName feature to GA ([#87790](https://github.com/kubernetes/kubernetes/pull/87790), [@marosset](https://github.com/marosset)) [SIG Apps and Windows] @@ -1048,9 +1046,9 @@ filename | sha512 hash - aggragation api will have alpha support for network proxy ([#87515](https://github.com/kubernetes/kubernetes/pull/87515), [@Sh4d1](https://github.com/Sh4d1)) [SIG API Machinery] - API request throttling (due to a high rate of requests) is now reported in client-go logs at log level 2. The messages are of the form - + Throttling request took 1.50705208s, request: GET: - + The presence of these messages, may indicate to the administrator the need to tune the cluster accordingly. 
([#87740](https://github.com/kubernetes/kubernetes/pull/87740), [@jennybuckley](https://github.com/jennybuckley)) [SIG API Machinery] - kubeadm: reject a node joining the cluster if a node with the same name already exists ([#81056](https://github.com/kubernetes/kubernetes/pull/81056), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] - disableAvailabilitySetNodes is added to avoid VM list for VMSS clusters. It should only be used when vmType is "vmss" and all the nodes (including masters) are VMSS virtual machines. ([#87685](https://github.com/kubernetes/kubernetes/pull/87685), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] diff --git a/content/en/docs/setup/release/version-skew-policy.md b/content/en/docs/setup/release/version-skew-policy.md index cc506352d33da..5b189667db17c 100644 --- a/content/en/docs/setup/release/version-skew-policy.md +++ b/content/en/docs/setup/release/version-skew-policy.md @@ -21,7 +21,7 @@ Specific cluster deployment tools may place additional restrictions on version s ## Supported versions Kubernetes versions are expressed as **x.y.z**, -where **x** is the major version, **y** is the minor version, and **z** is the patch version, following [Semantic Versioning](http://semver.org/) terminology. +where **x** is the major version, **y** is the minor version, and **z** is the patch version, following [Semantic Versioning](https://semver.org/) terminology. For more information, see [Kubernetes Release Versioning](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/release/versioning.md#kubernetes-release-versioning). The Kubernetes project maintains release branches for the most recent three minor releases ({{< skew latestVersion >}}, {{< skew prevMinorVersion >}}, {{< skew oldestMinorVersion >}}). @@ -146,3 +146,16 @@ Running a cluster with `kubelet` instances that are persistently two minor versi * they must be upgraded within one minor version of `kube-apiserver` before the control plane can be upgraded * it increases the likelihood of running `kubelet` versions older than the three maintained minor releases {{}} + +### kube-proxy + +* `kube-proxy` must be the same minor version as `kubelet` on the node. +* `kube-proxy` must not be newer than `kube-apiserver`. +* `kube-proxy` must be at most two minor versions older than `kube-apiserver.` + +Example: + +If `kube-proxy` version is **{{< skew latestVersion >}}**: + +* `kubelet` version must be at the same minor version as **{{< skew latestVersion >}}**. +* `kube-apiserver` version must be between **{{< skew oldestMinorVersion >}}** and **{{< skew latestVersion >}}**, inclusive. diff --git a/content/en/docs/tasks/_index.md b/content/en/docs/tasks/_index.md index 552f17e48c173..0d424ee4db3d6 100644 --- a/content/en/docs/tasks/_index.md +++ b/content/en/docs/tasks/_index.md @@ -11,9 +11,5 @@ This section of the Kubernetes documentation contains pages that show how to do individual tasks. A task page shows how to do a single thing, typically by giving a short sequence of steps. - -## {{% heading "whatsnext" %}} - - If you would like to write a task page, see [Creating a Documentation Pull Request](/docs/home/contribute/create-pull-request/). 
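When reasoning about the kube-proxy, kubelet, and kube-apiserver skew rules added above, it helps to see which versions are actually running; the commands below are a hedged sketch, and the kube-proxy DaemonSet name and namespace are assumptions based on a kubeadm-style layout.

```shell
# Client and kube-apiserver versions.
kubectl version --short

# The VERSION column reports each node's kubelet version.
kubectl get nodes -o wide

# Infer the kube-proxy version from its container image tag (kubeadm layout assumed).
kubectl -n kube-system get daemonset kube-proxy \
  -o jsonpath='{.spec.template.spec.containers[0].image}{"\n"}'
```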
diff --git a/content/en/docs/tasks/access-application-cluster/ingress-minikube.md b/content/en/docs/tasks/access-application-cluster/ingress-minikube.md index 9288ec3064a54..a0c68ff682751 100644 --- a/content/en/docs/tasks/access-application-cluster/ingress-minikube.md +++ b/content/en/docs/tasks/access-application-cluster/ingress-minikube.md @@ -132,7 +132,7 @@ The following file is an Ingress resource that sends traffic to your Service via 1. Create `example-ingress.yaml` from the following file: - apiVersion: networking.k8s.io/v1beta1 # for versions before 1.14 use extensions/v1beta1 + apiVersion: networking.k8s.io/v1beta1 kind: Ingress metadata: name: example-ingress @@ -243,7 +243,7 @@ The following file is an Ingress resource that sends traffic to your Service via Output: ```shell - ingress.extensions/example-ingress configured + ingress.networking/example-ingress configured ``` ## Test Your Ingress diff --git a/content/en/docs/tasks/access-application-cluster/service-access-application-cluster.md b/content/en/docs/tasks/access-application-cluster/service-access-application-cluster.md index fe90981432a02..1194288386fe2 100644 --- a/content/en/docs/tasks/access-application-cluster/service-access-application-cluster.md +++ b/content/en/docs/tasks/access-application-cluster/service-access-application-cluster.md @@ -45,13 +45,14 @@ Here is the configuration file for the application Deployment: kubectl apply -f https://k8s.io/examples/service/access/hello-application.yaml ``` The preceding command creates a - [Deployment](/docs/concepts/workloads/controllers/deployment/) - object and an associated - [ReplicaSet](/docs/concepts/workloads/controllers/replicaset/) - object. The ReplicaSet has two - [Pods](/docs/concepts/workloads/pods/pod/), + {{< glossary_tooltip text="Deployment" term_id="deployment" >}} + and an associated + {{< glossary_tooltip term_id="replica-set" text="ReplicaSet" >}}. + The ReplicaSet has two + {{< glossary_tooltip text="Pods" term_id="pod" >}} each of which runs the Hello World application. + 1. Display information about the Deployment: ```shell kubectl get deployments hello-world diff --git a/content/en/docs/tasks/administer-cluster/change-pv-reclaim-policy.md b/content/en/docs/tasks/administer-cluster/change-pv-reclaim-policy.md index 729c7bde4fc41..be7cbf2673294 100644 --- a/content/en/docs/tasks/administer-cluster/change-pv-reclaim-policy.md +++ b/content/en/docs/tasks/administer-cluster/change-pv-reclaim-policy.md @@ -19,15 +19,15 @@ PersistentVolume. ## Why change reclaim policy of a PersistentVolume -`PersistentVolumes` can have various reclaim policies, including "Retain", -"Recycle", and "Delete". For dynamically provisioned `PersistentVolumes`, +PersistentVolumes can have various reclaim policies, including "Retain", +"Recycle", and "Delete". For dynamically provisioned PersistentVolumes, the default reclaim policy is "Delete". This means that a dynamically provisioned volume is automatically deleted when a user deletes the corresponding -`PersistentVolumeClaim`. This automatic behavior might be inappropriate if the volume +PersistentVolumeClaim. This automatic behavior might be inappropriate if the volume contains precious data. In that case, it is more appropriate to use the "Retain" -policy. With the "Retain" policy, if a user deletes a `PersistentVolumeClaim`, -the corresponding `PersistentVolume` is not be deleted. Instead, it is moved to the -`Released` phase, where all of its data can be manually recovered. +policy. 
With the "Retain" policy, if a user deletes a PersistentVolumeClaim, +the corresponding PersistentVolume is not be deleted. Instead, it is moved to the +Released phase, where all of its data can be manually recovered. ## Changing the reclaim policy of a PersistentVolume diff --git a/content/en/docs/tasks/administer-cluster/dns-custom-nameservers.md b/content/en/docs/tasks/administer-cluster/dns-custom-nameservers.md index f436b641a0620..9fb0452ddc55a 100644 --- a/content/en/docs/tasks/administer-cluster/dns-custom-nameservers.md +++ b/content/en/docs/tasks/administer-cluster/dns-custom-nameservers.md @@ -17,7 +17,7 @@ DNS resolution process in your cluster. {{< include "task-tutorial-prereqs.md" >}} Your cluster must be running the CoreDNS add-on. -[Migrating to CoreDNS](https://kubernetes.io/docs/tasks/administer-cluster/coredns/#migrating-to-coredns) +[Migrating to CoreDNS](/docs/tasks/administer-cluster/coredns/#migrating-to-coredns) explains how to use `kubeadm` to migrate from `kube-dns`. {{% version-check %}} @@ -117,7 +117,7 @@ You can modify the default CoreDNS behavior by modifying the ConfigMap. ### Configuration of Stub-domain and upstream nameserver using CoreDNS -CoreDNS has the ability to configure stubdomains and upstream nameservers using the [forward plugin](https://coredns.io/plugins/forward/). +CoreDNS has the ability to configure stubdomains and upstream nameservers using the [forward plugin](https://coredns.io/plugins/forward/). #### Example If a cluster operator has a [Consul](https://www.consul.io/) domain server located at 10.150.0.1, and all Consul names have the suffix .consul.local. To configure it in CoreDNS, the cluster administrator creates the following stanza in the CoreDNS ConfigMap. @@ -261,4 +261,4 @@ You can also migrate using the offical CoreDNS ## {{% heading "whatsnext" %}} -- Read [Debugging DNS Resolution](/docs/tasks/debug-application-cluster/dns-debugging-resolution/) +- Read [Debugging DNS Resolution](/docs/tasks/administer-cluster/dns-debugging-resolution/) diff --git a/content/en/docs/tasks/administer-cluster/dns-horizontal-autoscaling.md b/content/en/docs/tasks/administer-cluster/dns-horizontal-autoscaling.md index 6fd887bd8f4ee..f333b215a21ea 100644 --- a/content/en/docs/tasks/administer-cluster/dns-horizontal-autoscaling.md +++ b/content/en/docs/tasks/administer-cluster/dns-horizontal-autoscaling.md @@ -160,7 +160,7 @@ kubectl scale deployment --replicas=0 dns-autoscaler --namespace=kube-system The output is: - deployment.extensions/dns-autoscaler scaled + deployment.apps/dns-autoscaler scaled Verify that the replica count is zero: diff --git a/content/en/docs/tasks/administer-cluster/kms-provider.md b/content/en/docs/tasks/administer-cluster/kms-provider.md index 34cc1d6b66198..15bc1290ff97c 100644 --- a/content/en/docs/tasks/administer-cluster/kms-provider.md +++ b/content/en/docs/tasks/administer-cluster/kms-provider.md @@ -7,10 +7,8 @@ content_type: task This page shows how to configure a Key Management Service (KMS) provider and plugin to enable secret data encryption. - ## {{% heading "prerequisites" %}} - * {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} * Kubernetes version 1.10.0 or later is required @@ -19,8 +17,6 @@ This page shows how to configure a Key Management Service (KMS) provider and plu {{< feature-state for_k8s_version="v1.12" state="beta" >}} - - The KMS encryption provider uses an envelope encryption scheme to encrypt data in etcd. 
The data is encrypted using a data encryption key (DEK); a new DEK is generated for each encryption. The DEKs are encrypted with a key encryption key (KEK) that is stored and managed in a remote KMS. The KMS provider uses gRPC to communicate with a specific KMS @@ -30,10 +26,12 @@ plugin. The KMS plugin, which is implemented as a gRPC server and deployed on th To configure a KMS provider on the API server, include a provider of type ```kms``` in the providers array in the encryption configuration file and set the following properties: - * `name`: Display name of the KMS plugin. - * `endpoint`: Listen address of the gRPC server (KMS plugin). The endpoint is a UNIX domain socket. - * `cachesize`: Number of data encryption keys (DEKs) to be cached in the clear. When cached, DEKs can be used without another call to the KMS; whereas DEKs that are not cached require a call to the KMS to unwrap. - * `timeout`: How long should kube-apiserver wait for kms-plugin to respond before returning an error (default is 3 seconds). +* `name`: Display name of the KMS plugin. +* `endpoint`: Listen address of the gRPC server (KMS plugin). The endpoint is a UNIX domain socket. +* `cachesize`: Number of data encryption keys (DEKs) to be cached in the clear. + When cached, DEKs can be used without another call to the KMS; + whereas DEKs that are not cached require a call to the KMS to unwrap. +* `timeout`: How long should kube-apiserver wait for kms-plugin to respond before returning an error (default is 3 seconds). See [Understanding the encryption at rest configuration.](/docs/tasks/administer-cluster/encrypt-data) @@ -57,17 +55,18 @@ Then use the functions and data structures in the stub file to develop the serve * kms plugin version: `v1beta1` -In response to procedure call Version, a compatible KMS plugin should return v1beta1 as VersionResponse.version + In response to procedure call Version, a compatible KMS plugin should return v1beta1 as VersionResponse.version. * message version: `v1beta1` -All messages from KMS provider have the version field set to current version v1beta1 + All messages from KMS provider have the version field set to current version v1beta1. * protocol: UNIX domain socket (`unix`) -The gRPC server should listen at UNIX domain socket + The gRPC server should listen at UNIX domain socket. ### Integrating a KMS plugin with the remote KMS + The KMS plugin can communicate with the remote KMS using any protocol supported by the KMS. All configuration data, including authentication credentials the KMS plugin uses to communicate with the remote KMS, are stored and managed by the KMS plugin independently. The KMS plugin can encode the ciphertext with additional metadata that may be required before sending it to the KMS for decryption. @@ -80,108 +79,113 @@ To encrypt the data: 1. Create a new encryption configuration file using the appropriate properties for the `kms` provider: - ```yaml - apiVersion: apiserver.config.k8s.io/v1 - kind: EncryptionConfiguration - resources: - - resources: - - secrets - providers: - - kms: - name: myKmsPlugin - endpoint: unix:///tmp/socketfile.sock - cachesize: 100 - timeout: 3s - - identity: {} - ``` - -2. Set the `--encryption-provider-config` flag on the kube-apiserver to point to the location of the configuration file. -3. Restart your API server. - -Note: -The alpha version of the encryption feature prior to 1.13 required a config file with -`kind: EncryptionConfig` and `apiVersion: v1`, and used the `--experimental-encryption-provider-config` flag. 
+ ```yaml + apiVersion: apiserver.config.k8s.io/v1 + kind: EncryptionConfiguration + resources: + - resources: + - secrets + providers: + - kms: + name: myKmsPlugin + endpoint: unix:///tmp/socketfile.sock + cachesize: 100 + timeout: 3s + - identity: {} + ``` + +1. Set the `--encryption-provider-config` flag on the kube-apiserver to point to the location of the configuration file. +1. Restart your API server. ## Verifying that the data is encrypted -Data is encrypted when written to etcd. After restarting your kube-apiserver, any newly created or updated secret should be encrypted when stored. To verify, you can use the etcdctl command line program to retrieve the contents of your secret. - -1. Create a new secret called secret1 in the default namespace: -``` -kubectl create secret generic secret1 -n default --from-literal=mykey=mydata -``` -2. Using the etcdctl command line, read that secret out of etcd: -``` -ETCDCTL_API=3 etcdctl get /kubernetes.io/secrets/default/secret1 [...] | hexdump -C -``` - where `[...]` must be the additional arguments for connecting to the etcd server. -3. Verify the stored secret is prefixed with `k8s:enc:kms:v1:`, which indicates that the `kms` provider has encrypted the resulting data. +Data is encrypted when written to etcd. After restarting your `kube-apiserver`, +any newly created or updated secret should be encrypted when stored. To verify, +you can use the `etcdctl` command line program to retrieve the contents of your secret. -4. Verify that the secret is correctly decrypted when retrieved via the API: -``` -kubectl describe secret secret1 -n default -``` -should match `mykey: mydata` +1. Create a new secret called secret1 in the default namespace: + ``` + kubectl create secret generic secret1 -n default --from-literal=mykey=mydata + ``` +1. Using the etcdctl command line, read that secret out of etcd: + ``` + ETCDCTL_API=3 etcdctl get /kubernetes.io/secrets/default/secret1 [...] | hexdump -C + ``` + where `[...]` must be the additional arguments for connecting to the etcd server. + +1. Verify the stored secret is prefixed with `k8s:enc:kms:v1:`, which indicates that the `kms` provider has encrypted the resulting data. + +1. Verify that the secret is correctly decrypted when retrieved via the API: + ``` + kubectl describe secret secret1 -n default + ``` + should match `mykey: mydata` ## Ensuring all secrets are encrypted + Because secrets are encrypted on write, performing an update on a secret encrypts that content. -The following command reads all secrets and then updates them to apply server side encryption. If an error occurs due to a conflicting write, retry the command. For larger clusters, you may wish to subdivide the secrets by namespace or script an update. +The following command reads all secrets and then updates them to apply server side encryption. +If an error occurs due to a conflicting write, retry the command. +For larger clusters, you may wish to subdivide the secrets by namespace or script an update. + ``` kubectl get secrets --all-namespaces -o json | kubectl replace -f - ``` ## Switching from a local encryption provider to the KMS provider + To switch from a local encryption provider to the `kms` provider and re-encrypt all of the secrets: 1. Add the `kms` provider as the first entry in the configuration file as shown in the following example. 
- ```yaml - apiVersion: apiserver.config.k8s.io/v1 - kind: EncryptionConfiguration - resources: - - resources: - - secrets - providers: - - kms: - name : myKmsPlugin - endpoint: unix:///tmp/socketfile.sock - cachesize: 100 - - aescbc: - keys: - - name: key1 - secret: - ``` - -2. Restart all kube-apiserver processes. - -3. Run the following command to force all secrets to be re-encrypted using the `kms` provider. - -``` -kubectl get secrets --all-namespaces -o json| kubectl replace -f - -``` + ```yaml + apiVersion: apiserver.config.k8s.io/v1 + kind: EncryptionConfiguration + resources: + - resources: + - secrets + providers: + - kms: + name : myKmsPlugin + endpoint: unix:///tmp/socketfile.sock + cachesize: 100 + - aescbc: + keys: + - name: key1 + secret: + ``` + +1. Restart all kube-apiserver processes. + +1. Run the following command to force all secrets to be re-encrypted using the `kms` provider. + + ``` + kubectl get secrets --all-namespaces -o json| kubectl replace -f - + ``` ## Disabling encryption at rest + To disable encryption at rest: 1. Place the `identity` provider as the first entry in the configuration file: - ```yaml - apiVersion: apiserver.config.k8s.io/v1 - kind: EncryptionConfiguration - resources: - - resources: - - secrets - providers: - - identity: {} - - kms: - name : myKmsPlugin - endpoint: unix:///tmp/socketfile.sock - cachesize: 100 - ``` -2. Restart all kube-apiserver processes. -3. Run the following command to force all secrets to be decrypted. -``` -kubectl get secrets --all-namespaces -o json | kubectl replace -f - -``` + ```yaml + apiVersion: apiserver.config.k8s.io/v1 + kind: EncryptionConfiguration + resources: + - resources: + - secrets + providers: + - identity: {} + - kms: + name : myKmsPlugin + endpoint: unix:///tmp/socketfile.sock + cachesize: 100 + ``` +1. Restart all kube-apiserver processes. +1. Run the following command to force all secrets to be decrypted. + ``` + kubectl get secrets --all-namespaces -o json | kubectl replace -f - + ``` diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md b/content/en/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md index e82c53f3a6a66..c3498fce61f13 100644 --- a/content/en/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md +++ b/content/en/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md @@ -140,7 +140,7 @@ curl -L https://github.com/kubernetes-sigs/sig-windows-tools/releases/latest/dow ### Joining a Windows worker node {{< note >}} You must install the `Containers` feature and install Docker. Instructions -to do so are available at [Install Docker Engine - Enterprise on Windows Servers](https://docs.docker.com/ee/docker-ee/windows/docker-ee/#install-docker-engine---enterprise). +to do so are available at [Install Docker Engine - Enterprise on Windows Servers](https://docs.mirantis.com/docker-enterprise/v3.1/dockeree-products/docker-engine-enterprise/dee-windows.html). {{< /note >}} {{< note >}} diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md index 461e45bda644a..02687a85f25f3 100644 --- a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md +++ b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md @@ -12,15 +12,11 @@ weight: 10 Client certificates generated by [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/) expire after 1 year. This page explains how to manage certificate renewals with kubeadm. 
- - ## {{% heading "prerequisites" %}} You should be familiar with [PKI certificates and requirements in Kubernetes](/docs/setup/best-practices/certificates/). - - ## Using custom certificates {#custom-certificates} @@ -155,33 +151,29 @@ These are advanced topics for users who need to integrate their organization's c ### Set up a signer The Kubernetes Certificate Authority does not work out of the box. -You can configure an external signer such as [cert-manager][cert-manager-issuer], or you can use the built-in signer. +You can configure an external signer such as [cert-manager](https://docs.cert-manager.io/en/latest/tasks/issuers/setup-ca.html), or you can use the built-in signer. -The built-in signer is part of [`kube-controller-manager`][kcm]. +The built-in signer is part of [`kube-controller-manager`](/docs/reference/command-line-tools-reference/kube-controller-manager/). To activate the built-in signer, you must pass the `--cluster-signing-cert-file` and `--cluster-signing-key-file` flags. -If you're creating a new cluster, you can use a kubeadm [configuration file][config]: +If you're creating a new cluster, you can use a kubeadm [configuration file](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2): - ```yaml - apiVersion: kubeadm.k8s.io/v1beta2 - kind: ClusterConfiguration - controllerManager: - extraArgs: - cluster-signing-cert-file: /etc/kubernetes/pki/ca.crt - cluster-signing-key-file: /etc/kubernetes/pki/ca.key - ``` - -[cert-manager-issuer]: https://docs.cert-manager.io/en/latest/tasks/issuers/setup-ca.html -[kcm]: /docs/reference/command-line-tools-reference/kube-controller-manager/ -[config]: https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 +```yaml +apiVersion: kubeadm.k8s.io/v1beta2 +kind: ClusterConfiguration +controllerManager: + extraArgs: + cluster-signing-cert-file: /etc/kubernetes/pki/ca.crt + cluster-signing-key-file: /etc/kubernetes/pki/ca.key +``` ### Create certificate signing requests (CSR) You can create the certificate signing requests for the Kubernetes certificates API with `kubeadm alpha certs renew --use-api`. -If you set up an external signer such as [cert-manager][cert-manager], certificate signing requests (CSRs) are automatically approved. -Otherwise, you must manually approve certificates with the [`kubectl certificate`][certs] command. +If you set up an external signer such as [cert-manager](https://github.com/jetstack/cert-manager), certificate signing requests (CSRs) are automatically approved. +Otherwise, you must manually approve certificates with the [`kubectl certificate`](/docs/setup/best-practices/certificates/) command. The following kubeadm command outputs the name of the certificate to approve, then blocks and waits for approval to occur: ```shell @@ -197,7 +189,7 @@ The output is similar to this: If you set up an external signer, certificate signing requests (CSRs) are automatically approved. -Otherwise, you must manually approve certificates with the [`kubectl certificate`][certs] command. e.g. +Otherwise, you must manually approve certificates with the [`kubectl certificate`](/docs/setup/best-practices/certificates/) command. e.g. ```shell kubectl certificate approve kubeadm-cert-kube-apiserver-ld526 @@ -229,20 +221,16 @@ Certificates can be renewed with `kubeadm alpha certs renew --csr-only`. As with `kubeadm init`, an output directory can be specified with the `--csr-dir` flag. A CSR contains a certificate's name, domains, and IPs, but it does not specify usages. 
-It is the responsibility of the CA to specify [the correct cert usages][cert-table] when issuing a certificate. +It is the responsibility of the CA to specify [the correct cert usages](/docs/setup/best-practices/certificates/#all-certificates) +when issuing a certificate. -* In `openssl` this is done with the [`openssl ca` command][openssl-ca]. -* In `cfssl` you specify [usages in the config file][cfssl-usages] +* In `openssl` this is done with the + [`openssl ca` command](https://superuser.com/questions/738612/openssl-ca-keyusage-extension). +* In `cfssl` you specify + [usages in the config file](https://github.com/cloudflare/cfssl/blob/master/doc/cmd/cfssl.txt#L170). After a certificate is signed using your preferred method, the certificate and the private key must be copied to the PKI directory (by default `/etc/kubernetes/pki`). -[cert-manager]: https://github.com/jetstack/cert-manager -[openssl-ca]: https://superuser.com/questions/738612/openssl-ca-keyusage-extension -[cfssl-usages]: https://github.com/cloudflare/cfssl/blob/master/doc/cmd/cfssl.txt#L170 -[certs]: /docs/setup/best-practices/certificates/ -[cert-cas]: /docs/setup/best-practices/certificates/#single-root-ca -[cert-table]: /docs/setup/best-practices/certificates/#all-certificates - ## Certificate authority (CA) rotation {#certificate-authority-rotation} Kubeadm does not support rotation or replacement of CA certificates out of the box. diff --git a/content/en/docs/tasks/administer-cluster/namespaces-walkthrough.md b/content/en/docs/tasks/administer-cluster/namespaces-walkthrough.md index 2bf0de8231733..1d3d34867cfb6 100644 --- a/content/en/docs/tasks/administer-cluster/namespaces-walkthrough.md +++ b/content/en/docs/tasks/administer-cluster/namespaces-walkthrough.md @@ -36,7 +36,7 @@ This example demonstrates how to use Kubernetes namespaces to subdivide your clu This example assumes the following: 1. You have an [existing Kubernetes cluster](/docs/setup/). -2. You have a basic understanding of Kubernetes _[Pods](/docs/concepts/workloads/pods/pod/)_, _[Services](/docs/concepts/services-networking/service/)_, and _[Deployments](/docs/concepts/workloads/controllers/deployment/)_. +2. You have a basic understanding of Kubernetes {{< glossary_tooltip text="Pods" term_id="pod" >}}, {{< glossary_tooltip term_id="service" text="Services" >}}, and {{< glossary_tooltip text="Deployments" term_id="deployment" >}}. ## Understand the default namespace diff --git a/content/en/docs/tasks/administer-cluster/namespaces.md b/content/en/docs/tasks/administer-cluster/namespaces.md index eabf58ff0b26d..3266f06602b52 100644 --- a/content/en/docs/tasks/administer-cluster/namespaces.md +++ b/content/en/docs/tasks/administer-cluster/namespaces.md @@ -13,7 +13,7 @@ This page shows how to view, work in, and delete {{< glossary_tooltip text="name ## {{% heading "prerequisites" %}} * Have an [existing Kubernetes cluster](/docs/setup/). -* Have a basic understanding of Kubernetes _[Pods](/docs/concepts/workloads/pods/pod/)_, _[Services](/docs/concepts/services-networking/service/)_, and _[Deployments](/docs/concepts/workloads/controllers/deployment/)_. +2. You have a basic understanding of Kubernetes {{< glossary_tooltip text="Pods" term_id="pod" >}}, {{< glossary_tooltip term_id="service" text="Services" >}}, and {{< glossary_tooltip text="Deployments" term_id="deployment" >}}. 
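For context on the namespaces pages touched above, a namespace itself is an ordinary API object; a minimal manifest is sketched below. The name `development` is only an illustrative placeholder and is not taken from those pages.

```yaml
# Illustrative minimal Namespace manifest; the name is a placeholder.
apiVersion: v1
kind: Namespace
metadata:
  name: development
```

Such a manifest could be applied with `kubectl apply -f` and then targeted by passing `--namespace=development` to later commands.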
diff --git a/content/en/docs/tasks/administer-cluster/reconfigure-kubelet.md b/content/en/docs/tasks/administer-cluster/reconfigure-kubelet.md index 6218e8ce81a67..7f56e4ec85500 100644 --- a/content/en/docs/tasks/administer-cluster/reconfigure-kubelet.md +++ b/content/en/docs/tasks/administer-cluster/reconfigure-kubelet.md @@ -38,7 +38,7 @@ if your cluster is running v1.16 then you can use kubectl v1.15, v1.16 or v1.17; other combinations [aren't supported](/docs/setup/release/version-skew-policy/#kubectl). -Some of the examples use the commandline tool +Some of the examples use the command line tool [jq](https://stedolan.github.io/jq/). You do not need `jq` to complete the task, because there are manual alternatives. @@ -380,4 +380,4 @@ internal failure, see Kubelet log for details | The kubelet encountered some int - For more information on configuring the kubelet via a configuration file, see [Set kubelet parameters via a config file](/docs/tasks/administer-cluster/kubelet-config-file). -- See the reference documentation for [`NodeConfigSource`](https://kubernetes.io/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#nodeconfigsource-v1-core) +- See the reference documentation for [`NodeConfigSource`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#nodeconfigsource-v1-core) diff --git a/content/en/docs/tasks/administer-cluster/safely-drain-node.md b/content/en/docs/tasks/administer-cluster/safely-drain-node.md index e18b2ed87d322..ed1b9657c81a1 100644 --- a/content/en/docs/tasks/administer-cluster/safely-drain-node.md +++ b/content/en/docs/tasks/administer-cluster/safely-drain-node.md @@ -34,7 +34,7 @@ This task assumes that you have met the following prerequisites: You can use `kubectl drain` to safely evict all of your pods from a node before you perform maintenance on the node (e.g. kernel upgrade, hardware maintenance, etc.). Safe evictions allow the pod's containers -to [gracefully terminate](/docs/concepts/workloads/pods/pod/#termination-of-pods) +to [gracefully terminate](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination) and will respect the `PodDisruptionBudgets` you have specified. {{< note >}} diff --git a/content/en/docs/tasks/configure-pod-container/attach-handler-lifecycle-event.md b/content/en/docs/tasks/configure-pod-container/attach-handler-lifecycle-event.md index f5116e76917da..00b9251be889f 100644 --- a/content/en/docs/tasks/configure-pod-container/attach-handler-lifecycle-event.md +++ b/content/en/docs/tasks/configure-pod-container/attach-handler-lifecycle-event.md @@ -75,7 +75,7 @@ set to RUNNING until the postStart handler completes. Kubernetes sends the preStop event immediately before the Container is terminated. Kubernetes' management of the Container blocks until the preStop handler completes, unless the Pod's grace period expires. For more details, see -[Termination of Pods](/docs/concepts/workloads/pods/pod/#termination-of-pods). +[Pod Lifecycle](/docs/concepts/workloads/pods/pod-lifecycle/). {{< note >}} Kubernetes only sends the preStop event when a Pod is *terminated*. 
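To make the postStart/preStop behaviour described in the lifecycle-event page above concrete, here is a minimal sketch of a Pod that registers both handlers. The image and the handler commands are assumptions chosen for illustration, not values taken from that page.

```yaml
# Illustrative Pod with lifecycle handlers; image and commands are placeholders.
apiVersion: v1
kind: Pod
metadata:
  name: lifecycle-demo
spec:
  containers:
  - name: lifecycle-demo-container
    image: nginx
    lifecycle:
      postStart:
        exec:
          # Runs inside the container right after it is created; the container
          # is not reported as RUNNING until this handler completes.
          command: ["/bin/sh", "-c", "echo postStart handler ran > /usr/share/message"]
      preStop:
        exec:
          # Runs immediately before termination; the kubelet blocks until the
          # handler completes or the Pod's grace period expires.
          command: ["/bin/sh", "-c", "nginx -s quit"]
```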
diff --git a/content/en/docs/tasks/configure-pod-container/configure-service-account.md b/content/en/docs/tasks/configure-pod-container/configure-service-account.md index 2486e520e1e5e..f1b1e22db9545 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-service-account.md +++ b/content/en/docs/tasks/configure-pod-container/configure-service-account.md @@ -316,7 +316,7 @@ kubectl create -f https://k8s.io/examples/pods/pod-projected-svc-token.yaml The kubelet will request and store the token on behalf of the pod, make the token available to the pod at a configurable file path, and refresh the token as it approaches expiration. Kubelet proactively rotates the token if it is older than 80% of its total TTL, or if the token is older than 24 hours. -The application is responsible for reloading the token when it rotates. Periodic reloading (e.g. once every 5 minutes) is sufficient for most usecases. +The application is responsible for reloading the token when it rotates. Periodic reloading (e.g. once every 5 minutes) is sufficient for most use cases. ## Service Account Issuer Discovery diff --git a/content/en/docs/tasks/configure-pod-container/security-context.md b/content/en/docs/tasks/configure-pod-container/security-context.md index 38662760b7b0b..db9a0aa96f400 100644 --- a/content/en/docs/tasks/configure-pod-container/security-context.md +++ b/content/en/docs/tasks/configure-pod-container/security-context.md @@ -30,8 +30,8 @@ a Pod or Container. Security context settings include, but are not limited to: * readOnlyRootFilesystem: Mounts the container's root filesystem as read-only. -The above bullets are not a complete set of security context settings -- please see -[SecurityContext](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#securitycontext-v1-core) +The above bullets are not a complete set of security context settings -- please see +[SecurityContext](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#securitycontext-v1-core) for a comprehensive list. For more information about security mechanisms in Linux, see @@ -59,11 +59,11 @@ Here is a configuration file for a Pod that has a `securityContext` and an `empt {{< codenew file="pods/security/security-context.yaml" >}} In the configuration file, the `runAsUser` field specifies that for any Containers in -the Pod, all processes run with user ID 1000. The `runAsGroup` field specifies the primary group ID of 3000 for +the Pod, all processes run with user ID 1000. The `runAsGroup` field specifies the primary group ID of 3000 for all processes within any containers of the Pod. If this field is omitted, the primary group ID of the containers -will be root(0). Any files created will also be owned by user 1000 and group 3000 when `runAsGroup` is specified. -Since `fsGroup` field is specified, all processes of the container are also part of the supplementary group ID 2000. -The owner for volume `/data/demo` and any files created in that volume will be Group ID 2000. +will be root(0). Any files created will also be owned by user 1000 and group 3000 when `runAsGroup` is specified. +Since `fsGroup` field is specified, all processes of the container are also part of the supplementary group ID 2000. +The owner for volume `/data/demo` and any files created in that volume will be Group ID 2000. Create the Pod: @@ -138,7 +138,7 @@ $ id uid=1000 gid=3000 groups=2000 ``` You will see that gid is 3000 which is same as `runAsGroup` field. 
If the `runAsGroup` was omitted the gid would -remain as 0(root) and the process will be able to interact with files that are owned by root(0) group and that have +remain as 0(root) and the process will be able to interact with files that are owned by root(0) group and that have the required group permissions for root(0) group. Exit your shell: @@ -180,9 +180,9 @@ This is an alpha feature. To use it, enable the [feature gate](/docs/reference/c {{< note >}} This field has no effect on ephemeral volume types such as -[`secret`](https://kubernetes.io/docs/concepts/storage/volumes/#secret), -[`configMap`](https://kubernetes.io/docs/concepts/storage/volumes/#configmap), -and [`emptydir`](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir). +[`secret`](/docs/concepts/storage/volumes/#secret), +[`configMap`](/docs/concepts/storage/volumes/#configmap), +and [`emptydir`](/docs/concepts/storage/volumes/#emptydir). {{< /note >}} @@ -423,6 +423,3 @@ kubectl delete pod security-context-demo-4 * [Pod Security Policies](/docs/concepts/policy/pod-security-policy/) * [AllowPrivilegeEscalation design document](https://git.k8s.io/community/contributors/design-proposals/auth/no-new-privs.md) - - - diff --git a/content/en/docs/tasks/configure-pod-container/static-pod.md b/content/en/docs/tasks/configure-pod-container/static-pod.md index 5189fdb882454..cf31d822d6ce0 100644 --- a/content/en/docs/tasks/configure-pod-container/static-pod.md +++ b/content/en/docs/tasks/configure-pod-container/static-pod.md @@ -14,7 +14,7 @@ without the {{< glossary_tooltip text="API server" term_id="kube-apiserver" >}} observing them. Unlike Pods that are managed by the control plane (for example, a {{< glossary_tooltip text="Deployment" term_id="deployment" >}}); -instead, the kubelet watches each static Pod (and restarts it if it crashes). +instead, the kubelet watches each static Pod (and restarts it if it fails). Static Pods are always bound to one {{< glossary_tooltip term_id="kubelet" >}} on a specific node. diff --git a/content/en/docs/tasks/debug-application-cluster/audit.md b/content/en/docs/tasks/debug-application-cluster/audit.md index 600af51d003af..14d5bef7f1df9 100644 --- a/content/en/docs/tasks/debug-application-cluster/audit.md +++ b/content/en/docs/tasks/debug-application-cluster/audit.md @@ -22,12 +22,10 @@ answer the following questions: - from where was it initiated? - to where was it going? - - - -[Kube-apiserver][kube-apiserver] performs auditing. Each request on each stage +[Kube-apiserver](/docs/reference/command-line-tools-reference/kube-apiserver/) +performs auditing. Each request on each stage of its execution generates an event, which is then pre-processed according to a certain policy and written to a backend. The policy determines what's recorded and the backends persist the records. The current backend implementations @@ -55,7 +53,8 @@ Additionally, memory consumption depends on the audit logging configuration. Audit policy defines rules about what events should be recorded and what data they should include. The audit policy object structure is defined in the -[`audit.k8s.io` API group][auditing-api]. When an event is processed, it's +[`audit.k8s.io` API group](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/staging/src/k8s.io/apiserver/pkg/apis/audit/v1/types.go). +When an event is processed, it's compared against the list of rules in order. The first matching rule sets the "audit level" of the event. 
The known audit levels are: @@ -67,7 +66,7 @@ compared against the list of rules in order. The first matching rule sets the - `RequestResponse` - log event metadata, request and response bodies. This does not apply for non-resource requests. -You can pass a file with the policy to [kube-apiserver][kube-apiserver] +You can pass a file with the policy to `kube-apiserver` using the `--audit-policy-file` flag. If the flag is omitted, no events are logged. Note that the `rules` field __must__ be provided in the audit policy file. A policy with no (0) rules is treated as illegal. @@ -86,12 +85,14 @@ rules: - level: Metadata ``` -The audit profile used by GCE should be used as reference by admins constructing their own audit profiles. You can check the [configure-helper.sh][configure-helper] script, which generates the audit policy file. You can see most of the audit policy file by looking directly at the script. +The audit profile used by GCE should be used as reference by admins constructing their own audit profiles. You can check the +[configure-helper.sh](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/cluster/gce/gci/configure-helper.sh) +script, which generates the audit policy file. You can see most of the audit policy file by looking directly at the script. ## Audit backends Audit backends persist audit events to an external storage. -[Kube-apiserver][kube-apiserver] out of the box provides three backends: +`Kube-apiserver` out of the box provides three backends: - Log backend, which writes events to a disk - Webhook backend, which sends events to an external API @@ -99,7 +100,7 @@ Audit backends persist audit events to an external storage. In all cases, audit events structure is defined by the API in the `audit.k8s.io` API group. The current version of the API is -[`v1`][auditing-api]. +[`v1`](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/staging/src/k8s.io/apiserver/pkg/apis/audit/v1/types.go). {{< note >}} In case of patches, request body is a JSON array with patch operations, not a JSON object @@ -125,7 +126,7 @@ request to `/apis/batch/v1/namespaces/some-namespace/jobs/some-job-name`. ### Log backend Log backend writes audit events to a file in JSON format. You can configure -log audit backend using the following [kube-apiserver][kube-apiserver] flags: +log audit backend using the following `kube-apiserver` flags: - `--audit-log-path` specifies the log file path that log backend uses to write audit events. Not specifying this flag disables log backend. `-` means standard out @@ -136,11 +137,12 @@ log audit backend using the following [kube-apiserver][kube-apiserver] flags: ### Webhook backend Webhook backend sends audit events to a remote API, which is assumed to be the -same API as [kube-apiserver][kube-apiserver] exposes. You can configure webhook +same API as `kube-apiserver` exposes. You can configure webhook audit backend using the following kube-apiserver flags: - `--audit-webhook-config-file` specifies the path to a file with a webhook - configuration. Webhook configuration is effectively a [kubeconfig][kubeconfig]. + configuration. Webhook configuration is effectively a + [kubeconfig](/docs/tasks/access-application-cluster/configure-access-multiple-clusters). - `--audit-webhook-initial-backoff` specifies the amount of time to wait after the first failed request before retrying. Subsequent requests are retried with exponential backoff. 
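Because the webhook configuration mentioned above is effectively a kubeconfig, a file of roughly the following shape could be passed via `--audit-webhook-config-file`. The server URL and certificate path here are assumptions for illustration only.

```yaml
# Illustrative audit webhook configuration (kubeconfig format).
# The server URL and CA certificate path are placeholders.
apiVersion: v1
kind: Config
clusters:
- name: audit-webhook
  cluster:
    certificate-authority: /etc/kubernetes/pki/audit-webhook-ca.crt
    server: https://audit.example.com/events
users: []
contexts:
- name: default-context
  context:
    cluster: audit-webhook
    user: ""
current-context: default-context
```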
@@ -327,23 +329,29 @@ Currently, this feature has performance implications for the apiserver in the fo ## Setup for multiple API servers -If you're extending the Kubernetes API with the [aggregation layer][kube-aggregator], you can also -set up audit logging for the aggregated apiserver. To do this, pass the configuration options in the -same format as described above to the aggregated apiserver and set up the log ingesting pipeline -to pick up audit logs. Different apiservers can have different audit configurations and different -audit policies. +If you're extending the Kubernetes API with the [aggregation +layer](/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/), +you can also set up audit logging for the aggregated apiserver. To do this, +pass the configuration options in the same format as described above to the +aggregated apiserver and set up the log ingesting pipeline to pick up audit +logs. Different apiservers can have different audit configurations and +different audit policies. ## Log Collector Examples ### Use fluentd to collect and distribute audit events from log file -[Fluentd][fluentd] is an open source data collector for unified logging layer. +[Fluentd](http://www.fluentd.org/) is an open source data collector for unified logging layer. In this example, we will use fluentd to split audit events by different namespaces. -{{< note >}}Fluent-plugin-forest and fluent-plugin-rewrite-tag-filter are plugins for fluentd. You can get details about plugin installation from [fluentd plugin-management][fluentd_plugin_management_doc]. +{{< note >}} +The `fluent-plugin-forest` and `fluent-plugin-rewrite-tag-filter` are plugins for fluentd. +You can get details about plugin installation from +[fluentd plugin-management](https://docs.fluentd.org/v1.0/articles/plugin-management). {{< /note >}} -1. Install [fluentd][fluentd_install_doc], fluent-plugin-forest and fluent-plugin-rewrite-tag-filter in the kube-apiserver node +1. Install [`fluentd`](https://docs.fluentd.org/v1.0/articles/quickstart#step-1:-installing-fluentd), + `fluent-plugin-forest` and `fluent-plugin-rewrite-tag-filter` in the kube-apiserver node 1. Create a config file for fluentd @@ -416,11 +424,12 @@ In this example, we will use fluentd to split audit events by different namespac ### Use logstash to collect and distribute audit events from webhook backend -[Logstash][logstash] is an open source, server-side data processing tool. In this example, +[Logstash](https://www.elastic.co/products/logstash) +is an open source, server-side data processing tool. In this example, we will use logstash to collect audit events from webhook backend, and save events of different users into different files. -1. install [logstash][logstash_install_doc] +1. install [logstash](https://www.elastic.co/guide/en/logstash/current/installing-logstash.html) 1. create config file for logstash @@ -491,19 +500,6 @@ Note that in addition to file output plugin, logstash has a variety of outputs t let users route data where they want. For example, users can emit audit events to elasticsearch plugin which supports full-text search and analytics. 
-[kube-apiserver]: /docs/reference/command-line-tools-reference/kube-apiserver/ -[auditing-proposal]: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/auditing.md -[auditing-api]: https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/staging/src/k8s.io/apiserver/pkg/apis/audit/v1/types.go -[configure-helper]: https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/cluster/gce/gci/configure-helper.sh -[kubeconfig]: /docs/tasks/access-application-cluster/configure-access-multiple-clusters/ -[fluentd]: http://www.fluentd.org/ -[fluentd_install_doc]: https://docs.fluentd.org/v1.0/articles/quickstart#step-1:-installing-fluentd -[fluentd_plugin_management_doc]: https://docs.fluentd.org/v1.0/articles/plugin-management -[logstash]: https://www.elastic.co/products/logstash -[logstash_install_doc]: https://www.elastic.co/guide/en/logstash/current/installing-logstash.html -[kube-aggregator]: /docs/concepts/api-extension/apiserver-aggregation - - ## {{% heading "whatsnext" %}} diff --git a/content/en/docs/tasks/debug-application-cluster/debug-application.md b/content/en/docs/tasks/debug-application-cluster/debug-application.md index a5c37541c3d60..edd23c35e76c2 100644 --- a/content/en/docs/tasks/debug-application-cluster/debug-application.md +++ b/content/en/docs/tasks/debug-application-cluster/debug-application.md @@ -118,7 +118,7 @@ You can view this resource with: kubectl get endpoints ${SERVICE_NAME} ``` -Make sure that the endpoints match up with the number of containers that you expect to be a member of your service. +Make sure that the endpoints match up with the number of pods that you expect to be members of your service. For example, if your Service is for an nginx container with 3 replicas, you would expect to see three different IP addresses in the Service's endpoints. diff --git a/content/en/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md b/content/en/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md index 9793b472e0c9f..8fb5bffd37e7a 100644 --- a/content/en/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md +++ b/content/en/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md @@ -17,7 +17,8 @@ This page shows how to debug Pods and ReplicationControllers. {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} * You should be familiar with the basics of - [Pods](/docs/concepts/workloads/pods/pod/) and [Pod Lifecycle](/docs/concepts/workloads/pods/pod-lifecycle/). + {{< glossary_tooltip text="Pods" term_id="pod" >}} and with + Pods' [lifecycles](/docs/concepts/workloads/pods/pod-lifecycle/). diff --git a/content/en/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md b/content/en/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md index 44dcf0e90986f..543573781be01 100644 --- a/content/en/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md +++ b/content/en/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md @@ -78,7 +78,7 @@ only the termination message: ## Customizing the termination message Kubernetes retrieves termination messages from the termination message file -specified in the `terminationMessagePath` field of a Container, which as a default +specified in the `terminationMessagePath` field of a Container, which has a default value of `/dev/termination-log`. By customizing this field, you can tell Kubernetes to use a different file. 
Kubernetes use the contents from the specified file to populate the Container's status message on both success and failure. diff --git a/content/en/docs/tasks/debug-application-cluster/falco.md b/content/en/docs/tasks/debug-application-cluster/falco.md deleted file mode 100644 index 2b6eb9323dd1a..0000000000000 --- a/content/en/docs/tasks/debug-application-cluster/falco.md +++ /dev/null @@ -1,121 +0,0 @@ ---- -reviewers: -- soltysh -- sttts -- ericchiang -content_type: concept -title: Auditing with Falco ---- - - -### Use Falco to collect audit events - -[Falco](https://falco.org/) is an open source project for intrusion and abnormality detection for Cloud Native platforms. -This section describes how to set up Falco, how to send audit events to the Kubernetes Audit endpoint exposed by Falco, and how Falco applies a set of rules to automatically detect suspicious behavior. - - - - - - -#### Install Falco - -Install Falco by using one of the following methods: - -- [Standalone Falco][falco_installation] -- [Kubernetes DaemonSet][falco_installation] -- [Falco Helm Chart][falco_helm_chart] - -Once Falco is installed make sure it is configured to expose the Audit webhook. To do so, use the following configuration: - -```yaml -webserver: - enabled: true - listen_port: 8765 - k8s_audit_endpoint: /k8s_audit - ssl_enabled: false - ssl_certificate: /etc/falco/falco.pem -``` - -This configuration is typically found in the `/etc/falco/falco.yaml` file. If Falco is installed as a Kubernetes DaemonSet, edit the `falco-config` ConfigMap and add this configuration. - -#### Configure Kubernetes Audit - -1. Create a [kubeconfig file](/docs/concepts/configuration/organize-cluster-access-kubeconfig/) for the [kube-apiserver][kube-apiserver] webhook audit backend. - - cat < /etc/kubernetes/audit-webhook-kubeconfig - apiVersion: v1 - kind: Config - clusters: - - cluster: - server: http://:8765/k8s_audit - name: falco - contexts: - - context: - cluster: falco - user: "" - name: default-context - current-context: default-context - preferences: {} - users: [] - EOF - -1. Start [kube-apiserver][kube-apiserver] with the following options: - - ```shell - --audit-policy-file=/etc/kubernetes/audit-policy.yaml --audit-webhook-config-file=/etc/kubernetes/audit-webhook-kubeconfig - ``` - -#### Audit Rules - -Rules devoted to Kubernetes Audit Events can be found in [k8s_audit_rules.yaml][falco_k8s_audit_rules]. If Audit Rules is installed as a native package or using the official Docker images, Falco copies the rules file to `/etc/falco/`, so they are available for use. - -There are three classes of rules. - -The first class of rules looks for suspicious or exceptional activities, such as: - -- Any activity by an unauthorized or anonymous user. -- Creating a pod with an unknown or disallowed image. -- Creating a privileged pod, a pod mounting a sensitive filesystem from the host, or a pod using host networking. -- Creating a NodePort service. -- Creating a ConfigMap containing private credentials, such as passwords and cloud provider secrets. -- Attaching to or executing a command on a running pod. -- Creating a namespace external to a set of allowed namespaces. -- Creating a pod or service account in the kube-system or kube-public namespaces. -- Trying to modify or delete a system ClusterRole. -- Creating a ClusterRoleBinding to the cluster-admin role. -- Creating a ClusterRole with wildcarded verbs or resources. For example, overly permissive. 
-- Creating a ClusterRole with write permissions or a ClusterRole that can execute commands on pods. - -A second class of rules tracks resources being created or destroyed, including: - -- Deployments -- Services -- ConfigMaps -- Namespaces -- Service accounts -- Role/ClusterRoles -- Role/ClusterRoleBindings - -The final class of rules simply displays any Audit Event received by Falco. This rule is disabled by default, as it can be quite noisy. - -For further details, see [Kubernetes Audit Events][falco_ka_docs] in the Falco documentation. - -[kube-apiserver]: /docs/admin/kube-apiserver -[auditing-proposal]: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/auditing.md -[auditing-api]: https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/staging/src/k8s.io/apiserver/pkg/apis/audit/v1/types.go -[gce-audit-profile]: https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/cluster/gce/gci/configure-helper.sh#L735 -[kubeconfig]: /docs/tasks/access-application-cluster/configure-access-multiple-clusters/ -[fluentd]: http://www.fluentd.org/ -[fluentd_install_doc]: https://docs.fluentd.org/v1.0/articles/quickstart#step-1:-installing-fluentd -[fluentd_plugin_management_doc]: https://docs.fluentd.org/v1.0/articles/plugin-management -[logstash]: https://www.elastic.co/products/logstash -[logstash_install_doc]: https://www.elastic.co/guide/en/logstash/current/installing-logstash.html -[kube-aggregator]: /docs/concepts/api-extension/apiserver-aggregation -[falco_website]: https://www.falco.org -[falco_k8s_audit_rules]: https://github.com/falcosecurity/falco/blob/master/rules/k8s_audit_rules.yaml -[falco_ka_docs]: https://falco.org/docs/event-sources/kubernetes-audit -[falco_installation]: https://falco.org/docs/installation -[falco_helm_chart]: https://github.com/falcosecurity/charts/tree/master/falco - - diff --git a/content/en/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md b/content/en/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md index dbd4aa6cf4771..098776cb7b7e5 100644 --- a/content/en/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md +++ b/content/en/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md @@ -41,7 +41,7 @@ The API requires metrics server to be deployed in the cluster. Otherwise it will ### CPU -CPU is reported as the average usage, in [CPU cores](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu), over a period of time. This value is derived by taking a rate over a cumulative CPU counter provided by the kernel (in both Linux and Windows kernels). The kubelet chooses the window for the rate calculation. +CPU is reported as the average usage, in [CPU cores](/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu), over a period of time. This value is derived by taking a rate over a cumulative CPU counter provided by the kernel (in both Linux and Windows kernels). The kubelet chooses the window for the rate calculation. ### Memory @@ -60,5 +60,3 @@ Metrics Server is registered with the main API server through [Kubernetes aggregator](/docs/concepts/api-extension/apiserver-aggregation/). Learn more about the metrics server in [the design doc](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/instrumentation/metrics-server.md). 
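As a sketch of how the aggregation-layer registration described above typically looks, an APIService object for the metrics API is shown below. The field values mirror a common Metrics Server deployment and may differ from the manifest actually shipped with a given release.

```yaml
# Illustrative APIService registering the metrics API with the API aggregator.
# Values follow a typical metrics-server deployment and are not authoritative.
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  version: v1beta1
  service:
    name: metrics-server
    namespace: kube-system
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100
```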
- - diff --git a/content/en/docs/tasks/example-task-template.md b/content/en/docs/tasks/example-task-template.md deleted file mode 100644 index 90d14e98dad8d..0000000000000 --- a/content/en/docs/tasks/example-task-template.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: Example Task Template -reviewers: -- chenopis -content_type: task -toc_hide: true ---- - - - -{{< note >}} -Be sure to also [create an entry in the table of contents](/docs/contribute/style/write-new-topic/#placing-your-topic-in-the-table-of-contents) for your new document. -{{< /note >}} - -This page shows how to ... - - - -## {{% heading "prerequisites" %}} - - -* {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -* Do this. -* Do this too. - - - - - -## Doing ... - -1. Do this. -1. Do this next. Possibly read this [related explanation](#). - - - - - -## Understanding ... -**[Optional Section]** - -Here's an interesting thing to know about the steps you just did. - - - -## {{% heading "whatsnext" %}} - - -**[Optional Section]** - -* Learn more about [Writing a New Topic](/docs/home/contribute/write-new-topic/). -* Learn about [Page Content Types - Task](/docs/home/contribute/style/page-content-types/#task). diff --git a/content/en/docs/tasks/extend-kubernetes/configure-multiple-schedulers.md b/content/en/docs/tasks/extend-kubernetes/configure-multiple-schedulers.md index 4afbee21c8a6b..b14777111e182 100644 --- a/content/en/docs/tasks/extend-kubernetes/configure-multiple-schedulers.md +++ b/content/en/docs/tasks/extend-kubernetes/configure-multiple-schedulers.md @@ -129,45 +129,8 @@ If RBAC is enabled on your cluster, you must update the `system:kube-scheduler` ``` kubectl edit clusterrole system:kube-scheduler ``` -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - annotations: - rbac.authorization.kubernetes.io/autoupdate: "true" - labels: - kubernetes.io/bootstrapping: rbac-defaults - name: system:kube-scheduler -rules: -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create -- apiGroups: - - coordination.k8s.io - resourceNames: - - kube-scheduler - - my-scheduler - resources: - - leases - verbs: - - get - - update -- apiGroups: - - "" - resourceNames: - - kube-scheduler - - my-scheduler - resources: - - endpoints - verbs: - - delete - - get - - patch - - update -``` + +{{< codenew file="admin/sched/clusterrole.yaml" >}} ## Specify schedulers for pods diff --git a/content/en/docs/tasks/extend-kubernetes/http-proxy-access-api.md b/content/en/docs/tasks/extend-kubernetes/http-proxy-access-api.md index dd80c8c349063..b3aae7fc3e6ca 100644 --- a/content/en/docs/tasks/extend-kubernetes/http-proxy-access-api.md +++ b/content/en/docs/tasks/extend-kubernetes/http-proxy-access-api.md @@ -17,7 +17,7 @@ If you do not already have an application running in your cluster, start a Hello world application by entering this command: ```shell -kubectl run node-hello --image=gcr.io/google-samples/node-hello:1.0 --port=8080 +kubectl create deployment node-hello --image=gcr.io/google-samples/node-hello:1.0 --port=8080 ``` diff --git a/content/en/docs/tasks/inject-data-application/define-interdependent-environment-variables.md b/content/en/docs/tasks/inject-data-application/define-interdependent-environment-variables.md new file mode 100644 index 0000000000000..74c5c245db4ea --- /dev/null +++ b/content/en/docs/tasks/inject-data-application/define-interdependent-environment-variables.md @@ -0,0 +1,78 @@ +--- +title: Define Dependent Environment Variables +content_type: task 
+weight: 20 +--- + + + +This page shows how to define dependent environment variables for a container +in a Kubernetes Pod. + + +## {{% heading "prerequisites" %}} + + +{{< include "task-tutorial-prereqs.md" >}} + + + + +## Define an environment dependent variable for a container + +When you create a Pod, you can set dependent environment variables for the containers that run in the Pod. To set dependent environment variables, you can use $(VAR_NAME) in the `value` of `env` in the configuration file. + +In this exercise, you create a Pod that runs one container. The configuration +file for the Pod defines an dependent environment variable with common usage defined. Here is the configuration manifest for the +Pod: + +{{< codenew file="pods/inject/dependent-envars.yaml" >}} + +1. Create a Pod based on that manifest: + + ```shell + kubectl apply -f https://k8s.io/examples/pods/inject/dependent-envars.yaml + ``` + ``` + pod/dependent-envars-demo created + ``` + +2. List the running Pods: + + ```shell + kubectl get pods dependent-envars-demo + ``` + ``` + NAME READY STATUS RESTARTS AGE + dependent-envars-demo 1/1 Running 0 9s + ``` + +3. Check the logs for the container running in your Pod: + + ```shell + kubectl logs pod/dependent-envars-demo + ``` + ``` + + UNCHANGED_REFERENCE=$(PROTOCOL)://172.17.0.1:80 + SERVICE_ADDRESS=https://172.17.0.1:80 + ESCAPED_REFERENCE=$(PROTOCOL)://172.17.0.1:80 + ``` + +As shown above, you have defined the correct dependency reference of `SERVICE_ADDRESS`, bad dependency reference of `UNCHANGED_REFERENCE` and skip dependent references of `ESCAPED_REFERENCE`. + +When an environment variable is already defined when being referenced, +the reference can be correctly resolved, such as in the `SERVICE_ADDRESS` case. + +When the environment variable is undefined or only includes some variables, the undefined environment variable is treated as a normal string, such as `UNCHANGED_REFERENCE`. Note that incorrectly parsed environment variables, in general, will not block the container from starting. + +The `$(VAR_NAME)` syntax can be escaped with a double `$`, ie: `$$(VAR_NAME)`. +Escaped references are never expanded, regardless of whether the referenced variable +is defined or not. This can be seen from the `ESCAPED_REFERENCE` case above. + +## {{% heading "whatsnext" %}} + + +* Learn more about [environment variables](/docs/tasks/inject-data-application/environment-variable-expose-pod-information/). +* See [EnvVarSource](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#envvarsource-v1-core). + diff --git a/content/en/docs/tasks/inject-data-application/podpreset.md b/content/en/docs/tasks/inject-data-application/podpreset.md index 6533629ce4c91..9eea08232154e 100644 --- a/content/en/docs/tasks/inject-data-application/podpreset.md +++ b/content/en/docs/tasks/inject-data-application/podpreset.md @@ -140,7 +140,7 @@ verify that the preset has been applied. ## ReplicaSet with Pod spec example -This is an example to show that only Pod specs are modified by Pod presets. Other workload types +This is an example to show that only Pod specs are modified by Pod presets. Other workload types like ReplicaSets or Deployments are unaffected. Here is the manifest for the PodPreset for this example: @@ -290,7 +290,7 @@ kubectl get pod website -o yaml You can see there is no preset annotation (`podpreset.admission.kubernetes.io`). Seeing no annotation tells you that no preset has not been applied to the Pod. 
However, the -[PodPreset admission controller](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#podpreset) +[PodPreset admission controller](/docs/reference/access-authn-authz/admission-controllers/#podpreset) logs a warning containing details of the conflict. You can view the warning using `kubectl`: @@ -301,7 +301,7 @@ kubectl -n kube-system logs -l=component=kube-apiserver The output should look similar to: ``` -W1214 13:00:12.987884 1 admission.go:147] conflict occurred while applying podpresets: allow-database on pod: err: merging volume mounts for allow-database has a conflict on mount path /cache: +W1214 13:00:12.987884 1 admission.go:147] conflict occurred while applying podpresets: allow-database on pod: err: merging volume mounts for allow-database has a conflict on mount path /cache: v1.VolumeMount{Name:"other-volume", ReadOnly:false, MountPath:"/cache", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""} does not match core.VolumeMount{Name:"cache-volume", ReadOnly:false, MountPath:"/cache", SubPath:"", MountPropagation:(*core.MountPropagationMode)(nil), SubPathExpr:""} @@ -321,5 +321,3 @@ The output shows that the PodPreset was deleted: ``` podpreset "allow-database" deleted ``` - - diff --git a/content/en/docs/tasks/run-application/delete-stateful-set.md b/content/en/docs/tasks/run-application/delete-stateful-set.md index 7a4a94fab4537..57e54e679722a 100644 --- a/content/en/docs/tasks/run-application/delete-stateful-set.md +++ b/content/en/docs/tasks/run-application/delete-stateful-set.md @@ -58,7 +58,7 @@ kubectl delete pods -l app=myapp ### Persistent Volumes -Deleting the Pods in a StatefulSet will not delete the associated volumes. This is to ensure that you have the chance to copy data off the volume before deleting it. Deleting the PVC after the pods have left the [terminating state](/docs/concepts/workloads/pods/pod/#termination-of-pods) might trigger deletion of the backing Persistent Volumes depending on the storage class and reclaim policy. You should never assume ability to access a volume after claim deletion. +Deleting the Pods in a StatefulSet will not delete the associated volumes. This is to ensure that you have the chance to copy data off the volume before deleting it. Deleting the PVC after the pods have terminated might trigger deletion of the backing Persistent Volumes depending on the storage class and reclaim policy. You should never assume ability to access a volume after claim deletion. {{< note >}} Use caution when deleting a PVC, as it may lead to data loss. diff --git a/content/en/docs/tasks/run-application/force-delete-stateful-set-pod.md b/content/en/docs/tasks/run-application/force-delete-stateful-set-pod.md index 48a61a260d964..e706c6179a55f 100644 --- a/content/en/docs/tasks/run-application/force-delete-stateful-set-pod.md +++ b/content/en/docs/tasks/run-application/force-delete-stateful-set-pod.md @@ -37,7 +37,7 @@ You can perform a graceful pod deletion with the following command: kubectl delete pods ``` -For the above to lead to graceful termination, the Pod **must not** specify a `pod.Spec.TerminationGracePeriodSeconds` of 0. The practice of setting a `pod.Spec.TerminationGracePeriodSeconds` of 0 seconds is unsafe and strongly discouraged for StatefulSet Pods. Graceful deletion is safe and will ensure that the [Pod shuts down gracefully](/docs/concepts/workloads/pods/pod/#termination-of-pods) before the kubelet deletes the name from the apiserver. 
+For the above to lead to graceful termination, the Pod **must not** specify a `pod.Spec.TerminationGracePeriodSeconds` of 0. The practice of setting a `pod.Spec.TerminationGracePeriodSeconds` of 0 seconds is unsafe and strongly discouraged for StatefulSet Pods. Graceful deletion is safe and will ensure that the Pod [shuts down gracefully](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination) before the kubelet deletes the name from the apiserver. Kubernetes (versions 1.5 or newer) will not delete Pods just because a Node is unreachable. The Pods running on an unreachable Node enter the 'Terminating' or 'Unknown' state after a [timeout](/docs/admin/node/#node-condition). Pods may also enter these states when the user attempts graceful deletion of a Pod on an unreachable Node. The only ways in which a Pod in such a state can be removed from the apiserver are as follows: diff --git a/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md b/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md index 7f3b046b6838e..6806ba0dc0129 100644 --- a/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md +++ b/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md @@ -47,7 +47,7 @@ The Dockerfile has the following content: ``` FROM php:5-apache -ADD index.php /var/www/html/index.php +COPY index.php /var/www/html/index.php RUN chmod a+rx index.php ``` diff --git a/content/en/docs/tasks/tls/manual-rotation-of-ca-certificates.md b/content/en/docs/tasks/tls/manual-rotation-of-ca-certificates.md index 41466087609ab..a55ff3b5fdd6c 100644 --- a/content/en/docs/tasks/tls/manual-rotation-of-ca-certificates.md +++ b/content/en/docs/tasks/tls/manual-rotation-of-ca-certificates.md @@ -1,7 +1,7 @@ --- title: Manual Rotation of CA Certificates min-kubernetes-server-version: v1.13 -content_template: templates/task +content_type: task --- diff --git a/content/en/docs/tasks/tools/_index.md b/content/en/docs/tasks/tools/_index.md index cabf9a3c7be00..7f43d34be71d3 100755 --- a/content/en/docs/tasks/tools/_index.md +++ b/content/en/docs/tasks/tools/_index.md @@ -2,5 +2,40 @@ title: "Install Tools" description: Set up Kubernetes tools on your computer. weight: 10 +no_list: true --- +## kubectl + +The Kubernetes command-line tool, `kubectl`, allows you to run commands against +Kubernetes clusters. You can use kubectl to deploy applications, inspect and manage +cluster resources, and view logs. + +See [Install and Set Up kubectl](/docs/tasks/tools/install-kubectl/) for information about how to +download and install `kubectl` and set it up for accessing your cluster. + +You can also read the [`kubectl` reference documentation](/docs/reference/kubectl/). + +## Minikube + +[Minikube](https://minikube.sigs.k8s.io/) is a tool that lets you run +Kubernetes locally. Minikube runs a single-node Kubernetes cluster on your personal +computer (including Windows, macOS and Linux PCs) so that you can try out Kubernetes, +or for daily development work. + +You can follow the official [Get Started!](https://minikube.sigs.k8s.io/docs/start/) +guide, or read [Install Minikube](/docs/tasks/tools/install-minikube/) if your focus +is on getting the tool installed. + +Once you have Minikube working, you can use it to +[run a sample application](/docs/tutorials/hello-minikube/). + +## kind + +Like Minikube, [kind](https://kind.sigs.k8s.io/docs/) lets you run Kubernetes on +your local compute. 
Unlike Minikube, kind only works with a single container runtime: +it requires that you have [Docker](https://docs.docker.com/get-docker/) installed +and configured. + +[Quick Start](https://kind.sigs.k8s.io/docs/user/quick-start/) shows you what you +need to do to get up and running with kind. diff --git a/content/en/docs/tasks/tools/install-kubectl.md b/content/en/docs/tasks/tools/install-kubectl.md index 25b5cab9b5318..22b960751f23c 100644 --- a/content/en/docs/tasks/tools/install-kubectl.md +++ b/content/en/docs/tasks/tools/install-kubectl.md @@ -28,7 +28,7 @@ You must use a kubectl version that is within one minor version difference of yo 1. Download the latest release with the command: ``` - curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl + curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl" ``` To download a specific version, replace the `$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)` portion of the command with the specific version. diff --git a/content/en/docs/tasks/tools/install-minikube.md b/content/en/docs/tasks/tools/install-minikube.md index f1f3788141467..a5e7ed0c2b012 100644 --- a/content/en/docs/tasks/tools/install-minikube.md +++ b/content/en/docs/tasks/tools/install-minikube.md @@ -206,7 +206,7 @@ To confirm successful installation of both a hypervisor and Minikube, you can ru {{< note >}} -For setting the `--driver` with `minikube start`, enter the name of the hypervisor you installed in lowercase letters where `<driver_name>` is mentioned below. A full list of `--driver` values is available in [specifying the VM driver documentation](https://kubernetes.io/docs/setup/learning-environment/minikube/#specifying-the-vm-driver). +For setting the `--driver` with `minikube start`, enter the name of the hypervisor you installed in lowercase letters where `<driver_name>` is mentioned below. A full list of `--driver` values is available in [specifying the VM driver documentation](/docs/setup/learning-environment/minikube/#specifying-the-vm-driver). {{< /note >}} diff --git a/content/en/docs/test.md b/content/en/docs/test.md index 848decff35411..a08aeb3caabed 100644 --- a/content/en/docs/test.md +++ b/content/en/docs/test.md @@ -235,7 +235,6 @@ link target in parentheses. [Link to Kubernetes.io](https://kubernetes.io/) or You can also use HTML, but it is not preferred. Link to Kubernetes.io - ## Images To format an image, use similar syntax to [links](#links), but add a leading `!` diff --git a/content/en/docs/tutorials/_index.md b/content/en/docs/tutorials/_index.md index 0deadcd945d88..2313d78e8770b 100644 --- a/content/en/docs/tutorials/_index.md +++ b/content/en/docs/tutorials/_index.md @@ -1,6 +1,7 @@ --- title: Tutorials main_menu: true +no_list: true weight: 60 content_type: concept --- @@ -14,8 +15,6 @@ each of which has a sequence of steps. Before walking through each tutorial, you may want to bookmark the [Standardized Glossary](/docs/reference/glossary/) page for later references. - - ## Basics @@ -64,13 +63,8 @@ Before walking through each tutorial, you may want to bookmark the * [Using Source IP](/docs/tutorials/services/source-ip/) - - ## {{% heading "whatsnext" %}} - If you would like to write a tutorial, see [Content Page Types](/docs/contribute/style/page-content-types/) for information about the tutorial page type.
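The kind write-up above ends by pointing at its Quick Start guide. As a purely illustrative aside, not part of this diff, kind can also be driven by a small cluster configuration file; the file name and node layout below are assumptions chosen for the example:

```yaml
# Hypothetical kind configuration: one control-plane node plus one worker.
# Assumed usage: kind create cluster --config kind-config.yaml
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: worker
```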
- - diff --git a/content/en/docs/tutorials/hello-minikube.md b/content/en/docs/tutorials/hello-minikube.md index 9ba2de1abfbb3..f0aa44369e532 100644 --- a/content/en/docs/tutorials/hello-minikube.md +++ b/content/en/docs/tutorials/hello-minikube.md @@ -65,7 +65,7 @@ This tutorial provides a container image that uses NGINX to echo back all the re ## Create a Deployment -A Kubernetes [*Pod*](/docs/concepts/workloads/pods/pod/) is a group of one or more Containers, +A Kubernetes [*Pod*](/docs/concepts/workloads/pods/) is a group of one or more Containers, tied together for the purposes of administration and networking. The Pod in this tutorial has only one Container. A Kubernetes [*Deployment*](/docs/concepts/workloads/controllers/deployment/) checks on the health of your diff --git a/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html b/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html index 6d7e15a7c44c2..fb782458de356 100644 --- a/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html +++ b/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html @@ -20,7 +20,7 @@
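The hello-minikube passage above describes a Pod as a group of one or more containers managed together, and a Deployment that checks on the health of your Pod and restarts its container if needed. Purely as a hedged sketch, not taken from this diff, a single-container Deployment of that shape might look like the following; the name, labels, and image are assumptions for the example:

```yaml
# Illustrative only: a minimal single-container Deployment.
# The name, labels, and image below are placeholders, not from this PR.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello-node
spec:
  replicas: 1
  selector:
    matchLabels:
      app: hello-node
  template:
    metadata:
      labels:
        app: hello-node
    spec:
      containers:
      - name: hello-node
        image: k8s.gcr.io/echoserver:1.4   # assumed sample image
        ports:
        - containerPort: 8080
```

A Deployment like this keeps the declared number of replicas running, which is the "checks on the health of your Pod" behavior the tutorial text describes.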
diff --git a/content/en/docs/tutorials/kubernetes-basics/expose/expose-intro.html b/content/en/docs/tutorials/kubernetes-basics/expose/expose-intro.html index 8a7d60dd8774c..c610b6e9f4db2 100644 --- a/content/en/docs/tutorials/kubernetes-basics/expose/expose-intro.html +++ b/content/en/docs/tutorials/kubernetes-basics/expose/expose-intro.html @@ -28,7 +28,7 @@

Objectives

Overview of Kubernetes Services

-

Kubernetes Pods are mortal. Pods in fact have a lifecycle. When a worker node dies, the Pods running on the Node are also lost. A ReplicaSet might then dynamically drive the cluster back to desired state via creation of new Pods to keep your application running. As another example, consider an image-processing backend with 3 replicas. Those replicas are exchangeable; the front-end system should not care about backend replicas or even if a Pod is lost and recreated. That said, each Pod in a Kubernetes cluster has a unique IP address, even Pods on the same Node, so there needs to be a way of automatically reconciling changes among Pods so that your applications continue to function.

+

Kubernetes Pods are mortal. Pods in fact have a lifecycle. When a worker node dies, the Pods running on the Node are also lost. A ReplicaSet might then dynamically drive the cluster back to desired state via creation of new Pods to keep your application running. As another example, consider an image-processing backend with 3 replicas. Those replicas are exchangeable; the front-end system should not care about backend replicas or even if a Pod is lost and recreated. That said, each Pod in a Kubernetes cluster has a unique IP address, even Pods on the same Node, so there needs to be a way of automatically reconciling changes among Pods so that your applications continue to function.

A Service in Kubernetes is an abstraction which defines a logical set of Pods and a policy by which to access them. Services enable a loose coupling between dependent Pods. A Service is defined using YAML (preferred) or JSON, like all Kubernetes objects. The set of Pods targeted by a Service is usually determined by a LabelSelector (see below for why you might want a Service without including selector in the spec).
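The two paragraphs above explain that Pods come and go, while a Service gives clients a stable way to reach whichever Pods currently match a label selector. As a hedged sketch only, not taken from this diff, a Service selecting the Pods of such a backend might be declared like this; the name, labels, and ports are illustrative assumptions:

```yaml
# Illustrative sketch: a Service targeting Pods labeled app=image-backend.
# All names and ports here are assumptions for the example, not from this PR.
apiVersion: v1
kind: Service
metadata:
  name: image-backend
spec:
  selector:
    app: image-backend      # the LabelSelector that picks the backend Pods
  ports:
  - protocol: TCP
    port: 80                # port clients use to reach the Service
    targetPort: 8080        # port the backend containers listen on
```

Because the Service matches Pods by label rather than by IP, replacement Pods created by a ReplicaSet are picked up automatically, which is the loose coupling described above.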

diff --git a/content/en/docs/tutorials/services/source-ip.md b/content/en/docs/tutorials/services/source-ip.md index 03a9bb097c658..d04902ddfe1f2 100644 --- a/content/en/docs/tutorials/services/source-ip.md +++ b/content/en/docs/tutorials/services/source-ip.md @@ -1,6 +1,7 @@ --- title: Using Source IP content_type: tutorial +mermaid: true min-kubernetes-server-version: v1.5 --- @@ -177,7 +178,7 @@ service/nodeport exposed ```shell NODEPORT=$(kubectl get -o jsonpath="{.spec.ports[0].nodePort}" services nodeport) -NODES=$(kubectl get nodes -o jsonpath='{ $.items[*].status.addresses[?(@.type=="ExternalIP")].address }') +NODES=$(kubectl get nodes -o jsonpath='{ $.items[*].status.addresses[?(@.type=="InternalIP")].address }') ``` If you're running on a cloud provider, you may need to open up a firewall-rule @@ -206,18 +207,19 @@ Note that these are not the correct client IPs, they're cluster internal IPs. Th Visually: -``` - client - \ ^ - \ \ - v \ - node 1 <--- node 2 - | ^ SNAT - | | ---> - v | - endpoint -``` +{{< mermaid >}} +graph LR; + client(client)-->node2[Node 2]; + node2-->client; + node2-. SNAT .->node1[Node 1]; + node1-. SNAT .->node2; + node1-->endpoint(Endpoint); + classDef plain fill:#ddd,stroke:#fff,stroke-width:4px,color:#000; + classDef k8s fill:#326ce5,stroke:#fff,stroke-width:4px,color:#fff; + class node1,node2,endpoint k8s; + class client plain; +{{< /mermaid >}} To avoid this, Kubernetes has a feature to [preserve the client source IP](/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip). @@ -261,17 +263,18 @@ This is what happens: Visually: -``` - client - ^ / \ - / / \ - / v X - node 1 node 2 - ^ | - | | - | v - endpoint -``` +{{< mermaid >}} +graph TD; + client --> node1[Node 1]; + client(client) --x node2[Node 2]; + node1 --> endpoint(endpoint); + endpoint --> node1; + + classDef plain fill:#ddd,stroke:#fff,stroke-width:4px,color:#000; + classDef k8s fill:#326ce5,stroke:#fff,stroke-width:4px,color:#fff; + class node1,node2,endpoint k8s; + class client plain; +{{< /mermaid >}} @@ -324,17 +327,7 @@ deliberately failing health checks.
Visually: -``` - client - | - lb VIP - / ^ - v / -health check ---> node 1 node 2 <--- health check - 200 <--- ^ | ---> 500 - | V - endpoint -``` +![Source IP with externalTrafficPolicy](/images/docs/sourceip-externaltrafficpolicy.svg) You can test this by setting the annotation: @@ -447,6 +440,4 @@ kubectl delete deployment source-ip-app ## {{% heading "whatsnext" %}} * Learn more about [connecting applications via services](/docs/concepts/services-networking/connect-applications-service/) -* Read how to [Create an External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) - - +* Read how to [Create an External Load Balancer](/docs/tasks/access-application-cluster/create-external-load-balancer/) diff --git a/content/en/docs/tutorials/stateless-application/expose-external-ip-address.md b/content/en/docs/tutorials/stateless-application/expose-external-ip-address.md index 2974c77c94966..5babc2c0b0d9d 100644 --- a/content/en/docs/tutorials/stateless-application/expose-external-ip-address.md +++ b/content/en/docs/tutorials/stateless-application/expose-external-ip-address.md @@ -52,11 +52,11 @@ kubectl apply -f https://k8s.io/examples/service/load-balancer-example.yaml The preceding command creates a - [Deployment](/docs/concepts/workloads/controllers/deployment/) - object and an associated - [ReplicaSet](/docs/concepts/workloads/controllers/replicaset/) - object. The ReplicaSet has five - [Pods](/docs/concepts/workloads/pods/pod/), + {{< glossary_tooltip text="Deployment" term_id="deployment" >}} + and an associated + {{< glossary_tooltip term_id="replica-set" text="ReplicaSet" >}}. + The ReplicaSet has five + {{< glossary_tooltip text="Pods" term_id="pod" >}} each of which runs the Hello World application. 1. Display information about the Deployment: diff --git a/content/en/examples/README.md b/content/en/examples/README.md index 3804697b6fa5c..6a5f3ceea7732 100644 --- a/content/en/examples/README.md +++ b/content/en/examples/README.md @@ -1,13 +1,12 @@ -Note: These tests are importing code from kubernetes that isn't really -meant to be used outside the repo. This causes vendoring problems. As -a result, we have to work around those with these lines in the travis -config: +To run the tests for a localization, use the following command: ``` -- rm $GOPATH/src/k8s.io/kubernetes/vendor/k8s.io/apimachinery -- rm $GOPATH/src/k8s.io/kubernetes/vendor/k8s.io/apiserver -- rm $GOPATH/src/k8s.io/kubernetes/vendor/k8s.io/client-go -- cp -r $GOPATH/src/k8s.io/kubernetes/vendor/* $GOPATH/src/ -- rm -rf $GOPATH/src/k8s.io/kubernetes/vendor/* -- cp -r $GOPATH/src/k8s.io/kubernetes/staging/src/* $GOPATH/src/ +go test k8s.io/website/content/<LANG>/examples ``` + +where `<LANG>` is the two character representation of a language.
For example: + +``` +go test k8s.io/website/content/en/examples +``` + diff --git a/content/en/examples/admin/sched/clusterrole.yaml b/content/en/examples/admin/sched/clusterrole.yaml new file mode 100644 index 0000000000000..554b8659db5b0 --- /dev/null +++ b/content/en/examples/admin/sched/clusterrole.yaml @@ -0,0 +1,37 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:kube-scheduler +rules: + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - coordination.k8s.io + resourceNames: + - kube-scheduler + - my-scheduler + resources: + - leases + verbs: + - get + - update + - apiGroups: + - "" + resourceNames: + - kube-scheduler + - my-scheduler + resources: + - endpoints + verbs: + - delete + - get + - patch + - update diff --git a/content/en/examples/application/php-apache.yaml b/content/en/examples/application/php-apache.yaml index 5eb04cfb899ad..e8e1b5aeb43e2 100644 --- a/content/en/examples/application/php-apache.yaml +++ b/content/en/examples/application/php-apache.yaml @@ -22,9 +22,7 @@ spec: cpu: 500m requests: cpu: 200m - --- - apiVersion: v1 kind: Service metadata: @@ -36,4 +34,3 @@ spec: - port: 80 selector: run: php-apache - diff --git a/content/en/examples/examples_test.go b/content/en/examples/examples_test.go index 7c9664b64c168..d653d8303e2dd 100644 --- a/content/en/examples/examples_test.go +++ b/content/en/examples/examples_test.go @@ -28,34 +28,104 @@ import ( "testing" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apimachinery/pkg/util/yaml" - utilfeature "k8s.io/apiserver/pkg/util/feature" + // "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/apis/apps" apps_validation "k8s.io/kubernetes/pkg/apis/apps/validation" + "k8s.io/kubernetes/pkg/apis/autoscaling" autoscaling_validation "k8s.io/kubernetes/pkg/apis/autoscaling/validation" + "k8s.io/kubernetes/pkg/apis/batch" batch_validation "k8s.io/kubernetes/pkg/apis/batch/validation" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/core/validation" - "k8s.io/kubernetes/pkg/apis/extensions" - ext_validation "k8s.io/kubernetes/pkg/apis/extensions/validation" + + "k8s.io/kubernetes/pkg/apis/networking" + networking_validation "k8s.io/kubernetes/pkg/apis/networking/validation" + "k8s.io/kubernetes/pkg/apis/policy" policy_validation "k8s.io/kubernetes/pkg/apis/policy/validation" + "k8s.io/kubernetes/pkg/apis/rbac" rbac_validation "k8s.io/kubernetes/pkg/apis/rbac/validation" + "k8s.io/kubernetes/pkg/apis/settings" settings_validation "k8s.io/kubernetes/pkg/apis/settings/validation" + "k8s.io/kubernetes/pkg/apis/storage" storage_validation "k8s.io/kubernetes/pkg/apis/storage/validation" + "k8s.io/kubernetes/pkg/capabilities" "k8s.io/kubernetes/pkg/registry/batch/job" + + // initialize install packages + _ "k8s.io/kubernetes/pkg/apis/apps/install" + _ "k8s.io/kubernetes/pkg/apis/autoscaling/install" + _ "k8s.io/kubernetes/pkg/apis/batch/install" + _ "k8s.io/kubernetes/pkg/apis/core/install" + _ "k8s.io/kubernetes/pkg/apis/networking/install" + _ "k8s.io/kubernetes/pkg/apis/policy/install" + _ "k8s.io/kubernetes/pkg/apis/rbac/install" + _ "k8s.io/kubernetes/pkg/apis/settings/install" + _ 
"k8s.io/kubernetes/pkg/apis/storage/install" ) +var ( + Groups map[string]TestGroup + serializer runtime.SerializerInfo +) + +// TestGroup contains GroupVersion to uniquely identify the API +type TestGroup struct { + externalGroupVersion schema.GroupVersion +} + +// GroupVersion makes copy of schema.GroupVersion +func (g TestGroup) GroupVersion() *schema.GroupVersion { + copyOfGroupVersion := g.externalGroupVersion + return ©OfGroupVersion +} + +// Codec returns the codec for the API version to test against +func (g TestGroup) Codec() runtime.Codec { + if serializer.Serializer == nil { + return legacyscheme.Codecs.LegacyCodec(g.externalGroupVersion) + } + return legacyscheme.Codecs.CodecForVersions(serializer.Serializer, legacyscheme.Codecs.UniversalDeserializer(), schema.GroupVersions{g.externalGroupVersion}, nil) +} + +func initGroups() { + Groups = make(map[string]TestGroup) + groupNames := []string{ + api.GroupName, + apps.GroupName, + autoscaling.GroupName, + batch.GroupName, + networking.GroupName, + policy.GroupName, + rbac.GroupName, + settings.GroupName, + storage.GroupName, + } + + for _, gn := range groupNames { + versions := legacyscheme.Scheme.PrioritizedVersionsForGroup(gn) + Groups[gn] = TestGroup{ + externalGroupVersion: schema.GroupVersion{ + Group: gn, + Version: versions[0].Version, + }, + } + } +} + func getCodecForObject(obj runtime.Object) (runtime.Codec, error) { kinds, _, err := legacyscheme.Scheme.ObjectKinds(obj) if err != nil { @@ -63,7 +133,7 @@ func getCodecForObject(obj runtime.Object) (runtime.Codec, error) { } kind := kinds[0] - for _, group := range testapi.Groups { + for _, group := range Groups { if group.GroupVersion().Group != kind.Group { continue } @@ -85,7 +155,7 @@ func getCodecForObject(obj runtime.Object) (runtime.Codec, error) { func validateObject(obj runtime.Object) (errors field.ErrorList) { // Enable CustomPodDNS for testing - utilfeature.DefaultFeatureGate.Set("CustomPodDNS=true") + // feature.DefaultFeatureGate.Set("CustomPodDNS=true") switch t := obj.(type) { case *api.ConfigMap: if t.Namespace == "" { @@ -96,7 +166,7 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) { if t.Namespace == "" { t.Namespace = api.NamespaceDefault } - errors = validation.ValidateEndpoints(t) + errors = validation.ValidateEndpointsCreate(t) case *api.LimitRange: if t.Namespace == "" { t.Namespace = api.NamespaceDefault @@ -115,7 +185,10 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) { if t.Namespace == "" { t.Namespace = api.NamespaceDefault } - errors = validation.ValidatePod(t) + opts := validation.PodValidationOptions{ + AllowMultipleHugePageResources: true, + } + errors = validation.ValidatePod(t, opts) case *api.PodList: for i := range t.Items { errors = append(errors, validateObject(&t.Items[i])...) 
@@ -148,7 +221,7 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) { if t.Namespace == "" { t.Namespace = api.NamespaceDefault } - errors = validation.ValidateService(t) + errors = validation.ValidateService(t, true) case *api.ServiceAccount: if t.Namespace == "" { t.Namespace = api.NamespaceDefault @@ -189,11 +262,15 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) { t.Namespace = api.NamespaceDefault } errors = apps_validation.ValidateDeployment(t) - case *extensions.Ingress: + case *networking.Ingress: if t.Namespace == "" { t.Namespace = api.NamespaceDefault } - errors = ext_validation.ValidateIngress(t) + gv := schema.GroupVersion{ + Group: networking.GroupName, + Version: legacyscheme.Scheme.PrioritizedVersionsForGroup(networking.GroupName)[0].Version, + } + errors = networking_validation.ValidateIngressCreate(t, gv) case *policy.PodSecurityPolicy: errors = policy_validation.ValidatePodSecurityPolicy(t) case *apps.ReplicaSet: @@ -206,6 +283,11 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) { t.Namespace = api.NamespaceDefault } errors = batch_validation.ValidateCronJob(t) + case *networking.NetworkPolicy: + if t.Namespace == "" { + t.Namespace = api.NamespaceDefault + } + errors = networking_validation.ValidateNetworkPolicy(t) case *policy.PodDisruptionBudget: if t.Namespace == "" { t.Namespace = api.NamespaceDefault @@ -247,10 +329,6 @@ func walkConfigFiles(inDir string, t *testing.T, fn func(name, path string, data if err != nil { return err } - // workaround for Jekyllr limit - if bytes.HasPrefix(data, []byte("---\n")) { - return fmt.Errorf("YAML file cannot start with \"---\", please remove the first line") - } name := strings.TrimSuffix(file, ext) var docs [][]byte @@ -286,11 +364,14 @@ func walkConfigFiles(inDir string, t *testing.T, fn func(name, path string, data } func TestExampleObjectSchemas(t *testing.T) { + initGroups() + // Please help maintain the alphabeta order in the map cases := map[string]map[string][]runtime.Object{ "admin": { - "namespace-dev": {&api.Namespace{}}, - "namespace-prod": {&api.Namespace{}}, + "namespace-dev": {&api.Namespace{}}, + "namespace-prod": {&api.Namespace{}}, + "snowflake-deployment": {&apps.Deployment{}}, }, "admin/cloud": { "ccm-example": {&api.ServiceAccount{}, &rbac.ClusterRoleBinding{}, &apps.DaemonSet{}}, @@ -298,6 +379,7 @@ func TestExampleObjectSchemas(t *testing.T) { "admin/dns": { "busybox": {&api.Pod{}}, "dns-horizontal-autoscaler": {&apps.Deployment{}}, + "dnsutils": {&api.Pod{}}, }, "admin/logging": { "fluentd-sidecar-config": {&api.ConfigMap{}}, @@ -343,21 +425,23 @@ func TestExampleObjectSchemas(t *testing.T) { "storagelimits": {&api.LimitRange{}}, }, "admin/sched": { - "my-scheduler": {&api.ServiceAccount{}, &rbac.ClusterRoleBinding{}, &apps.Deployment{}}, + "my-scheduler": {&api.ServiceAccount{}, &rbac.ClusterRoleBinding{}, &rbac.ClusterRoleBinding{}, &apps.Deployment{}}, "pod1": {&api.Pod{}}, "pod2": {&api.Pod{}}, "pod3": {&api.Pod{}}, }, "application": { - "deployment": {&apps.Deployment{}}, - "deployment-patch": {&apps.Deployment{}}, - "deployment-scale": {&apps.Deployment{}}, - "deployment-update": {&apps.Deployment{}}, - "nginx-app": {&api.Service{}, &apps.Deployment{}}, - "nginx-with-request": {&apps.Deployment{}}, - "shell-demo": {&api.Pod{}}, - "simple_deployment": {&apps.Deployment{}}, - "update_deployment": {&apps.Deployment{}}, + "deployment": {&apps.Deployment{}}, + "deployment-patch": {&apps.Deployment{}}, + "deployment-retainkeys": {&apps.Deployment{}}, 
+ "deployment-scale": {&apps.Deployment{}}, + "deployment-update": {&apps.Deployment{}}, + "nginx-app": {&api.Service{}, &apps.Deployment{}}, + "nginx-with-request": {&apps.Deployment{}}, + "php-apache": {&apps.Deployment{}, &api.Service{}}, + "shell-demo": {&api.Pod{}}, + "simple_deployment": {&apps.Deployment{}}, + "update_deployment": {&apps.Deployment{}}, }, "application/cassandra": { "cassandra-service": {&api.Service{}}, @@ -413,15 +497,17 @@ func TestExampleObjectSchemas(t *testing.T) { "configmap-multikeys": {&api.ConfigMap{}}, }, "controllers": { - "daemonset": {&apps.DaemonSet{}}, - "frontend": {&apps.ReplicaSet{}}, - "hpa-rs": {&autoscaling.HorizontalPodAutoscaler{}}, - "job": {&batch.Job{}}, - "replicaset": {&apps.ReplicaSet{}}, - "replication": {&api.ReplicationController{}}, - "replication-nginx-1.7.9": {&api.ReplicationController{}}, - "replication-nginx-1.9.2": {&api.ReplicationController{}}, - "nginx-deployment": {&apps.Deployment{}}, + "daemonset": {&apps.DaemonSet{}}, + "fluentd-daemonset": {&apps.DaemonSet{}}, + "fluentd-daemonset-update": {&apps.DaemonSet{}}, + "frontend": {&apps.ReplicaSet{}}, + "hpa-rs": {&autoscaling.HorizontalPodAutoscaler{}}, + "job": {&batch.Job{}}, + "replicaset": {&apps.ReplicaSet{}}, + "replication": {&api.ReplicationController{}}, + "replication-nginx-1.14.2": {&api.ReplicationController{}}, + "replication-nginx-1.16.1": {&api.ReplicationController{}}, + "nginx-deployment": {&apps.Deployment{}}, }, "debug": { "counter-pod": {&api.Pod{}}, @@ -455,6 +541,8 @@ func TestExampleObjectSchemas(t *testing.T) { "pod-configmap-volume": {&api.Pod{}}, "pod-configmap-volume-specific-key": {&api.Pod{}}, "pod-multiple-configmap-env-variable": {&api.Pod{}}, + "pod-nginx-preferred-affinity": {&api.Pod{}}, + "pod-nginx-required-affinity": {&api.Pod{}}, "pod-nginx-specific-node": {&api.Pod{}}, "pod-nginx": {&api.Pod{}}, "pod-projected-svc-token": {&api.Pod{}}, @@ -462,6 +550,7 @@ func TestExampleObjectSchemas(t *testing.T) { "pod-single-configmap-env-variable": {&api.Pod{}}, "pod-with-node-affinity": {&api.Pod{}}, "pod-with-pod-affinity": {&api.Pod{}}, + "pod-with-toleration": {&api.Pod{}}, "private-reg-pod": {&api.Pod{}}, "share-process-namespace": {&api.Pod{}}, "simple-pod": {&api.Pod{}}, @@ -471,14 +560,17 @@ func TestExampleObjectSchemas(t *testing.T) { "redis-pod": {&api.Pod{}}, }, "pods/inject": { - "dapi-envars-container": {&api.Pod{}}, - "dapi-envars-pod": {&api.Pod{}}, - "dapi-volume": {&api.Pod{}}, - "dapi-volume-resources": {&api.Pod{}}, - "envars": {&api.Pod{}}, - "secret": {&api.Secret{}}, - "secret-envars-pod": {&api.Pod{}}, - "secret-pod": {&api.Pod{}}, + "dapi-envars-container": {&api.Pod{}}, + "dapi-envars-pod": {&api.Pod{}}, + "dapi-volume": {&api.Pod{}}, + "dapi-volume-resources": {&api.Pod{}}, + "envars": {&api.Pod{}}, + "pod-multiple-secret-env-variable": {&api.Pod{}}, + "pod-secret-envFrom": {&api.Pod{}}, + "pod-single-secret-env-variable": {&api.Pod{}}, + "secret": {&api.Secret{}}, + "secret-envars-pod": {&api.Pod{}}, + "secret-pod": {&api.Pod{}}, }, "pods/probe": { "exec-liveness": {&api.Pod{}}, @@ -517,38 +609,53 @@ func TestExampleObjectSchemas(t *testing.T) { "redis": {&api.Pod{}}, }, "policy": { + "baseline-psp": {&policy.PodSecurityPolicy{}}, + "example-psp": {&policy.PodSecurityPolicy{}}, "privileged-psp": {&policy.PodSecurityPolicy{}}, "restricted-psp": {&policy.PodSecurityPolicy{}}, - "example-psp": {&policy.PodSecurityPolicy{}}, "zookeeper-pod-disruption-budget-maxunavailable": {&policy.PodDisruptionBudget{}}, - 
"zookeeper-pod-disruption-budget-minunavailable": {&policy.PodDisruptionBudget{}}, + "zookeeper-pod-disruption-budget-minavailable": {&policy.PodDisruptionBudget{}}, }, "service": { - "nginx-service": {&api.Service{}}, + "nginx-service": {&api.Service{}}, + "load-balancer-example": {&apps.Deployment{}}, }, "service/access": { - "frontend": {&api.Service{}, &apps.Deployment{}}, - "hello-service": {&api.Service{}}, - "hello": {&apps.Deployment{}}, + "frontend": {&api.Service{}, &apps.Deployment{}}, + "hello-application": {&apps.Deployment{}}, + "hello-service": {&api.Service{}}, + "hello": {&apps.Deployment{}}, }, "service/networking": { - "curlpod": {&apps.Deployment{}}, - "custom-dns": {&api.Pod{}}, - "hostaliases-pod": {&api.Pod{}}, - "ingress": {&extensions.Ingress{}}, - "nginx-secure-app": {&api.Service{}, &apps.Deployment{}}, - "nginx-svc": {&api.Service{}}, - "run-my-nginx": {&apps.Deployment{}}, + "curlpod": {&apps.Deployment{}}, + "custom-dns": {&api.Pod{}}, + "dual-stack-default-svc": {&api.Service{}}, + "dual-stack-ipv4-svc": {&api.Service{}}, + "dual-stack-ipv6-lb-svc": {&api.Service{}}, + "dual-stack-ipv6-svc": {&api.Service{}}, + "hostaliases-pod": {&api.Pod{}}, + "ingress": {&networking.Ingress{}}, + "network-policy-allow-all-egress": {&networking.NetworkPolicy{}}, + "network-policy-allow-all-ingress": {&networking.NetworkPolicy{}}, + "network-policy-default-deny-egress": {&networking.NetworkPolicy{}}, + "network-policy-default-deny-ingress": {&networking.NetworkPolicy{}}, + "network-policy-default-deny-all": {&networking.NetworkPolicy{}}, + "nginx-policy": {&networking.NetworkPolicy{}}, + "nginx-secure-app": {&api.Service{}, &apps.Deployment{}}, + "nginx-svc": {&api.Service{}}, + "run-my-nginx": {&apps.Deployment{}}, }, "windows": { - "configmap-pod": {&api.ConfigMap{}, &api.Pod{}}, - "daemonset": {&apps.DaemonSet{}}, - "deploy-hyperv": {&apps.Deployment{}}, - "deploy-resource": {&apps.Deployment{}}, - "emptydir-pod": {&api.Pod{}}, - "hostpath-volume-pod": {&api.Pod{}}, - "secret-pod": {&api.Secret{}, &api.Pod{}}, - "simple-pod": {&api.Pod{}}, + "configmap-pod": {&api.ConfigMap{}, &api.Pod{}}, + "daemonset": {&apps.DaemonSet{}}, + "deploy-hyperv": {&apps.Deployment{}}, + "deploy-resource": {&apps.Deployment{}}, + "emptydir-pod": {&api.Pod{}}, + "hostpath-volume-pod": {&api.Pod{}}, + "run-as-username-container": {&api.Pod{}}, + "run-as-username-pod": {&api.Pod{}}, + "secret-pod": {&api.Secret{}, &api.Pod{}}, + "simple-pod": {&api.Pod{}}, }, } diff --git a/content/en/examples/pods/inject/dependent-envars.yaml b/content/en/examples/pods/inject/dependent-envars.yaml new file mode 100644 index 0000000000000..2509c6f47b56d --- /dev/null +++ b/content/en/examples/pods/inject/dependent-envars.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: dependent-envars-demo +spec: + containers: + - name: dependent-envars-demo + args: + - while true; do echo -en '\n'; printf UNCHANGED_REFERENCE=$UNCHANGED_REFERENCE'\n'; printf SERVICE_ADDRESS=$SERVICE_ADDRESS'\n';printf ESCAPED_REFERENCE=$ESCAPED_REFERENCE'\n'; sleep 30; done; + command: + - sh + - -c + image: busybox + env: + - name: SERVICE_PORT + value: "80" + - name: SERVICE_IP + value: "172.17.0.1" + - name: UNCHANGED_REFERENCE + value: "$(PROTOCOL)://$(SERVICE_IP):$(SERVICE_PORT)" + - name: PROTOCOL + value: "https" + - name: SERVICE_ADDRESS + value: "$(PROTOCOL)://$(SERVICE_IP):$(SERVICE_PORT)" + - name: ESCAPED_REFERENCE + value: "$$(PROTOCOL)://$(SERVICE_IP):$(SERVICE_PORT)" diff --git 
a/content/en/includes/partner-script.js b/content/en/includes/partner-script.js deleted file mode 100644 index cdf69bcb29659..0000000000000 --- a/content/en/includes/partner-script.js +++ /dev/null @@ -1,1609 +0,0 @@ -;(function () { - var partners = [ - { - type: 0, - name: 'Sysdig', - logo: 'sys_dig', - link: 'https://sysdig.com/blog/monitoring-kubernetes-with-sysdig-cloud/', - blurb: 'Sysdig is the container intelligence company. Sysdig has created the only unified platform to deliver monitoring, security, and troubleshooting in a microservices-friendly architecture.' - }, - { - type: 0, - name: 'Puppet', - logo: 'puppet', - link: 'https://puppet.com/blog/announcing-kream-and-new-kubernetes-helm-and-docker-modules', - blurb: 'We\'ve developed tools and products to make your adoption of Kubernetes as efficient as possible, covering your full workflow cycle from development to production. And now Puppet Pipelines for Containers is your complete DevOps dashboard for Kubernetes.' - }, - { - type: 0, - name: 'Citrix', - logo: 'citrix', - link: 'https://www.citrix.com/networking/microservices.html', - blurb: 'Netscaler CPX gives app developers all the features they need to load balance their microservices and containerized apps with Kubernetes.' - }, - { - type: 0, - name: 'Cockroach Labs', - logo: 'cockroach_labs', - link: 'https://www.cockroachlabs.com/blog/running-cockroachdb-on-kubernetes/', - blurb: 'CockroachDB is a distributed SQL database whose built-in replication and survivability model pair with Kubernetes to truly make data easy.' - }, - { - type: 2, - name: 'Weaveworks', - logo: 'weave_works', - link: ' https://weave.works/kubernetes', - blurb: 'Weaveworks enables Developers and Dev/Ops teams to easily connect, deploy, secure, manage, and troubleshoot microservices in Kubernetes.' - }, - { - type: 0, - name: 'Intel', - logo: 'intel', - link: 'https://tectonic.com/press/intel-coreos-collaborate-on-openstack-with-kubernetes.html', - blurb: 'Powering the GIFEE (Google’s Infrastructure for Everyone Else), to run OpenStack deployments on Kubernetes.' - }, - { - type: 3, - name: 'Platform9', - logo: 'platform9', - link: 'https://platform9.com/products/kubernetes/', - blurb: 'Platform9 is the open source-as-a-service company that takes all of the goodness of Kubernetes and delivers it as a managed service.' - }, - { - type: 0, - name: 'Datadog', - logo: 'datadog', - link: 'http://docs.datadoghq.com/integrations/kubernetes/', - blurb: 'Full-stack observability for dynamic infrastructure & applications. Includes precision alerting, analytics and deep Kubernetes integrations. ' - }, - { - type: 0, - name: 'AppFormix', - logo: 'appformix', - link: 'http://www.appformix.com/solutions/appformix-for-kubernetes/', - blurb: 'AppFormix is a cloud infrastructure performance optimization service helping enterprise operators streamline their cloud operations on any Kubernetes cloud. ' - }, - { - type: 0, - name: 'Crunchy', - logo: 'crunchy', - link: 'http://info.crunchydata.com/blog/advanced-crunchy-containers-for-postgresql', - blurb: 'Crunchy PostgreSQL Container Suite is a set of containers for managing PostgreSQL with DBA microservices leveraging Kubernetes and Helm.' - }, - { - type: 0, - name: 'Aqua', - logo: 'aqua', - link: 'http://blog.aquasec.com/security-best-practices-for-kubernetes-deployment', - blurb: 'Deep, automated security for your containers running on Kubernetes.' 
- }, - { - type: 0, - name: 'Distelli', - logo: 'distelli', - link: 'https://www.distelli.com/', - blurb: 'Pipelines from your source repositories to your Kubernetes Clusters on any cloud.' - }, - { - type: 0, - name: 'Nuage networks', - logo: 'nuagenetworks', - link: 'https://github.com/nuagenetworks/nuage-kubernetes', - blurb: 'The Nuage SDN platform provides policy-based networking between Kubernetes Pods and non-Kubernetes environments with visibility and security monitoring.' - }, - { - type: 0, - name: 'Sematext', - logo: 'sematext', - link: 'https://sematext.com/kubernetes/', - blurb: 'Logging & Monitoring: Automatic collection and processing of Metrics, Events and Logs for auto-discovered pods and Kubernetes nodes.' - }, - { - type: 0, - name: 'Diamanti', - logo: 'diamanti', - link: 'https://www.diamanti.com/products/', - blurb: 'Diamanti deploys containers with guaranteed performance using Kubernetes in the first hyperconverged appliance purpose built for containerized applications.' - }, - { - type: 0, - name: 'Aporeto', - logo: 'aporeto', - link: 'https://aporeto.com/trireme', - blurb: 'Aporeto makes cloud-native applications secure by default without impacting developer velocity and works at any scale, on any cloud.' - }, - { - type: 2, - name: 'Giant Swarm', - logo: 'giantswarm', - link: 'https://giantswarm.io', - blurb: 'Giant Swarm enables you to simply and rapidly create and use Kubernetes clusters on-demand either on-premises or in the cloud. Contact Giant Swarm to learn about the best way to run cloud native applications anywhere.' - }, - { - type: 3, - name: 'Giant Swarm', - logo: 'giantswarm', - link: 'https://giantswarm.io/product/', - blurb: 'Giant Swarm enables you to simply and rapidly create and use Kubernetes clusters on-demand either on-premises or in the cloud. Contact Giant Swarm to learn about the best way to run cloud native applications anywhere.' - }, - { - type: 3, - name: 'Hasura', - logo: 'hasura', - link: 'https://hasura.io', - blurb: 'Hasura is a Kubernetes-based PaaS and a Postgres-based BaaS that accelerates app development with ready-to-use components.' - }, - { - type: 3, - name: 'Mirantis', - logo: 'mirantis', - link: 'https://www.mirantis.com/software/kubernetes/', - blurb: 'Mirantis - Mirantis Cloud Platform' - }, - { - type: 2, - name: 'Mirantis', - logo: 'mirantis', - link: 'https://content.mirantis.com/Containerizing-OpenStack-on-Kubernetes-Video-Landing-Page.html', - blurb: 'Mirantis builds and manages private clouds with open source software such as OpenStack, deployed as containers orchestrated by Kubernetes.' - }, - { - type: 0, - name: 'Kubernetic', - logo: 'kubernetic', - link: 'https://kubernetic.com/', - blurb: 'Kubernetic is a Kubernetes Desktop client that simplifies and democratizes cluster management for DevOps.' - }, - { - type: 1, - name: 'Reactive Ops', - logo: 'reactive_ops', - link: 'https://www.reactiveops.com/the-kubernetes-experts/', - blurb: 'ReactiveOps has written automation on best practices for infrastructure as code on GCP & AWS using Kubernetes, helping you build and maintain a world-class infrastructure at a fraction of the price of an internal hire.' - }, - { - type: 2, - name: 'Livewyer', - logo: 'livewyer', - link: 'https://livewyer.io/services/kubernetes-experts/', - blurb: 'Kubernetes experts that on-board applications and empower IT teams to get the most out of containerised technology.' 
- }, - { - type: 2, - name: 'Samsung SDS', - logo: 'samsung_sds', - link: 'http://www.samsungsdsa.com/cloud-infrastructure_kubernetes', - blurb: 'Samsung SDS’s Cloud Native Computing Team offers expert consulting across the range of technical aspects involved in building services targeted at a Kubernetes cluster.' - }, - { - type: 2, - name: 'Container Solutions', - logo: 'container_solutions', - link: 'http://container-solutions.com/resources/kubernetes/', - blurb: 'Container Solutions is a premium software consultancy that focuses on programmable infrastructure, offering our expertise in software development, strategy and operations to help you innovate at speed and scale.' - }, - { - type: 4, - name: 'Container Solutions', - logo: 'container_solutions', - link: 'http://container-solutions.com/resources/kubernetes/', - blurb: 'Container Solutions is a premium software consultancy that focuses on programmable infrastructure, offering our expertise in software development, strategy and operations to help you innovate at speed and scale.' - }, - { - type: 2, - name: 'Jetstack', - logo: 'jetstack', - link: 'https://www.jetstack.io/', - blurb: 'Jetstack is an organisation focused entirely on Kubernetes. They will help you to get the most out of Kubernetes through expert professional services and open source tooling. Get in touch, and accelerate your project.' - }, - { - type: 0, - name: 'Tigera', - logo: 'tigera', - link: 'http://docs.projectcalico.org/latest/getting-started/kubernetes/', - blurb: 'Tigera builds high performance, policy driven, cloud native networking solutions for Kubernetes.' - }, - { - type: 1, - name: 'Harbur', - logo: 'harbur', - link: 'https://harbur.io/', - blurb: 'Based in Barcelona, Harbur is a consulting firm that helps companies deploy self-healing solutions empowered by Container technologies' - }, - { - type: 0, - name: 'Spotinst', - logo: 'spotinst', - link: 'http://blog.spotinst.com/2016/08/04/elastigroup-kubernetes-minions-steroids/', - blurb: 'Your Kubernetes For 80% Less. Run K8s workloads on Spot Instances with 100% availability to save 80% + autoscale your Kubernetes with maximum efficiency in heterogenous environments.' - }, - { - type: 2, - name: 'InwinSTACK', - logo: 'inwinstack', - link: 'http://www.inwinstack.com/index.php/en/solutions-en/', - blurb: 'Our container service leverages OpenStack-based infrastructure and its container orchestration engine Magnum to manage Kubernetes clusters.' - }, - { - type: 4, - name: 'InwinSTACK', - logo: 'inwinstack', - link: 'http://www.inwinstack.com/index.php/en/solutions-en/', - blurb: 'Our container service leverages OpenStack-based infrastructure and its container orchestration engine Magnum to manage Kubernetes clusters.' - }, - { - type: 3, - name: 'InwinSTACK', - logo: 'inwinstack', - link: 'https://github.com/inwinstack/kube-ansible', - blurb: 'inwinSTACK - kube-ansible' - }, - { - type: 1, - name: 'Semantix', - logo: 'semantix', - link: 'http://www.semantix.com.br/', - blurb: 'Semantix is a company that works with data analytics and distributed systems. Kubernetes is used to orchestrate services for our customers.' - }, - { - type: 0, - name: 'ASM Technologies Limited', - logo: 'asm', - link: 'http://www.asmtech.com/', - blurb: 'Our technology supply chain portfolio enables your software products to be accessible, viable and available more effectively.' 
- }, - { - type: 1, - name: 'InfraCloud Technologies', - logo: 'infracloud', - link: 'http://blog.infracloud.io/state-of-kubernetes/', - blurb: 'InfraCloud Technologies is software consultancy which provides services in Containers, Cloud and DevOps.' - }, - { - type: 0, - name: 'SignalFx', - logo: 'signalfx', - link: 'https://github.com/signalfx/integrations/tree/master/kubernetes', - blurb: 'Gain real-time visibility across metrics & the most intelligent alerts for todays architectures, including deep integration with Kubernetes' - }, - { - type: 0, - name: 'NATS', - logo: 'nats', - link: 'https://github.com/pires/kubernetes-nats-cluster', - blurb: 'NATS is a simple, secure, and scalable cloud native messaging system.' - }, - { - type: 2, - name: 'RX-M', - logo: 'rxm', - link: 'http://rx-m.com/training/kubernetes-training/', - blurb: 'Market neutral Kubernetes Dev, DevOps and Production training and consulting services.' - }, - { - type: 4, - name: 'RX-M', - logo: 'rxm', - link: 'http://rx-m.com/training/kubernetes-training/', - blurb: 'Market neutral Kubernetes Dev, DevOps and Production training and consulting services.' - }, - { - type: 1, - name: 'Emerging Technology Advisors', - logo: 'eta', - link: 'https://www.emergingtechnologyadvisors.com/services/kubernetes.html', - blurb: 'ETA helps companies architect, implement, and manage scalable applications using Kubernetes on public or private cloud.' - }, - { - type: 0, - name: 'CloudPlex.io', - logo: 'cloudplex', - link: 'http://www.cloudplex.io', - blurb: 'CloudPlex enables operations teams to visually deploy, orchestrate, manage, and monitor infrastructure, applications, and services in public or private cloud.' - }, - { - type: 2, - name: 'Kumina', - logo: 'kumina', - link: 'https://www.kumina.nl/managed_kubernetes', - blurb: 'Kumina combines the power of Kubernetes with 10+ years of experience in IT operations. We create, build and support fully managed Kubernetes solutions on your choice of infrastructure. We also provide consulting and training.' - }, - { - type: 0, - name: 'CA Technologies', - logo: 'ca', - link: 'https://docops.ca.com/ca-continuous-delivery-director/integrations/en/plug-ins/kubernetes-plug-in', - blurb: 'The CA Continuous Delivery Director Kubernetes plugin orchestrates deployment of containerized applications within an end-to-end release pipeline.' - }, - { - type: 0, - name: 'CoScale', - logo: 'coscale', - link: 'http://www.coscale.com/blog/how-to-monitor-your-kubernetes-cluster', - blurb: 'Full stack monitoring of containers and microservices orchestrated by Kubernetes. Powered by anomaly detection to find problems faster.' - }, - { - type: 2, - name: 'Supergiant.io', - logo: 'supergiant', - link: 'https://supergiant.io/blog/supergiant-packing-algorithm-unique-save-money', - blurb: 'Supergiant autoscales hardware for Kubernetes. Open-source, it makes HA, distributed, stateful apps easy to deploy, manage, and scale.' - }, - { - type: 0, - name: 'Avi Networks', - logo: 'avinetworks', - link: 'https://kb.avinetworks.com/avi-vantage-openshift-installation-guide/', - blurb: 'Avis elastic application services fabric provides scalable, feature rich & integrated L4-7 networking for K8S environments.' 
- }, - { - type: 1, - name: 'Codecrux web technologies pvt ltd', - logo: 'codecrux', - link: 'http://codecrux.com/kubernetes/', - blurb: 'At CodeCrux we help your organization get the most out of Containers and Kubernetes, regardless of where you are in your journey' - }, - { - type: 0, - name: 'Greenqloud', - logo: 'qstack', - link: 'https://www.qstack.com/application-orchestration/', - blurb: 'Qstack provides self-serviceable on-site Kubernetes clusters with an intuitive User Interface for Infrastructure and Kubernetes management.' - }, - { - type: 1, - name: 'StackOverdrive.io', - logo: 'stackoverdrive', - link: 'http://www.stackoverdrive.net/kubernetes-consulting/', - blurb: 'StackOverdrive helps organizations of all sizes leverage Kubernetes for container based orchestration and management.' - }, - { - type: 0, - name: 'StackIQ, Inc.', - logo: 'stackiq', - link: 'https://www.stackiq.com/kubernetes/', - blurb: 'With Stacki and the Stacki Pallet for Kubernetes, you can go from bare metal to containers in one step very quickly and easily.' - }, - { - type: 0, - name: 'Cobe', - logo: 'cobe', - link: 'https://cobe.io/product-page/', - blurb: 'Manage Kubernetes clusters with a live, searchable model that captures all relationships and performance data in full visualised context.' - }, - { - type: 0, - name: 'Datawire', - logo: 'datawire', - link: 'http://www.datawire.io', - blurb: 'Datawires open source tools let your microservices developers be awesomely productive on Kubernetes, while letting ops sleep at night.' - }, - { - type: 0, - name: 'Mashape, Inc.', - logo: 'kong', - link: 'https://getkong.org/install/kubernetes/', - blurb: 'Kong is a scalable open source API layer that runs in front of any RESTful API and can be provisioned to a Kubernetes cluster.' - }, - { - type: 0, - name: 'F5 Networks', - logo: 'f5networks', - link: 'http://github.com/f5networks', - blurb: 'We have a LB integration into Kubernetes.' - }, - { - type: 1, - name: 'Lovable Tech', - logo: 'lovable', - link: 'http://lovable.tech/', - blurb: 'World class engineers, designers, and strategic consultants helping you ship Lovable web & mobile technology.' - }, - { - type: 0, - name: 'StackState', - logo: 'stackstate', - link: 'http://stackstate.com/platform/container-monitoring', - blurb: 'Operational Analytics across teams and tools. Includes topology visualization, root cause analysis and anomaly detection for Kubernetes.' - }, - { - type: 1, - name: 'INEXCCO INC', - logo: 'inexcco', - link: 'https://www.inexcco.com/', - blurb: 'Strong DevOps and Cloud talent working with couple clients on kubernetes and helm implementations. ' - }, - { - type: 2, - name: 'Bitnami', - logo: 'bitnami', - link: 'http://bitnami.com/kubernetes', - blurb: 'Bitnami brings a catalog of trusted, up to date, and easy to use applications and application building blocks to Kubernetes.' - }, - { - type: 1, - name: 'Nebulaworks', - logo: 'nebulaworks', - link: 'http://www.nebulaworks.com/container-platforms', - blurb: 'Nebulaworks provides services to help the enterprise adopt modern container platforms and optimized processes to enable innovation at scale.' - }, - { - type: 1, - name: 'EASYNUBE', - logo: 'easynube', - link: 'http://easynube.co.uk/devopsnube/', - blurb: 'EasyNube provide architecture, implementation, and manage scalable applications using Kubernetes and Openshift.' 
- }, - { - type: 1, - name: 'Opcito Technologies', - logo: 'opcito', - link: 'http://www.opcito.com/kubernetes/', - blurb: 'Opcito is a software consultancy that uses Kubernetes to help organisations build, architect & deploy highly scalable applications.' - }, - { - type: 0, - name: 'code by Dell EMC', - logo: 'codedellemc', - link: 'https://blog.codedellemc.com', - blurb: 'Respected as a thought leader in storage persistence for containerized applications. Contributed significant work to K8 and Ecosystem' - }, - { - type: 0, - name: 'Instana', - logo: 'instana', - link: 'https://www.instana.com/supported-technologies/', - blurb: 'Instana monitors performance of the applications, infrastructure, containers and services deployed on a Kubernetes cluster.' - }, - { - type: 0, - name: 'Netsil', - logo: 'netsil', - link: 'https://netsil.com/kubernetes/', - blurb: 'Generate a real-time, auto-discovered application topology map! Monitor Kubernetes pods and namespaces without any code instrumentation.' - }, - { - type: 2, - name: 'Treasure Data', - logo: 'treasuredata', - link: 'https://fluentd.treasuredata.com/kubernetes-logging/', - blurb: 'Fluentd Enterprise brings smart, secure logging to Kubernetes, and brings integrations with backends such as Splunk, Kafka, or AWS S3.' - }, - { - type: 2, - name: 'Kenzan', - logo: 'Kenzan', - link: 'http://kenzan.com/?ref=kubernetes', - blurb: 'We provide custom consulting services leveraging Kubernetes as our foundation. This involves the platform development, delivery pipelines, and the application development within Kubernetes.' - }, - { - type: 2, - name: 'New Context', - logo: 'newcontext', - link: 'https://www.newcontext.com/devsecops-infrastructure-automation-orchestration/', - blurb: 'New Context builds and uplifts secure Kubernetes implementations and migrations, from initial design to infrastructure automation and management.' - }, - { - type: 2, - name: 'Banzai', - logo: 'banzai', - link: 'https://banzaicloud.com/platform/', - blurb: 'Banzai Cloud brings cloud native to the enterprise and simplifies the transition to microservices on Kubernetes.' - }, - { - type: 3, - name: 'Kublr', - logo: 'kublr', - link: 'http://kublr.com', - blurb: 'Kublr - Accelerate and control the deployment, scaling, monitoring and management of your containerized applications.' - }, - { - type: 1, - name: 'ControlPlane', - logo: 'controlplane', - link: 'https://control-plane.io', - blurb: 'We are a London-based Kubernetes consultancy with a focus on security and continuous delivery. We offer consulting & training.' - }, - { - type: 3, - name: 'Nirmata', - logo: 'nirmata', - link: 'https://www.nirmata.com/', - blurb: 'Nirmata - Nirmata Managed Kubernetes' - }, - { - type: 2, - name: 'Nirmata', - logo: 'nirmata', - link: 'https://www.nirmata.com/', - blurb: 'Nirmata is a software platform that helps DevOps teams deliver enterprise-grade and cloud-provider agnostic Kubernetes based container management solutions.' - }, - { - type: 3, - name: 'TenxCloud', - logo: 'tenxcloud', - link: 'https://tenxcloud.com', - blurb: 'TenxCloud - TenxCloud Container Engine (TCE)' - }, - { - type: 2, - name: 'TenxCloud', - logo: 'tenxcloud', - link: 'https://www.tenxcloud.com/', - blurb: 'Founded in October 2014, TenxCloud is a leading enterprise container cloud computing service provider in China, covering the areas such as container PaaS cloud platform, micro-service management, DevOps, development test, AIOps and so on. 
Provide private cloud PaaS products and solutions for financial, energy, operator, manufacturing, education and other industry customers.' - }, - { - type: 0, - name: 'Twistlock', - logo: 'twistlock', - link: 'https://www.twistlock.com/', - blurb: 'Security at Kubernetes Scale: Twistlock allows you to deploy fearlessly with assurance that your images and containers are free of vulnerabilities and protected at runtime.' - }, - { - type: 0, - name: 'Endocode AG', - logo: 'endocode', - link: 'https://endocode.com/kubernetes/', - blurb: 'Endocode practices and teaches the open source way. Kernel to cluster - Dev to Ops. We offer Kubernetes trainings, services and support.' - }, - { - type: 2, - name: 'Accenture', - logo: 'accenture', - link: 'https://www.accenture.com/us-en/service-application-containers', - blurb: 'Architecture, implementation and operation of world-class Kubernetes solutions for cloud-native clients.' - }, - { - type: 1, - name: 'Biarca', - logo: 'biarca', - link: 'http://biarca.io/', - blurb: 'Biarca is a cloud services provider and key focus areas Key areas of focus for Biarca include Cloud Adoption Services, Infrastructure Services, DevOps Services and Application Services. Biarca leverages Kubernetes to deliver containerized solutions.' - }, - { - type: 2, - name: 'Claranet', - logo: 'claranet', - link: 'http://www.claranet.co.uk/hosting/google-cloud-platform-consulting-managed-services', - blurb: 'Claranet helps people migrate to the cloud and take full advantage of the new world it offers. We consult, design, build and proactively manage the right infrastructure and automation tooling for clients to achieve this.' - }, - { - type: 1, - name: 'CloudKite', - logo: 'cloudkite', - link: 'https://cloudkite.io/', - blurb: 'CloudKite.io helps companies build and maintain highly automated, resilient, and impressively performing software on Kubernetes.' - }, - { - type: 2, - name: 'CloudOps', - logo: 'CloudOps', - link: 'https://www.cloudops.com/services/docker-and-kubernetes-workshops/', - blurb: 'CloudOps gets you hands-on with the K8s ecosystem via workshop/lab. Get prod ready K8s in cloud(s) of your choice with our managed services.' - }, - { - type: 2, - name: 'Ghostcloud', - logo: 'ghostcloud', - link: 'https://www.ghostcloud.cn/ecos-kubernetes', - blurb: 'EcOS is an enterprise-grade PaaS / CaaS based on Docker and Kubernetes, which makes it easier to configure, deploy and manage containerized applications.' - }, - { - type: 3, - name: 'Ghostcloud', - logo: 'ghostcloud', - link: 'https://www.ghostcloud.cn/ecos-kubernetes', - blurb: 'EcOS is an enterprise-grade PaaS / CaaS based on Docker and Kubernetes, which makes it easier to configure, deploy and manage containerized applications.' - }, - { - type: 2, - name: 'Contino', - logo: 'contino', - link: 'https://www.contino.io/', - blurb: 'We help enterprise organizations adopt DevOps, containers and cloud computing. Contino is a global consultancy that enables regulated organizations to accelerate innovation through the adoption of modern approaches to software delivery.' - }, - { - type: 2, - name: 'Booz Allen Hamilton', - logo: 'boozallenhamilton', - link: 'https://www.boozallen.com/', - blurb: 'Booz Allen partners with public and private sector clients to solve their most difficult challenges through a combination of consulting, analytics, mission operations, technology, systems delivery, cybersecurity, engineering, and innovation expertise.' 
- }, - { - type: 1, - name: 'BigBinary', - logo: 'bigbinary', - link: 'http://blog.bigbinary.com/categories/Kubernetes', - blurb: 'Provider of Digital Solutions for federal and commercial clients, to include DevSecOps, cloud platforms, transformation strategy, cognitive solutions, and UX.' - }, - { - type: 0, - name: 'CloudPerceptions', - logo: 'cloudperceptions', - link: 'https://www.meetup.com/Triangle-Kubernetes-Meetup/files/', - blurb: 'Container security solution for small-to-medium size enterprises who plan to run Kubernetes on shared infrastructure.' - }, - { - type: 2, - name: 'Creationline, Inc.', - logo: 'creationline', - link: 'https://www.creationline.com/ci', - blurb: 'Total solution for container based IT resource management.' - }, - { - type: 0, - name: 'DataCore Software', - logo: 'datacore', - link: 'https://www.datacore.com/solutions/virtualization/containerization', - blurb: 'DataCore provides highly-available, high-performance universal block storage for Kubernetes, radically improving the speed of deployment.' - }, - { - type: 0, - name: 'Elastifile', - logo: 'elastifile', - link: 'https://www.elastifile.com/stateful-containers', - blurb: 'Elastifile’s cross-cloud data fabric delivers elastically scalable, high performance, software-defined persistent storage for Kubernetes.' - }, - { - type: 0, - name: 'GitLab', - logo: 'gitlab', - link: 'https://about.gitlab.com/2016/11/14/idea-to-production/', - blurb: 'With GitLab and Kubernetes, you can deploy a complete CI/CD pipeline with multiple environments, automatic deployments, and automatic monitoring.' - }, - { - type: 0, - name: 'Gravitational, Inc.', - logo: 'gravitational', - link: 'https://gravitational.com/telekube/', - blurb: 'Telekube combines Kubernetes with Teleport, our modern SSH server, so operators can remotely manage a multitude of K8s application deployments.' - }, - { - type: 0, - name: 'Hitachi Data Systems', - logo: 'hitachi', - link: 'https://www.hds.com/en-us/products-solutions/application-solutions/unified-compute-platform-with-kubernetes-orchestration.html', - blurb: 'Build the Applications You Need to Drive Your Business - DEVELOP AND DEPLOY APPLICATIONS FASTER AND MORE RELIABLY.' - }, - { - type: 1, - name: 'Infosys Technologies', - logo: 'infosys', - link: 'https://www.infosys.com', - blurb: 'Monolithic to microservices on openshift is a offering that we are building as part of open source practice.' - }, - { - type: 0, - name: 'JFrog', - logo: 'jfrog', - link: 'https://www.jfrog.com/use-cases/12584/', - blurb: 'You can use Artifactory to store and manage all of your application’s container images and deploy to Kubernetes and setup a build, test, deploy pipeline using Jenkins and Artifactory. Once an image is ready to be rolled out, Artifactory can trigger a rolling-update deployment into a Kubernetes cluster without downtime – automatically!' - }, - { - type: 0, - name: 'Navops by Univa', - logo: 'navops', - link: 'https://www.navops.io', - blurb: 'Navops is a suite of products that enables enterprises to take full advantage of Kubernetes and provides the ability to quickly and efficiently run containers at scale.' - }, - { - type: 0, - name: 'NeuVector', - logo: 'neuvector', - link: 'http://neuvector.com/solutions-for-kubernetes-security/', - blurb: 'NeuVector delivers an application and network intelligent container network security solution integrated with and optimized for Kubernetes.' 
- }, - { - type: 1, - name: 'OpsZero', - logo: 'opszero', - link: 'https://www.opszero.com/kubernetes.html', - blurb: 'opsZero provides DevOps for Startups. We build and service your Kubernetes and Cloud Infrastructure to accelerate your release cycle.' - }, - { - type: 1, - name: 'Shiwaforce.com Ltd.', - logo: 'shiwaforce', - link: 'https://www.shiwaforce.com/en/', - blurb: 'Shiwaforce.com is the Agile Partner in Digital Transformation. Our solutions follow business changes quickly, easily and cost-effectively.' - }, - { - type: 1, - name: 'SoftServe', - logo: 'softserve', - link: 'https://www.softserveinc.com/en-us/blogs/kubernetes-travis-ci/', - blurb: 'SoftServe allows its clients to adopt modern application design patterns and benefit from fully integrated, highly available, cost effective Kubernetes clusters at any scale.' - }, - { - type: 1, - name: 'Solinea', - logo: 'solinea', - link: 'https://www.solinea.com/cloud-consulting-services/container-microservices-offerings', - blurb: 'Solinea is a digital transformation consultancy that enables businesses to build innovative solutions by adopting cloud native computing.' - }, - { - type: 1, - name: 'Sphere Software, LLC', - logo: 'spheresoftware', - link: 'https://sphereinc.com/kubernetes/', - blurb: 'The Sphere Software team of experts allows customers to architect and implement scalable applications using Kubernetes in Google Cloud, AWS, and Azure.' - }, - { - type: 1, - name: 'Altoros', - logo: 'altoros', - link: 'https://www.altoros.com/container-orchestration-tools-enablement.html', - blurb: 'Deployment and configuration of Kubernetes, Optimization of existing solutions, training for developers on using Kubernetes, support.' - }, - { - type: 0, - name: 'Cloudbase Solutions', - logo: 'cloudbase', - link: 'https://cloudbase.it/kubernetes', - blurb: 'Cloudbase Solutions provides Kubernetes cross-cloud interoperability for Windows and Linux deployments based on open source technologies.' - }, - { - type: 0, - name: 'Codefresh', - logo: 'codefresh', - link: 'https://codefresh.io/kubernetes-deploy/', - blurb: 'Codefresh is a complete DevOps platform built for containers and Kubernetes. With CI/CD pipelines, image management, and deep integrations into Kubernetes and Helm.' - }, - { - type: 0, - name: 'NetApp', - logo: 'netapp', - link: 'http://netapp.io/2016/12/23/introducing-trident-dynamic-persistent-volume-provisioner-kubernetes/', - blurb: 'Dynamic provisioning and persistent storage support.' - }, - { - type: 0, - name: 'OpenEBS', - logo: 'OpenEBS', - link: 'https://openebs.io/', - blurb: 'OpenEBS is containerized storage for containers integrated tightly into Kubernetes and based on distributed block storage and containerization of storage control. OpenEBS derives intent from K8s and other YAML or JSON such as per container QoS SLAs, tiering and replica policies, and more. OpenEBS is EBS API compliant.' - }, - { - type: 3, - name: 'Google Kubernetes Engine', - logo: 'google', - link: 'https://cloud.google.com/kubernetes-engine/', - blurb: 'Google - Google Kubernetes Engine' - }, - { - type: 1, - name: 'Superorbital', - logo: 'superorbital', - link: 'https://superorbit.al/workshops/kubernetes/', - blurb: 'Helping companies navigate the Cloud Native waters through Kubernetes consulting and training.' 
- }, - { - type: 3, - name: 'Apprenda', - logo: 'apprenda', - link: 'https://apprenda.com/kismatic/', - blurb: 'Apprenda - Kismatic Enterprise Toolkit (KET)' - }, - { - type: 3, - name: 'Red Hat', - logo: 'redhat', - link: 'https://www.openshift.com', - blurb: 'Red Hat - OpenShift Online and OpenShift Container Platform' - }, - { - type: 3, - name: 'Rancher', - logo: 'rancher', - link: 'http://rancher.com/kubernetes/', - blurb: 'Rancher Inc. - Rancher Kubernetes' - }, - { - type: 3, - name: 'Canonical', - logo: 'canonical', - link: 'https://www.ubuntu.com/kubernetes', - blurb: 'The Canonical Distribution of Kubernetes enables you to operate Kubernetes clusters on demand on any major public cloud and private infrastructure.' - }, - { - type: 2, - name: 'Canonical', - logo: 'canonical', - link: 'https://www.ubuntu.com/kubernetes', - blurb: 'Canonical Ltd. - Canonical Distribution of Kubernetes' - }, - { - type: 3, - name: 'Cisco', - logo: 'cisco', - link: 'https://www.cisco.com', - blurb: 'Cisco Systems - Cisco Container Platform' - }, - { - type: 3, - name: 'Cloud Foundry', - logo: 'cff', - link: 'https://www.cloudfoundry.org/container-runtime/', - blurb: 'Cloud Foundry - Cloud Foundry Container Runtime' - }, - { - type: 3, - name: 'IBM', - logo: 'ibm', - link: 'https://www.ibm.com/cloud/container-service', - blurb: 'IBM - IBM Cloud Kubernetes Service' - }, - { - type: 2, - name: 'IBM', - logo: 'ibm', - link: 'https://www.ibm.com/cloud/container-service/', - blurb: 'The IBM Cloud Kubernetes Service combines Docker and Kubernetes to deliver powerful tools, an intuitive user experience, and built-in security and isolation to enable rapid delivery of applications all while leveraging Cloud Services including cognitive capabilities from Watson.' - }, - { - type: 3, - name: 'Samsung', - logo: 'samsung_sds', - link: 'https://github.com/samsung-cnct/kraken', - blurb: 'Samsung SDS - Kraken' - }, - { - type: 3, - name: 'IBM', - logo: 'ibm', - link: 'https://www.ibm.com/cloud-computing/products/ibm-cloud-private/', - blurb: 'IBM - IBM Cloud Private' - }, - { - type: 3, - name: 'Kinvolk', - logo: 'kinvolk', - link: 'https://github.com/kinvolk/kube-spawn', - blurb: 'Kinvolk - kube-spawn' - }, - { - type: 3, - name: 'Heptio', - logo: 'heptio', - link: 'https://aws.amazon.com/quickstart/architecture/heptio-kubernetes', - blurb: 'Heptio - AWS-Quickstart' - }, - { - type: 2, - name: 'Heptio', - logo: 'heptio', - link: 'http://heptio.com', - blurb: 'Heptio helps businesses of all sizes get closer to the vibrant Kubernetes community.' - }, - { - type: 3, - name: 'StackPointCloud', - logo: 'stackpoint', - link: 'https://stackpoint.io', - blurb: 'StackPointCloud - StackPointCloud' - }, - { - type: 2, - name: 'StackPointCloud', - logo: 'stackpoint', - link: 'https://stackpoint.io', - blurb: 'StackPointCloud offers a wide range of support plans for managed Kubernetes clusters built through its universal control plane for Kubernetes Anywhere.' - }, - { - type: 3, - name: 'Caicloud', - logo: 'caicloud', - link: 'https://caicloud.io/products/compass', - blurb: 'Caicloud - Compass' - }, - { - type: 2, - name: 'Caicloud', - logo: 'caicloud', - link: 'https://caicloud.io/', - blurb: 'Founded by ex-Googlers,and early Kubernetes contributors, Caicloud leverages Kubernetes to provide container products which have successfully served Fortune 500 enterprises, and further utilizes Kubernetes as a vehicle to deliver ultra-speed deep learning experience.' 
- }, - { - type: 3, - name: 'Alibaba', - logo: 'alibaba', - link: 'https://www.aliyun.com/product/containerservice?spm=5176.8142029.388261.219.3836dbccRpJ5e9', - blurb: 'Alibaba Cloud - Alibaba Cloud Container Service' - }, - { - type: 3, - name: 'Tencent', - logo: 'tencent', - link: 'https://cloud.tencent.com/product/ccs?lang=en', - blurb: 'Tencent Cloud - Tencent Cloud Container Service' - }, - { - type: 3, - name: 'Huawei', - logo: 'huawei', - link: 'http://www.huaweicloud.com/product/cce.html', - blurb: 'Huawei - Huawei Cloud Container Engine' - }, - { - type: 2, - name: 'Huawei', - logo: 'huawei', - link: 'http://developer.huawei.com/ict/en/site-paas', - blurb: 'FusionStage is an enterprise-grade Platform as a Service product, the core of which is based on mainstream open source container technology including Kubernetes and Docker.' - }, - { - type: 3, - name: 'Google', - logo: 'google', - link: 'https://github.com/kubernetes/kubernetes/tree/master/cluster', - blurb: 'Google - kube-up.sh on Google Compute Engine' - }, - { - type: 3, - name: 'Poseidon', - logo: 'poseidon', - link: 'https://typhoon.psdn.io/', - blurb: 'Poseidon - Typhoon' - }, - { - type: 3, - name: 'Netease', - logo: 'netease', - link: 'https://www.163yun.com/product/container-service-dedicated', - blurb: 'Netease - Netease Container Service Dedicated' - }, - { - type: 2, - name: 'Loodse', - logo: 'loodse', - link: 'https://loodse.com', - blurb: 'Loodse provides Kubernetes training & consulting, and host related events regularly across Europe.' - }, - { - type: 4, - name: 'Loodse', - logo: 'loodse', - link: 'https://loodse.com', - blurb: 'Loodse provides Kubernetes training & consulting, and host related events regularly across Europe.' - }, - { - type: 4, - name: 'LF Training', - logo: 'lf-training', - link: 'https://training.linuxfoundation.org/', - blurb: 'The Linux Foundation’s training program combines the broad, foundational knowledge with the networking opportunities that attendees need to thrive in their careers today.' - }, - { - type: 3, - name: 'Loodse', - logo: 'loodse', - link: 'https://loodse.com', - blurb: 'Loodse - Kubermatic Container Engine' - }, - { - type: 1, - name: 'LTI', - logo: 'lti', - link: 'https://www.lntinfotech.com/', - blurb: 'LTI helps enterprises architect, develop and support scalable cloud native apps using Docker and Kubernetes for private or public cloud.' 
- }, - { - type: 3, - name: 'Microsoft', - logo: 'microsoft', - link: 'https://github.com/Azure/acs-engine', - blurb: 'Microsoft - Azure acs-engine' - }, - { - type: 3, - name: 'Microsoft', - logo: 'microsoft', - link: 'https://docs.microsoft.com/en-us/azure/aks/', - blurb: 'Microsoft - Azure Container Service AKS' - }, - { - type: 3, - name: 'Oracle', - logo: 'oracle', - link: 'http://www.wercker.com/product', - blurb: 'Oracle - Oracle Container Engine' - }, - { - type: 3, - name: 'Oracle', - logo: 'oracle', - link: 'https://github.com/oracle/terraform-kubernetes-installer', - blurb: 'Oracle - Oracle Terraform Kubernetes Installer' - }, - { - type: 3, - name: 'Mesosphere', - logo: 'mesosphere', - link: 'https://mesosphere.com/kubernetes/', - blurb: 'Mesosphere - Kubernetes on DC/OS' - }, - { - type: 3, - name: 'Appscode', - logo: 'appscode', - link: 'https://appscode.com/products/cloud-deployment/', - blurb: 'Appscode - Pharmer' - }, - { - type: 3, - name: 'SAP', - logo: 'sap', - link: 'https://cloudplatform.sap.com/index.html', - blurb: 'SAP - Cloud Platform - Gardener (not yet released)' - }, - { - type: 3, - name: 'Oracle', - logo: 'oracle', - link: 'https://www.oracle.com/linux/index.html', - blurb: 'Oracle - Oracle Linux Container Services for use with Kubernetes' - }, - { - type: 3, - name: 'CoreOS', - logo: 'coreos', - link: 'https://github.com/kubernetes-incubator/bootkube', - blurb: 'CoreOS - bootkube' - }, - { - type: 2, - name: 'CoreOS', - logo: 'coreos', - link: 'https://coreos.com/', - blurb: 'Tectonic is the enterprise-ready Kubernetes product, by CoreOS. It adds key features to allow you to manage, update, and control clusters in production.' - }, - { - type: 3, - name: 'Weaveworks', - logo: 'weave_works', - link: '/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/', - blurb: 'Weaveworks - kubeadm' - }, - { - type: 3, - name: 'Joyent', - logo: 'joyent', - link: 'https://github.com/joyent/triton-kubernetes', - blurb: 'Joyent - Triton Kubernetes' - }, - { - type: 3, - name: 'Wise2c', - logo: 'wise2c', - link: 'http://www.wise2c.com/solution', - blurb: 'Wise2C Technology - WiseCloud' - }, - { - type: 2, - name: 'Wise2c', - logo: 'wise2c', - link: 'http://www.wise2c.com', - blurb: 'Using Kubernetes to providing IT continuous delivery and Enterprise grade container management solution to Financial Industry.' - }, - { - type: 3, - name: 'Docker', - logo: 'docker', - link: 'https://www.docker.com/enterprise-edition', - blurb: 'Docker - Docker Enterprise Edition' - }, - { - type: 3, - name: 'Daocloud', - logo: 'daocloud', - link: 'http://www.daocloud.io/dce', - blurb: 'DaoCloud - DaoCloud Enterprise' - }, - { - type: 2, - name: 'Daocloud', - logo: 'daocloud', - link: 'http://www.daocloud.io/dce', - blurb: 'We provide enterprise-level cloud native application platform that supports both Kubernetes and Docker Swarm.' - }, - { - type: 4, - name: 'Daocloud', - logo: 'daocloud', - link: 'http://www.daocloud.io/dce', - blurb: 'We provide enterprise-level cloud native application platform that supports both Kubernetes and Docker Swarm.' 
- }, - { - type: 3, - name: 'SUSE', - logo: 'suse', - link: 'https://www.suse.com/products/caas-platform/', - blurb: 'SUSE - SUSE CaaS (Container as a Service) Platform' - }, - { - type: 3, - name: 'Pivotal', - logo: 'pivotal', - link: 'https://cloud.vmware.com/pivotal-container-service', - blurb: 'Pivotal/VMware - Pivotal Container Service (PKS)' - }, - { - type: 3, - name: 'VMware', - logo: 'vmware', - link: 'https://cloud.vmware.com/pivotal-container-service', - blurb: 'Pivotal/VMware - Pivotal Container Service (PKS)' - }, - { - type: 3, - name: 'Alauda', - logo: 'alauda', - link: 'http://www.alauda.cn/product/detail/id/68.html', - blurb: 'Alauda - Alauda EE' - }, - { - type: 4, - name: 'Alauda', - logo: 'alauda', - link: 'http://www.alauda.cn/product/detail/id/68.html', - blurb: 'Alauda provides Kubernetes-Centric Enterprise Platform-as-a-Service offerings with a razor focus on delivering Cloud Native capabilities and DevOps best practices to enterprise customers across industries in China.' - }, - { - type: 2, - name: 'Alauda', - logo: 'alauda', - link: 'www.alauda.io', - blurb: 'Alauda provides Kubernetes-Centric Enterprise Platform-as-a-Service offerings with a razor focus on delivering Cloud Native capabilities and DevOps best practices to enterprise customers across industries in China.' - }, - { - type: 3, - name: 'EasyStack', - logo: 'easystack', - link: 'https://easystack.cn/eks/', - blurb: 'EasyStack - EasyStack Kubernetes Service (EKS)' - }, - { - type: 3, - name: 'CoreOS', - logo: 'coreos', - link: 'https://coreos.com/tectonic/', - blurb: 'CoreOS - Tectonic' - }, - { - type: 0, - name: 'GoPaddle', - logo: 'gopaddle', - link: 'https://gopaddle.io', - blurb: 'goPaddle is a DevOps platform for Kubernetes developers. It simplifies the Kubernetes Service creation and maintenance through source to image conversion, build & version management, team management, access controls and audit logs, single click provision of Kubernetes Clusters across multiple clouds from a single console.' - }, - { - type: 0, - name: 'Vexxhost', - logo: 'vexxhost', - link: 'https://vexxhost.com/public-cloud/container-services/kubernetes/', - blurb: 'VEXXHOST offers a high-performance container management service powered by Kubernetes and OpenStack Magnum.' - }, - { - type: 1, - name: 'Component Soft', - logo: 'componentsoft', - link: 'https://www.componentsoft.eu/?p=3925', - blurb: 'Component Soft offers training, consultation and support around open cloud technologies like Kubernetes, Docker, Openstack and Ceph.' - }, - { - type: 0, - name: 'Datera', - logo: 'datera', - link: 'http://www.datera.io/kubernetes/', - blurb: 'Datera delivers high performance, self-managing elastic block storage with self-service provisioning for deploying Kubernetes at scale.' - }, - { - type: 0, - name: 'Containership', - logo: 'containership', - link: 'https://containership.io/', - blurb: 'Containership is a cloud agnostic managed kubernetes offering that supports automatic provisioning on over 14 cloud providers.' - }, - { - type: 0, - name: 'Pure Storage', - logo: 'pure_storage', - link: 'https://hub.docker.com/r/purestorage/k8s/', - blurb: 'Our flexvol driver and dynamic provisioner allow FlashArray/Flashblade storage devices to be consumed as first class persistent storage from within Kubernetes.' 
- }, - { - type: 0, - name: 'Elastisys', - logo: 'elastisys', - link: 'https://elastisys.com/kubernetes/', - blurb: 'Predictive autoscaling - detects recurring workload variations, irregular traffic spikes, and everything in between. Runs K8s in any public or private cloud.' - }, - { - type: 0, - name: 'Portworx', - logo: 'portworx', - link: 'https://portworx.com/use-case/kubernetes-storage/', - blurb: 'With Portworx, you can manage any database or stateful service on any infrastructure using Kubernetes. You get a single data management layer for all of your stateful services, no matter where they run.' - }, - { - type: 1, - name: 'Object Computing, Inc.', - logo: 'objectcomputing', - link: 'https://objectcomputing.com/services/software-engineering/devops/kubernetes-services', - blurb: 'Our portfolio of DevOps consulting services includes Kubernetes support, development, and training.' - }, - { - type: 1, - name: 'Isotoma', - logo: 'isotoma', - link: 'https://www.isotoma.com/blog/2017/10/24/containerisation-tips-for-using-kubernetes-with-aws/', - blurb: 'Based in the North of England, Amazon partners who are delivering Kubernetes solutions on AWS for replatforming and native development.' - }, - { - type: 1, - name: 'Servian', - logo: 'servian', - link: 'https://www.servian.com/cloud-and-technology/', - blurb: 'Based in Australia, Servian provides advisory, consulting and managed services to support both application and data centric kubernetes use cases.' - }, - { - type: 1, - name: 'Redzara', - logo: 'redzara', - link: 'http://redzara.com/cloud-service', - blurb: 'Redzara has wide and in-depth experience in Cloud automation, now taking one giant step by providing container service offering and services to our customers.' - }, - { - type: 0, - name: 'Dataspine', - logo: 'dataspine', - link: 'http://dataspine.xyz/', - blurb: 'Dataspine is building a secure, elastic and serverless deployment platform for production ML/AI workloads on top of k8s.' - }, - { - type: 1, - name: 'CloudBourne', - logo: 'cloudbourne', - link: 'https://cloudbourne.com/kubernetes-enterprise-hybrid-cloud/', - blurb: 'Want to achieve maximum build, deploy and monitoring automation using Kubernetes? We can help.' - }, - { - type: 0, - name: 'CloudBourne', - logo: 'cloudbourne', - link: 'https://cloudbourne.com/', - blurb: 'Our AppZ Hybrid Cloud Platform can help you achieve your digital transformation goals using the powerful Kubernetes.' - }, - { - type: 3, - name: 'BoCloud', - logo: 'bocloud', - link: 'http://www.bocloud.com.cn/en/index.html', - blurb: 'BoCloud - BeyondcentContainer' - }, - { - type: 2, - name: 'Naitways', - logo: 'naitways', - link: 'https://www.naitways.com/', - blurb: 'Naitways is an Operator (AS57119), Integrator and Cloud Services Provider (our own !). We aim to provide value-added services through our mastering of the whole value chain (Infrastructure, Network, Human skills). Private and Public Cloud is available through Kubernetes managed or unmanaged.' - }, - { - type: 2, - name: 'Kinvolk', - logo: 'kinvolk', - link: 'https://kinvolk.io/kubernetes/', - blurb: 'Kinvolk offers Kubernetes engineering & operations support from cluster to kernel. Leading cloud-native organizations turn to Kinvolk for deep-stack Linux expertise.' - }, - { - type: 1, - name: 'Cascadeo Corporation', - logo: 'cascadeo', - link: 'http://www.cascadeo.com/', - blurb: 'Cascadeo designs, implements, and manages containerized workloads with Kubernetes, for both existing applications and greenfield development projects.' 
- }, - { - type: 1, - name: 'Elastisys AB', - logo: 'elastisys', - link: 'https://elastisys.com/services/#kubernetes', - blurb: 'We design, build, and operate Kubernetes clusters. We are experts in highly available and self-optimizing Kubernetes infrastructures' - }, - { - type: 1, - name: 'Greenfield Guild', - logo: 'greenfield', - link: 'http://greenfieldguild.com/', - blurb: 'The Greenfield Guild builds quality open source solutions on, and offers training and support for, Kubernetes in any environment.' - }, - { - type: 1, - name: 'PolarSeven', - logo: 'polarseven', - link: 'https://polarseven.com/what-we-do/kubernetes/', - blurb: 'To get started up and running with Kubernetes (K8s) our PolarSeven consultants can help you with creating a fully functional dockerized environment to run and deploy your applications.' - }, - { - type: 1, - name: 'Kloia', - logo: 'kloia', - link: 'https://kloia.com/kubernetes/', - blurb: 'Kloia is DevOps and Microservices Consultancy company that helps its customers to migrate their environment to cloud platforms for enabling more scalable and secure environments. We use Kubernetes to provide our customers all-in-one solutions in an cloud-agnostic way.' - }, - { - type: 0, - name: 'Bluefyre', - logo: 'bluefyre', - link: 'https://www.bluefyre.io', - blurb: 'Bluefyre offers a developer-first security platform that is native to Kubernetes. Bluefyre helps your development team ship secure code on Kubernetes faster!' - }, - { - type: 0, - name: 'Harness', - logo: 'harness', - link: 'https://harness.io/harness-continuous-delivery/secret-sauce/smart-automation/', - blurb: 'Harness offers Continuous Delivery As-A-Service will full support for containerized apps and Kubernetes clusters.' - }, - { - type: 0, - name: 'VMware - Wavefront', - logo: 'wavefront', - link: 'https://www.wavefront.com/solutions/container-monitoring/', - blurb: 'The Wavefront platform provides metrics-driven analytics and monitoring for Kubernetes and container dashboards for DevOps and developer teams delivering visibility into high-level services as well as granular container metrics.' - }, - { - type: 0, - name: 'Bloombase, Inc.', - logo: 'bloombase', - link: 'https://www.bloombase.com/go/kubernetes', - blurb: 'Bloombase provides high bandwidth, defense-in-depth data-at-rest encryption to lock down Kubernetes crown-jewels at scale.' - }, - { - type: 0, - name: 'Kasten', - logo: 'kasten', - link: 'https://kasten.io/product/', - blurb: 'Kasten provides enterprise solutions specifically built to address the operational complexity of data management in cloud-native environments.' - }, - { - type: 0, - name: 'Humio', - logo: 'humio', - link: 'https://humio.com', - blurb: 'Humio is a log aggregation database. We offer a Kubernetes integration that will give you insights to your logs across apps and instances.' - }, - { - type: 0, - name: 'Outcold Solutions LLC', - logo: 'outcold', - link: 'https://www.outcoldsolutions.com/#monitoring-kubernetes', - blurb: 'Powerful Certified Splunk applications for Monitoring OpenShift, Kubernetes and Docker.' 
- }, - { - type: 0, - name: 'SysEleven GmbH', - logo: 'syseleven', - link: 'http://www.syseleven.de/', - blurb: 'Enterprise Customers who are in need of bulletproof operations (High Performance E-Commerce and Enterprise Portals)' - }, - { - type: 0, - name: 'Landoop', - logo: 'landoop', - link: 'http://lenses.stream', - blurb: 'Lenses for Apache Kafka, to deploy, manage and operate with confidence data streaming pipelines and topologies at scale with confidence and native Kubernetes integration.' - }, - { - type: 0, - name: 'Redis Labs', - logo: 'redis', - link: 'https://redislabs.com/blog/getting-started-with-kubernetes-and-redis-using-redis-enterprise/', - blurb: 'Redis Enterprise extends open source Redis and delivers stable high performance and linear scaling required for building microservices on the Kubernetes platform.' - }, - { - type: 3, - name: 'Diamanti', - logo: 'diamanti', - link: 'https://diamanti.com/', - blurb: 'Diamanti - Diamanti-D10' - }, - { - type: 3, - name: 'Eking', - logo: 'eking', - link: 'http://www.eking-tech.com/', - blurb: 'Hainan eKing Technology Co. - eKing Cloud Container Platform' - }, - { - type: 3, - name: 'Harmony Cloud', - logo: 'harmony', - link: 'http://harmonycloud.cn/products/rongqiyun/', - blurb: 'Harmonycloud - Harmonycloud Container Platform' - }, - { - type: 3, - name: 'Woqutech', - logo: 'woqutech', - link: 'http://woqutech.com/product_qfusion.html', - blurb: 'Woqutech - QFusion' - }, - { - type: 3, - name: 'Baidu', - logo: 'baidu', - link: 'https://cloud.baidu.com/product/cce.html', - blurb: 'Baidu Cloud - Baidu Cloud Container Engine' - }, - { - type: 3, - name: 'ZTE', - logo: 'zte', - link: 'https://sdnfv.zte.com.cn/en/home', - blurb: 'ZTE - TECS OpenPalette' - }, - { - type: 1, - name: 'Automatic Server AG', - logo: 'asag', - link: 'http://www.automatic-server.com/paas.html', - blurb: 'We install and operate Kubernetes in big enterprises, create deployment workflows and help to migrate.' - }, - { - type: 1, - name: 'Circulo Siete', - logo: 'circulo', - link: 'https://circulosiete.com/consultoria/kubernetes/', - blurb: 'We are a Mexico based company offering training, consulting and support to migrate your workloads to Kubernetes, Cloud Native Microservices & Devops.' - }, - { - type: 1, - name: 'DevOpsGuru', - logo: 'devopsguru', - link: 'http://devopsguru.ca/workshop', - blurb: 'DevOpsGuru work with small business to transform from physical to virtual to containerization.' - }, - { - type: 1, - name: 'EIN Intelligence Co., Ltd', - logo: 'ein', - link: 'https://ein.io', - blurb: 'Startups and agile enterprises in South Korea.' - }, - { - type: 0, - name: 'GuardiCore', - logo: 'guardicore', - link: 'https://www.guardicore.com/', - blurb: 'GuardiCore provided process level visibility and network policy enforcement on containerized assets on the Kubernetes platform.' - }, - { - type: 0, - name: 'Hedvig', - logo: 'hedvig', - link: 'https://www.hedviginc.com/blog/provisioning-hedvig-storage-with-kubernetes', - blurb: 'Hedvig is software-defined storage that uses NFS or iSCSI for persistent volumes for provisioning shared storage for pods and containers.' - }, - { - type: 0, - name: 'Hewlett Packard Enterprise', - logo: 'hpe', - link: ' https://www.hpe.com/us/en/storage/containers.html', - blurb: 'Persistent Storage that makes data as easy to manage as containers: dynamic provisioning, policy-based performance & protection, QoS, & more.' 
- }, - { - type: 0, - name: 'JetBrains', - logo: 'jetbrains', - link: 'https://blog.jetbrains.com/teamcity/2017/10/teamcity-kubernetes-support-plugin/', - blurb: 'Run TeamCity cloud build agents in a Kubernetes cluster. Provides Helm support as a build step.' - }, - { - type: 2, - name: 'Opensense', - logo: 'opensense', - link: 'http://www.opensense.fr/en/kubernetes-en/', - blurb: 'We provide Kubernetes services (integration, operation, training) as well as development of banking microservices based on our extended experience with cloud of containers, microservices, data management and financial sector.' - }, - { - type: 2, - name: 'SAP SE', - logo: 'sap', - link: 'https://cloudplatform.sap.com', - blurb: 'The SAP Cloud Platform provides in-memory capabilities and unique business services for building and extending applications. With open sourced Project Gardener, SAP utilizes the power of Kubernetes to enable an open, robust, multi-cloud experience for our customers. You can use simple, modern cloud native design principles and leverage skills your organization already has to deliver agile and transformative applications, while integrating with the latest SAP Leonardo business features.' - }, - { - type: 1, - name: 'Mobilise Cloud Services Limited', - logo: 'mobilise', - link: 'https://www.mobilise.cloud/en/services/serverless-application-delivery/', - blurb: 'Mobilise helps organisations adopt Kubernetes and integrate with their CI/CD tooling.' - }, - { - type: 3, - name: 'AWS', - logo: 'aws', - link: 'https://aws.amazon.com/eks/', - blurb: 'Amazon Elastic Container Service for Kubernetes (Amazon EKS) is a managed service that makes it easy for you to run Kubernetes on AWS without needing to install and operate your own Kubernetes clusters.' - }, - { - type: 3, - name: 'Kontena', - logo: 'kontena', - link: 'https://pharos.sh', - blurb: 'Kontena Pharos - The simple, solid, certified Kubernetes distribution that just works.' - }, - { - type: 2, - name: 'NTTData', - logo: 'nttdata', - link: 'http://de.nttdata.com/altemista-cloud', - blurb: 'NTT DATA, a member of the NTT Group, brings the power of the worlds leading infrastructure provider in the global K8s community.' - }, - { - type: 2, - name: 'OCTO', - logo: 'octo', - link: 'https://www.octo.academy/fr/formation/275-kubernetes-utiliser-architecturer-et-administrer-une-plateforme-de-conteneurs', - blurb: 'OCTO technology provides training, architecture, technical consulting and delivery services including containers and Kubernetes.' - }, - { - type: 0, - name: 'Logdna', - logo: 'logdna', - link: 'https://logdna.com/kubernetes', - blurb: 'Pinpoint production issues instantly with LogDNA, the best logging platform you will ever use. Get started with only 2 kubectl commands.' 
- } - ] - - var kcspContainer = document.getElementById('kcspContainer') - var distContainer = document.getElementById('distContainer') - var ktpContainer = document.getElementById('ktpContainer') - var isvContainer = document.getElementById('isvContainer') - var servContainer = document.getElementById('servContainer') - - var sorted = partners.sort(function (a, b) { - if (a.name > b.name) return 1 - if (a.name < b.name) return -1 - return 0 - }) - - sorted.forEach(function (obj) { - var box = document.createElement('div') - box.className = 'partner-box' - - var img = document.createElement('img') - img.src = '/images/square-logos/' + obj.logo + '.png' - - var div = document.createElement('div') - - var p = document.createElement('p') - p.textContent = obj.blurb - - var link = document.createElement('a') - link.href = obj.link - link.target = '_blank' - link.textContent = 'Learn more' - - div.appendChild(p) - div.appendChild(link) - - box.appendChild(img) - box.appendChild(div) - - var container; - if (obj.type === 0) { - container = isvContainer; - } else if (obj.type === 1) { - container = servContainer; - } else if (obj.type === 2) { - container = kcspContainer; - } else if (obj.type === 3) { - container = distContainer; - } else if (obj.type === 4) { - container = ktpContainer; - } - - container.appendChild(box) - }) -})(); diff --git a/content/en/partners/_index.html b/content/en/partners/_index.html index c652514ac40e3..7925e0318886f 100644 --- a/content/en/partners/_index.html +++ b/content/en/partners/_index.html @@ -95,8 +95,4 @@
- - + \ No newline at end of file diff --git a/content/es/_index.html b/content/es/_index.html index cf19b195cdf89..8aecd07aaf114 100644 --- a/content/es/_index.html +++ b/content/es/_index.html @@ -4,8 +4,6 @@ cid: home --- -{{< deprecationwarning >}} - {{< blocks/section id="oceanNodes" >}} {{% blocks/feature image="flower" %}} ### Kubernetes (K8s) es una plataforma de código abierto para automatizar la implementación, el escalado y la administración de aplicaciones en contenedores. @@ -57,4 +55,4 @@

El desafío de migrar más de 150 microservicios a Kubernetes

{{< /blocks/section >}} -{{< blocks/case-studies >}} +{{< blocks/case-studies >}} \ No newline at end of file diff --git a/content/es/docs/concepts/_index.md b/content/es/docs/concepts/_index.md index fddd126047a2f..7dd7709bae624 100644 --- a/content/es/docs/concepts/_index.md +++ b/content/es/docs/concepts/_index.md @@ -17,11 +17,11 @@ La sección de conceptos te ayudará a conocer los componentes de Kubernetes as En Kubernetes se utilizan los *objetos de la API de Kubernetes* para describir el *estado deseado* del clúster: qué aplicaciones u otras cargas de trabajo se quieren ejecutar, qué imagenes de contenedores usan, el número de replicas, qué red y qué recursos de almacenamiento quieres que tengan disponibles, etc. Se especifica el estado deseado del clúster mediante la creación de objetos usando la API de Kubernetes, típicamente mediante la interfaz de línea de comandos, `kubectl`. También se puede usar la API de Kubernetes directamente para interactuar con el clúster y especificar o modificar tu estado deseado. -Una vez que se especifica el estado deseado, el *Plano de Control de Kubernetes* realizará las acciones necesarias para que el estado actual del clúster coincida con el estado deseado. Para ello, Kubernetes realiza diferentes tareas de forma automática, como pueden ser: parar o arrancar contenedores, escalar el número de réplicas de una aplicación dada, etc. El Plano de Control de Kubernetes consiste en un grupo de procesos que corren en tu clúster: +Una vez que se especifica el estado deseado, el *Plano de Control de Kubernetes* realizará las acciones necesarias para que el estado actual del clúster coincida con el estado deseado. Para ello, Kubernetes realiza diferentes tareas de forma automática, como pueden ser: parar o arrancar contenedores, escalar el número de réplicas de una aplicación dada, etc. El Plano de Control de Kubernetes consiste en un grupo de daemons que corren en tu clúster: -* El **Master de Kubernetes** es un conjunto de tres procesos que se ejecutan en un único nodo del clúster, que se denomina nodo master. Estos procesos son: [kube-apiserver](/docs/admin/kube-apiserver/), [kube-controller-manager](/docs/admin/kube-controller-manager/) y [kube-scheduler](/docs/admin/kube-scheduler/). +* El **Master de Kubernetes** es un conjunto de tres daemons que se ejecutan en un único nodo del clúster, que se denomina nodo master. Estos daemons son: [kube-apiserver](/docs/admin/kube-apiserver/), [kube-controller-manager](/docs/admin/kube-controller-manager/) y [kube-scheduler](/docs/admin/kube-scheduler/). -* Los restantes nodos no master contenidos en tu clúster, ejecutan los siguientes dos procesos: +* Los restantes nodos no master contenidos en tu clúster, ejecutan los siguientes dos daemons: * **[kubelet](/docs/admin/kubelet/)**, el cual se comunica con el Master de Kubernetes. * **[kube-proxy](/docs/admin/kube-proxy/)**, un proxy de red que implementa los servicios de red de Kubernetes en cada nodo. @@ -55,7 +55,7 @@ Por ejemplo, cuando usas la API de Kubernetes para crear un Deployment, estás p El Master de Kubernetes es el responsable de mantener el estado deseado de tu clúster. Cuando interactuas con Kubernetes, como por ejemplo cuando utilizas la interfaz de línea de comandos `kubectl`, te estás comunicando con el master de tu clúster de Kubernetes. -> Por "master" entendemos la colección de procesos que gestionan el estado del clúster. 
Típicamente, estos procesos se ejecutan todos en un único nodo del clúster, y este nodo recibe por tanto la denominación de master. El master puede estar replicado por motivos de disponibilidad y redundancia. +> Por "master" entendemos la colección de daemons que gestionan el estado del clúster. Típicamente, estos daemons se ejecutan todos en un único nodo del clúster, y este nodo recibe por tanto la denominación de master. El master puede estar replicado por motivos de disponibilidad y redundancia. ### Kubernetes Nodes diff --git a/content/es/docs/concepts/configuration/configmap.md b/content/es/docs/concepts/configuration/configmap.md new file mode 100644 index 0000000000000..b607f0b82def9 --- /dev/null +++ b/content/es/docs/concepts/configuration/configmap.md @@ -0,0 +1,253 @@ +--- +title: ConfigMaps +content_type: concept +weight: 20 +--- + + + +{{< glossary_definition term_id="configmap" prepend="Un configmap es " length="all" >}} + +{{< caution >}} +ConfigMap no proporciona encriptación. +Si los datos que quieres almacenar son confidenciales, utiliza un +{{< glossary_tooltip text="Secret" term_id="secret" >}} en lugar de un ConfigMap, +o utiliza otras herramientas externas para mantener los datos seguros. +{{< /caution >}} + + + + +## Motivo + +Utiliza un ConfigMap para crear una configuración separada del código de la aplicación. + +Por ejemplo, imagina que estás desarrollando una aplicación que puedes correr en +tu propio equipo (para desarrollo) y en el cloud (para manejar tráfico real). +Escribes el código para configurar una variable llamada `DATABASE_HOST`. +En tu equipo configuras la variable con el valor `localhost`. +En el cloud, la configuras con referencia a un kubernetes +{{< glossary_tooltip text="Service" term_id="service" >}} que expone el componente +de la base de datos en tu clúster. + +Esto permite tener una imagen corriendo en el cloud y +tener el mismo código localmente para comprobarlo si es necesario. + +## Objeto ConfigMap + +Un ConfigMap es un [objeto](/docs/concepts/overview/working-with-objects/kubernetes-objects/) de la API +que permite almacenar la configuración de otros objetos utilizados. A diferencia de muchos +objetos de kubernetes que tienen un `spec`, un ConfigMap tiene una sección `data` para +almacenar items, identificados por una clave, y sus valores. + +El nombre del ConfigMap debe ser un +[nombre de subdominio DNS](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names) válido. + +## ConfigMaps y Pods + +Puedes escribir un `spec` de Pod que referencie un ConfigMap y configurar el contenedor(es) +de ese {{< glossary_tooltip text="Pod" term_id="pod" >}} en base a los datos del ConfigMap. El {{< glossary_tooltip text="Pod" term_id="pod" >}} y el ConfigMap deben estar en +el mismo {{< glossary_tooltip text="Namespace" term_id="namespace" >}}. + +Este es un ejemplo de ConfigMap que tiene algunas claves con un valor simple, +y otras claves donde el valor tiene un formato de un fragmento de configuración.
+ +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: game-demo +data: + # property-like keys; each key maps to a simple value + player_initial_lives: "3" + ui_properties_file_name: "user-interface.properties" + # + # file-like keys + game.properties: | + enemy.types=aliens,monsters + player.maximum-lives=5 + user-interface.properties: | + color.good=purple + color.bad=yellow + allow.textmode=true +``` +Hay cuatro maneras diferentes de usar un ConfigMap para configurar +un contenedor dentro de un {{< glossary_tooltip text="Pod" term_id="pod" >}}: + +1. Argumento en la línea de comandos como entrypoint de un contenedor +1. Variable de entorno de un contenedor +1. Como fichero en un volumen de solo lectura, para que lo lea la aplicación +1. Escribir el código para ejecutar dentro de un {{< glossary_tooltip text="Pod" term_id="pod" >}} que utiliza la API para leer el ConfigMap + +Estos diferentes mecanismos permiten utilizar diferentes métodos para modelar +los datos que se van a usar. +Para los primeros tres mecanismos, el +{{< glossary_tooltip text="kubelet" term_id="kubelet" >}} utiliza la información +del ConfigMap cuando lanza un contenedor (o varios) en un {{< glossary_tooltip text="Pod" term_id="pod" >}}. + +Para el cuarto método, tienes que escribir el código para leer el ConfigMap y sus datos. +Sin embargo, como estás utilizando la API de kubernetes directamente, la aplicación puede +suscribirse para obtener actualizaciones cuando el ConfigMap cambie, y reaccionar +cuando esto ocurra. Accediendo directamente a la API de kubernetes, esta +técnica también permite acceder al ConfigMap en diferentes namespaces. + +En el siguiente ejemplo el Pod utiliza los valores de `game-demo` para configurar el contenedor: +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: configmap-demo-pod +spec: + containers: + - name: demo + image: game.example/demo-game + env: + # Define the environment variable + - name: PLAYER_INITIAL_LIVES # Notice that the case is different here + # from the key name in the ConfigMap. + valueFrom: + configMapKeyRef: + name: game-demo # The ConfigMap this value comes from. + key: player_initial_lives # The key to fetch. + - name: UI_PROPERTIES_FILE_NAME + valueFrom: + configMapKeyRef: + name: game-demo + key: ui_properties_file_name + volumeMounts: + - name: config + mountPath: "/config" + readOnly: true + volumes: + # You set volumes at the Pod level, then mount them into containers inside that Pod + - name: config + configMap: + # Provide the name of the ConfigMap you want to mount. + name: game-demo + # An array of keys from the ConfigMap to create as files + items: + - key: "game.properties" + path: "game.properties" + - key: "user-interface.properties" + path: "user-interface.properties" +``` + + +Un ConfigMap no diferencia entre las propiedades de una línea individual y +un fichero con múltiples líneas y valores. +Lo importante es cómo los {{< glossary_tooltip text="Pods" term_id="pod" >}} y otros objetos consumen estos valores. + +Para este ejemplo, definimos un {{< glossary_tooltip text="Volumen" term_id="volume" >}} y lo montamos dentro del contenedor +`demo` como `/config` creando dos ficheros, +`/config/game.properties` y `/config/user-interface.properties`, +aunque haya cuatro claves en el ConfigMap. Esto es debido a que en la definición +del {{< glossary_tooltip text="Pod" term_id="pod" >}} se especifica el array `items` en la sección `volumes`.
+Si quieres omitir el array `items` entero, cada clave del ConfigMap se convierte en +un fichero con el mismo nombre que la clave, y tienes 4 ficheros. + +## Usando ConfigMaps + +Los ConfigMaps pueden montarse como volúmenes. También pueden ser utilizados por otras +partes del sistema, sin ser expuestos directamente al {{< glossary_tooltip text="Pod" term_id="pod" >}}. Por ejemplo, +los ConfigMaps pueden contener información para que otros elementos del sistema utilicen +para su configuración. + +{{< note >}} +La manera más común de usar los ConfigMaps es para configurar +los contenedores que están corriendo en un {{< glossary_tooltip text="Pod" term_id="pod" >}} en el mismo {{< glossary_tooltip text="Namespace" term_id="namespace" >}}. +También se pueden usar por separado. + +Por ejemplo, +quizá encuentres {{< glossary_tooltip text="AddOns" term_id="addons" >}} +u {{< glossary_tooltip text="Operadores" term_id="operator-pattern" >}} que +ajustan su comportamiento en base a un ConfigMap. +{{< /note >}} + +### Usando ConfigMaps como ficheros en un Pod + +Para usar un ConfigMap en un volumen en un {{< glossary_tooltip text="Pod" term_id="pod" >}}: + +1. Crear un ConfigMap o usar uno que exista. Múltiples {{< glossary_tooltip text="Pods" term_id="pod" >}} pueden utilizar el mismo ConfigMap. +1. Modifica la configuración del {{< glossary_tooltip text="Pod" term_id="pod" >}} para añadir el volumen en `.spec.volumes[]`. Pon cualquier nombre al {{< glossary_tooltip text="Volumen" term_id="volume" >}}, y configura el campo `.spec.volumes[].configMap.name` con una referencia al objeto ConfigMap. +1. Añade un `.spec.containers[].volumeMounts[]` a cada contenedor que necesite el ConfigMap. Especifica `.spec.containers[].volumeMounts[].readOnly = true` y `.spec.containers[].volumeMounts[].mountPath` en un directorio sin uso donde quieras que aparezca el ConfigMap. +1. Modifica la imagen o el comando utilizado para que el programa busque los ficheros en el directorio. Cada clave del ConfigMap `data` se convierte en un fichero en el `mountPath`. + +En este ejemplo, el {{< glossary_tooltip text="Pod" term_id="pod" >}} monta un ConfigMap como un {{< glossary_tooltip text="volumen" term_id="volume" >}}: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: mypod +spec: + containers: + - name: mypod + image: redis + volumeMounts: + - name: foo + mountPath: "/etc/foo" + readOnly: true + volumes: + - name: foo + configMap: + name: myconfigmap +``` + +Cada ConfigMap que quieras utilizar debe estar referenciado en `.spec.volumes`. + +Si hay múltiples contenedores en el {{< glossary_tooltip text="Pod" term_id="pod" >}}, cada contenedor tiene su propio +bloque `volumeMounts`, pero solo un `.spec.volumes` es necesario por cada ConfigMap. + +#### Los ConfigMaps montados se actualizan automáticamente + +Cuando se actualiza un ConfigMap que está siendo utilizado en un {{< glossary_tooltip text="volumen" term_id="volume" >}}, las claves también se actualizan. +El {{< glossary_tooltip text="kubelet" term_id="kubelet" >}} comprueba si el ConfigMap montado está actualizado en cada período de sincronización. +Sin embargo, el {{< glossary_tooltip text="kubelet" term_id="kubelet" >}} utiliza su caché local para obtener el valor actual del ConfigMap. +El tipo de caché es configurable usando el campo `ConfigMapAndSecretChangeDetectionStrategy` en el +[KubeletConfiguration struct](https://github.com/kubernetes/kubernetes/blob/{{< param "docsbranch" >}}/staging/src/k8s.io/kubelet/config/v1beta1/types.go).
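+A modo orientativo, este es un esbozo (no normativo) de cómo podría ajustarse esa estrategia en el fichero de configuración del kubelet; el nombre del campo en camelCase y los valores `Watch`, `Cache` y `Get` se asumen a partir de la API `KubeletConfiguration` v1beta1 y conviene verificarlos contra la versión de Kubernetes en uso:
+
+```yaml
+# Fragmento de ejemplo (esbozo ilustrativo) de la configuración del kubelet.
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# Estrategia de detección de cambios para ConfigMaps y Secrets montados:
+# Watch (por defecto), Cache (basada en ttl) o Get (consulta directa a la API).
+configMapAndSecretChangeDetectionStrategy: Cache
+```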
+Un ConfigMap puede ser propagado mediante _watch_ (por defecto), basado en ttl, o simplemente redirigiendo +todas las consultas directamente a la API. +Como resultado, el retraso total desde el momento que el ConfigMap es actualizado hasta el momento +que las nuevas claves son proyectadas en el {{< glossary_tooltip text="Pod" term_id="pod" >}} puede ser tan largo como la sincronización del {{< glossary_tooltip text="Pod" term_id="pod" >}} ++ el retraso de propagación de la caché, donde la propagación de la caché depende del tipo de +caché elegido (es igual al retraso de propagación, ttl de la caché, o cero correspondientemente). + +{{< feature-state for_k8s_version="v1.18" state="alpha" >}} + +La característica alpha de kubernetes _Immutable Secrets and ConfigMaps_ provee una opción para configurar +{{< glossary_tooltip text="Secrets" term_id="secret" >}} individuales y ConfigMaps como inmutables. Para los {{< glossary_tooltip text="Clústeres" term_id="cluster" >}} que usan ConfigMaps de forma extensiva +(al menos decenas de miles de montajes únicos de ConfigMaps en {{< glossary_tooltip text="Pods" term_id="pod" >}}), impedir cambios en sus +datos aporta las siguientes ventajas: + +- protección de actualizaciones accidentales (o no deseadas) que pueden causar caídas de aplicaciones +- mejora el rendimiento del {{< glossary_tooltip text="Clúster" term_id="cluster" >}} significativamente reduciendo la carga del {{< glossary_tooltip text="kube-apiserver" term_id="kube-apiserver" >}}, +cerrando los _watches_ del ConfigMap marcado como inmutable. + +Para usar esta característica, habilita el `ImmutableEphemeralVolumes` +[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) y configura +el campo del {{< glossary_tooltip text="Secret" term_id="secret" >}} o ConfigMap `immutable` como `true`. Por ejemplo: +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + ... +data: + ... +immutable: true +``` + +{{< note >}} +Una vez que un ConfigMap o un {{< glossary_tooltip text="Secret" term_id="secret" >}} es marcado como inmutable, _no_ es posible revertir el cambio +ni cambiar el contenido del campo `data`. Solo se puede eliminar y recrear el ConfigMap. +Los {{< glossary_tooltip text="Pods" term_id="pod" >}} existentes mantienen un punto de montaje del ConfigMap eliminado; es recomendable +recrear los {{< glossary_tooltip text="Pods" term_id="pod" >}}. +{{< /note >}} + + +## {{% heading "whatsnext" %}} + + +* Leer sobre [Secrets](/docs/concepts/configuration/secret/). +* Leer [Configure a Pod to Use a ConfigMap](/docs/tasks/configure-pod-container/configure-pod-configmap/). +* Leer [The Twelve-Factor App](https://12factor.net/) para entender el motivo de separar + el código de la configuración. diff --git a/content/es/docs/concepts/configuration/pod-overhead.md b/content/es/docs/concepts/configuration/pod-overhead.md new file mode 100644 index 0000000000000..0d7a89bcd6904 --- /dev/null +++ b/content/es/docs/concepts/configuration/pod-overhead.md @@ -0,0 +1,41 @@ +--- +reviewers: +- raelga +title: Sobrecarga de Pod +content_type: concept +weight: 20 +--- + + + +{{< feature-state for_k8s_version="v1.16" state="alpha" >}} + +Cuando se está ejecutando un {{< glossary_tooltip text="Pod" term_id="pod" >}} en un {{< glossary_tooltip text="nodo" term_id="node" >}}, el Pod por sí mismo utiliza una cantidad de recursos del sistema. Estos recursos son adicionales a los recursos necesarios para hacer funcionar el/los contenedor(es) dentro del Pod.
+La _Sobrecarga de Pod_ es una característica para contabilizar los recursos consumidos por la infraestructura de Pods que están por encima de los valores de _Requests_ y _Limits_ del/los contenedor(es). + + + +## Sobrecarga de Pod + +En Kubernetes, la sobrecarga de {{< glossary_tooltip text="Pod" term_id="pod" >}} se configura en el tiempo de [admisión](/docs/reference/access-authn-authz/extensible-admission-controllers/#what-are-admission-webhooks) con respecto a la sobrecarga asociada con el [RuntimeClass](/docs/concepts/containers/runtime-class/) del Pod. + +Cuando se habilita la opción de sobrecarga de {{< glossary_tooltip text="Pod" term_id="pod" >}}, se considera tanto la propia sobrecarga como la suma de solicitudes de recursos del contenedor al programar el {{< glossary_tooltip text="Pod" term_id="pod" >}}. Del mismo modo, {{< glossary_tooltip text="Kubelet" term_id="kubelet" >}} incluirá la sobrecarga de {{< glossary_tooltip text="Pod" term_id="pod" >}} cuando se dimensione el cgroup del {{< glossary_tooltip text="Pod" term_id="pod" >}}, y cuando se realice la clasificación de la expulsión de {{< glossary_tooltip text="Pods" term_id="pod" >}}. + +### Configuración + +Debe asegurarse de que el [Feature Gate](/docs/reference/command-line-tools-reference/feature-gates/) `PodOverhead` esté activado (su valor está desactivado de manera predeterminada) en todo el {{< glossary_tooltip text="clúster" term_id="cluster" >}}. Esto significa: + +- en el {{< glossary_tooltip text="kube-scheduler" term_id="kube-scheduler" >}} +- en el {{< glossary_tooltip text="kube-apiserver" term_id="kube-apiserver" >}} +- en el {{< glossary_tooltip text="kubelet" term_id="kubelet" >}} de cada {{< glossary_tooltip text="nodo" term_id="node" >}} +- en cualquier servidor de API personalizado que necesite [Feature Gates](/docs/reference/command-line-tools-reference/feature-gates/). + +{{< note >}} +Los usuarios que pueden escribir recursos del tipo RuntimeClass podrían impactar y poner en riesgo el rendimiento de la carga de trabajo en todo el {{< glossary_tooltip text="clúster" term_id="cluster" >}}. Por ello, se puede limitar el acceso a esta característica usando los controles de acceso de Kubernetes. +Para obtener más detalles vea la [documentación sobre autorización](/docs/reference/access-authn-authz/authorization/). +{{< /note >}} + + + +* [RuntimeClass](/docs/concepts/containers/runtime-class/) +* [PodOverhead Design](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/20190226-pod-overhead.md) diff --git a/content/es/docs/concepts/overview/components.md b/content/es/docs/concepts/overview/components.md new file mode 100644 index 0000000000000..0ca6f3126c65b --- /dev/null +++ b/content/es/docs/concepts/overview/components.md @@ -0,0 +1,117 @@ +--- +reviewers: +- raelga +title: Componentes de Kubernetes +content_type: concept +weight: 20 +card: + name: concepts + weight: 20 +--- + + + +Este documento describe los distintos componentes que +son necesarios para operar un clúster de Kubernetes. + + + +## Componentes del plano de control + +Los componentes que forman el plano de control toman decisiones globales sobre +el clúster (por ejemplo, la planificación) y detectan y responden a eventos del clúster, como la creación +de un nuevo pod cuando la propiedad `replicas` de un controlador de replicación no se cumple. + +Estos componentes pueden ejecutarse en cualquier nodo del clúster. 
Sin embargo, para simplificar, los +scripts de instalación típicamente inician todos los componentes del plano de control en el mismo nodo, +sin que se ejecuten contenedores de los usuarios en ese nodo. El plano de control puede ejecutarse en varios nodos +para garantizar la [alta disponibilidad](/docs/admin/high-availability/). + +### kube-apiserver + +{{< glossary_definition term_id="kube-apiserver" length="all" >}} + +### etcd + +{{< glossary_definition term_id="etcd" length="all" >}} + +### kube-scheduler + +{{< glossary_definition term_id="kube-scheduler" length="all" >}} + +### kube-controller-manager + +{{< glossary_definition term_id="kube-controller-manager" length="all" >}} + +Estos controladores incluyen: + + * Controlador de nodos: es el responsable de detectar y responder cuando un nodo deja de funcionar + * Controlador de replicación: es el responsable de mantener el número correcto de pods para cada controlador + de replicación del sistema + * Controlador de endpoints: construye el objeto `Endpoints`, es decir, hace una unión entre los `Services` y los `Pods` + * Controladores de tokens y cuentas de servicio: crean cuentas y tokens de acceso a la API por defecto para los nuevos {{< glossary_tooltip text="Namespaces" term_id="namespace">}}. + +### cloud-controller-manager + +[cloud-controller-manager](/docs/tasks/administer-cluster/running-cloud-controller/) ejecuta controladores que +interactúan con proveedores de la nube. El binario `cloud-controller-manager` es una característica alpha que se introdujo en la versión 1.6 de Kubernetes. + +`cloud-controller-manager` sólo ejecuta ciclos de control específicos para cada proveedor de la nube. Es posible +desactivar estos ciclos en `kube-controller-manager` pasando la opción `--cloud-provider=external` cuando se arranque el `kube-controller-manager`. + +`cloud-controller-manager` permite que el código de Kubernetes y el del proveedor de la nube evolucionen de manera independiente. Anteriormente, el código de Kubernetes dependía de la funcionalidad específica de cada proveedor de la nube. En el futuro, el código que sea específico a una plataforma debería ser mantenido por el proveedor de la nube y enlazado a `cloud-controller-manager` al correr Kubernetes. + +Los siguientes controladores dependen de alguna forma de un proveedor de la nube: + + * Controlador de nodos: es el responsable de detectar y actuar cuando un nodo deja de responder + * Controlador de rutas: para configurar rutas en la infraestructura de nube subyacente + * Controlador de servicios: para crear, actualizar y eliminar balanceadores de carga en la nube + * Controlador de volúmenes: para crear, conectar y montar volúmenes e interactuar con el proveedor de la nube para orquestarlos + +## Componentes de nodo + +Los componentes de nodo corren en cada nodo, manteniendo a los pods en funcionamiento y proporcionando el entorno de ejecución de Kubernetes. + +### kubelet + +{{< glossary_definition term_id="kubelet" length="all" >}} + +### kube-proxy + +[kube-proxy](/docs/admin/kube-proxy/) permite abstraer un servicio en Kubernetes manteniendo las +reglas de red en el anfitrión y haciendo reenvío de conexiones. + +### Runtime de contenedores + +El {{< glossary_definition term_id="container-runtime" text="runtime de los contenedores" >}} es el software responsable de ejecutar los contenedores.
Kubernetes soporta varios de +ellos: [Docker](http://www.docker.com), [containerd](https://containerd.io), [cri-o](https://cri-o.io/), [rktlet](https://github.com/kubernetes-incubator/rktlet) y cualquier implementación de la interfaz de runtime de contenedores de Kubernetes, o [Kubernetes CRI](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-node/container-runtime-interface.md). + +## Addons + +Los _addons_ son pods y servicios que implementan funcionalidades del clúster. Estos pueden ser administrados +por `Deployments`, `ReplicationControllers` y otros. Los _addons_ asignados a un espacio de nombres se crean en el espacio `kube-system`. + +Más abajo se describen algunos _addons_. Para una lista más completa de los _addons_ disponibles, por favor visite [Addons](/docs/concepts/cluster-administration/addons/). + +### DNS + +Si bien los otros _addons_ no son estrictamente necesarios, todos los clústers de Kubernetes deberían tener un [DNS interno del clúster](/docs/concepts/services-networking/dns-pod-service/) ya que la mayoría de los ejemplos lo requieren. + +El DNS interno del clúster es un servidor DNS, adicional a los que ya podrías tener en tu red, que sirve registros DNS a los servicios de Kubernetes. + +Los contenedores que son iniciados por Kubernetes incluyen automáticamente este servidor en sus búsquedas DNS. + +### Interfaz Web (Dashboard) {#dashboard} + +El [Dashboard](/docs/tasks/access-application-cluster/web-ui-dashboard/) es una interfaz Web de propósito general para clústeres de Kubernetes. Le permite a los usuarios administrar y resolver problemas que puedan presentar tanto las aplicaciones como el clúster. + +### Monitor de recursos de contenedores + +El [Monitor de recursos de contenedores](/docs/tasks/debug-application-cluster/resource-usage-monitoring/) almacena +de forma centralizada series de tiempo con métricas sobre los contenedores, y provee una interfaz para navegar estos +datos. + +### Registros del clúster + +El mecanismo de [registros del clúster](/docs/concepts/cluster-administration/logging/) está a cargo de almacenar +los registros de los contenedores de forma centralizada, proporcionando una interfaz de búsqueda y navegación. diff --git a/content/es/docs/concepts/workloads/controllers/deployment.md b/content/es/docs/concepts/workloads/controllers/deployment.md new file mode 100644 index 0000000000000..89563b3b72882 --- /dev/null +++ b/content/es/docs/concepts/workloads/controllers/deployment.md @@ -0,0 +1,1110 @@ +--- +title: Deployment +feature: + title: Despliegues y _rollback_ automáticos + description: > + Kubernetes despliega los cambios a tu aplicación o su configuración de forma progresiva mientras monitoriza la salud de la aplicación para asegurarse que no elimina todas tus instancias al mismo tiempo. Si algo sale mal, Kubernetes revertirá el cambio por ti. Aprovéchate del creciente ecosistema de soluciones de despliegue. + +content_type: concept +weight: 30 +--- + + + +Un controlador de _Deployment_ proporciona actualizaciones declarativas para los [Pods](/docs/concepts/workloads/pods/pod/) y los +[ReplicaSets](/docs/concepts/workloads/controllers/replicaset/). + +Cuando describes el _estado deseado_ en un objeto Deployment, el controlador del Deployment se encarga de cambiar el estado actual al estado deseado de forma controlada. +Puedes definir Deployments para crear nuevos ReplicaSets, o eliminar Deployments existentes y adoptar todos sus recursos con nuevos Deployments. 
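+Como referencia rápida, a continuación se muestra un esbozo ilustrativo de un manifiesto de Deployment; reproduce los campos que se describen más adelante en esta página para el ejemplo `nginx-deployment` (el manifiesto de referencia del ejemplo es el fichero `controllers/nginx-deployment.yaml` que se utiliza más abajo):
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nginx-deployment
+  labels:
+    app: nginx
+spec:
+  replicas: 3                  # número deseado de réplicas (estado deseado)
+  selector:
+    matchLabels:
+      app: nginx               # cómo el Deployment identifica sus Pods
+  template:
+    metadata:
+      labels:
+        app: nginx             # etiqueta de la plantilla Pod
+    spec:
+      containers:
+      - name: nginx
+        image: nginx:1.7.9     # versión de la imagen usada en el ejemplo
+        ports:
+        - containerPort: 80
+```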
+ +{{< note >}} +No deberías gestionar directamente los ReplicaSets que pertenecen a un Deployment. +Todos los casos de uso deberían cubrirse manipulando el objeto Deployment. +Considera la posibilidad de abrir un incidente en el repositorio principal de Kubernetes si tu caso de uso no está soportado por el motivo que sea. +{{< /note >}} + + + + + +## Casos de uso + +A continuación se presentan los casos de uso típicos de los Deployments: + +* [Crear un Deployment para desplegar un ReplicaSet](#creating-a-deployment). El ReplicaSet crea los Pods en segundo plano. Comprueba el estado del despliegue para ver si es satisfactorio o no. +* [Declarar el nuevo estado de los Pods](#updating-a-deployment) actualizando el PodTemplateSpec del Deployment. Ello crea un nuevo ReplicaSet y el Deployment gestiona el cambio de los Pods del viejo ReplicaSet al nuevo de forma controlada. Cada nuevo ReplicaSet actualiza la revisión del Deployment. +* [Retroceder a una revisión anterior del Deployment](#rolling-back-a-deployment) si el estado actual de un Deployment no es estable. Cada retroceso actualiza la revisión del Deployment. +* [Escalar horizontalmente el Deployment para soportar más carga](#scaling-a-deployment). +* [Pausar el Deployment](#pausing-and-resuming-a-deployment) para aplicar múltiples arreglos a su PodTemplateSpec y, a continuación, reanúdalo para que comience un nuevo despliegue. +* [Usar el estado del Deployment](#deployment-status) como un indicador de que el despliegue se ha atascado. +* [Limpiar los viejos ReplicaSets](#clean-up-policy) que no necesites más. + +## Crear un Deployment + +El siguiente ejemplo de un Deployment crea un ReplicaSet para arrancar tres Pods con `nginx`: + +{{< codenew file="controllers/nginx-deployment.yaml" >}} + +En este ejemplo: + +* Se crea un Deployment denominado `nginx-deployment`, indicado a través del campo `.metadata.name`. +* El Deployment crea tres Pods replicados, indicado a través del campo `replicas`. +* El campo `selector` define cómo el Deployment identifica los Pods que debe gestionar. + En este caso, simplemente seleccionas una etiqueta que se define en la plantilla Pod (`app: nginx`). + Sin embargo, es posible definir reglas de selección más sofisticadas, + siempre que la plantilla Pod misma satisfaga la regla. + + {{< note >}} + `matchLabels` es un mapa de entradas {clave,valor}. Una entrada simple {clave,valor} en el mapa `matchLabels` + es equivalente a un elemento de `matchExpressions` cuyo campo sea la "clave", el operador sea "In", + y la matriz de valores contenga únicamente un "valor". Todos los requisitos se concatenan con AND. + {{< /note >}} + +* El campo `template` contiene los siguientes sub-campos: + * Los Pods se etiquetan como `app: nginx` usando el campo `labels`. + * La especificación de la plantilla Pod, o el campo `.template.spec`, indica + que los Pods ejecutan un contenedor, `nginx`, que utiliza la versión 1.7.9 de la imagen de `nginx` de + [Docker Hub](https://hub.docker.com/). + * Crea un contenedor y lo llama `nginx` usando el campo `name`. + * Ejecuta la imagen `nginx` en su versión `1.7.9`. + * Abre el puerto `80` para que el contenedor pueda enviar y recibir tráfico. + +Para crear este Deployment, ejecuta el siguiente comando: + +```shell +kubectl apply -f https://k8s.io/examples/controllers/nginx-deployment.yaml +``` + +{{< note >}} +Debes indicar el parámetro `--record` para registrar el comando ejecutado en la anotación de recurso `kubernetes.io/change-cause`.
+Esto es útil para futuras introspecciones, por ejemplo para comprobar qué comando se ha ejecutado en cada revisión del Deployment. +{{< /note >}} + +A continuación, ejecuta el comando `kubectl get deployments`. La salida debe ser parecida a la siguiente: + +```shell +NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE +nginx-deployment 3 0 0 0 1s +``` + +Cuando inspeccionas los Deployments de tu clúster, se muestran los siguientes campos: + +* `NAME` enumera los nombre de los Deployments del clúster. +* `DESIRED` muestra el número deseado de _réplicas_ de la aplicación, que se define + cuando se crea el Deployment. Esto se conoce como el _estado deseado_. +* `CURRENT` muestra cuántas réplicas se están ejecutando actualment. +* `UP-TO-DATE` muestra el número de réplicas que se ha actualizado para alcanzar el estado deseado. +* `AVAILABLE` muestra cuántas réplicas de la aplicación están disponibles para los usuarios. +* `AGE` muestra la cantidad de tiempo que la aplicación lleva ejecutándose. + +Nótese cómo los valores de cada campo corresponden a los valores de la especificación del Deployment: + +* El número de réplicas deseadas es 3 de acuerdo con el campo `.spec.replicas`. +* El número de réplicas actuales es 0 de acuerdo con el campo `.status.replicas`. +* El número de réplicas actualizadas es 0 de acuerdo con el campo `.status.updatedReplicas`. +* El número de réplicas disponibles es 0 de acuerdo con el campo `.status.availableReplicas`. + +Para ver el estado del Deployment, ejecuta el comando `kubectl rollout status deployment.v1.apps/nginx-deployment`. Este comando devuelve el siguiente resultado: + +```shell +Waiting for rollout to finish: 2 out of 3 new replicas have been updated... +deployment.apps/nginx-deployment successfully rolled out +``` + +Ejecuta de nuevo el comando `kubectl get deployments` unos segundos más tarde: + +```shell +NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE +nginx-deployment 3 3 3 3 18s +``` + +Fíjate que el Deployment ha creado todas las tres réplicas, y que todas las réplicas están actualizadas (contienen +la última plantilla Pod) y están disponibles (el estado del Pod tiene el valor Ready al menos para el campo `.spec.minReadySeconds` del Deployment). + +Para ver el ReplicaSet (`rs`) creado por el Deployment, ejecuta el comando `kubectl get rs`: + +```shell +NAME DESIRED CURRENT READY AGE +nginx-deployment-75675f5897 3 3 3 18s +``` + +Fíjate que el nombre del ReplicaSet siempre se formatea con el patrón `[DEPLOYMENT-NAME]-[RANDOM-STRING]`. La cadena aleatoria se +genera de forma aleatoria y usa el pod-template-hash como semilla. + +Para ver las etiquetas generadas automáticamente en cada pod, ejecuta el comando `kubectl get pods --show-labels`. Se devuelve la siguiente salida: + +```shell +NAME READY STATUS RESTARTS AGE LABELS +nginx-deployment-75675f5897-7ci7o 1/1 Running 0 18s app=nginx,pod-template-hash=3123191453 +nginx-deployment-75675f5897-kzszj 1/1 Running 0 18s app=nginx,pod-template-hash=3123191453 +nginx-deployment-75675f5897-qqcnn 1/1 Running 0 18s app=nginx,pod-template-hash=3123191453 +``` + +El ReplicaSet creado garantiza que hay tres Pods de `nginx` ejecutándose en todo momento. + +{{< note >}} +En un Deployment, debes especificar un selector apropiado y etiquetas de plantilla Pod (en este caso, +`app: nginx`). No entremezcles etiquetas o selectores con otros controladores (incluyendo otros Deployments y StatefulSets). 
+Kubernetes no te impide que lo hagas, pero en el caso de que múltiples controladores tengan selectores mezclados, dichos controladores pueden entrar en conflicto y provocar resultados inesperados. +{{< /note >}} + +### Etiqueta pod-template-hash + +{{< note >}} +No cambies esta etiqueta. +{{< /note >}} + +La etiqueta `pod-template-hash` es añadida por el controlador del Deployment a cada ReplicaSet que el Deployment crea o adopta. + +Esta etiqueta garantiza que todos los hijos ReplicaSets de un Deployment no se entremezclan. Se genera mediante una función hash aplicada al `PodTemplate` del ReplicaSet +y usando el resultado de la función hash como el valor de la etiqueta que se añade al selector del ReplicaSet, en las etiquetas de la plantilla Pod, +y en cualquier Pod existente que el ReplicaSet tenga. + +## Actualizar un Deployment + +{{< note >}} +El lanzamiento de un Deployment se activa si y sólo si la plantilla Pod del Deployment (esto es, `.spec.template`) +se cambia, por ejemplo si se actualiza las etiquetas o las imágenes de contenedor de la plantilla. +Otras actualizaciones, como el escalado del Deployment, no conllevan un lanzamiento de despliegue. +{{< /note >}} + +Asumiendo que ahora quieres actualizar los Pods nginx para que usen la imagen `nginx:1.9.1` +en vez de la imagen `nginx:1.7.9`. + +```shell +kubectl --record deployment.apps/nginx-deployment set image deployment.v1.apps/nginx-deployment nginx=nginx:1.9.1 +``` +``` +image updated +``` + +De forma alternativa, puedes `editar` el Deployment y cambiar el valor del campo `.spec.template.spec.containers[0].image` de `nginx:1.7.9` a `nginx:1.9.1`: + +```shell +kubectl edit deployment.v1.apps/nginx-deployment +``` +``` +deployment.apps/nginx-deployment edited +``` + +Para ver el estado del despliegue, ejecuta: + +```shell +kubectl rollout status deployment.v1.apps/nginx-deployment +``` +``` +Waiting for rollout to finish: 2 out of 3 new replicas have been updated... +deployment.apps/nginx-deployment successfully rolled out +``` + +Cuando el despliegue funciona, puede que quieras `obtener` el Deployment: + +```shell +kubectl get deployments +``` +``` +NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE +nginx-deployment 3 3 3 3 36s +``` + +El número de réplicas actualizadas indica que el Deployment ha actualizado las réplicas según la última configuración. +Las réplicas actuales indican el total de réplicas que gestiona este Deployment, y las réplicas disponibles indican +el número de réplicas actuales que están disponibles. + +Puedes ejecutar el comando `kubectl get rs` para ver que el Deployment actualizó los Pods creando un nuevo ReplicaSet y escalándolo +hasta las 3 réplicas, así como escalando el viejo ReplicaSet a 0 réplicas. + +```shell +kubectl get rs +``` +``` +NAME DESIRED CURRENT READY AGE +nginx-deployment-1564180365 3 3 3 6s +nginx-deployment-2035384211 0 0 0 36s +``` + +Si ejecutas el comando `get pods` deberías ver los nuevos Pods: + +```shell +kubectl get pods +``` +``` +NAME READY STATUS RESTARTS AGE +nginx-deployment-1564180365-khku8 1/1 Running 0 14s +nginx-deployment-1564180365-nacti 1/1 Running 0 14s +nginx-deployment-1564180365-z9gth 1/1 Running 0 14s +``` + +La próxima vez que quieras actualizar estos Pods, sólo necesitas actualizar la plantilla Pod del Deployment otra vez. + +El Deployment permite garantizar que sólo un número determinado de Pods puede eliminarse mientras se están actualizando. +Por defecto, garantiza que al menos el 25% menos del número deseado de Pods se está ejecutando (máx. 
25% no disponible). + +El Deployment tmabién permite garantizar que sólo un número determinado de Pods puede crearse por encima del número deseado de +Pods. Por defecto, garantiza que al menos el 25% más del número deseado de Pods se está ejecutando (máx. 25% de aumento). + +Por ejemplo, si miras detenidamente el Deployment de arriba, verás que primero creó un Pod, +luego eliminó algunos viejos Pods y creó otros nuevos. No elimina los viejos Pods hasta que un número suficiente de +nuevos Pods han arrancado, y no crea nuevos Pods hasta que un número suficiente de viejos Pods se han eliminado. +De esta forma, asegura que el número de Pods disponibles siempre es al menos 2, y el número de Pods totales es cómo máximo 4. + +```shell +kubectl describe deployments +``` +``` +Name: nginx-deployment +Namespace: default +CreationTimestamp: Thu, 30 Nov 2017 10:56:25 +0000 +Labels: app=nginx +Annotations: deployment.kubernetes.io/revision=2 +Selector: app=nginx +Replicas: 3 desired | 3 updated | 3 total | 3 available | 0 unavailable +StrategyType: RollingUpdate +MinReadySeconds: 0 +RollingUpdateStrategy: 25% max unavailable, 25% max surge +Pod Template: + Labels: app=nginx + Containers: + nginx: + Image: nginx:1.9.1 + Port: 80/TCP + Environment: + Mounts: + Volumes: +Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable +OldReplicaSets: +NewReplicaSet: nginx-deployment-1564180365 (3/3 replicas created) +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ScalingReplicaSet 2m deployment-controller Scaled up replica set nginx-deployment-2035384211 to 3 + Normal ScalingReplicaSet 24s deployment-controller Scaled up replica set nginx-deployment-1564180365 to 1 + Normal ScalingReplicaSet 22s deployment-controller Scaled down replica set nginx-deployment-2035384211 to 2 + Normal ScalingReplicaSet 22s deployment-controller Scaled up replica set nginx-deployment-1564180365 to 2 + Normal ScalingReplicaSet 19s deployment-controller Scaled down replica set nginx-deployment-2035384211 to 1 + Normal ScalingReplicaSet 19s deployment-controller Scaled up replica set nginx-deployment-1564180365 to 3 + Normal ScalingReplicaSet 14s deployment-controller Scaled down replica set nginx-deployment-2035384211 to 0 +``` + +Aquí puedes ver que cuando creaste por primera vez el Deployment, este creó un ReplicaSet (nginx-deployment-2035384211) +y lo escaló a 3 réplicas directamente. Cuando actualizaste el Deployment, creó un nuevo ReplicaSet +(nginx-deployment-1564180365) y lo escaló a 1 y entonces escaló el viejo ReplicaSet a 2, de forma que al menos +hubiera 2 Pods disponibles y como mucho 4 Pods en total en todo momento. Entonces, continuó escalando +el nuevo y el viejo ReplicaSet con la misma estrategia de actualización continua. Finalmente, el nuevo ReplicaSet acaba con 3 réplicas +disponibles, y el viejo ReplicaSet se escala a 0. + +### Sobrescritura (o sea, múltiples actualizaciones a la vez) + +Cada vez que el controlador del Deployment observa un nuevo objeto de despliegue, se crea un ReplicaSet para arrancar +los Pods deseados si es que no existe otro ReplicaSet haciéndolo. Los ReplicaSet existentes que controlan los Pods cuyas etiquetas +coinciden con el valor del campo `.spec.selector`, pero cuya plantilla no coincide con el valor del campo `.spec.template` se reducen. Al final, +el nuevo ReplicaSet se escala hasta el valor del campo `.spec.replicas` y todos los viejos ReplicaSets se escalan a 0. 
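+
+A modo de boceto (la segunda etiqueta de imagen, `nginx:1.9.5`, es hipotética), una sobrescritura se puede provocar lanzando una segunda actualización antes de que la primera termine:
+
+```shell
+# Primera actualización; el despliegue queda en curso
+kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.9.1
+# Segunda actualización antes de que termine la primera
+kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.9.5
+# El ReplicaSet intermedio pasa a la lista de viejos ReplicaSets y empieza a reducirse
+kubectl get rs
+```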
+
+Si actualizas un Deployment mientras otro despliegue está en curso, el Deployment creará un nuevo ReplicaSet
+como consecuencia de la actualización y comenzará a escalarlo, y sobrescribirá al ReplicaSet que estaba escalando anteriormente
+ -- lo añadirá a su lista de viejos ReplicaSets y comenzará a reducirlos.
+
+Por ejemplo, supongamos que creamos un Deployment para crear 5 réplicas de `nginx:1.7.9`,
+pero entonces actualizamos el Deployment para crear 5 réplicas de `nginx:1.9.1` cuando sólo se han creado 3
+réplicas de `nginx:1.7.9`. En este caso, el Deployment comenzará automáticamente a matar los 3 Pods de `nginx:1.7.9`
+que había creado, y empezará a crear los Pods de `nginx:1.9.1`. Es decir, no esperará a que se creen las 5 réplicas de `nginx:1.7.9`
+antes de aplicar la nueva configuración.
+
+### Actualizaciones del selector de etiquetas
+
+No se recomienda hacer cambios al selector de etiquetas y, por ello, se aconseja encarecidamente planificar el valor de dichos selectores por adelantado.
+En cualquier caso, si necesitas cambiar un selector de etiquetas, hazlo con mucho cuidado y asegúrate de que entiendes todas sus implicaciones.
+
+{{< note >}}
+En la versión `apps/v1` de la API, el selector de etiquetas del Deployment es inmutable una vez se ha creado.
+{{< /note >}}
+
+* Las adiciones posteriores al selector obligan también a actualizar las etiquetas de la plantilla Pod en la especificación del Deployment con los nuevos valores,
+ya que de lo contrario se devolvería un error. Este cambio no se superpone con el selector anterior, es decir, que el nuevo selector
+no selecciona los ReplicaSets y Pods creados con el viejo selector, lo que provoca que todos los viejos ReplicaSets se marquen como huérfanos y
+la creación de un nuevo ReplicaSet.
+* Las actualizaciones de selector -- esto es, cambiar el valor actual en una clave de selector -- provocan el mismo comportamiento que las adiciones.
+* Las eliminaciones de selector -- esto es, eliminar una clave actual del selector del Deployment -- no necesitan cambios en las etiquetas de la plantilla Pod.
+No se marca ningún ReplicaSet existente como huérfano, y no se crea ningún ReplicaSet nuevo, pero debe tenerse en cuenta que
+la etiqueta eliminada todavía existe en los Pods y ReplicaSets que se están ejecutando.
+
+## Revertir un Deployment
+
+En ocasiones necesitas revertir un Deployment; por ejemplo, cuando el Deployment no es estable, como cuando no para de reiniciarse.
+Por defecto, toda la historia de despliegue del Deployment se mantiene en el sistema de forma que puedes revertir en cualquier momento
+(se puede modificar este comportamiento cambiando el límite de la historia de revisiones de modificaciones).
+
+{{< note >}}
+Cuando se lanza el despliegue de un Deployment, se crea una nueva revisión. Esto quiere decir que
+la nueva revisión se crea si y sólo si la plantilla Pod del Deployment (`.spec.template`) se cambia;
+por ejemplo, si cambias las etiquetas o la imagen del contenedor de la plantilla.
+Otras actualizaciones, como escalar el Deployment,
+no generan una nueva revisión del Deployment, para poder facilitar el escalado manual simultáneo - o auto-escalado.
+Esto significa que cuando reviertes a una versión anterior, sólo la parte de la plantilla Pod del Deployment se revierte.
+{{< /note >}} + +Vamos a suponer que hemos cometido un error al actualizar el Deployment, poniendo como nombre de imagen `nginx:1.91` en vez de `nginx:1.9.1`: + +```shell +kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.91 --record=true +``` +``` +deployment.apps/nginx-deployment image updated +``` + +El despliegue se atasca y no progresa. + +```shell +kubectl rollout status deployment.v1.apps/nginx-deployment +``` +``` +Waiting for rollout to finish: 1 out of 3 new replicas have been updated... +``` + +Presiona Ctrl-C para detener la monitorización del despliegue de arriba. Para obtener más información sobre despliegues atascados, +[lee más aquí](#deployment-status). + +Verás que el número de réplicas viejas (nginx-deployment-1564180365 y nginx-deployment-2035384211) es 2, y el número de nuevas réplicas (nginx-deployment-3066724191) es 1. + +```shell +kubectl get rs +``` +``` +NAME DESIRED CURRENT READY AGE +nginx-deployment-1564180365 3 3 3 25s +nginx-deployment-2035384211 0 0 0 36s +nginx-deployment-3066724191 1 1 0 6s +``` + +Echando un vistazo a los Pods creados, verás que uno de los Pods creados por el nuevo ReplicaSet está atascado en un bucle intentando bajar la imagen: + +```shell +kubectl get pods +``` +``` +NAME READY STATUS RESTARTS AGE +nginx-deployment-1564180365-70iae 1/1 Running 0 25s +nginx-deployment-1564180365-jbqqo 1/1 Running 0 25s +nginx-deployment-1564180365-hysrc 1/1 Running 0 25s +nginx-deployment-3066724191-08mng 0/1 ImagePullBackOff 0 6s +``` + +{{< note >}} +El controlador del Deployment parará el despliegue erróneo de forma automática, y detendrá el escalado del nuevo +ReplicaSet. Esto depende de los parámetros del rollingUpdate (`maxUnavailable` específicamente) que hayas configurado. +Kubernetes por defecto establece el valor en el 25%. 
+{{< /note >}} + +```shell +kubectl describe deployment +``` +``` +Name: nginx-deployment +Namespace: default +CreationTimestamp: Tue, 15 Mar 2016 14:48:04 -0700 +Labels: app=nginx +Selector: app=nginx +Replicas: 3 desired | 1 updated | 4 total | 3 available | 1 unavailable +StrategyType: RollingUpdate +MinReadySeconds: 0 +RollingUpdateStrategy: 25% max unavailable, 25% max surge +Pod Template: + Labels: app=nginx + Containers: + nginx: + Image: nginx:1.91 + Port: 80/TCP + Host Port: 0/TCP + Environment: + Mounts: + Volumes: +Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True ReplicaSetUpdated +OldReplicaSets: nginx-deployment-1564180365 (3/3 replicas created) +NewReplicaSet: nginx-deployment-3066724191 (1/1 replicas created) +Events: + FirstSeen LastSeen Count From SubobjectPath Type Reason Message + --------- -------- ----- ---- ------------- -------- ------ ------- + 1m 1m 1 {deployment-controller } Normal ScalingReplicaSet Scaled up replica set nginx-deployment-2035384211 to 3 + 22s 22s 1 {deployment-controller } Normal ScalingReplicaSet Scaled up replica set nginx-deployment-1564180365 to 1 + 22s 22s 1 {deployment-controller } Normal ScalingReplicaSet Scaled down replica set nginx-deployment-2035384211 to 2 + 22s 22s 1 {deployment-controller } Normal ScalingReplicaSet Scaled up replica set nginx-deployment-1564180365 to 2 + 21s 21s 1 {deployment-controller } Normal ScalingReplicaSet Scaled down replica set nginx-deployment-2035384211 to 1 + 21s 21s 1 {deployment-controller } Normal ScalingReplicaSet Scaled up replica set nginx-deployment-1564180365 to 3 + 13s 13s 1 {deployment-controller } Normal ScalingReplicaSet Scaled down replica set nginx-deployment-2035384211 to 0 + 13s 13s 1 {deployment-controller } Normal ScalingReplicaSet Scaled up replica set nginx-deployment-3066724191 to 1 +``` + +Para arreglar este problema, necesitas volver a una revisión previa del Deployment que sea estable. + +### Comprobar la Historia de Despliegues de un Deployment + +Primero, comprobemos las revisiones de este despliegue: + +```shell +kubectl rollout history deployment.v1.apps/nginx-deployment +``` +``` +deployments "nginx-deployment" +REVISION CHANGE-CAUSE +1 kubectl apply --filename=https://k8s.io/examples/controllers/nginx-deployment.yaml --record=true +2 kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.9.1 --record=true +3 kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.91 --record=true +``` +En el momento de la creación, el mensaje en `CHANGE-CAUSE` se copia de la anotación `kubernetes.io/change-cause` del Deployment a sus revisiones. Podrías indicar el mensaje `CHANGE-CAUSE`: + +* Anotando el Deployment con el comando `kubectl annotate deployment.v1.apps/nginx-deployment kubernetes.io/change-cause="image updated to 1.9.1"` +* Añadiendo el parámetro `--record` para registrar el comando `kubectl` que está haciendo cambios en el recurso. +* Manualmente editando el manifiesto del recursos. 
+ +Para ver más detalles de cada revisión, ejecuta: + +```shell +kubectl rollout history deployment.v1.apps/nginx-deployment --revision=2 +``` +``` +deployments "nginx-deployment" revision 2 + Labels: app=nginx + pod-template-hash=1159050644 + Annotations: kubernetes.io/change-cause=kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.9.1 --record=true + Containers: + nginx: + Image: nginx:1.9.1 + Port: 80/TCP + QoS Tier: + cpu: BestEffort + memory: BestEffort + Environment Variables: + No volumes. +``` + +### Retroceder a una Revisión Previa + +Ahora has decidido que quieres deshacer el despliegue actual y retrocederlo a la revisión previa: + +```shell +kubectl rollout undo deployment.v1.apps/nginx-deployment +``` +``` +deployment.apps/nginx-deployment +``` + +Alternativamente, puedes retroceder a una revisión específica con el parámetro `--to-revision`: + +```shell +kubectl rollout undo deployment.v1.apps/nginx-deployment --to-revision=2 +``` +``` +deployment.apps/nginx-deployment +``` + +Para más detalles acerca de los comandos relacionados con las revisiones de un Deployment, echa un vistazo a [`kubectl rollout`](/docs/reference/generated/kubectl/kubectl-commands#rollout). + +El Deployment se ha revertido ahora a una revisión previa estable. Como se puede comprobar, el controlador del Deployment genera un evento `DeploymentRollback` +al retroceder a la revisión 2. + +```shell +kubectl get deployment nginx-deployment +``` +``` +NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE +nginx-deployment 3 3 3 3 30m +``` + +```shell +kubectl describe deployment nginx-deployment +``` +``` +Name: nginx-deployment +Namespace: default +CreationTimestamp: Sun, 02 Sep 2018 18:17:55 -0500 +Labels: app=nginx +Annotations: deployment.kubernetes.io/revision=4 + kubernetes.io/change-cause=kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.9.1 --record=true +Selector: app=nginx +Replicas: 3 desired | 3 updated | 3 total | 3 available | 0 unavailable +StrategyType: RollingUpdate +MinReadySeconds: 0 +RollingUpdateStrategy: 25% max unavailable, 25% max surge +Pod Template: + Labels: app=nginx + Containers: + nginx: + Image: nginx:1.9.1 + Port: 80/TCP + Host Port: 0/TCP + Environment: + Mounts: + Volumes: +Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable +OldReplicaSets: +NewReplicaSet: nginx-deployment-c4747d96c (3/3 replicas created) +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ScalingReplicaSet 12m deployment-controller Scaled up replica set nginx-deployment-75675f5897 to 3 + Normal ScalingReplicaSet 11m deployment-controller Scaled up replica set nginx-deployment-c4747d96c to 1 + Normal ScalingReplicaSet 11m deployment-controller Scaled down replica set nginx-deployment-75675f5897 to 2 + Normal ScalingReplicaSet 11m deployment-controller Scaled up replica set nginx-deployment-c4747d96c to 2 + Normal ScalingReplicaSet 11m deployment-controller Scaled down replica set nginx-deployment-75675f5897 to 1 + Normal ScalingReplicaSet 11m deployment-controller Scaled up replica set nginx-deployment-c4747d96c to 3 + Normal ScalingReplicaSet 11m deployment-controller Scaled down replica set nginx-deployment-75675f5897 to 0 + Normal ScalingReplicaSet 11m deployment-controller Scaled up replica set nginx-deployment-595696685f to 1 + Normal DeploymentRollback 15s deployment-controller Rolled back deployment "nginx-deployment" to revision 2 + Normal ScalingReplicaSet 15s 
deployment-controller Scaled down replica set nginx-deployment-595696685f to 0 +``` + +## Escalar un Deployment + +Puedes escalar un Deployment usando el siguiente comando: + +```shell +kubectl scale deployment.v1.apps/nginx-deployment --replicas=10 +``` +``` +deployment.apps/nginx-deployment scaled +``` + +Asumiendo que se ha habilitado el [escalado horizontal de pod](/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/) +en tu clúster, puedes configurar un auto-escalado para tu Deployment y elegir el mínimo y máximo número de Pods +que quieres ejecutar en base al uso de CPU de tus Pods actuales. + +```shell +kubectl autoscale deployment.v1.apps/nginx-deployment --min=10 --max=15 --cpu-percent=80 +``` +``` +deployment.apps/nginx-deployment scaled +``` + +### Escalado proporcional + +La actualización continua de los Deployments permite la ejecución de múltiples versiones de una aplicación al mismo tiempo. +Cuando tú o un auto-escalado escala un Deployment con actualización continua que está en medio de otro despliegue (bien en curso o pausado), +entonces el controlador del Deployment balanceará las réplicas adicionales de los ReplicaSets activos (ReplicaSets con Pods) +para así poder mitigar el riesgo. Esto se conoce como *escalado proporcional*. + +Por ejemplo, imagina que estás ejecutando un Deployment con 10 réplicas, donde [maxSurge](#max-surge)=3, y [maxUnavailable](#max-unavailable)=2. + +```shell +kubectl get deploy +``` +``` +NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE +nginx-deployment 10 10 10 10 50s +``` + +Si actualizas a una nueva imagen que no puede descargarse desde el clúster: + +```shell +kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:sometag +``` +``` +deployment.apps/nginx-deployment image updated +``` + +La actualización de la imagen arranca un nuevo despliegue con el ReplicaSet nginx-deployment-1989198191, +pero se bloquea debido al requisito `maxUnavailable` indicado arriba: + +```shell +kubectl get rs +``` +``` +NAME DESIRED CURRENT READY AGE +nginx-deployment-1989198191 5 5 0 9s +nginx-deployment-618515232 8 8 8 1m +``` + +Y entonces se origina una nueva petición de escalado para el Deployment. El auto-escalado incrementa las réplicas del Deployment +a 15. El controlador del Deployment necesita ahora decidir dónde añadir esas nuevas 5 réplicas. +Si no estuvieras usando el escalado proporcional, las 5 se añadirían al nuevo ReplicaSet. Pero con el escalado proporcional, +las réplicas adicionales se distribuyen entre todos los ReplicaSets. Las partes más grandes van a los ReplicaSets +con el mayor número de réplicas y las partes más pequeñas van a los ReplicaSets con menos réplicas. Cualquier resto sobrante se añade +al ReplicaSet con mayor número de réplicas. Aquellos ReplicaSets con 0 réplicas no se escalan. + +En nuestro ejemplo anterior, se añadirán 3 réplicas al viejo ReplicaSet y 2 réplicas al nuevo ReplicaSet. +EL proceso de despliegue debería al final mover todas las réplicas al nuevo ReplicaSet, siempre que las nuevas +réplicas arranquen positivamente. + +```shell +kubectl get deploy +``` +``` +NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE +nginx-deployment 15 18 7 8 7m +``` + +```shell +kubectl get rs +``` +``` +NAME DESIRED CURRENT READY AGE +nginx-deployment-1989198191 7 7 0 7m +nginx-deployment-618515232 11 11 11 7m +``` + +## Pausar y Reanudar un Deployment + +Puedes pausar un Deployment antes de arrancar una o más modificaciones y luego reanudarlo. 
Esto te permite aplicar múltiples arreglos +entre la pausa y la reanudación sin necesidad de arrancar despliegues innecesarios. + +Por ejemplo, con un Deployment que acaba de crearse: + +```shell +kubectl get deploy +``` +``` +NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE +nginx 3 3 3 3 1m +``` +```shell +kubectl get rs +``` +``` +NAME DESIRED CURRENT READY AGE +nginx-2142116321 3 3 3 1m +``` + +Lo pausamos ejecutando el siguiente comando: + +```shell +kubectl rollout pause deployment.v1.apps/nginx-deployment +``` +``` +deployment.apps/nginx-deployment paused +``` + +Y luego actualizamos la imagen del Deployment: + +```shell +kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.9.1 +``` +``` +deployment.apps/nginx-deployment image updated +``` + +Nótese que no se arranca ningún despliegue nuevo: + +```shell +kubectl rollout history deployment.v1.apps/nginx-deployment +``` +``` +deployments "nginx" +REVISION CHANGE-CAUSE +1 +``` + +```shell +kubectl get rs +``` +``` +NAME DESIRED CURRENT READY AGE +nginx-2142116321 3 3 3 2m +``` + +Puedes realizar tantas modificaciones como quieras, por ejemplo, para actualizar los recursos a utilizar: + +```shell +kubectl set resources deployment.v1.apps/nginx-deployment -c=nginx --limits=cpu=200m,memory=512Mi +``` +``` +deployment.apps/nginx-deployment resource requirements updated +``` + +El estado inicial del Deployment anterior a la pausa continuará su función, pero las nuevas modificaciones +del Deployment no tendrán efecto ya que el Deployment está pausado. + +Al final, reanuda el Deployment y observa cómo se genera un nuevo ReplicaSet con todos los cambios: + +```shell +kubectl rollout resume deployment.v1.apps/nginx-deployment +``` + +``` +deployment.apps/nginx-deployment resumed +``` + +```shell +kubectl get rs -w +``` + +``` +NAME DESIRED CURRENT READY AGE +nginx-2142116321 2 2 2 2m +nginx-3926361531 2 2 0 6s +nginx-3926361531 2 2 1 18s +nginx-2142116321 1 2 2 2m +nginx-2142116321 1 2 2 2m +nginx-3926361531 3 2 1 18s +nginx-3926361531 3 2 1 18s +nginx-2142116321 1 1 1 2m +nginx-3926361531 3 3 1 18s +nginx-3926361531 3 3 2 19s +nginx-2142116321 0 1 1 2m +nginx-2142116321 0 1 1 2m +nginx-2142116321 0 0 0 2m +nginx-3926361531 3 3 3 20s + +``` +```shell +kubectl get rs +``` +``` +NAME DESIRED CURRENT READY AGE +nginx-2142116321 0 0 0 2m +nginx-3926361531 3 3 3 28s +``` + +{{< note >}} +No se puede revertir un Deployment pausado hasta que se vuelve a reanudar. +{{< /note >}} + +## Estado del Deployment + +Un Deployment pasa por varios estados a lo largo de su ciclo de vida. Así, puede estar [progresando](#progressing-deployment) mientras +se despliega un nuevo ReplicaSet, puede estar [completo](#complete-deployment), o puede quedar en estado [fallido](#failed-deployment). + +### Progresar un Deployment + +Kubernetes marca un Deployment como _progresando_ cuando se realiza cualquiera de las siguientes tareas: + +* El Deployment crea un nuevo ReplicaSet. +* El Deployment está escalando su ReplicaSet más nuevo. +* El Deployment está reduciendo su(s) ReplicaSet(s) más antiguo(s). +* Hay nuevos Pods disponibles y listos (listo por lo menos [MinReadySeconds](#min-ready-seconds)). + +Puedes monitorizar el progreso de un Deployment usando el comando `kubectl rollout status`. 
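+
+Por ejemplo, además de `kubectl rollout status`, puedes consultar directamente la condición `Progressing` en el estado del objeto (usando el Deployment `nginx-deployment` de esta página):
+
+```shell
+kubectl rollout status deployment.v1.apps/nginx-deployment
+# Leer la razón de la condición Progressing del estado del Deployment
+kubectl get deployment nginx-deployment -o jsonpath='{.status.conditions[?(@.type=="Progressing")].reason}'
+```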
+ +### Completar un Deployment + +Kubernetes marca un Deployment como _completado_ cuando presenta las siguientes características: + +* Todas las réplicas asociadas con el Deployment han sido actualizadas a la última versión indicada, lo cual quiere decir +que todas las actualizaciones se han completado. +* Todas las réplicas asociadas con el Deployment están disponibles. +* No están ejecutándose viejas réplicas del Deployment. + +Puedes comprobar si un Deployment se ha completado usando el comando `kubectl rollout status`. Si el despliegue se ha completado +de forma satisfactoria, el comando `kubectl rollout status` devuelve un código 0 de salida. + +```shell +kubectl rollout status deployment.v1.apps/nginx-deployment +``` +``` +Waiting for rollout to finish: 2 of 3 updated replicas are available... +deployment.apps/nginx-deployment successfully rolled out +$ echo $? +0 +``` + +### Deployment fallido + +Tu Deployment puede quedarse bloqueado intentando desplegar su nuevo ReplicaSet sin nunca completarse. Esto puede ocurrir +debido a algunos de los factores siguientes: + +* Cuota insuficiente +* Fallos en la prueba de estar listo +* Errores en la descarga de imágenes +* Permisos insuficientes +* Rangos de límites de recursos +* Mala configuración del motor de ejecución de la aplicación + +Una forma de detectar este tipo de situación es especificar un parámetro de vencimiento en la especificación de tu Deployment: +([`.spec.progressDeadlineSeconds`](#progress-deadline-seconds)). `.spec.progressDeadlineSeconds` denota el número +de segundos que el controlador del Deployment debe esperar antes de indicar (en el estado del Deployment) que el +Deployment no avanza. + +El siguiente comando `kubectl` configura el campo `progressDeadlineSeconds` para forzar al controlador a +informar de la falta de avance de un Deployment después de 10 minutos: + +```shell +kubectl patch deployment.v1.apps/nginx-deployment -p '{"spec":{"progressDeadlineSeconds":600}}' +``` +``` +deployment.apps/nginx-deployment patched +``` +Una vez que se ha excedido el vencimiento, el controlador del Deployment añade una DeploymentCondition +con los siguientes atributos al campo `.status.conditions` del Deployment: + +* Type=Progressing +* Status=False +* Reason=ProgressDeadlineExceeded + +Ver las [convenciones de la API de Kubernetes](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties) para más información acerca de las condiciones de estado. + +{{< note >}} +Kubernetes no emprenderá ninguna acción ante un Deployment parado que no sea la de reportar el estado mediante +`Reason=ProgressDeadlineExceeded`. Los orquestradores de alto nivel pueden aprovecharse y actuar consecuentemente, por ejemplo, +retrocediendo el Deployment a su versión previa. +{{< /note >}} + +{{< note >}} +Si pausas un Deployment, Kubernetes no comprueba el avance en base al vencimiento indicado. Así, es posible pausar +de forma segura un Deployment en medio de un despliegue y reanudarlo sin que se arranque el estado de exceso de vencimiento. +{{< /note >}} + +Puede que notes errores transitorios en tus Deployments, bien debido a un tiempo de vencimiento muy pequeño que hayas configurado +o bien a cualquier otro tipo de error que puede considerarse como transitorio. Por ejemplo, +supongamos que no tienes suficiente cuota. 
Si describes el Deployment, te darás cuenta de la sección siguiente: + +```shell +kubectl describe deployment nginx-deployment +``` +``` +<...> +Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True ReplicaSetUpdated + ReplicaFailure True FailedCreate +<...> +``` + +Si ejecutas el comando `kubectl get deployment nginx-deployment -o yaml`, el estado del Deployment puede parecerse a: + +``` +status: + availableReplicas: 2 + conditions: + - lastTransitionTime: 2016-10-04T12:25:39Z + lastUpdateTime: 2016-10-04T12:25:39Z + message: Replica set "nginx-deployment-4262182780" is progressing. + reason: ReplicaSetUpdated + status: "True" + type: Progressing + - lastTransitionTime: 2016-10-04T12:25:42Z + lastUpdateTime: 2016-10-04T12:25:42Z + message: Deployment has minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: Available + - lastTransitionTime: 2016-10-04T12:25:39Z + lastUpdateTime: 2016-10-04T12:25:39Z + message: 'Error creating: pods "nginx-deployment-4262182780-" is forbidden: exceeded quota: + object-counts, requested: pods=1, used: pods=3, limited: pods=2' + reason: FailedCreate + status: "True" + type: ReplicaFailure + observedGeneration: 3 + replicas: 2 + unavailableReplicas: 2 +``` + +Al final, una vez que se supera el vencimiento del progreso del Deployment, Kubernetes actualiza el estado +y la razón de el estado de progreso: + +``` +Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing False ProgressDeadlineExceeded + ReplicaFailure True FailedCreate +``` + +Puedes solucionar un problema de cuota insuficiente simplemente reduciendo el número de réplicas de tu Deployment, reduciendo +otros controladores que puedas estar ejecutando, o incrementando la cuota en tu espacio de nombres. Si una vez satisfechas las condiciones de tu cuota, +el controlador del Deployment completa el despliegue, entonces verás que el estado del Deployment se actualiza al estado satisfactorio (`Status=True` y `Reason=NewReplicaSetAvailable`). + +``` +Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable +``` + +`Type=Available` con `Status=True` significa que tu Deployment tiene disponibilidad mínima. La disponibilidad mínima se prescribe +mediante los parámetros indicados en la estrategia de despligue. `Type=Progressing` con `Status=True` significa que tu Deployment +está bien en medio de un despliegue y está progresando o bien que se ha completado de forma satisfactoria y el número mínimo +requerido de nuevas réplicas ya está disponible (ver la Razón del estado para cada caso particular - en nuestro caso +`Reason=NewReplicaSetAvailable` significa que el Deployment se ha completado). + +Puedes comprobar si un Deployment ha fallado en su progreso usando el comando `kubectl rollout status`. `kubectl rollout status` +devuelve un código de salida distinto de 0 si el Deployment ha excedido su tiempo de vencimiento. + +```shell +kubectl rollout status deployment.v1.apps/nginx-deployment +``` +``` +Waiting for rollout to finish: 2 out of 3 new replicas have been updated... +error: deployment "nginx" exceeded its progress deadline +$ echo $? +1 +``` + +### Actuar ante un despliegue fallido + +Todas las acciones que aplican a un Deployment completado también aplican a un Deployment fallido. 
Puedes escalarlo/reducirlo, retrocederlo
+a una revisión previa, o incluso pausarlo si necesitas realizar múltiples cambios a la plantilla Pod del Deployment.
+
+## Regla de Limpieza
+
+Puedes configurar el campo `.spec.revisionHistoryLimit` de un Deployment para especificar cuántos ReplicaSets viejos quieres conservar
+para este Deployment. El resto será eliminado en segundo plano. Por defecto, es 10.
+
+{{< note >}}
+Poner este campo de forma explícita a 0 resulta en la limpieza de toda la historia de tu Deployment,
+por lo que tu Deployment no podrá retroceder a revisiones previas.
+{{< /note >}}
+
+## Casos de Uso
+
+### Despliegue Canary
+
+Si quieres desplegar nuevas versiones a un subconjunto de usuarios o servidores usando el Deployment,
+puedes hacerlo creando múltiples Deployments, uno para cada versión nueva, siguiendo el patrón canary descrito en
+[gestionar recursos](/docs/concepts/cluster-administration/manage-deployment/#canary-deployments).
+
+## Escribir una especificación de Deployment
+
+Al igual que con el resto de configuraciones de Kubernetes, un Deployment requiere los campos `apiVersion`, `kind` y `metadata`.
+Para información general acerca de cómo trabajar con ficheros de configuración, ver los documentos acerca de [desplegar aplicaciones](/docs/tutorials/stateless-application/run-stateless-application-deployment/),
+configurar contenedores, y [usar kubectl para gestionar recursos](/docs/concepts/overview/object-management-kubectl/overview/).
+
+Un Deployment también necesita una [sección `.spec`](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status).
+
+### Plantilla Pod
+
+Tanto `.spec.template` como `.spec.selector` son campos obligatorios dentro de `.spec`.
+
+El campo `.spec.template` es una [plantilla Pod](/docs/concepts/workloads/pods/pod-overview/#pod-templates). Tiene exactamente el mismo esquema que un [Pod](/docs/concepts/workloads/pods/pod/),
+excepto por el hecho de que está anidado y no tiene `apiVersion` ni `kind`.
+
+Junto con los campos obligatorios de un Pod, una plantilla Pod de un Deployment debe indicar las etiquetas
+y las reglas de reinicio apropiadas. Para el caso de las etiquetas, asegúrate de que no se entremezclan con otros controladores. Ver [selector](#selector).
+
+Únicamente se permite una [`.spec.template.spec.restartPolicy`](/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy) igual a `Always`,
+que es el valor por defecto si no se indica.
+
+### Réplicas
+
+`.spec.replicas` es un campo opcional que indica el número de Pods deseados. Su valor por defecto es 1.
+
+### Selector
+
+`.spec.selector` es un campo opcional que indica un [selector de etiquetas](/docs/concepts/overview/working-with-objects/labels/)
+para los Pods objetivo del Deployment.
+
+`.spec.selector` debe coincidir con `.spec.template.metadata.labels`, o será descartado por la API.
+
+A partir de la versión `apps/v1` de la API, `.spec.selector` y `.metadata.labels` no toman como valor por defecto el valor de `.spec.template.metadata.labels` si no se indica.
+Por ello, debe especificarse de forma explícita. Además hay que mencionar que `.spec.selector` es inmutable tras la creación del Deployment en `apps/v1`.
+
+Un Deployment puede finalizar aquellos Pods cuyas etiquetas coincidan con el selector si su plantilla es diferente
+de `.spec.template` o si el número total de dichos Pods excede `.spec.replicas`. Arranca nuevos
+Pods con `.spec.template` si el número de Pods es menor que el número deseado.
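+
+Por ejemplo, un fragmento ilustrativo en el que `.spec.selector` coincide con las etiquetas de la plantilla Pod:
+
+```yaml
+spec:
+  selector:
+    matchLabels:
+      app: nginx      # debe coincidir con .spec.template.metadata.labels
+  template:
+    metadata:
+      labels:
+        app: nginx    # etiqueta que el selector usa para identificar los Pods
+```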
+
+{{< note >}}
+No deberías crear otros Pods cuyas etiquetas coincidan con este selector, ni directamente creando
+otro Deployment, ni creando otro controlador como un ReplicaSet o un ReplicationController. Si lo haces,
+el primer Deployment pensará que también creó esos otros Pods. Kubernetes no te impide hacerlo.
+{{< /note >}}
+
+Si tienes múltiples controladores que entremezclan sus selectores, dichos controladores competirán entre ellos
+y no se comportarán de forma correcta.
+
+### Estrategia
+
+`.spec.strategy` especifica la estrategia usada para reemplazar los Pods viejos con los nuevos.
+`.spec.strategy.type` puede tener el valor "Recreate" o "RollingUpdate". "RollingUpdate" es el valor predeterminado.
+
+#### Despliegue mediante recreación
+
+Todos los Pods actuales se eliminan antes de que los nuevos se creen cuando `.spec.strategy.type==Recreate`.
+
+#### Despliegue mediante actualización continua
+
+El Deployment actualiza los Pods en modo de [actualización continua](/docs/tasks/run-application/rolling-update-replication-controller/)
+cuando `.spec.strategy.type==RollingUpdate`. Puedes configurar los valores de `maxUnavailable` y `maxSurge`
+para controlar el proceso de actualización continua.
+
+##### Número máximo de pods no disponibles
+
+`.spec.strategy.rollingUpdate.maxUnavailable` es un campo opcional que indica el número máximo
+de Pods que pueden no estar disponibles durante el proceso de actualización. El valor puede ser un número absoluto (por ejemplo, 5)
+o un porcentaje de los Pods deseados (por ejemplo, 10%). El número absoluto se calcula a partir del porcentaje
+con redondeo a la baja. El valor no puede ser 0 si `.spec.strategy.rollingUpdate.maxSurge` es 0. El valor predeterminado es 25%.
+
+Por ejemplo, cuando este valor es 30%, el ReplicaSet viejo puede escalarse al 70% de los
+Pods deseados de forma inmediata tras comenzar el proceso de actualización. Una vez que los Pods están listos,
+el ReplicaSet viejo puede reducirse aún más, seguido de un escalado del nuevo ReplicaSet,
+asegurándose de que el número total de Pods disponibles en todo momento durante la actualización
+es de al menos el 70% de los Pods deseados.
+
+##### Número máximo de pods por encima del número deseado
+
+`.spec.strategy.rollingUpdate.maxSurge` es un campo opcional que indica el número máximo de Pods
+que pueden crearse por encima del número deseado de Pods. El valor puede ser un número absoluto (por ejemplo, 5)
+o un porcentaje de los Pods deseados (por ejemplo, 10%). El valor no puede ser 0 si `MaxUnavailable` es 0.
+El número absoluto se calcula a partir del porcentaje con redondeo al alza. El valor predeterminado es 25%.
+
+Por ejemplo, cuando este valor es 30%, el nuevo ReplicaSet puede escalarse inmediatamente cuando
+comienza la actualización continua, de forma que el número total de Pods viejos y nuevos no
+excede el 130% de los Pods deseados. Una vez que los viejos Pods se han eliminado, el nuevo ReplicaSet
+puede seguir escalándose, asegurándose de que el número total de Pods ejecutándose en todo momento
+durante la actualización es como mucho del 130% de los Pods deseados.
+
+### Segundos para vencimiento del progreso
+
+`.spec.progressDeadlineSeconds` es un campo opcional que indica el número de segundos que quieres
+esperar a que tu Deployment avance antes de que el sistema reporte que dicho Deployment
+[ha fallado en su avance](#failed-deployment) - expresado como un estado con `Type=Progressing`, `Status=False`
+y `Reason=ProgressDeadlineExceeded` en el recurso.
El controlador del Deployment seguirá intentando
+el despliegue. En el futuro, una vez que se implemente el retroceso automático, el controlador del Deployment
+retrocederá el despliegue en cuanto detecte ese estado.
+
+Si se especifica, este campo debe ser mayor que `.spec.minReadySeconds`.
+
+### Tiempo mínimo para considerar el Pod disponible
+
+`.spec.minReadySeconds` es un campo opcional que indica el número mínimo de segundos durante los cuales
+un Pod recién creado debería estar listo sin que falle ninguno de sus contenedores, para que se considere disponible.
+Por defecto su valor es 0 (el Pod se considera disponible en el momento que está listo). Para aprender más acerca de
+cuándo un Pod se considera que está listo, ver las [pruebas de contenedor](/docs/concepts/workloads/pods/pod-lifecycle/#container-probes).
+
+### Vuelta atrás
+
+El campo `.spec.rollbackTo` se ha quitado de las versiones `extensions/v1beta1` y `apps/v1beta1` de la API, y ya no se permite en las versiones de la API a partir de `apps/v1beta2`.
+En su lugar, se debería usar `kubectl rollout undo`, tal y como se explicó en [Retroceder a una Revisión Previa](#rolling-back-to-a-previous-revision).
+
+### Límite del histórico de revisiones
+
+La historia de revisiones de un Deployment se almacena en los ReplicaSets que este controla.
+
+`.spec.revisionHistoryLimit` es un campo opcional que indica el número de ReplicaSets viejos a retener
+para permitir los retrocesos. Estos ReplicaSets viejos consumen recursos en `etcd` y saturan la salida de `kubectl get rs`.
+La configuración de cada revisión de Deployment se almacena en sus ReplicaSets;
+por lo tanto, una vez que se elimina el ReplicaSet viejo, se pierde la posibilidad de retroceder a dicha revisión del Deployment.
+Por defecto, se retienen hasta 10 ReplicaSets viejos; pero su valor ideal depende de la frecuencia y la estabilidad de los nuevos Deployments.
+
+De forma más específica, poner este campo a cero quiere decir que todos los ReplicaSets viejos con 0 réplicas se limpiarán.
+En este caso, el nuevo despliegue del Deployment no se puede deshacer, ya que su historia de revisiones se habrá limpiado.
+
+### Pausa
+
+`.spec.paused` es un campo booleano opcional para pausar y reanudar un Deployment. La única diferencia entre
+un Deployment pausado y otro que no lo está es que cualquier cambio al PodTemplateSpec del Deployment pausado
+no generará nuevos despliegues mientras esté pausado. Un Deployment no está pausado de forma predeterminada cuando se crea.
+
+## Alternativa a los Deployments
+
+### kubectl rolling-update
+
+[`kubectl rolling-update`](/docs/reference/generated/kubectl/kubectl-commands#rolling-update) actualiza los Pods y los ReplicationControllers
+de forma similar. Pero se recomienda el uso de Deployments porque se declaran del lado del servidor, y proporcionan características adicionales
+como la posibilidad de retroceder a revisiones anteriores incluso después de haber terminado una actualización continua.
+
+
 diff --git a/content/es/docs/concepts/workloads/controllers/garbage-collection.md b/content/es/docs/concepts/workloads/controllers/garbage-collection.md
new file mode 100644
index 0000000000000..bc18541ce2f34
--- /dev/null
+++ b/content/es/docs/concepts/workloads/controllers/garbage-collection.md
@@ -0,0 +1,176 @@
+---
+title: Recolección de Basura
+content_type: concept
+weight: 60
+---
+
+
+
+El papel del recolector de basura de Kubernetes es el de eliminar determinados objetos
+que en algún momento tuvieron un propietario, pero que ahora ya no lo tienen.
+ + + +## Propietarios y subordinados + +Algunos objetos de Kubernetes son propietarios de otros objetos. Por ejemplo, un ReplicaSet +es el propietario de un conjunto de Pods. Los objetos que se poseen se denominan *subordinados* del +objeto propietario. Cada objeto subordinado tiene un campo `metadata.ownerReferences` +que apunta al objeto propietario. + +En ocasiones, Kubernetes pone el valor del campo `ownerReference` automáticamente. + Por ejemplo, cuando creas un ReplicaSet, Kubernetes automáticamente pone el valor del campo +`ownerReference` de cada Pod en el ReplicaSet. A partir de la versión 1.8, Kubernetes +automáticamente pone el valor de `ownerReference` para los objetos creados o adoptados +por un ReplicationController, ReplicaSet, StatefulSet, DaemonSet, Deployment, Job +y CronJob. + +También puedes configurar las relaciones entre los propietarios y sus subordinados +de forma manual indicando el valor del campo `ownerReference`. + +Aquí se muestra un archivo de configuración para un ReplicaSet que tiene tres Pods: + +{{< codenew file="controllers/replicaset.yaml" >}} + +Si se crea el ReplicaSet y entonces se muestra los metadatos del Pod, se puede +observar el campo OwnerReferences: + +```shell +kubectl apply -f https://k8s.io/examples/controllers/replicaset.yaml +kubectl get pods --output=yaml +``` + +La salida muestra que el propietario del Pod es el ReplicaSet denominado `my-repset`: + +```shell +apiVersion: v1 +kind: Pod +metadata: + ... + ownerReferences: + - apiVersion: apps/v1 + controller: true + blockOwnerDeletion: true + kind: ReplicaSet + name: my-repset + uid: d9607e19-f88f-11e6-a518-42010a800195 + ... +``` + +{{< note >}} +No se recomienda el uso de OwnerReferences entre Namespaces por diseño. Esto quiere decir que: +1) Los subordinados dentro del ámbito de Namespaces sólo pueden definir propietarios en ese mismo Namespace, +y propietarios dentro del ámbito de clúster. +2) Los subordinados dentro del ámbito del clúster sólo pueden definir propietarios dentro del ámbito del clúster, pero no +propietarios dentro del ámbito de Namespaces. +{{< /note >}} + +## Controlar cómo el recolector de basura elimina los subordinados + +Cuando eliminas un objeto, puedes indicar si sus subordinados deben eliminarse también +de forma automática. Eliminar los subordinados automáticamente se denomina *borrado en cascada*. +Hay dos modos de *borrado en cascada*: *en segundo plano* y *en primer plano*. + +Si eliminas un objeto sin borrar sus subordinados de forma automática, +dichos subordinados se convierten en *huérfanos*. + +### Borrado en cascada en primer plano + +En el *borrado en cascada en primer plano*, el objeto raíz primero entra en un estado +llamado "deletion in progress". En este estado "deletion in progress", +se cumplen las siguientes premisas: + + * El objeto todavía es visible a través de la API REST + * Se pone el valor del campo `deletionTimestamp` del objeto + * El campo `metadata.finalizers` del objeto contiene el valor "foregroundDeletion". + +Una vez que se pone el estado "deletion in progress", el recolector de basura elimina +los subordinados del objeto. Una vez que el recolector de basura ha eliminado todos +los subordinados "bloqueantes" (los objetos con `ownerReference.blockOwnerDeletion=true`), elimina +el objeto propietario. + +Cabe mencionar que usando "foregroundDeletion", sólo los subordinados con valor en +`ownerReference.blockOwnerDeletion` bloquean la eliminación del objeto propietario. 
+A partir de la versión 1.7, Kubernetes añadió un [controlador de admisión](/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement) +que controla el acceso de usuario cuando se intenta poner el campo `blockOwnerDeletion` a true +con base a los permisos de borrado del objeto propietario, de forma que aquellos subordinados no autorizados +no puedan retrasar la eliminación del objeto propietario. + +Si un controlador (como un Deployment o un ReplicaSet) establece el valor del campo `ownerReferences` de un objeto, +se pone blockOwnerDeletion automáticamente y no se necesita modificar de forma manual este campo. + +### Borrado en cascada en segundo plano + +En el *borrado en cascada en segundo plano*, Kubernetes elimina el objeto propietario +inmediatamente y es el recolector de basura quien se encarga de eliminar los subordinados en segundo plano. + +### Configurar la regla de borrado en cascada + +Para controlar la regla de borrado en cascada, configura el campo `propagationPolicy` +del parámetro `deleteOptions` cuando elimines un objeto. Los valores posibles incluyen "Orphan", +"Foreground", o "Background". + +Antes de la versión 1.9 de Kubernetes, la regla predeterminada del recolector de basura para la mayoría de controladores era `orphan`. +Esto incluía al ReplicationController, ReplicaSet, StatefulSet, DaemonSet, y al Deployment. +Para los tipos dentro de las versiones de grupo `extensions/v1beta1`, `apps/v1beta1`, y `apps/v1beta2`, a menos que +se indique de otra manera, los objetos subordinados se quedan huérfanos por defecto. +En Kubernetes 1.9, para todos los tipos de la versión de grupo `apps/v1`, los objetos subordinados se eliminan por defecto. + +Aquí se muestra un ejemplo que elimina los subordinados en segundo plano: + +```shell +kubectl proxy --port=8080 +curl -X DELETE localhost:8080/apis/apps/v1/namespaces/default/replicasets/my-repset \ +-d '{"kind":"DeleteOptions","apiVersion":"v1","propagationPolicy":"Background"}' \ +-H "Content-Type: application/json" +``` + +Aquí se muestra un ejemplo que elimina los subordinados en primer plano: + +```shell +kubectl proxy --port=8080 +curl -X DELETE localhost:8080/apis/apps/v1/namespaces/default/replicasets/my-repset \ +-d '{"kind":"DeleteOptions","apiVersion":"v1","propagationPolicy":"Foreground"}' \ +-H "Content-Type: application/json" +``` + +Aquí se muestra un ejemplo de subordinados huérfanos: + +```shell +kubectl proxy --port=8080 +curl -X DELETE localhost:8080/apis/apps/v1/namespaces/default/replicasets/my-repset \ +-d '{"kind":"DeleteOptions","apiVersion":"v1","propagationPolicy":"Orphan"}' \ +-H "Content-Type: application/json" +``` + +kubectl también permite el borrado en cascada. +Para eliminar los subordinados automáticamente, utiliza el parámetro `--cascade` a true. + Usa false para subordinados huérfanos. Por defecto, el valor de `--cascade` +es true. + +Aquí se muestra un ejemplo de huérfanos de subordinados de un ReplicaSet: + +```shell +kubectl delete replicaset my-repset --cascade=false +``` + +### Nota adicional sobre los Deployments + +Antes de la versión 1.7, cuando se usaba el borrado en cascada con Deployments se *debía* usar `propagationPolicy: Foreground` +para eliminar no sólo los ReplicaSets creados, sino también sus Pods correspondientes. Si este tipo de _propagationPolicy_ +no se usa, solo se elimina los ReplicaSets, y los Pods se quedan huérfanos. +Ver [kubeadm/#149](https://github.com/kubernetes/kubeadm/issues/149#issuecomment-284766613) para más información. 
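+
+Por ejemplo, un boceto (asumiendo un Deployment hipotético llamado `my-deployment` en el espacio de nombres `default`) de cómo borrarlo en cascada en primer plano, de forma que también se eliminen sus ReplicaSets y Pods:
+
+```shell
+kubectl proxy --port=8080
+curl -X DELETE localhost:8080/apis/apps/v1/namespaces/default/deployments/my-deployment \
+-d '{"kind":"DeleteOptions","apiVersion":"v1","propagationPolicy":"Foreground"}' \
+-H "Content-Type: application/json"
+```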
+ +## Problemas conocidos + +Seguimiento en [#26120](https://github.com/kubernetes/kubernetes/issues/26120) + + + +## {{% heading "whatsnext" %}} + + +[Documento de Diseño 1](https://git.k8s.io/community/contributors/design-proposals/api-machinery/garbage-collection.md) + +[Documento de Diseño 2](https://git.k8s.io/community/contributors/design-proposals/api-machinery/synchronous-garbage-collection.md) + diff --git a/content/es/docs/concepts/workloads/controllers/jobs-run-to-completion.md b/content/es/docs/concepts/workloads/controllers/jobs-run-to-completion.md index 480999af1ddd6..f3bd77b4bf3f6 100644 --- a/content/es/docs/concepts/workloads/controllers/jobs-run-to-completion.md +++ b/content/es/docs/concepts/workloads/controllers/jobs-run-to-completion.md @@ -1,6 +1,6 @@ --- title: Jobs - Ejecución hasta el final -content_template: templates/concept +content_type: concept feature: title: Ejecución en lotes description: > @@ -8,7 +8,7 @@ feature: weight: 70 --- -{{% capture overview %}} + Un Job crea uno o más Pods y se asegura de que un número específico de ellos termina de forma satisfactoria. Conforme los pods terminan satisfactoriamente, el Job realiza el seguimiento de las ejecuciones satisfactorias. @@ -21,10 +21,10 @@ como consecuencia de un fallo de hardware o un reinicio en un nodo). También se puede usar un Job para ejecutar múltiples Pods en paralelo. -{{% /capture %}} -{{% capture body %}} + + ## Ejecutar un Job de ejemplo @@ -454,4 +454,4 @@ además del control completo de los Pods que se crean y cómo se les asigna trab Puedes utilizar un [`CronJob`](/docs/concepts/workloads/controllers/cron-jobs/) para crear un Job que se ejecute en una hora/fecha determinadas, de forma similar a la herramienta `cron` de Unix. -{{% /capture %}} + diff --git a/content/es/docs/concepts/workloads/controllers/replicaset.md b/content/es/docs/concepts/workloads/controllers/replicaset.md index a8a92c7860197..38bbf847c66bc 100644 --- a/content/es/docs/concepts/workloads/controllers/replicaset.md +++ b/content/es/docs/concepts/workloads/controllers/replicaset.md @@ -1,19 +1,19 @@ --- title: ReplicaSet -content_template: templates/concept +content_type: concept weight: 10 --- -{{% capture overview %}} + El objeto de un ReplicaSet es el de mantener un conjunto estable de réplicas de Pods ejecutándose en todo momento. Así, se usa en numerosas ocasiones para garantizar la disponibilidad de un número específico de Pods idénticos. -{{% /capture %}} -{{% capture body %}} + + ## Cómo funciona un ReplicaSet @@ -367,4 +367,4 @@ Los dos sirven al mismo propósito, y se comportan de forma similar, excepto por no soporta los requisitos del selector basado en conjunto, como se describe en la [guía de usuario de etiquetas](/docs/concepts/overview/working-with-objects/labels/#label-selectors). Por ello, se prefiere los ReplicaSets a los ReplicationControllers. -{{% /capture %}} + diff --git a/content/es/docs/concepts/workloads/controllers/replicationcontroller.md b/content/es/docs/concepts/workloads/controllers/replicationcontroller.md index 970eb4e8ec483..5fe6c94c1eaba 100644 --- a/content/es/docs/concepts/workloads/controllers/replicationcontroller.md +++ b/content/es/docs/concepts/workloads/controllers/replicationcontroller.md @@ -281,7 +281,7 @@ Incluso se plantea excluir el mecanismo de creación de pods a granel ([#170](ht El ReplicationController está pensado para ser una primitiva de bloques is intended to be a composable building-block primitive. 
We expect higher-level APIs and/or tools to be built on top of it and other complementary primitives for user convenience in the future. The "macro" operations currently supported by kubectl (run, scale, rolling-update) are proof-of-concept examples of this. For instance, we could imagine something like [Asgard](http://techblog.netflix.com/2012/06/asgard-web-based-cloud-management-and.html) managing ReplicationControllers, auto-scalers, services, scheduling policies, canaries, etc. -## Obejto API +## Objeto API El ReplicationController es un recurso de alto nivel en la API REST de Kubernetes. Más detalles acerca del objeto API se pueden encontrar aquí: diff --git a/content/es/docs/concepts/workloads/pods/pod.md b/content/es/docs/concepts/workloads/pods/pod.md index 4c6b5c7498605..54ec37ce2801b 100644 --- a/content/es/docs/concepts/workloads/pods/pod.md +++ b/content/es/docs/concepts/workloads/pods/pod.md @@ -1,18 +1,18 @@ --- reviewers: title: Pods -content_template: templates/concept +content_type: concept weight: 20 --- -{{% capture overview %}} + Los _Pods_ son las unidades de computación desplegables más pequeñas que se pueden crear y gestionar en Kubernetes. -{{% /capture %}} -{{% capture body %}} + + ## ¿Qué és un Pod? @@ -151,4 +151,4 @@ Pod es un recurso de nivel superior en la API REST de Kubernetes. La definición de [objeto de API Pod](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#pod-v1-core) describe el objeto en detalle. -{{% /capture %}} + diff --git a/content/es/docs/contribute/start.md b/content/es/docs/contribute/start.md new file mode 100644 index 0000000000000..607ecc966c250 --- /dev/null +++ b/content/es/docs/contribute/start.md @@ -0,0 +1,207 @@ +--- +title: Empieza a contribuir +slug: start +content_type: concept +weight: 10 +card: + name: contribute + weight: 10 +--- + + + +Si quieres empezar a contribuir a la documentación de Kubernetes esta página y su temas enlazados pueden ayudarte a empezar. No necesitas ser un desarrollador o saber escribir de forma técnica para tener un gran impacto en la documentación y experiencia de usuario en Kubernetes! Todo lo que necesitas para los temas en esta página es una [Cuenta en GitHub](https://github.com/join) y un navegador web. + +Si estas buscando información sobre cómo comenzar a contribuir a los repositorios de Kubernetes, entonces dirígete a [las guías de la comunidad Kubernetes](https://github.com/kubernetes/community/blob/master/governance.md) + + + +## Lo básico sobre nuestra documentación + +La documentación de Kuberentes esta escrita usando Markdown, procesada y +desplegada usando Hugo. El código fuente está en GitHub accessible en [git.k8s.io/website/](https://github.com/kubernetes/website). +La mayoría de la documentación en castellano está en `/content/es/docs`. Alguna de +la documentación de referencia se genera automática con los scripts del +directorio `/update-imported-docs`. + +Puedes clasificar incidencias, editar contenido y revisar cambios de otros, todo ello +desde la página de GitHub. También puedes usar la historia embebida de GitHub y +las herramientas de búsqueda. 
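+
+Si prefieres previsualizar tus cambios en tu propia máquina, un esquema mínimo sería el siguiente
+(se asume que ya tienes Hugo instalado; los pasos exactos y la versión requerida pueden variar, consulta el README del repositorio):
+
+```shell
+# Clona el código fuente del sitio web
+git clone https://github.com/kubernetes/website.git
+cd website
+
+# Arranca el servidor de desarrollo de Hugo y visita http://localhost:1313
+hugo server
+```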
+ +No todas las tareas se pueden realizar desde la interfaz web de GitHub, también +se discute en las guías de contribución a la documentación +[intermedia](/docs/contribute/intermediate/) y +[avanzada](/docs/contribute/advanced/) + +### Participar en la documentación de los SIG + +La documentación de Kubernetes es mantenida por el {{< glossary_tooltip text="Special Interest Group" term_id="sig" >}} (SIG) denominado SIG Docs. Nos comunicamos usando un canal de Slack, una lista de correo +y una reunión semana por video-conferencia. Siempre son bienvenidos nuevos +participantes al grupo. Para más información ver +[Participar en SIG Docs](/docs/contribute/participating/). + +### Guías de estilo + +Se mantienen unas [guías de estilo](/docs/contribute/style/style-guide/) con la información sobre las elecciones que cada comunidad SIG Docs ha realizado referente a gramática, sintaxis, formato del código fuente y convenciones tipográficas. Revisa la guía de estilos antes de hacer tu primera contribución y úsala para resolver tus dudas. + +Los cambios en la guía de estilos se hacen desde el SIG Docs como grupo. Para añadir o proponer cambios [añade tus comentarios en la agenda](https://docs.google.com/document/d/1Ds87eRiNZeXwRBEbFr6Z7ukjbTow5RQcNZLaSvWWQsE/edit#) para las próximas reuniones del SIG Docs y participe en las discusiones durante la reunión. Revisa el apartado [avanzado](/docs/contribute/advanced/) para más información. + +### Plantillas para páginas + +Se usan plantillas para las páginas de documentación con el objeto de que todas tengan la misma presentación. Asegúrate de entender como funcionan estas plantillas y revisa el apartado [Uso de plantillas para páginas](/docs/contribute/style/page-templates/). Si tienes alguna consulta, no dudes en ponerte en contacto con el resto del equipo en Slack. + +### Hugo shortcodes + +La documentación de Kubernetes se transforma a partir de Markdown para obtener HTML usando Hugo. Hay que conocer los shortcodes estándar de Hugo, así como algunos que son personalizados para la documentación de Kubernetes. Para más información de como usarlos revisa [Hugo shortcodes personalizados](/docs/contribute/style/hugo-shortcodes/). + +### Múltiples idiomas + +La documentación original está disponible en múltiples idiomas en `/content/`. Cada idioma tiene su propia carpeta con el código de dos letras determinado por el [estándar ISO 639-1](https://www.loc.gov/standards/iso639-2/php/code_list.php). Por ejemplo, la documentación original en inglés se encuentra en `/content/en/docs/`. + +Para más información sobre como contribuir a la documentación en múltiples idiomas revisa ["Localizar contenido"](/docs/contribute/intermediate#localize-content) + +Si te interesa empezar una nueva localización revisa ["Localization"](/docs/contribute/localization/). + +## Registro de incidencias + +Cualquier persona con una cuenta de GitHub puede reportar una incidencia en la documentación de Kubernetes. Si ves algo erróneo, aunque no sepas como resolverlo, [reporta una incidencia](#cómo-reportar-una-incidencia). La única excepción a la regla es si se trata de un pequeño error, como alguno que puedes resolver por ti mismo. En este último caso, puedes tratar de [resolverlo](#mejorar-contenido-existente) sin necesidad de reportar una incidencia primero. + +### Cómo reportar una incidencia + +- **En una página existente** + + Si ves un problema en una página existente en la [documentación de Kuberenetes](/docs/) ve al final de la página y haz clic en el botón **Abrir un Issue**. 
Si no estas autenticado en GitHub, te pedirá que te identifiques y posteriormente un formulario de nueva incidencia aparecerá con contenido pre-cargado. + + Utilizando formato Markdown completa todos los detalles que sea posible. En los lugares en que haya corchetes (`[ ]`) pon una `x` en medio de los corchetes para representar la elección de una opción. Si tienes una posible solución al problema añádela. + +- **Solicitar una nueva página** + + Si crees que un contenido debería añadirse, pero no estás seguro de donde debería añadirse o si crees que no encaja en las páginas que ya existen, puedes crear un incidente. También puedes elegir una página ya existente donde pienses que pudiera encajar y crear el incidente desde esa página, o ir directamente a [https://github.com/kubernetes/website/issues/new/](https://github.com/kubernetes/website/issues/new/) y crearlo desde allí. + +### Cómo reportar correctamente incidencias + +Para estar seguros que tu incidencia se entiende y se puede procesar ten en cuenta esta guía: + +- Usa la plantilla de incidencia y aporta detalles, cuantos más es mejor. +- Explica de forma clara el impacto de la incidencia en los usuarios. +- Mantén el alcance de una incidencia a una cantidad de trabajo razonable. Para problemas con un alcance muy amplio divídela en incidencias más pequeñas. + + Por ejemplo, "Arreglar la documentación de seguridad" no es una incidencia procesable, pero "Añadir detalles en el tema 'Restringir acceso a la red'" si lo es. +- Si la incidencia está relacionada con otra o con una petición de cambio puedes referirte a ella tanto por la URL como con el número de la incidencia o petición de cambio con el carácter `#` delante. Por ejemplo `Introducido por #987654`. +- Se respetuoso y evita desahogarte. Por ejemplo, "La documentación sobre X apesta" no es útil o una crítica constructiva. El [Código de conducta](/community/code-of-conduct/) también aplica para las interacciones en los repositorios de Kubernetes en GitHub. + +## Participa en las discusiones de SIG Docs + +El equipo de SIG Docs se comunica por las siguientes vías: + +- [Únete al Slack de Kubernetes](http://slack.k8s.io/) y entra al canal `#sig-docs` o `#kubernetes-docs-es` para la documentación en castellano. En Slack, discutimos sobre las incidencias de documentación en tiempo real, nos coordinamos y hablamos de temas relacionados con la documentación. No olvides presentarte cuando entres en el canal para que podamos saber un poco más de ti! +- [Únete a la lista de correo `kubernetes-sig-docs`](https://groups.google.com/forum/#!forum/kubernetes-sig-docs), donde tienen lugar las discusiones más amplias y se registran las decisiones oficiales. +- Participa en la video-conferencia [semanal de SIG Docs](https://github.com/kubernetes/community/tree/master/sig-docs), esta se anuncia en el canal de Slack y la lista de correo. Actualmente esta reunión tiene lugar usando Zoom, por lo que necesitas descargar el [cliente Zoom](https://zoom.us/download) o llamar usando un teléfono. + +{{< note >}} +Puedes revisar la reunión semanal de SIG Docs en el [Calendario de reuniones de la comunidad Kubernetes](https://calendar.google.com/calendar/embed?src=cgnt364vd8s86hr2phapfjc6uk%40group.calendar.google.com&ctz=America/Los_Angeles). +{{< /note >}} + +## Mejorar contenido existente + +Para mejorar contenido existente crea una _pull request(PR)_ después de crear un _fork_. Estos términos son [específicos de GitHub](https://help.github.com/categories/collaborating-with-issues-and-pull-requests/). 
No es necesario conocer todo sobre estos términos porque todo se realiza a través del navegador web. Cuando continúes con la [guía de contribución de documentación intermedia](/docs/contribute/intermediate/) entonces necesitarás un poco más de conocimiento de la metodología Git. + +{{< note >}} +**Desarrolladores de código de Kubernetes**: Si estás documentando una nueva característica para una versión futura de Kubernetes, entonces el proceso es un poco diferente. Mira el proceso y pautas en [Documentar una característica](/docs/contribute/intermediate/#sig-members-documenting-new-features) así como información sobre plazos. +{{< /note >}} + +### Firma el CNCF CLA {#firma-el-cla} + +Antes de poder contribuir o documentar en Kubernetes **es necesario** leer [Guía del contribuidor](https://github.com/kubernetes/community/blob/master/contributors/guide/README.md) y [firmar el `Contributor License Agreement` (CLA)](https://github.com/kubernetes/community/blob/master/CLA.md). No te preocupes esto no lleva mucho tiempo! + +### Busca algo con lo que trabajar + +Si ves algo que quieras arreglar directamente, simplemente sigue las instrucciones más abajo. No es necesario que [reportes una incidencia](#registro-de-incidencias) (aunque de todas formas puedes). + +Si quieres empezar por buscar una incidencia existente para trabajar puedes ir [https://github.com/kubernetes/website/issues](https://github.com/kubernetes/website/issues) y buscar una incidencia con la etiqueta `good first issue` (puedes usar [este](https://github.com/kubernetes/website/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) atajo). Lee los comentarios y asegurate de que no hay una petición de cambio abierta para esa incidencia y que nadie a dejado un comentario indicando que están trabajando en esa misma incidencia recientemente (3 días es una buena regla). Deja un comentario indicando que te gustaría trabajar en la incidencia. + +### Elije que rama de Git usar + +El aspecto más importante a la hora de mandar una petición de cambio es que rama usar como base para trabajar. Usa estas pautas para tomar la decisión: + +- Utiliza `master` para arreglar problemas en contenido ya existente publicado, o hacer mejoras en contenido ya existente. + - Utiliza una rama de versión (cómo `dev-{{< release-branch >}}` para la versión {{< release-branch>}}) para documentar futuras características o cambios para futuras versiones que todavía no se han publicado. +- Utiliza una rama de características que haya sido acordada por SIG Docs para colaborar en grandes mejoras o cambios en la documentación existente, incluida la reorganización de contenido o cambios en la apariencia del sitio web. + +Si todavía no estás seguro con que rama utilizar, pregunta en `#sig-docs`en Slack o atiende una reunión semanal del SIG Docs para aclarar tus dudas. + +### Enviar una petición de cambio + +Sigue estos pasos para enviar una petición de cambio y mejorar la documentación de Kubernetes. + +1. En la página que hayas visto una incidencia haz clic en el icono del lápiz arriba a la derecha. + Una nueva página de GitHub aparecerá con algunos textos de ayuda. +2. Si nunca has creado un copia del repositorio de documentación de Kubernetes te pedirá que lo haga. + Crea la copia bajo tu usuario de GitHub en lugar de otra organización de la que seas miembro. La copia generalmente tiene una URL como `https://github.com//website`, a menos que ya tengas un repositorio con un nombre en conflicto con este. 
+ + La razón por la que se pide crear una copia del repositorio es porque no tienes permisos para subir cambios directamente a rama en el repositorio original de Kubernetes. +3. Aparecerá el editor Markdown de GitHub con el fichero Markdown fuente cargado. Realiza tus cambios. Debajo del editor completa el formulario **Propose file change**. El primer campo es el resumen del mensaje de tu commit y no debe ser más largo de 50 caracteres. El segundo campo es opcional, pero puede incluir más información y detalles si procede. + + {{< note >}} + No incluyas referencias a otras incidencias o peticiones de cambio de GitHub en el mensaje de los commits. Esto lo puedes añadir después en la descripción de la petición de cambio. +{{< /note >}} + + Haz clic en **Propose file change**. El cambio se guarda como un commit en una nueva rama de tu copia, automáticamente se le asignará un nombre estilo `patch-1`. + +4. La siguiente pantalla resume los cambios que has hecho pudiendo comparar la nueva rama (la **head fork** y cajas de selección **compare**) con el estado actual del **base fork** y la rama **base** (`master` en el repositorio por defecto `kubernetes/website`). Puedes cambiar cualquiera de las cajas de selección, pero no lo hagas ahora. Hecha un vistazo a las distintas vistas en la parte baja de la pantalla y si todo parece correcto haz clic en **Create pull request**. + + {{< note >}} + Si no deseas crear una petición de cambio puedes hacerlo más delante, solo basta con navegar a la URL principal del repositorio de Kubernetes website o de tu copia. La página de GitHub te mostrará un mensaje para crear una petición de cambio si detecta que has subido una nueva rama a tu repositorio copia. + {{< /note >}} + +5. La pantalla **Open a pull request** aparece. El tema de una petición de cambio es el resumen del commit, pero puedes cambiarlo si lo necesitas. El cuerpo está pre-cargado con el mensaje del commit extendido (si lo hay) junto con una plantilla. Lee la plantilla y llena los detalles requeridos, entonces borra el texto extra de la plantilla. Deja la casilla **Allow edits from maintainers** seleccionada. Haz clic en **Create pull request**. + + Enhorabuena! Tu petición de cambio está disponible en [Pull requests](https://github.com/kubernetes/website/pulls). + + Después de unos minutos ya podrás pre-visualizar la página con los cambios de tu PR aplicados. Ve a la pestaña de **Conversation** en tu PR y haz clic en el enlace **Details** para ver el test `deploy/netlify`, localizado casi al final de la página. Se abrirá en la misma ventana del navegado por defecto. + +6. Espera una revisión. Generalmente `k8s-ci-robot` sugiere unos revisores. Si un revisor te pide que hagas cambios puedes ir a la pestaña **FilesChanged** y hacer clic en el icono del lápiz para hacer tus cambios en cualquiera de los ficheros en la petición de cambio. Cuando guardes los cambios se creará un commit en la rama asociada a la petición de cambio. + +7. Si tu cambio es aceptado, un revisor fusionará tu petición de cambio y tus cambios serán visibles en pocos minutos en la web de [kubernetes.io](https://kubernetes.io). + +Esta es solo una forma de mandar una petición de cambio. Si eres un usuario de Git y GitHub avanzado puedes usar una aplicación GUI local o la linea de comandos con el cliente Git en lugar de usar la UI de GitHub. Algunos conceptos básicos sobre el uso de la línea de comandos Git +cliente se discuten en la guía de documentación [intermedia](/docs/contribute/intermediate/). 
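+
+Como referencia, un flujo equivalente desde la línea de comandos podría tener este aspecto
+(esquema orientativo; `<tu-usuario>`, el nombre de la rama y el fichero editado son hipotéticos):
+
+```shell
+# Clona tu copia (fork) del repositorio y crea una rama para el cambio
+git clone https://github.com/<tu-usuario>/website.git
+cd website
+git checkout -b corrige-error-tipografico
+
+# Edita los ficheros con tu editor preferido y crea el commit
+git add content/es/docs/concepts/ejemplo.md   # fichero hipotético
+git commit -m "Corrige un error tipográfico"
+
+# Sube la rama a tu copia; después abre la petición de cambio desde la página de GitHub
+git push origin corrige-error-tipografico
+```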
+ +## Revisar peticiones de cambio de documentación + +Las personas que aún no son aprobadores o revisores todavía pueden revisar peticiones de cambio. Las revisiones no se consideran "vinculantes", lo que significa que su revisión por sí sola no hará que se fusionen las peticiones de cambio. Sin embargo, aún puede ser útil. Incluso si no deja ningún comentario de revisión, puede tener una idea de las convenciones y etiquetas en una petición de cambio y acostumbrarse al flujo de trabajo. + +1. Ve a [https://github.com/kubernetes/website/pulls](https://github.com/kubernetes/website/pulls). Desde ahí podrás ver una lista de todas las peticiones de cambio en la documentación del website de Kubernetes. + +2. Por defecto el único filtro que se aplica es `open`, por lo que no puedes ver las que ya se han cerrado o fusionado. Es una buena idea aplicar el filtro `cncf-cla: yes` y para tu primera revisión es una buena idea añadir `size/S` o `size/XS`. La etiqueta `size` se aplica automáticamente basada en el número de lineas modificadas en la PR. Puedes aplicar filtros con las cajas de selección al principio de la página, o usar [estos atajos](https://github.com/kubernetes/website/pulls?q=is%3Aopen+is%3Apr+label%3A%22cncf-cla%3A+yes%22+label%3Asize%2FS) solo para PRs pequeñas. Los filtros son aplicados con `AND` todos juntos, por lo que no se puede buscar a la vez `size/S` y `size/XS` en la misma consulta. + +3. Ve a la pestaña **Files changed**. Mira los cambios introducidos en la PR, y si aplica, mira también los incidentes enlazados. Si ves un algún problema o posibilidad de mejora pasa el cursor sobre la línea y haz click en el símbolo `+` que aparece. + + Puedes entonces dejar un comentario seleccionando **Add single comment** o **Start a review**. Normalmente empezar una revisión es la forma recomendada, ya que te permite hacer varios comentarios y avisar a propietario de la PR solo cuando tu revisión este completada, en lugar de notificar cada comentario. + +4. Cuando hayas acabado de revisar, haz clic en **Review changes** en la parte superior de la página. Puedes ver un resumen de la revisión y puedes elegir entre comentar, aprobar o solicitar cambios. Los nuevos contribuidores siempre deben elegir **Comment**. + +Gracias por revisar una petición de cambio! Cuando eres nuevo en un proyecto es buena idea solicitar comentarios y opiniones en las revisiones de una petición de cambio. Otro buen lugar para solicitar comentarios es en el canal de Slack `#sig-docs`. + +## Escribir un artículo en el blog + +Cualquiera puede escribir un articulo en el blog y enviarlo para revisión. Los artículos del blog no deben ser comerciales y deben consistir en contenido que se pueda aplicar de la forma más amplia posible a la comunidad de Kubernetes. + +Para enviar un artículo al blog puedes hacerlo también usando el formulario [Kubernetes blog submission form](https://docs.google.com/forms/d/e/1FAIpQLSch_phFYMTYlrTDuYziURP6nLMijoXx_f7sLABEU5gWBtxJHQ/viewform), o puedes seguir los siguientes pasos. + +1. [Firma el CLA](#sign-the-cla) si no lo has hecho ya. +2. Revisa el formato Markdown en los artículos del blog existentes en el [repositorio website](https://github.com/kubernetes/website/tree/master/content/en/blog/_posts). +3. Escribe tu artículo usando el editor de texto que prefieras. +4. En el mismo enlace que el paso 2 haz clic en botón **Create new file**. Pega el contenido de tu editor. Nombra el fichero para que coincida con el título del artículo, pero no pongas la fecha en el nombre. 
Los revisores del blog trabajarán contigo en el nombre final del fichero y la fecha en la que será publicado. +5. Cuando guardes el fichero, GitHub te guiará en el proceso de petición de cambio. +6. Un revisor de artículos del blog revisará tu envío y trabajará contigo aportando comentarios y los detalles finales. Cuando el artículo sea aprobado, se establecerá una fecha de publicación. + +## Envía un caso de estudio + +Un caso de estudio destaca como organizaciones están usando Kubernetes para resolver problemas del mundo real. Estos se escriben en colaboración con el equipo de marketing de Kubernetes que está dirigido por la {{< glossary_tooltip text="CNCF" term_id="cncf" >}}. + +Revisa el código fuente para ver los [casos de estudio existentes](https://github.com/kubernetes/website/tree/master/content/en/case-studies). Usa el formulario [Kubernetes case study submission form](https://www.cncf.io/people/end-user-community/) para enviar tu propuesta. + +## {{% heading "whatsnext" %}} + +Cuando entiendas mejor las tareas mostradas en este tema y quieras formar parte del equipo de documentación de Kubernetes de una forma más activa lee la [guía intermedia de contribución](/docs/contribute/intermediate/). \ No newline at end of file diff --git a/content/es/docs/reference/_index.md b/content/es/docs/reference/_index.md index 070cb9376501e..a3625a903eed8 100644 --- a/content/es/docs/reference/_index.md +++ b/content/es/docs/reference/_index.md @@ -49,11 +49,11 @@ En estos momento, las librerías con soporte oficial son: * [kubelet](/docs/admin/kubelet/) - El principal *agente* que se ejecuta en cada nodo. El kubelet toma un conjunto de PodSpecs y asegura que los contenedores descritos estén funcionando y en buen estado. * [kube-apiserver](/docs/admin/kube-apiserver/) - API REST que valida y configura datos para objetos API como pods, servicios, controladores de replicación, ... -* [kube-controller-manager](/docs/admin/kube-controller-manager/) - Demonio que integra los bucles de control enviados con Kubernetes. +* [kube-controller-manager](/docs/admin/kube-controller-manager/) - Daemon que integra los bucles de control enviados con Kubernetes. * [kube-proxy](/docs/admin/kube-proxy/) - Puede hacer fowarding simple o con round-robin de TCP/UDP a través de un conjunto de back-ends. * [kube-scheduler](/docs/admin/kube-scheduler/) - Planificador que gestiona la disponibilidad, el rendimiento y la capacidad. * [federation-apiserver](/docs/admin/federation-apiserver/) - Servidor API para clusters federados. -* [federation-controller-manager](/docs/admin/federation-controller-manager/) - Demonio que integra los bucles de control enviados con la federación Kubernetes. +* [federation-controller-manager](/docs/admin/federation-controller-manager/) - Proceso que integra los bucles de control enviados con la federación Kubernetes. ## Documentos de diseño diff --git a/content/es/docs/reference/glossary/configmap.md b/content/es/docs/reference/glossary/configmap.md new file mode 100644 index 0000000000000..577e24dc1fb6d --- /dev/null +++ b/content/es/docs/reference/glossary/configmap.md @@ -0,0 +1,18 @@ +--- +title: Configmap +id: configmap +date: 2020-07-11 +full_link: /docs/concepts/configuration/configmap/ +short_description: > + Almacena información no sensible. + +aka: +tags: +- workload +--- +Un objeto de la API utilizado para almacenar datos no confidenciales en el formato clave-valor. 
Los {{< glossary_tooltip text="Pods" term_id="pod" >}} pueden utilizar los ConfigMaps como variables de entorno, argumentos de la linea de comandos o como ficheros de configuración en un {{< glossary_tooltip text="Volumen" term_id="volume" >}}. + +Un ConfigMap te permite desacoplar la configuración de un entorno específico de una imagen de contenedor, así las aplicaciones son fácilmente portables. + + + diff --git a/content/es/docs/reference/glossary/controller.md b/content/es/docs/reference/glossary/controller.md new file mode 100755 index 0000000000000..8258d0ae86058 --- /dev/null +++ b/content/es/docs/reference/glossary/controller.md @@ -0,0 +1,33 @@ +--- +title: Controlador +id: controller +date: 2018-04-12 +full_link: /docs/concepts/architecture/controller/ +short_description: > + Los controladores son bucles de control que observan el estado del clúster, + y ejecutan o solicitan los cambios que sean necesarios para alcanzar el estado + deseado. + +aka: +tags: +- architecture +- fundamental +--- + +En Kubernetes, los controladores son bucles de control que observan el estado del +{{< glossary_tooltip term_id="cluster" text="clúster">}}, y ejecutan o solicitan +los cambios que sean necesarios para llevar el estado actual del clúster más +cerca del estado deseado. + + + +Los controladores observan el estado compartido del clúster a través del +{{< glossary_tooltip text="API Server" term_id="kube-apiserver" >}} (parte del +{{< glossary_tooltip term_id="control-plane" text="plano de control" >}}). + +Algunos controladores también se ejecutan dentro del mismo plano de control, +proporcionado los bucles de control necesarios para las operaciones principales +de Kubernetes. Por ejemplo, el controlador de Deployments, el controlador de +DaemonSets, el controlador de Namespaces y el controlador de volúmenes +persistentes, entre otros, se ejecutan dentro del +{{< glossary_tooltip term_id="kube-controller-manager" >}}. diff --git a/content/es/docs/reference/glossary/etcd.md b/content/es/docs/reference/glossary/etcd.md new file mode 100755 index 0000000000000..5ac470a85c5e3 --- /dev/null +++ b/content/es/docs/reference/glossary/etcd.md @@ -0,0 +1,24 @@ +--- +title: etcd +id: etcd +date: 2018-04-12 +full_link: /docs/tasks/administer-cluster/configure-upgrade-etcd/ +short_description: > + Almacén de datos persistente, consistente y distribuido de clave-valor utilizado + para almacenar toda a la información del clúster de Kubernetes. + +aka: +tags: +- architecture +- storage +--- + +Almacén de datos persistente, consistente y distribuido de clave-valor utilizado +para almacenar toda a la información del clúster de Kubernetes. + + + +Si tu clúster utiliza etcd como sistema de almacenamiento, échale un vistazo a la +documentación sobre [estrategias de backup](/docs/tasks/administer-cluster/configure-upgrade-etcd/#backing-up-an-etcd-cluster). + +Puedes encontrar información detallada sobre etcd en su [documentación oficial](https://etcd.io/docs/). diff --git a/content/es/docs/reference/glossary/kube-apiserver.md b/content/es/docs/reference/glossary/kube-apiserver.md new file mode 100755 index 0000000000000..3363f3edcb733 --- /dev/null +++ b/content/es/docs/reference/glossary/kube-apiserver.md @@ -0,0 +1,26 @@ +--- +title: API Server +id: kube-apiserver +date: 2020-07-01 +full_link: /docs/reference/generated/kube-apiserver/ +short_description: > + Componente del plano de control que expone la API de Kubernetes. 
+ +aka: +- Servidor de la API +- kube-apiserver +tags: +- architecture +- fundamental +--- + +El servidor de la API es el componente del {{< glossary_tooltip text="plano de control" term_id="control-plane" >}} +de Kubernetes que expone la API de Kubernetes. Se trata del frontend de Kubernetes, +recibe las peticiones y actualiza acordemente el estado en {{< glossary_tooltip term_id="etcd" length="all" >}}. + + + +La principal implementación de un servidor de la API de Kubernetes es +[kube-apiserver](/docs/reference/generated/kube-apiserver/). +Es una implementación preparada para ejecutarse en alta disponiblidad y que +puede escalar horizontalmente para balancear la carga entre varias instancias. \ No newline at end of file diff --git a/content/es/docs/reference/glossary/kube-controller-manager.md b/content/es/docs/reference/glossary/kube-controller-manager.md new file mode 100755 index 0000000000000..4a9bd2087743b --- /dev/null +++ b/content/es/docs/reference/glossary/kube-controller-manager.md @@ -0,0 +1,21 @@ +--- +title: kube-controller-manager +id: kube-controller-manager +date: 2018-04-12 +full_link: /docs/reference/command-line-tools-reference/kube-controller-manager/ +short_description: > + Componente del plano de control que ejecuta los controladores de Kubernetes. + +aka: +tags: +- architecture +- fundamental +--- + +Componente del plano de control que ejecuta los {{< glossary_tooltip text="controladores" term_id="controller" >}} de Kubernetes. + + + +Lógicamente cada {{< glossary_tooltip text="controlador" term_id="controller" >}} +es un proceso independiente, pero para reducir la complejidad, todos se compilan +en un único binario y se ejecuta en un mismo proceso. diff --git a/content/es/docs/reference/glossary/kube-scheduler.md b/content/es/docs/reference/glossary/kube-scheduler.md new file mode 100755 index 0000000000000..ea7914495a382 --- /dev/null +++ b/content/es/docs/reference/glossary/kube-scheduler.md @@ -0,0 +1,25 @@ +--- +title: kube-scheduler +id: kube-scheduler +date: 2018-04-12 +full_link: /docs/reference/generated/kube-scheduler/ +short_description: > + Componente del plano de control que está pendiente de los pods que no tienen + ningún nodo asignado y seleciona uno dónde ejecutarlo. + +aka: +tags: +- architecture +--- + +Componente del plano de control que está pendiente de los +{{< glossary_tooltip term_id="pod" text="Pods" >}} que no tienen ningún +{{< glossary_tooltip term_id="node" text="nodo">}} asignado +y seleciona uno donde ejecutarlo. + + + +Para decidir en qué {{< glossary_tooltip term_id="node" text="nodo">}} +se ejecutará el {{< glossary_tooltip term_id="pod" text="pod" >}}, se tienen +en cuenta diversos factores: requisitos de recursos, restricciones de hardware/software/políticas, +afinidad y anti-afinidad, localización de datos dependientes, entre otros. diff --git a/content/es/docs/reference/glossary/namespace.md b/content/es/docs/reference/glossary/namespace.md new file mode 100755 index 0000000000000..4ceec4db73638 --- /dev/null +++ b/content/es/docs/reference/glossary/namespace.md @@ -0,0 +1,22 @@ +--- +title: Namespace +id: namespace +date: 2018-04-12 +full_link: /es/docs/concepts/overview/working-with-objects/namespaces/ +short_description: > + Abstracción utilizada por Kubernetes para soportar múltiples clústeres virtuales en el mismo clúster físico. 
+aka: +- Espacio de nombres +tags: +- fundamental +--- + +Abstracción utilizada por Kubernetes para soportar múltiples clústeres virtuales +en el mismo {{< glossary_tooltip text="clúster" term_id="cluster" >}} físico. + + + +Los Namespaces, espacios de nombres, se utilizan para organizar objetos del clúster +proporcionando un mecanismo para dividir los recusos del clúster. Los nombres de los +objetos tienen que ser únicos dentro del mismo namespace, pero se pueden repetir en +otros namespaces del mismo clúster. \ No newline at end of file diff --git a/content/es/docs/tasks/_index.md b/content/es/docs/tasks/_index.md index 12d741e26342e..1b10eb1d35826 100644 --- a/content/es/docs/tasks/_index.md +++ b/content/es/docs/tasks/_index.md @@ -65,23 +65,20 @@ Configura componentes en una federación de clústers. Realiza tareas comunes de gestión de aplicaciones con estado, incluyendo escalado, borrado y depuración de StatefulSets. -## Demonios del Clúster +## Daemons del Clúster Realiza tareas comunes de gestión de un DaemonSet, como llevar a cabo una actualización de lanzamiento. ## Gestionar GPUs -COnfigura y planifica GPUs de NVIDIA para hacerlas disponibles como recursos a los nodos de un clúster. +Configura y planifica GPUs de NVIDIA para hacerlas disponibles como recursos a los nodos de un clúster. ## Gestionar HugePages Configura y planifica HugePages como un recurso planificado en un clúster. - - ## {{% heading "whatsnext" %}} - Si quisieras escribir una página de Tareas, echa un vistazo a [Crear una Petición de Subida de Documentación](/docs/home/contribute/create-pull-request/). diff --git a/content/es/docs/tasks/debug-application-cluster/_index.md b/content/es/docs/tasks/debug-application-cluster/_index.md index 12bb04c317405..6573112172ea0 100644 --- a/content/es/docs/tasks/debug-application-cluster/_index.md +++ b/content/es/docs/tasks/debug-application-cluster/_index.md @@ -1,4 +1,4 @@ --- title: "Monitorización, Logs y Debugging" weight: 80 ---- \ No newline at end of file +--- diff --git a/content/es/docs/tasks/debug-application-cluster/audit.md b/content/es/docs/tasks/debug-application-cluster/audit.md new file mode 100644 index 0000000000000..fc2dec9e27e56 --- /dev/null +++ b/content/es/docs/tasks/debug-application-cluster/audit.md @@ -0,0 +1,434 @@ +--- +content_type: concept +title: Auditoría +--- + + + +La auditoría de Kubernetes proporciona un conjunto de registros cronológicos referentes a la seguridad +que documentan la secuencia de actividades que tanto los usuarios individuales, como +los administradores y otros componentes del sistema ha realizado en el sistema. + Así, permite al administrador del clúster responder a las siguientes cuestiones: + + - ¿qué ha pasado? + - ¿cuándo ha pasado? + - ¿quién lo ha iniciado? + - ¿sobre qué ha pasado? + - ¿dónde se ha observado? + - ¿desde dónde se ha iniciado? + - ¿hacia dónde iba? + + + + + + +El componente [Kube-apiserver][kube-apiserver] lleva a cabo la auditoría. Cada petición en cada fase +de su ejecución genera un evento, que se pre-procesa según un cierto reglamento y +se escribe en un backend. Este reglamento determina lo que se audita +y los backends persisten los registros. Las implementaciones actuales de backend +incluyen los archivos de logs y los webhooks. + +Cada petición puede grabarse junto con una "etapa" asociada. 
Las etapas conocidas son: + +- `RequestReceived` - La etapa para aquellos eventos generados tan pronto como +el responsable de la auditoría recibe la petición, pero antes de que sea delegada al +siguiente responsable en la cadena. +- `ResponseStarted` - Una vez que las cabeceras de la respuesta se han enviado, +pero antes de que el cuerpo de la respuesta se envíe. Esta etapa sólo se genera +en peticiones de larga duración (ej. watch). +- `ResponseComplete` - El cuerpo de la respuesta se ha completado y no se enviarán más bytes. +- `Panic` - Eventos que se generan cuando ocurre una situación de pánico. + +{{< note >}} +La característica de registro de auditoría incrementa el consumo de memoria del servidor API +porque requiere de contexto adicional para lo que se audita en cada petición. +De forma adicional, el consumo de memoria depende de la configuración misma del registro. +{{< /note >}} + +## Reglamento de Auditoría + +El reglamento de auditoría define las reglas acerca de los eventos que deberían registrarse y +los datos que deberían incluir. La estructura del objeto de reglas de auditoría se define +en el [`audit.k8s.io` grupo de API][auditing-api]. Cuando se procesa un evento, se compara +con la lista de reglas en orden. La primera regla coincidente establece el "nivel de auditoría" +del evento. Los niveles de auditoría conocidos son: + +- `None` - no se registra eventos que disparan esta regla. +- `Metadata` - se registra los metadatos de la petición (usuario que la realiza, marca de fecha y hora, recurso, + verbo, etc.), pero no la petición ni el cuerpo de la respuesta. +- `Request` - se registra los metadatos del evento y el cuerpo de la petición, pero no el cuerpo de la respuesta. + Esto no aplica para las peticiones que no son de recurso. +- `RequestResponse` - se registra los metadatos del evento, y los cuerpos de la petición y la respuesta. + Esto no aplica para las peticiones que no son de recurso. + +Es posible indicar un archivo al definir el reglamento en el [kube-apiserver][kube-apiserver] +usando el parámetro `--audit-policy-file`. Si dicho parámetros se omite, no se registra ningún evento. +Nótese que el campo `rules` __debe__ proporcionarse en el archivo del reglamento de auditoría. +Un reglamento sin (0) reglas se considera ilegal. + +Abajo se presenta un ejemplo de un archivo de reglamento de auditoría: + +{{< codenew file="audit/audit-policy.yaml" >}} + +Puedes usar un archivo mínimo de reglamento de auditoría para registrar todas las peticiones al nivel `Metadata` de la siguiente forma: + +```yaml +# Log all requests at the Metadata level. +apiVersion: audit.k8s.io/v1 +kind: Policy +rules: +- level: Metadata +``` + +El [perfil de auditoría utilizado por GCE][gce-audit-profile] debería servir como referencia para +que los administradores construyeran sus propios perfiles de auditoría. + +## Backends de auditoría + +Los backends de auditoría persisten los eventos de auditoría en un almacenamiento externo. +El [Kube-apiserver][kube-apiserver] por defecto proporciona tres backends: + +- Backend de logs, que escribe los eventos en disco +- Backend de webhook, que envía los eventos a una API externa +- Backend dinámico, que configura backends de webhook a través de objetos de la API AuditSink. + +En todos los casos, la estructura de los eventos de auditoría se define por la API del grupo +`audit.k8s.io`. La versión actual de la API es +[`v1`][auditing-api]. 
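+
+A modo de referencia, un evento de auditoría de nivel `Metadata` serializado en JSON tiene un aspecto similar al
+siguiente (ejemplo abreviado y con valores ficticios; consulta la definición de la API para la lista completa de campos):
+
+```json
+{
+  "kind": "Event",
+  "apiVersion": "audit.k8s.io/v1",
+  "level": "Metadata",
+  "auditID": "6b2790f8-8be9-4a52-9b08-2c3f7e57b7e2",
+  "stage": "ResponseComplete",
+  "requestURI": "/api/v1/namespaces/default/pods/my-pod",
+  "verb": "get",
+  "user": {
+    "username": "system:serviceaccount:default:mi-cuenta",
+    "groups": ["system:serviceaccounts", "system:authenticated"]
+  },
+  "sourceIPs": ["10.0.0.1"],
+  "objectRef": {
+    "resource": "pods",
+    "namespace": "default",
+    "name": "my-pod",
+    "apiVersion": "v1"
+  },
+  "responseStatus": { "code": 200 },
+  "requestReceivedTimestamp": "2020-07-01T10:00:00.000000Z",
+  "stageTimestamp": "2020-07-01T10:00:00.001000Z"
+}
+```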
+ +{{< note >}} +En el caso de parches, el cuerpo de la petición es una matriz JSON con operaciones de parcheado, en vez +de un objeto JSON que incluya el objeto de la API de Kubernetes apropiado. Por ejemplo, +el siguiente cuerpo de mensaje es una petición de parcheado válida para +`/apis/batch/v1/namespaces/some-namespace/jobs/some-job-name`. + +```json +[ + { + "op": "replace", + "path": "/spec/parallelism", + "value": 0 + }, + { + "op": "remove", + "path": "/spec/template/spec/containers/0/terminationMessagePolicy" + } +] +``` +{{< /note >}} + +### Backend de Logs + +El backend de logs escribe los eventos de auditoría a un archivo en formato JSON. + Puedes configurar el backend de logs de auditoría usando el siguiente + parámetro de [kube-apiserver][kube-apiserver] flags: + +- `--audit-log-path` especifica la ruta al archivo de log que el backend utiliza para +escribir los eventos de auditoría. Si no se especifica, se deshabilita el backend de logs. `-` significa salida estándar +- `--audit-log-maxage` define el máximo número de días a retener los archivos de log +- `--audit-log-maxbackup` define el máximo número de archivos de log a retener +- `--audit-log-maxsize` define el tamaño máximo en megabytes del archivo de logs antes de ser rotado + +### Backend de Webhook + +El backend de Webhook envía eventos de auditoría a una API remota, que se supone es la misma API +que expone el [kube-apiserver][kube-apiserver]. Puedes configurar el backend de webhook de auditoría usando +los siguientes parámetros de kube-apiserver: + +- `--audit-webhook-config-file` especifica la ruta a un archivo con configuración del webhook. +La configuración del webhook es, de hecho, un archivo [kubeconfig][kubeconfig]. +- `--audit-webhook-initial-backoff` especifica la cantidad de tiempo a esperar tras una petición fallida +antes de volver a intentarla. Los reintentos posteriores se ejecutan con retraso exponencial. + +El archivo de configuración del webhook usa el formato kubeconfig para especificar la dirección remota +del servicio y las credenciales para conectarse al mismo. + +En la versión 1.13, los backends de webhook pueden configurarse [dinámicamente](#dynamic-backend). + +### Procesamiento por lotes + +Tanto el backend de logs como el de webhook permiten procesamiento por lotes. Si usamos el webhook como ejemplo, + aquí se muestra la lista de parámetros disponibles. Para aplicar el mismo parámetro al backend de logs, + simplemente sustituye `webhook` por `log` en el nombre del parámetro. Por defecto, + el procesimiento por lotes está habilitado en `webhook` y deshabilitado en `log`. De forma similar, + por defecto la regulación (throttling) está habilitada en `webhook` y deshabilitada en `log`. + +- `--audit-webhook-mode` define la estrategia de memoria intermedia (búfer), que puede ser una de las siguientes: + - `batch` - almacenar eventos y procesarlos de forma asíncrona en lotes. Esta es la estrategia por defecto. + - `blocking` - bloquear todas las respuestas del servidor API al procesar cada evento de forma individual. + - `blocking-strict` - igual que blocking, pero si ocurre un error durante el registro de la audtoría en la etapa RequestReceived, la petición completa al apiserver fallará. + +Los siguientes parámetros se usan únicamente en el modo `batch`: + +- `--audit-webhook-batch-buffer-size` define el número de eventos a almacenar de forma intermedia antes de procesar por lotes. + Si el ritmo de eventos entrantes desborda la memoria intermedia, dichos eventos se descartan. 
+- `--audit-webhook-batch-max-size` define el número máximo de eventos en un único proceso por lotes. +- `--audit-webhook-batch-max-wait` define la cantidad máxima de tiempo a esperar de forma incondicional antes de procesar los eventos de la cola. +- `--audit-webhook-batch-throttle-qps` define el promedio máximo de procesos por lote generados por segundo. +- `--audit-webhook-batch-throttle-burst` define el número máximo de procesos por lote generados al mismo tiempo si el QPS permitido no fue usado en su totalidad anteriormente. + +#### Ajuste de parámetros + +Los parámetros deberían ajustarse a la carga del apiserver. + +Por ejemplo, si kube-apiserver recibe 100 peticiones por segundo, y para cada petición se audita +las etapas `ResponseStarted` y `ResponseComplete`, deberías esperar unos ~200 +eventos de auditoría generados por segundo. Asumiendo que hay hasta 100 eventos en un lote, +deberías establecer el nivel de regulación (throttling) por lo menos a 2 QPS. Además, asumiendo +que el backend puede tardar hasta 5 segundos en escribir eventos, deberías configurar el tamaño de la memoria intermedia para almacenar hasta 5 segundos de eventos, esto es, +10 lotes, o sea, 1000 eventos. + +En la mayoría de los casos, sin embargo, los valores por defecto de los parámetros +deberían ser suficientes y no deberías preocuparte de ajustarlos manualmente. +Puedes echar un vistazo a la siguientes métricas de Prometheus que expone kube-apiserver +y también los logs para monitorizar el estado del subsistema de auditoría: + +- `apiserver_audit_event_total` métrica que contiene el número total de eventos de auditoría exportados. +- `apiserver_audit_error_total` métrica que contiene el número total de eventos descartados debido a un error durante su exportación. + +### Truncado + +Tanto el backend de logs como el de webhook permiten truncado. Como ejemplo, aquí se indica la +lista de parámetros disponible para el backend de logs: + + - `audit-log-truncate-enabled` indica si el truncado de eventos y por lotes está habilitado. + - `audit-log-truncate-max-batch-size` indica el tamaño máximo en bytes del lote enviado al backend correspondiente. + - `audit-log-truncate-max-event-size` indica el tamaño máximo en bytes del evento de auditoría enviado al backend correspondiente. + +Por defecto, el truncado está deshabilitado tanto en `webhook` como en `log`; un administrador del clúster debe configurar bien el parámetro `audit-log-truncate-enabled` o `audit-webhook-truncate-enabled` para habilitar esta característica. + +### Backend dinámico + +{{< feature-state for_k8s_version="v1.13" state="alpha" >}} + +En la versión 1.13 de Kubernetes, puedes configurar de forma dinámica los backends de auditoría usando objetos de la API AuditSink. + +Para habilitar la auditoría dinámica, debes configurar los siguientes parámetros de apiserver: + +- `--audit-dynamic-configuration`: el interruptor principal. Cuando esta característica sea GA, el único parámetro necesario. +- `--feature-gates=DynamicAuditing=true`: en evaluación en alpha y beta. +- `--runtime-config=auditregistration.k8s.io/v1alpha1=true`: habilitar la API. 
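+
+Combinados en la línea de comandos del kube-apiserver, estos parámetros tendrían un aspecto similar al siguiente
+(fragmento orientativo; se omite el resto de opciones habituales del servidor):
+
+```shell
+# Fragmento de la invocación del kube-apiserver con auditoría dinámica habilitada
+kube-apiserver \
+  --audit-dynamic-configuration \
+  --feature-gates=DynamicAuditing=true \
+  --runtime-config=auditregistration.k8s.io/v1alpha1=true
+```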
+ +Cuando se habilita, un objeto AuditSink se provisiona de la siguiente forma: + +```yaml +apiVersion: auditregistration.k8s.io/v1alpha1 +kind: AuditSink +metadata: + name: mysink +spec: + policy: + level: Metadata + stages: + - ResponseComplete + webhook: + throttle: + qps: 10 + burst: 15 + clientConfig: + url: "https://audit.app" +``` + +Para una definición completa de la API, ver [AuditSink](/docs/reference/generated/kubernetes-api/v1.13/#auditsink-v1alpha1-auditregistration). Múltiples objetos existirán como soluciones independientes. + +Aquellos backends estáticos que se configuran con parámetros en tiempo de ejecución no se ven impactados por esta característica. + Sin embargo, estos backends dinámicos comparten las opciones de truncado del webhook estático, de forma que si dichas opciones se configura con parámetros en tiempo de ejecución, entonces se aplican a todos los backends dinámicos. + +#### Reglamento + +El reglamento de AuditSink es diferente del de la auditoría en tiempo de ejecución. Esto es debido a que el objeto de la API sirve para casos de uso diferentes. El reglamento continuará +evolucionando para dar cabida a más casos de uso. + +El campo `level` establece el nivel de auditoría indicado a todas las peticiones. El campo `stages` es actualmente una lista de las etapas que se permite registrar. + +#### Seguridad + +Los administradores deberían tener en cuenta que permitir el acceso en modo escritura de esta característica otorga el modo de acceso de lectura +a toda la información del clúster. Así, el acceso debería gestionarse como un privilegio de nivel `cluster-admin`. + +#### Rendimiento + +Actualmente, esta característica tiene implicaciones en el apiserver en forma de incrementos en el uso de la CPU y la memoria. +Aunque debería ser nominal cuando se trata de un número pequeño de destinos, se realizarán pruebas adicionales de rendimiento para entender su impacto real antes de que esta API pase a beta. + +## Configuración multi-clúster + +Si estás extendiendo la API de Kubernetes mediante la [capa de agregación][kube-aggregator], puedes también +configurar el registro de auditoría para el apiserver agregado. Para ello, pasa las opciones +de configuración en el mismo formato que se describe arriba al apiserver agregado +y configura el mecanismo de ingestión de logs para que recolecte los logs de auditoría. +Cada uno de los apiservers puede tener configuraciones de auditoría diferentes con +diferentes reglamentos de auditoría. + +## Ejemplos de recolectores de Logs + +### Uso de fluentd para recolectar y distribuir eventos de auditoría a partir de un archivo de logs + +[Fluentd][fluentd] es un recolector de datos de libre distribución que proporciona una capa unificada de registros. +En este ejemplo, usaremos fluentd para separar los eventos de auditoría por nombres de espacio: + +1. Instala [fluentd][fluentd_install_doc], fluent-plugin-forest y fluent-plugin-rewrite-tag-filter en el nodo donde corre kube-apiserver +{{< note >}} +Fluent-plugin-forest y fluent-plugin-rewrite-tag-filter son plugins de fluentd. Puedes obtener detalles de la instalación de estos plugins en el documento [fluentd plugin-management][fluentd_plugin_management_doc]. +{{< /note >}} + +1. 
Crea un archivo de configuración para fluentd: + + ``` + cat <<'EOF' > /etc/fluentd/config + # fluentd conf runs in the same host with kube-apiserver + + @type tail + # audit log path of kube-apiserver + path /var/log/kube-audit + pos_file /var/log/audit.pos + format json + time_key time + time_format %Y-%m-%dT%H:%M:%S.%N%z + tag audit + + + + #https://github.com/fluent/fluent-plugin-rewrite-tag-filter/issues/13 + @type record_transformer + enable_ruby + + namespace ${record["objectRef"].nil? ? "none":(record["objectRef"]["namespace"].nil? ? "none":record["objectRef"]["namespace"])} + + + + + # route audit according to namespace element in context + @type rewrite_tag_filter + + key namespace + pattern /^(.+)/ + tag ${tag}.$1 + + + + + @type record_transformer + remove_keys namespace + + + + @type forest + subtype file + remove_prefix audit + + + EOF + ``` + +1. Arranca fluentd: + + ```shell + fluentd -c /etc/fluentd/config -vv + ``` + +1. Arranca el componente kube-apiserver con las siguientes opciones: + + ```shell + --audit-policy-file=/etc/kubernetes/audit-policy.yaml --audit-log-path=/var/log/kube-audit --audit-log-format=json + ``` + +1. Comprueba las auditorías de los distintos espacios de nombres en `/var/log/audit-*.log` + +### Uso de logstash para recolectar y distribuir eventos de auditoría desde un backend de webhook + +[Logstash][logstash] es una herramienta de libre distribución de procesamiento de datos en servidor. +En este ejemplo, vamos a usar logstash para recolectar eventos de auditoría a partir de un backend de webhook, +y grabar los eventos de usuarios diferentes en archivos distintos. + +1. Instala [logstash][logstash_install_doc] + +1. Crea un archivo de configuración para logstash: + + ``` + cat < /etc/logstash/config + input{ + http{ + #TODO, figure out a way to use kubeconfig file to authenticate to logstash + #https://www.elastic.co/guide/en/logstash/current/plugins-inputs-http.html#plugins-inputs-http-ssl + port=>8888 + } + } + filter{ + split{ + # Webhook audit backend sends several events together with EventList + # split each event here. + field=>[items] + # We only need event subelement, remove others. + remove_field=>[headers, metadata, apiVersion, "@timestamp", kind, "@version", host] + } + mutate{ + rename => {items=>event} + } + } + output{ + file{ + # Audit events from different users will be saved into different files. + path=>"/var/log/kube-audit-%{[event][user][username]}/audit" + } + } + EOF + ``` + +1. Arranca logstash: + + ```shell + bin/logstash -f /etc/logstash/config --path.settings /etc/logstash/ + ``` + +1. Crea un [archivo kubeconfig](/docs/tasks/access-application-cluster/authenticate-across-clusters-kubeconfig/) para el webhook del backend de auditoría de kube-apiserver: + + cat < /etc/kubernetes/audit-webhook-kubeconfig + apiVersion: v1 + clusters: + - cluster: + server: http://:8888 + name: logstash + contexts: + - context: + cluster: logstash + user: "" + name: default-context + current-context: default-context + kind: Config + preferences: {} + users: [] + EOF + +1. Arranca kube-apiserver con las siguientes opciones: + + ```shell + --audit-policy-file=/etc/kubernetes/audit-policy.yaml --audit-webhook-config-file=/etc/kubernetes/audit-webhook-kubeconfig + ``` + +1. Comprueba las auditorías en los directorios `/var/log/kube-audit-*/audit` de los nodos de logstash + +Nótese que además del plugin para salida en archivos, logstash ofrece una variedad de salidas adicionales +que permiten a los usuarios enviar la información donde necesiten. 
Por ejemplo, se puede enviar los eventos de auditoría +al plugin de elasticsearch que soporta búsquedas avanzadas y analíticas. + +[kube-apiserver]: /docs/admin/kube-apiserver +[auditing-proposal]: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/auditing.md +[auditing-api]: https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/staging/src/k8s.io/apiserver/pkg/apis/audit/v1/types.go +[gce-audit-profile]: https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/cluster/gce/gci/configure-helper.sh#L735 +[kubeconfig]: /docs/tasks/access-application-cluster/configure-access-multiple-clusters/ +[fluentd]: http://www.fluentd.org/ +[fluentd_install_doc]: https://docs.fluentd.org/v1.0/articles/quickstart#step-1:-installing-fluentd +[fluentd_plugin_management_doc]: https://docs.fluentd.org/v1.0/articles/plugin-management +[logstash]: https://www.elastic.co/products/logstash +[logstash_install_doc]: https://www.elastic.co/guide/en/logstash/current/installing-logstash.html +[kube-aggregator]: /docs/concepts/api-extension/apiserver-aggregation + + diff --git a/content/es/docs/tasks/debug-application-cluster/debug-init-containers.md b/content/es/docs/tasks/debug-application-cluster/debug-init-containers.md new file mode 100644 index 0000000000000..d4c8ae141bd5e --- /dev/null +++ b/content/es/docs/tasks/debug-application-cluster/debug-init-containers.md @@ -0,0 +1,129 @@ +--- +title: Depurar Contenedores de Inicialización +content_type: task +--- + + + +Esta página muestra cómo investigar problemas relacionados con la ejecución +de los contenedores de inicialización (init containers). Las líneas de comando del ejemplo de abajo +se refieren al Pod como `` y a los Init Containers como `` e + `` respectivamente. + + + +## {{% heading "prerequisites" %}} + + +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} + +* Deberías estar familizarizado con el concepto de [Init Containers](/docs/concepts/abstractions/init-containers/). +* Deberías conocer la [Configuración de un Init Container](/docs/tasks/configure-pod-container/configure-pod-initialization/#creating-a-pod-that-has-an-init-container/). + + + + + +## Comprobar el estado de los Init Containers + +Muestra el estado de tu pod: + +```shell +kubectl get pod +``` + +Por ejemplo, un estado de `Init:1/2` indica que uno de los Init Containers +se ha ejecutado satisfactoriamente: + +``` +NAME READY STATUS RESTARTS AGE + 0/1 Init:1/2 0 7s +``` + +Echa un vistazo a [Comprender el estado de un Pod](#understanding-pod-status) para más ejemplos +de valores de estado y sus significados. + +## Obtener detalles acerca de los Init Containers + +Para ver información detallada acerca de la ejecución de un Init Container: + +```shell +kubectl describe pod +``` + +Por ejemplo, un Pod con dos Init Containers podría mostrar lo siguiente: + +``` +Init Containers: + : + Container ID: ... + ... + State: Terminated + Reason: Completed + Exit Code: 0 + Started: ... + Finished: ... + Ready: True + Restart Count: 0 + ... + : + Container ID: ... + ... + State: Waiting + Reason: CrashLoopBackOff + Last State: Terminated + Reason: Error + Exit Code: 1 + Started: ... + Finished: ... + Ready: False + Restart Count: 3 + ... 
+``` + +También puedes acceder al estado del Init Container de forma programática mediante +la lectura del campo `status.initContainerStatuses` dentro del Pod Spec: + + +```shell +kubectl get pod nginx --template '{{.status.initContainerStatuses}}' +``` + + +Este comando devolverá la misma información que arriba en formato JSON. + +## Acceder a los logs de los Init Containers + +Indica el nombre del Init Container así como el nombre del Pod para + acceder a sus logs. + +```shell +kubectl logs -c +``` + +Los Init Containers que ejecutan secuencias de línea de comandos muestran los comandos +conforme se van ejecutando. Por ejemplo, puedes hacer lo siguiente en Bash +indicando `set -x` al principio de la secuencia. + + + + + +## Comprender el estado de un Pod + +Un estado de un Pod que comienza con `Init:` especifica el estado de la ejecución de +un Init Container. La tabla a continuación muestra algunos valores de estado de ejemplo +que puedes encontrar al depurar Init Containers. + +Estado | Significado +------ | ------- +`Init:N/M` | El Pod tiene `M` Init Containers, y por el momento se han completado `N`. +`Init:Error` | Ha fallado la ejecución de un Init Container. +`Init:CrashLoopBackOff` | Un Init Container ha fallado de forma repetida. +`Pending` | El Pod todavía no ha comenzado a ejecutar sus Init Containers. +`PodInitializing` o `Running` | El Pod ya ha terminado de ejecutar sus Init Containers. + + + + + diff --git a/content/es/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana.md b/content/es/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana.md new file mode 100644 index 0000000000000..af95eaff7cd12 --- /dev/null +++ b/content/es/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana.md @@ -0,0 +1,119 @@ +--- +content_type: concept +title: Escribiendo Logs con Elasticsearch y Kibana +--- + + + +En la plataforma Google Compute Engine (GCE), por defecto da soporte a la escritura de logs haciendo uso de +[Stackdriver Logging](https://cloud.google.com/logging/), el cual se describe en detalle en [Logging con Stackdriver Logging](/docs/user-guide/logging/stackdriver). + +Este artículo describe cómo configurar un clúster para la ingesta de logs en +[Elasticsearch](https://www.elastic.co/products/elasticsearch) y su posterior visualización +con [Kibana](https://www.elastic.co/products/kibana), a modo de alternativa a +Stackdriver Logging cuando se utiliza la plataforma GCE. + +{{< note >}} +No se puede desplegar de forma automática Elasticsearch o Kibana en un clúster alojado en Google Kubernetes Engine. Hay que desplegarlos de forma manual. +{{< /note >}} + + + + + +Para utilizar Elasticsearch y Kibana para escritura de logs del clúster, deberías configurar +la siguiente variable de entorno que se muestra a continuación como parte de la creación +del clúster con kube-up.sh: + +```shell +KUBE_LOGGING_DESTINATION=elasticsearch +``` + +También deberías asegurar que `KUBE_ENABLE_NODE_LOGGING=true` (que es el valor por defecto en la plataforma GCE). + +Así, cuando crees un clúster, un mensaje te indicará que la recolección de logs de los daemons de Fluentd +que corren en cada nodo enviará dichos logs a Elasticsearch: + +```shell +cluster/kube-up.sh +``` +``` +... +Project: kubernetes-satnam +Zone: us-central1-b +... 
calling kube-up +Project: kubernetes-satnam +Zone: us-central1-b ++++ Staging server tars to Google Storage: gs://kubernetes-staging-e6d0e81793/devel ++++ kubernetes-server-linux-amd64.tar.gz uploaded (sha1 = 6987c098277871b6d69623141276924ab687f89d) ++++ kubernetes-salt.tar.gz uploaded (sha1 = bdfc83ed6b60fa9e3bff9004b542cfc643464cd0) +Looking for already existing resources +Starting master and configuring firewalls +Created [https://www.googleapis.com/compute/v1/projects/kubernetes-satnam/zones/us-central1-b/disks/kubernetes-master-pd]. +NAME ZONE SIZE_GB TYPE STATUS +kubernetes-master-pd us-central1-b 20 pd-ssd READY +Created [https://www.googleapis.com/compute/v1/projects/kubernetes-satnam/regions/us-central1/addresses/kubernetes-master-ip]. ++++ Logging using Fluentd to elasticsearch +``` + +Tanto los pods por nodo de Fluentd, como los pods de Elasticsearch, y los pods de Kibana + deberían ejecutarse en el namespace de kube-system inmediatamente después + de que el clúster esté disponible. + +```shell +kubectl get pods --namespace=kube-system +``` +``` +NAME READY STATUS RESTARTS AGE +elasticsearch-logging-v1-78nog 1/1 Running 0 2h +elasticsearch-logging-v1-nj2nb 1/1 Running 0 2h +fluentd-elasticsearch-kubernetes-node-5oq0 1/1 Running 0 2h +fluentd-elasticsearch-kubernetes-node-6896 1/1 Running 0 2h +fluentd-elasticsearch-kubernetes-node-l1ds 1/1 Running 0 2h +fluentd-elasticsearch-kubernetes-node-lz9j 1/1 Running 0 2h +kibana-logging-v1-bhpo8 1/1 Running 0 2h +kube-dns-v3-7r1l9 3/3 Running 0 2h +monitoring-heapster-v4-yl332 1/1 Running 1 2h +monitoring-influx-grafana-v1-o79xf 2/2 Running 0 2h +``` + +Los pods de `fluentd-elasticsearch` recogen los logs de cada nodo y los envían a los +pods de `elasticsearch-logging`, que son parte de un [servicio](/docs/concepts/services-networking/service/) llamado `elasticsearch-logging`. +Estos pods de Elasticsearch almacenan los logs y los exponen via una API REST. +El pod de `kibana-logging` proporciona una UI via web donde leer los logs almacenados en +Elasticsearch, y es parte de un servicio denominado `kibana-logging`. + +Los servicios de Elasticsearch y Kibana ambos están en el namespace `kube-system` + y no se exponen de forma directa mediante una IP accesible públicamente. Para poder acceder a dichos logs, +sigue las instrucciones acerca de cómo [Acceder a servicios corriendo en un clúster](/docs/concepts/cluster-administration/access-clusater/#accessing-services-running-on-the-cluster). + +Si tratas de acceder al servicio de `elasticsearch-logging` desde tu navegador, +verás una página de estado que se parece a la siguiente: + +![Estado de Elasticsearch](/images/docs/es-browser.png) + +A partir de ese momento, puedes introducir consultas de Elasticsearch directamente en el navegador, si lo necesitas. +Echa un vistazo a la [documentación de Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-uri-request.html) +para más detalles acerca de cómo hacerlo. + +De forma alternativa, puedes ver los logs de tu clúster en Kibana (de nuevo usando las +[instrucciones para acceder a un servicio corriendo en un clúster](/docs/user-guide/accessing-the-cluster/#accessing-services-running-on-the-cluster)). +La primera vez que visitas la URL de Kibana se te presentará una página que te pedirá +que configures una vista de los logs. Selecciona la opción de valores de serie temporal + y luego `@timestamp`. En la página siguiente selecciona la pestaña de `Discover` +y entonces deberías ver todos los logs. 
Puedes establecer el intervalo de actualización +en 5 segundos para refrescar los logs de forma regular. + +Aquí se muestra una vista típica de logs desde el visor de Kibana: + +![Kibana logs](/images/docs/kibana-logs.png) + + + +## {{% heading "whatsnext" %}} + + +¡Kibana te permite todo tipo de potentes opciones para explorar tus logs! Puedes encontrar +algunas ideas para profundizar en el tema en la [documentación de Kibana](https://www.elastic.co/guide/en/kibana/current/discover.html). + + diff --git a/content/es/docs/tasks/debug-application-cluster/logging-stackdriver.md b/content/es/docs/tasks/debug-application-cluster/logging-stackdriver.md new file mode 100644 index 0000000000000..3a247b5e88227 --- /dev/null +++ b/content/es/docs/tasks/debug-application-cluster/logging-stackdriver.md @@ -0,0 +1,366 @@ +--- +title: Escribiendo Logs con Stackdriver +content_type: concept +--- + + + +Antes de seguir leyendo esta página, deberías familiarizarte con el +[resumen de escritura de logs en Kubernetes](/docs/concepts/cluster-administration/logging). + +{{< note >}} +Por defecto, Stackdriver recolecta toda la salida estándar de tus contenedores, así +como el flujo de la salida de error. Para recolectar cualquier log tu aplicación escribe en un archivo (por ejemplo), +ver la [estrategia de sidecar](/docs/concepts/cluster-administration/logging#sidecar-container-with-a-logging-agent) +en el resumen de escritura de logs en Kubernetes. +{{< /note >}} + + + + + + +## Despliegue + +Para ingerir logs, debes desplegar el agente de Stackdriver Logging en cada uno de los nodos de tu clúster. +Dicho agente configura una instancia de `fluentd`, donde la configuración se guarda en un `ConfigMap` +y las instancias se gestionan a través de un `DaemonSet` de Kubernetes. El despliegue actual del +`ConfigMap` y el `DaemonSet` dentro de tu clúster depende de tu configuración individual del clúster. + +### Desplegar en un nuevo clúster + +#### Google Kubernetes Engine + +Stackdriver es la solución por defecto de escritura de logs para aquellos clústeres desplegados en Google Kubernetes Engine. +Stackdriver Logging se despliega por defecto en cada clúster a no ser que se le indique de forma explícita no hacerlo. + +#### Otras plataformas + +Para desplegar Stackdriver Logging en un *nuevo* clúster que estés creando con +`kube-up.sh`, haz lo siguiente: + +1. Configura la variable de entorno `KUBE_LOGGING_DESTINATION` con el valor `gcp`. +1. **Si no estás trabajando en GCE**, incluye `beta.kubernetes.io/fluentd-ds-ready=true` +en la variable `KUBE_NODE_LABELS`. + +Una vez que tu clúster ha arrancado, cada nodo debería ejecutar un agente de Stackdriver Logging. +Los `DaemonSet` y `ConfigMap` se configuran como extras. Si no estás usando `kube-up.sh`, +considera la posibilidad de arrancar un clúster sin una solución pre-determinada de escritura de logs +y entonces desplegar los agentes de Stackdriver Logging una vez el clúster esté ejecutándose. + +{{< warning >}} +El proceso de Stackdriver Logging reporta problemas conocidos en plataformas distintas +a Google Kubernetes Engine. Úsalo bajo tu propio riesgo. +{{< /warning >}} + +### Desplegar a un clúster existente + +1. Aplica una etiqueta en cada nodo, si no estaba presente ya. + + El despliegue del agente de Stackdriver Logging utiliza etiquetas de nodo para + determinar en qué nodos debería desplegarse. Estas etiquetas fueron introducidas + para distinguir entre nodos de Kubernetes de la versión 1.6 o superior. 
+ Si el clúster se creó con Stackdriver Logging configurado y el nodo tiene la + versión 1.5.X o inferior, ejecutará fluentd como un pod estático. Puesto que un nodo + no puede tener más de una instancia de fluentd, aplica únicamente las etiquetas + a los nodos que no tienen un pod de fluentd ya desplegado. Puedes confirmar si tu nodo + ha sido etiquetado correctamente ejecutando `kubectl describe` de la siguiente manera: + + ``` + kubectl describe node $NODE_NAME + ``` + + La salida debería ser similar a la siguiente: + + ``` + Name: NODE_NAME + Role: + Labels: beta.kubernetes.io/fluentd-ds-ready=true + ... + ``` + + Asegúrate que la salida contiene la etiqueta `beta.kubernetes.io/fluentd-ds-ready=true`. + Si no está presente, puedes añadirla usando el comando `kubectl label` como se indica: + + ``` + kubectl label node $NODE_NAME beta.kubernetes.io/fluentd-ds-ready=true + ``` + + {{< note >}} + Si un nodo falla y tiene que volver a crearse, deberás volver a definir + la etiqueta al nuevo nodo. Para facilitar esta tarea, puedes utilizar el + parámetro de línea de comandos del Kubelet para aplicar dichas etiquetas + cada vez que se arranque un nodo. + {{< /note >}} + +1. Despliega un `ConfigMap` con la configuración del agente de escritura de logs ejecutando el siguiente comando: + + ``` + kubectl apply -f https://k8s.io/examples/debug/fluentd-gcp-configmap.yaml + ``` + + Este comando crea el `ConfigMap` en el espacio de nombres `default`. Puedes descargar el archivo + manualmente y cambiarlo antes de crear el objeto `ConfigMap`. + +1. Despliega el agente `DaemonSet` de escritura de logs ejecutando el siguiente comando: + + ``` + kubectl apply -f https://k8s.io/examples/debug/fluentd-gcp-ds.yaml + ``` + + Puedes descargar y editar este archivo antes de usarlo igualmente. + +## Verificar el despliegue de tu agente de escritura de logs + +Tras el despliegue del `DaemonSet` de StackDriver, puedes comprobar el estado de +cada uno de los despliegues de los agentes ejecutando el siguiente comando: + +```shell +kubectl get ds --all-namespaces +``` + +Si tienes 3 nodos en el clúster, la salida debería ser similar a esta: + +``` +NAMESPACE NAME DESIRED CURRENT READY NODE-SELECTOR AGE +... +default fluentd-gcp-v2.0 3 3 3 beta.kubernetes.io/fluentd-ds-ready=true 5m +... +``` +Para comprender cómo funciona Stackdriver, considera la siguiente especificación +de un generador de logs sintéticos [counter-pod.yaml](/examples/debug/counter-pod.yaml): + +{{< codenew file="debug/counter-pod.yaml" >}} + +Esta especificación de pod tiene un contenedor que ejecuta una secuencia de comandos bash +que escribe el valor de un contador y la fecha y hora cada segundo, de forma indefinida. +Vamos a crear este pod en el espacio de nombres por defecto. + +```shell +kubectl apply -f https://k8s.io/examples/debug/counter-pod.yaml +``` + +Puedes observar el pod corriendo: + +```shell +kubectl get pods +``` +``` +NAME READY STATUS RESTARTS AGE +counter 1/1 Running 0 5m +``` + +Durante un período de tiempo corto puedes observar que el estado del pod es 'Pending', debido a que el kubelet +tiene primero que descargar la imagen del contenedor. Cuando el estado del pod cambia a `Running` +puedes usar el comando `kubectl logs` para ver la salida de este pod contador. + +```shell +kubectl logs counter +``` +``` +0: Mon Jan 1 00:00:00 UTC 2001 +1: Mon Jan 1 00:00:01 UTC 2001 +2: Mon Jan 1 00:00:02 UTC 2001 +... 
+``` + +Como se describe en el resumen de escritura de logs, este comando visualiza las entradas de logs +del archivo de logs del contenedor. Si se termina el contenedor y Kubernetes lo reinicia, +todavía puedes acceder a los logs de la ejecución previa del contenedor. Sin embargo, +si el pod se desaloja del nodo, los archivos de log se pierden. Vamos a demostrar este +comportamiento mediante el borrado del contenedor que ejecuta nuestro contador: + +```shell +kubectl delete pod counter +``` +``` +pod "counter" deleted +``` + +y su posterior re-creación: + +```shell +kubectl create -f https://k8s.io/examples/debug/counter-pod.yaml +``` +``` +pod/counter created +``` + +Tras un tiempo, puedes acceder a los logs del pod contador otra vez: + +```shell +kubectl logs counter +``` +``` +0: Mon Jan 1 00:01:00 UTC 2001 +1: Mon Jan 1 00:01:01 UTC 2001 +2: Mon Jan 1 00:01:02 UTC 2001 +... +``` + +Como era de esperar, únicamente se visualizan las líneas de log recientes. Sin embargo, +para una aplicación real seguramente prefieras acceder a los logs de todos los contenedores, +especialmente cuando te haga falta depurar problemas. Aquí es donde haber habilitado +Stackdriver Logging puede ayudarte. + +## Ver logs + +El agente de Stackdriver Logging asocia metadatos a cada entrada de log, para que puedas usarlos posteriormente +en consultas para seleccionar sólo los mensajes que te interesan: por ejemplo, +los mensajes de un pod en particular. + +Los metadatos más importantes son el tipo de recurso y el nombre del log. +El tipo de recurso de un log de contenedor tiene el valor `container`, que se muestra como +`GKE Containers` en la UI (incluso si el clúster de Kubernetes no está en Google Kubernetes Engine). +El nombre de log es el nombre del contenedor, de forma que si tienes un pod con +dos contenedores, denominados `container_1` y `container_2` en la especificación, sus logs +tendrán los nombres `container_1` y `container_2` respectivamente. + +Los componentes del sistema tienen el valor `compute` como tipo de recursos, que se muestra como +`GCE VM Instance` en la UI. Los nombres de log para los componentes del sistema son fijos. +Para un nodo de Google Kubernetes Engine, cada entrada de log de cada componente de sistema tiene uno de los siguientes nombres: + +* docker +* kubelet +* kube-proxy + +Puedes aprender más acerca de cómo visualizar los logs en la [página dedicada a Stackdriver](https://cloud.google.com/logging/docs/view/logs_viewer). + +Uno de los posibles modos de ver los logs es usando el comando de línea de interfaz +[`gcloud logging`](https://cloud.google.com/logging/docs/api/gcloud-logging) +del [SDK de Google Cloud](https://cloud.google.com/sdk/). +Este comando usa la [sintaxis de filtrado](https://cloud.google.com/logging/docs/view/advanced_filters) de StackDriver Logging +para consultar logs específicos. Por ejemplo, puedes ejecutar el siguiente comando: + +```none +gcloud beta logging read 'logName="projects/$YOUR_PROJECT_ID/logs/count"' --format json | jq '.[].textPayload' +``` +``` +... +"2: Mon Jan 1 00:01:02 UTC 2001\n" +"1: Mon Jan 1 00:01:01 UTC 2001\n" +"0: Mon Jan 1 00:01:00 UTC 2001\n" +... +"2: Mon Jan 1 00:00:02 UTC 2001\n" +"1: Mon Jan 1 00:00:01 UTC 2001\n" +"0: Mon Jan 1 00:00:00 UTC 2001\n" +``` + +Como puedes observar, muestra los mensajes del contenedor contador tanto de la +primera como de la segunda ejecución, a pesar de que el kubelet ya había eliminado los logs del primer contenedor. 
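+
+A modo de esbozo orientativo, el mismo mecanismo de filtrado permite acotar la consulta a un único contenedor.
+El siguiente ejemplo usa el tipo de recurso `container` (descrito más arriba) y el contenedor `count` del pod contador;
+los nombres exactos de las etiquetas `resource.labels.*` pueden variar según tu clúster, así que trátalos como una suposición a verificar en tu entorno:
+
+```shell
+# Filtra por tipo de recurso y nombre de contenedor; ajusta las etiquetas a tu entorno.
+gcloud beta logging read \
+  'resource.type="container" AND resource.labels.container_name="count"' \
+  --limit 10 \
+  --format json | jq '.[].textPayload'
+```
+
+La misma sintaxis de filtrado se puede utilizar también en el visor de logs de la consola web de Stackdriver.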
+
+### Exportar logs
+
+Puedes exportar los logs al [Google Cloud Storage](https://cloud.google.com/storage/)
+o a [BigQuery](https://cloud.google.com/bigquery/) para llevar a cabo un análisis más profundo.
+Stackdriver Logging ofrece el concepto de destinos, donde puedes especificar el destino de
+las entradas de logs. Más información disponible en la [página de exportación de logs](https://cloud.google.com/logging/docs/export/configure_export_v2) de StackDriver.
+
+## Configurar los agentes de Stackdriver Logging
+
+En ocasiones, la instalación por defecto de Stackdriver Logging puede no ajustarse a tus necesidades, por ejemplo:
+
+* Puede que quieras añadir más recursos porque el rendimiento por defecto no encaja con tus necesidades.
+* Puede que quieras añadir un parseo adicional para extraer más metadatos de tus mensajes de log,
+como la severidad o referencias al código fuente.
+* Puede que quieras enviar los logs a otros destinos además de Stackdriver, o enviarlos a Stackdriver sólo parcialmente.
+
+En cualquiera de estos casos, necesitas poder cambiar los parámetros del `DaemonSet` y del `ConfigMap`.
+
+### Prerrequisitos
+
+Si estás usando GKE y Stackdriver Logging está habilitado en tu clúster, no puedes
+cambiar su configuración, porque ya está gestionada por GKE.
+Sin embargo, puedes deshabilitar la integración por defecto y desplegar la tuya propia.
+
+{{< note >}}
+Tendrás que mantener y dar soporte tú mismo a la nueva configuración desplegada:
+actualizar la imagen y la configuración, ajustar los recursos, etc.
+{{< /note >}}
+
+Para deshabilitar la integración por defecto, usa el siguiente comando:
+
+```
+gcloud beta container clusters update --logging-service=none CLUSTER
+```
+
+Puedes encontrar notas acerca de cómo instalar los agentes de Stackdriver Logging
+en un clúster ya en ejecución en la [sección de despliegue](#deploying).
+
+### Cambiar los parámetros del `DaemonSet`
+
+Cuando tienes un `DaemonSet` de Stackdriver Logging en tu clúster, puedes simplemente
+modificar el campo `template` en su especificación, y el controlador del `DaemonSet` actualizará los pods por ti. Por ejemplo,
+supongamos que acabas de instalar Stackdriver Logging como se describe arriba. Ahora quieres cambiar
+el límite de memoria que se le asigna a fluentd para poder procesar más logs de forma segura.
+
+Obtén la especificación del `DaemonSet` que corre en tu clúster:
+
+```shell
+kubectl get ds fluentd-gcp-v2.0 --namespace kube-system -o yaml > fluentd-gcp-ds.yaml
+```
+
+A continuación, edita los requisitos de recursos en el `spec` y actualiza el objeto `DaemonSet`
+en el apiserver usando el siguiente comando:
+
+```shell
+kubectl replace -f fluentd-gcp-ds.yaml
+```
+
+Tras un tiempo, los pods del agente de Stackdriver Logging se reiniciarán con la nueva configuración.
+
+### Cambiar los parámetros de fluentd
+
+La configuración de Fluentd se almacena en un objeto `ConfigMap`. Realmente se trata de un conjunto
+de archivos de configuración que se combinan conjuntamente. Puedes aprender más acerca de
+la configuración de fluentd en el [sitio oficial](http://docs.fluentd.org).
+
+Imagina que quieres añadir una nueva lógica de parseo a la configuración actual, de forma que fluentd pueda entender
+el formato de logs por defecto de Python.
Un filtro apropiado de fluentd para conseguirlo sería:
+
+```
+<filter reform.**>
+  type parser
+  format /^(?<severity>\w):(?<logger_name>\w):(?<log>.*)/
+  reserve_data true
+  suppress_parse_error_log true
+  key_name log
+</filter>
+```
+
+Ahora tienes que añadirlo a la configuración actual y hacer que los agentes de Stackdriver Logging la usen.
+Para ello, obtén la versión actual del `ConfigMap` de Stackdriver Logging de tu clúster
+ejecutando el siguiente comando:
+
+```shell
+kubectl get cm fluentd-gcp-config --namespace kube-system -o yaml > fluentd-gcp-configmap.yaml
+```
+
+Luego, como valor de la clave `containers.input.conf`, inserta el nuevo filtro justo después
+de la sección `source`.
+
+{{< note >}}
+El orden es importante.
+{{< /note >}}
+
+Actualizar el `ConfigMap` en el apiserver es más complicado que actualizar el `DaemonSet`.
+Es mejor considerar que un `ConfigMap` es inmutable. Así, para poder actualizar la configuración, deberías
+crear un nuevo `ConfigMap` con otro nombre y cambiar el `DaemonSet` para que apunte al nuevo
+siguiendo la [guía de arriba](#changing-daemonset-parameters).
+
+### Añadir plugins de fluentd
+
+Fluentd está desarrollado en Ruby y permite extender sus capacidades mediante el uso de
+[plugins](http://www.fluentd.org/plugins). Si quieres usar un plugin que no está incluido en
+la imagen por defecto del contenedor de Stackdriver Logging, debes construir tu propia imagen.
+Imagina que quieres añadir un destino Kafka para aquellos mensajes de un contenedor en particular
+para poder procesarlos posteriormente. Puedes reutilizar las [fuentes de la imagen del contenedor](https://git.k8s.io/contrib/fluentd/fluentd-gcp-image)
+con algunos pequeños cambios:
+
+* Cambia el archivo Makefile para que apunte a tu repositorio de contenedores, ej. `PREFIX=gcr.io/`.
+* Añade tu dependencia al archivo Gemfile, por ejemplo `gem 'fluent-plugin-kafka'`.
+
+Luego, ejecuta `make build push` desde ese directorio. Cuando el `DaemonSet` haya tomado los cambios de la nueva imagen,
+podrás usar el plugin que has indicado en la configuración de fluentd.
+
+
diff --git a/content/es/docs/tasks/manage-daemon/_index.md b/content/es/docs/tasks/manage-daemon/_index.md
index 000b87a2148af..dfd787f9c5976 100755
--- a/content/es/docs/tasks/manage-daemon/_index.md
+++ b/content/es/docs/tasks/manage-daemon/_index.md
@@ -1,4 +1,4 @@
 ---
-title: Gestionar y ejecutar demonios
+title: Gestionar y ejecutar daemons
 weight: 45
 ---
\ No newline at end of file
diff --git a/content/es/examples/audit/audit-policy.yaml b/content/es/examples/audit/audit-policy.yaml
new file mode 100644
index 0000000000000..cdc46be754de9
--- /dev/null
+++ b/content/es/examples/audit/audit-policy.yaml
@@ -0,0 +1,68 @@
+apiVersion: audit.k8s.io/v1 # Esto es obligatorio.
+kind: Policy
+# No generar eventos de auditoría para las peticiones en la etapa RequestReceived.
+omitStages:
+  - "RequestReceived"
+rules:
+  # Registrar los cambios del pod al nivel RequestResponse
+  - level: RequestResponse
+    resources:
+    - group: ""
+      # El recurso "pods" no coincide con las peticiones a ningún sub-recurso de pods,
+      # lo que es consistente con la regla RBAC.
+ resources: ["pods"] + # Registrar "pods/log", "pods/status" al nivel Metadata + - level: Metadata + resources: + - group: "" + resources: ["pods/log", "pods/status"] + + # No registrar peticiones al configmap denominado "controller-leader" + - level: None + resources: + - group: "" + resources: ["configmaps"] + resourceNames: ["controller-leader"] + + # No registrar peticiones de observación hechas por "system:kube-proxy" sobre puntos de acceso o servicios + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # Grupo API base + resources: ["endpoints", "services"] + + # No registrar peticiones autenticadas a ciertas rutas URL que no son recursos. + - level: None + userGroups: ["system:authenticated"] + nonResourceURLs: + - "/api*" # Coincidencia por comodín. + - "/version" + + # Registrar el cuerpo de la petición de los cambios de configmap en kube-system. + - level: Request + resources: + - group: "" # Grupo API base + resources: ["configmaps"] + # Esta regla sólo aplica a los recursos en el Namespace "kube-system". + # La cadena vacía "" se puede usar para seleccionar los recursos sin Namespace. + namespaces: ["kube-system"] + + # Registrar los cambios de configmap y secret en todos los otros Namespaces al nivel Metadata. + - level: Metadata + resources: + - group: "" # Grupo API base + resources: ["secrets", "configmaps"] + + # Registrar todos los recursos en core y extensions al nivel Request. + - level: Request + resources: + - group: "" # Grupo API base + - group: "extensions" # La versión del grupo NO debería incluirse. + + # Regla para "cazar" todos las demás peticiones al nivel Metadata. + - level: Metadata + # Las peticiones de larga duración, como los watches, que caen bajo esta regla no + # generan un evento de auditoría en RequestReceived. 
+ omitStages: + - "RequestReceived" diff --git a/content/es/examples/controllers/nginx-deployment.yaml b/content/es/examples/controllers/nginx-deployment.yaml new file mode 100644 index 0000000000000..f7f95deebbb23 --- /dev/null +++ b/content/es/examples/controllers/nginx-deployment.yaml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + labels: + app: nginx +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.7.9 + ports: + - containerPort: 80 diff --git a/content/es/examples/controllers/replicaset.yaml b/content/es/examples/controllers/replicaset.yaml new file mode 100644 index 0000000000000..e5dfdf6c43ce5 --- /dev/null +++ b/content/es/examples/controllers/replicaset.yaml @@ -0,0 +1,17 @@ +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + name: my-repset +spec: + replicas: 3 + selector: + matchLabels: + pod-is-for: garbage-collection-example + template: + metadata: + labels: + pod-is-for: garbage-collection-example + spec: + containers: + - name: nginx + image: nginx diff --git a/content/es/examples/debug/counter-pod.yaml b/content/es/examples/debug/counter-pod.yaml new file mode 100644 index 0000000000000..f997886386258 --- /dev/null +++ b/content/es/examples/debug/counter-pod.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Pod +metadata: + name: counter +spec: + containers: + - name: count + image: busybox + args: [/bin/sh, -c, + 'i=0; while true; do echo "$i: $(date)"; i=$((i+1)); sleep 1; done'] diff --git a/content/fr/_index.html b/content/fr/_index.html index 89a66f48b6ca7..3b659534e8cf7 100644 --- a/content/fr/_index.html +++ b/content/fr/_index.html @@ -3,9 +3,6 @@ abstract: "Déploiement, mise à l'échelle et gestion automatisée des conteneurs" cid: home --- -{{< announcement >}} - -{{< deprecationwarning >}} {{< blocks/section id="oceanNodes" >}} {{% blocks/feature image="flower" %}} diff --git a/content/fr/docs/concepts/workloads/pods/pod-lifecycle.md b/content/fr/docs/concepts/workloads/pods/pod-lifecycle.md index 9a6f96d36a1e1..aece7de62cf76 100644 --- a/content/fr/docs/concepts/workloads/pods/pod-lifecycle.md +++ b/content/fr/docs/concepts/workloads/pods/pod-lifecycle.md @@ -63,10 +63,8 @@ du tableau de PodCondition a six champs possibles : * `PodScheduled` : le Pod a été affecté à un nœud ; * `Ready` : le Pod est prêt à servir des requêtes et doit être rajouté aux équilibreurs de charge de tous les Services correspondants ; - * `Initialized` : tous les [init containers](/docs/concepts/workloads/pods/init-containers) + * `Initialized` : tous les [init containers](/fr/docs/concepts/workloads/pods/init-containers) ont démarré correctement ; - * `Unschedulable` : le scheduler ne peut pas affecter le Pod pour l'instant, par exemple - par manque de ressources ou en raison d'autres contraintes ; * `ContainersReady` : tous les conteneurs du Pod sont prêts. @@ -98,12 +96,12 @@ Chaque sonde a un résultat parmi ces trois : * Failure: Le Conteneur a échoué au diagnostic. * Unknown: L'exécution du diagnostic a échoué, et donc aucune action ne peut être prise. -kubelet peut optionnellement exécuter et réagir à deux types de sondes sur des conteneurs +kubelet peut optionnellement exécuter et réagir à trois types de sondes sur des conteneurs en cours d'exécution : * `livenessProbe` : Indique si le Conteneur est en cours d'exécution. 
Si la liveness probe échoue, kubelet tue le Conteneur et le Conteneur - est soumis à sa [politique de redémarrage](#restart-policy) (restart policy). + est soumis à sa [politique de redémarrage](#politique-de-redemarrage) (restart policy). Si un Conteneur ne fournit pas de liveness probe, l'état par défaut est `Success`. * `readinessProbe` : Indique si le Conteneur est prêt à servir des requêtes. @@ -113,7 +111,13 @@ en cours d'exécution : `Failure`. Si le Conteneur ne fournit pas de readiness probe, l'état par défaut est `Success`. -### Quand devez-vous utiliser une liveness ou une readiness probe ? +* `startupProbe`: Indique si l'application à l'intérieur du conteneur a démarré. + Toutes les autres probes sont désactivées si une starup probe est fournie, + jusqu'à ce qu'elle réponde avec succès. Si la startup probe échoue, le kubelet + tue le conteneur, et le conteneur est assujetti à sa [politique de redémarrage](#politique-de-redemarrage). + Si un conteneur ne fournit pas de startup probe, l'état par défaut est `Success`. + +### Quand devez-vous utiliser une liveness probe ? Si le process de votre Conteneur est capable de crasher de lui-même lorsqu'il rencontre un problème ou devient inopérant, vous n'avez pas forcément besoin @@ -124,6 +128,10 @@ Si vous désirez que votre Conteneur soit tué et redémarré si une sonde écho spécifiez une liveness probe et indiquez une valeur pour `restartPolicy` à Always ou OnFailure. +### Quand devez-vous utiliser une readiness probe ? + +{{< feature-state for_k8s_version="v1.0" state="stable" >}} + Si vous voulez commencer à envoyer du trafic à un Pod seulement lorsqu'une sonde réussit, spécifiez une readiness probe. Dans ce cas, la readiness probe peut être la même que la liveness probe, mais l'existence de la readiness probe dans la spec @@ -142,8 +150,16 @@ de sa suppression, le Pod se met automatiquement dans un état non prêt, que la readiness probe existe ou non. Le Pod reste dans le statut non prêt le temps que les Conteneurs du Pod s'arrêtent. -Pour plus d'informations sur la manière de mettre en place une liveness ou readiness probe, -voir [Configurer des Liveness et Readiness Probes](/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/). +### Quand devez-vous utiliser une startup probe ? + +{{< feature-state for_k8s_version="v1.16" state="alpha" >}} + +Si votre conteneur démarre habituellement en plus de `initialDelaySeconds + failureThreshold × periodSeconds`, +vous devriez spécifier une startup probe qui vérifie le même point de terminaison que la liveness probe. La valeur par défaut pour `periodSeconds` est 30s. +Vous devriez alors mettre sa valeur `failureThreshold` suffisamment haute pour permettre au conteneur de démarrer, sans changer les valeurs par défaut de la liveness probe. Ceci aide à se protéger de deadlocks. + +Pour plus d'informations sur la manière de mettre en place une liveness, readiness ou startup probe, +voir [Configurer des Liveness, Readiness et Startup Probes](/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/). ## Statut d'un Pod et d'un Conteneur @@ -172,9 +188,7 @@ d'informations. ... ``` -* `Running` : Indique que le conteneur s'exécute sans problème. Une fois qu'un centeneur est -dans l'état Running, le hook `postStart` est exécuté (s'il existe). Cet état affiche aussi -le moment auquel le conteneur est entré dans l'état Running. +* `Running` : Indique que le conteneur s'exécute sans problème. 
Le hook `postStart` (s'il existe) est exécuté avant que le conteneur entre dans l'état Running. Cet état affiche aussi le moment auquel le conteneur est entré dans l'état Running. ```yaml ... @@ -199,27 +213,30 @@ dans l'état Terminated, le hook `preStop` est exécuté (s'il existe). ... ``` -## Pod readiness gate +## Pod readiness {#pod-readiness-gate} {{< feature-state for_k8s_version="v1.14" state="stable" >}} -Afin d'étendre la readiness d'un Pod en autorisant l'injection de données -supplémentaires ou des signaux dans `PodStatus`, Kubernetes 1.11 a introduit -une fonctionnalité appelée [Pod ready++](https://github.com/kubernetes/enhancements/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md). -Vous pouvez utiliser le nouveau champ `ReadinessGate` dans `PodSpec` -pour spécifier des conditions additionnelles à évaluer pour la readiness d'un Pod. -Si Kubernetes ne peut pas trouver une telle condition dans le champ `status.conditions` -d'un Pod, le statut de la condition est "`False`" par défaut. Voici un exemple : +Votre application peut injecter des données dans `PodStatus`. + +_Pod readiness_. Pour utiliser cette fonctionnalité, remplissez `readinessGates` dans le PodSpec avec +une liste de conditions supplémentaires que le kubelet évalue pour la disponibilité du Pod. + +Les Readiness gates sont déterminées par l'état courant des champs `status.condition` du Pod. +Si Kubernetes ne peut pas trouver une telle condition dans le champs `status.conditions` d'un Pod, the statut de la condition +est mise par défaut à "`False`". + +Voici un exemple : ```yaml -Kind: Pod +kind: Pod ... spec: readinessGates: - conditionType: "www.example.com/feature-1" status: conditions: - - type: Ready # ceci est une builtin PodCondition + - type: Ready # une PodCondition intégrée status: "False" lastProbeTime: null lastTransitionTime: 2018-01-01T00:00:00Z @@ -233,27 +250,26 @@ status: ... ``` -Les nouvelles conditions du Pod doivent être conformes au [format des étiquettes](/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) de Kubernetes. -La commande `kubectl patch` ne prenant pas encore en charge la modifictaion du statut -des objets, les nouvelles conditions du Pod doivent être injectées avec -l'action `PATCH` en utilisant une des [bibliothèques KubeClient](/docs/reference/using-api/client-libraries/). +Les conditions du Pod que vous ajoutez doivent avoir des noms qui sont conformes au [format des étiquettes](/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) de Kubernetes. -Avec l'introduction de nouvelles conditions d'un Pod, un Pod est considéré comme prêt -**seulement** lorsque les deux déclarations suivantes sont vraies : +### Statut de la disponibilité d'un Pod {#statut-pod-disponibilité} -* Tous les conteneurs du Pod sont prêts. -* Toutes les conditions spécifiées dans `ReadinessGates` sont à "`True`". +La commande `kubectl patch` ne peut pas patcher le statut d'un objet. +Pour renseigner ces `status.conditions` pour le pod, les applications et +{{< glossary_tooltip term_id="operator-pattern" text="operators">}} doivent utiliser l'action `PATCH`. +Vous pouvez utiliser une [bibliothèque client Kubernetes](/docs/reference/using-api/client-libraries/) pour +écrire du code qui renseigne les conditions particulières pour la disponibilité dun Pod. -Pour faciliter le changement de l'évaluation de la readiness d'un Pod, -une nouvelle condition de Pod `ContainersReady` est introduite pour capturer -l'ancienne condition `Ready` d'un Pod. 
+Pour un Pod utilisant des conditions particulières, ce Pod est considéré prêt **seulement** +lorsque les deux déclarations ci-dessous sont vraies : -Avec K8s 1.11, en tant que fonctionnalité alpha, "Pod Ready++" doit être explicitement activé en mettant la [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) `PodReadinessGates` -à true. +* Tous les conteneurs du Pod sont prêts. +* Toutes les conditions spécifiées dans `ReadinessGates` sont `True`. -Avec K8s 1.12, la fonctionnalité est activée par défaut. +Lorsque les conteneurs d'un Pod sont prêts mais qu'au moins une condition particulière +est manquante ou `False`, le kubelet renseigne la condition du Pod à `ContainersReady`. -## Restart policy +## Politique de redémarrage La structure PodSpec a un champ `restartPolicy` avec comme valeur possible Always, OnFailure et Never. La valeur par défaut est Always. @@ -267,33 +283,30 @@ une fois attaché à un nœud, un Pod ne sera jamais rattaché à un autre nœud ## Durée de vie d'un Pod -En général, un Pod ne disparaît pas avant que quelqu'un le détruise. Ceci peut être -un humain ou un contrôleur. La seule exception à cette règle est pour les Pods ayant -une `phase` Succeeded ou Failed depuis une durée donnée (déterminée -par `terminated-pod-gc-threshold` sur le master), qui expireront et seront -automatiquement détruits. +En général, les Pods restent jusqu'à ce qu'un humain ou un process de +{{< glossary_tooltip term_id="controller" text="contrôleur" >}} les supprime explicitement. -Trois types de contrôleurs sont disponibles : +Le plan de contrôle nettoie les Pods terminés (avec une phase à `Succeeded` ou +`Failed`), lorsque le nombre de Pods excède le seuil configuré +(determiné par `terminated-pod-gc-threshold` dans le kube-controller-manager). +Ceci empêche une fuite de ressources lorsque les Pods sont créés et supprimés au fil du temps. -- Utilisez un [Job](/docs/concepts/jobs/run-to-completion-finite-workloads/) pour des -Pods qui doivent se terminer, par exemple des calculs par batch. Les Jobs sont appropriés -seulement pour des Pods ayant `restartPolicy` égal à OnFailure ou Never. +Il y a différents types de ressources pour créer des Pods : + +- Utilisez un {{< glossary_tooltip term_id="deployment" >}}, + {{< glossary_tooltip term_id="replica-set" >}} ou {{< glossary_tooltip term_id="statefulset" >}} + pour les Pods qui ne sont pas censés terminer, par exemple des serveurs web. -- Utilisez un [ReplicationController](/docs/concepts/workloads/controllers/replicationcontroller/), - [ReplicaSet](/docs/concepts/workloads/controllers/replicaset/) ou - [Deployment](/docs/concepts/workloads/controllers/deployment/) - pour des Pods qui ne doivent pas s'arrêter, par exemple des serveurs web. - ReplicationControllers sont appropriés pour des Pods ayant `restartPolicy` égal à - Always. +- Utilisez un {{< glossary_tooltip term_id="job" >}} + pour les Pods qui sont censés se terminer une fois leur tâche accomplie. Les Jobs sont appropriés +seulement pour des Pods ayant `restartPolicy` égal à OnFailure ou Never. -- Utilisez un [DaemonSet](/docs/concepts/workloads/controllers/daemonset/) pour des Pods - qui doivent s'exécuter une fois par machine, car ils fournissent un service système - au niveau de la machine. +- Utilisez un {{< glossary_tooltip term_id="daemonset" >}} + pour les Pods qui doivent s'exécuter sur chaque noeud éligible. -Les trois types de contrôleurs contiennent un PodTemplate. 
Il est recommandé -de créer le contrôleur approprié et de le laisser créer les Pods, plutôt que de -créer directement les Pods vous-même. Ceci car les Pods seuls ne sont pas résilients -aux pannes machines, alors que les contrôleurs le sont. +Toutes les ressources de charges de travail contiennent une PodSpec. Il est recommandé de créer +la ressource de charges de travail appropriée et laisser le contrôleur de la ressource créer les Pods +pour vous, plutôt que de créer directement les Pods vous-même. Si un nœud meurt ou est déconnecté du reste du cluster, Kubernetes applique une politique pour mettre la `phase` de tous les Pods du nœud perdu à Failed. @@ -391,7 +404,7 @@ spec: [attacher des handlers à des événements de cycle de vie d'un conteneur](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/). * Apprenez par la pratique - [configurer des liveness et readiness probes](/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/). + [configurer des liveness, readiness et startup probes](/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/). * En apprendre plus sur les [hooks de cycle de vie d'un Conteneur](/docs/concepts/containers/container-lifecycle-hooks/). diff --git a/content/fr/docs/concepts/workloads/pods/pod-overview.md b/content/fr/docs/concepts/workloads/pods/pod-overview.md index b1803ba5e0e6a..bfa5e02c6236a 100644 --- a/content/fr/docs/concepts/workloads/pods/pod-overview.md +++ b/content/fr/docs/concepts/workloads/pods/pod-overview.md @@ -16,23 +16,18 @@ Cette page fournit un aperçu du `Pod`, l'objet déployable le plus petit dans l ## Comprendre les Pods -Un *Pod* est l'unité d'exécution de base d'une application Kubernetes--l'unité la plus petite et la plus simple dans le modèle d'objets de Kubernetes--que vous créez ou déployez. Un Pod représente des process en cours d'exécution dans votre {{< glossary_tooltip term_id="cluster" >}}. +Un *Pod* est l'unité d'exécution de base d'une application Kubernetes--l'unité la plus petite et la plus simple dans le modèle d'objets de Kubernetes--que vous créez ou déployez. Un Pod représente des process en cours d'exécution dans votre {{< glossary_tooltip term_id="cluster" text="cluster" >}}. -Un Pod encapsule un conteneur applicatif (ou, dans certains cas, plusieurs conteneurs), des ressources de stockage, une IP réseau unique, et des options qui contrôlent comment le ou les conteneurs doivent s'exécuter. Un Pod représente une unité de déploiement : *une instance unique d'une application dans Kubernetes*, qui peut consister soit en un unique {{< glossary_tooltip text="container" term_id="container" >}} soit en un petit nombre de conteneurs qui sont étroitement liés et qui partagent des ressources. +Un Pod encapsule un conteneur applicatif (ou, dans certains cas, plusieurs conteneurs), des ressources de stockage, une identité réseau (adresse IP) unique, ainsi que des options qui contrôlent comment le ou les conteneurs doivent s'exécuter. Un Pod représente une unité de déploiement : *une instance unique d'une application dans Kubernetes*, qui peut consister soit en un unique {{< glossary_tooltip text="container" term_id="container" >}} soit en un petit nombre de conteneurs qui sont étroitement liés et qui partagent des ressources. 
-> [Docker](https://www.docker.com) est le runtime de conteneurs le plus courant utilisé dans un Pod Kubernetes, mais les Pods prennent également en charge d'autres [runtimes de conteneurs](https://kubernetes.io/docs/setup/production-environment/container-runtimes/). +> [Docker](https://www.docker.com) est le runtime de conteneurs le plus courant utilisé dans un Pod Kubernetes, mais les Pods prennent également en charge d'autres [runtimes de conteneurs](/docs/setup/production-environment/container-runtimes/). Les Pods dans un cluster Kubernetes peuvent être utilisés de deux manières différentes : * **les Pods exécutant un conteneur unique**. Le modèle "un-conteneur-par-Pod" est le cas d'utilisation Kubernetes le plus courant ; dans ce cas, vous pouvez voir un Pod comme un wrapper autour d'un conteneur unique, et Kubernetes gère les Pods plutôt que directement les conteneurs. * **les Pods exécutant plusieurs conteneurs devant travailler ensemble**. Un Pod peut encapsuler une application composée de plusieurs conteneurs co-localisés qui sont étroitement liés et qui doivent partager des ressources. Ces conteneurs co-localisés pourraient former une unique unité de service cohésive--un conteneur servant des fichiers d'un volume partagé au public, alors qu'un conteneur "sidecar" séparé rafraîchit ou met à jour ces fichiers. Le Pod enveloppe ensemble ces conteneurs et ressources de stockage en une entité maniable de base. -Le [Blog Kubernetes](http://kubernetes.io/blog) contient quelques informations supplémentaires sur les cas d'utilisation des Pods. Pour plus d'informations, voir : - -* [The Distributed System Toolkit: Patterns for Composite Containers](https://kubernetes.io/blog/2015/06/the-distributed-system-toolkit-patterns) -* [Container Design Patterns](https://kubernetes.io/blog/2016/06/container-design-patterns) - -Chaque Pod est destiné à exécuter une instance unique d'une application donnée. Si vous désirez mettre à l'échelle votre application horizontalement, (par ex., exécuter plusieurs instances), vous devez utiliser plusieurs Pods, un pour chaque instance. Dans Kubernetes, on parle généralement de _réplication_. Des Pods répliqués sont en général créés et gérés comme un groupe par une abstraction appelée Controller. Voir [Pods et Controllers](#pods-and-controllers) pour plus d'informations. +Chaque Pod est destiné à exécuter une instance unique d'une application donnée. Si vous désirez mettre à l'échelle votre application horizontalement, (pour fournir plus de ressources au global en exécutant plus d'instances), vous devez utiliser plusieurs Pods, un pour chaque instance. Dans Kubernetes, on parle typiquement de _réplication_. Des Pods répliqués sont en général créés et gérés en tant que groupe par une ressource de charge de travail et son {{< glossary_tooltip text="_contrôleur_" term_id="controller" >}}. Voir [Pods et contrôleurs](#pods-et-controleurs) pour plus d'informations. ### Comment les Pods gèrent plusieurs conteneurs @@ -48,61 +43,76 @@ Les Pods fournissent deux types de ressources partagées pour leurs conteneurs : #### Réseau -Chaque Pod se voit assigner une adresse IP unique. Tous les conteneurs d'un Pod partagent le même namespace réseau, y compris l'adresse IP et les ports réseau. Les conteneurs *à l'intérieur d'un Pod* peuvent communiquer entre eux en utilisant `localhost`. Lorsque les conteneurs dans un Pod communiquent avec des entités *en dehors du Pod*, ils doivent coordonner comment ils utilisent les ressources réseau partagées (comme les ports). 
+Chaque Pod se voit assigner une adresse IP unique pour chaque famille d'adresses. Tous les conteneurs d'un Pod partagent le même namespace réseau, y compris l'adresse IP et les ports réseau. Les conteneurs *à l'intérieur d'un Pod* peuvent communiquer entre eux en utilisant `localhost`. Lorsque les conteneurs dans un Pod communiquent avec des entités *en dehors du Pod*, ils doivent coordonner comment ils utilisent les ressources réseau partagées (comme les ports). #### Stockage -Un Pod peut spécifier un jeu de {{< glossary_tooltip text="Volumes" term_id="volume" >}} de stockage partagés. Tous les conteneurs dans le Pod peuvent accéder aux volumes partagés, permettant à ces conteneurs de partager des données. Les volumes permettent aussi les données persistantes d'un Pod de survivre au cas où un des conteneurs doit être redémarré. Voir [Volumes](/docs/concepts/storage/volumes/) pour plus d'informations sur la façon dont Kubernetes implémente le stockage partagé dans un Pod. +Un Pod peut spécifier un jeu de {{< glossary_tooltip text="volumes" term_id="volume" >}} de stockage partagés. Tous les conteneurs dans le Pod peuvent accéder aux volumes partagés, permettant à ces conteneurs de partager des données. Les volumes permettent aussi les données persistantes d'un Pod de survivre au cas où un des conteneurs doit être redémarré. Voir [Volumes](/docs/concepts/storage/volumes/) pour plus d'informations sur la façon dont Kubernetes implémente le stockage partagé dans un Pod. ## Travailler avec des Pods -Vous aurez rarement à créer directement des Pods individuels dans Kubernetes--même des Pods à un seul conteneur. Ceci est dû au fait que les Pods sont conçus comme des entités relativement éphémères et jetables. Lorsqu'un Pod est créé (directement par vous ou indirectement par un Controller), il est programmé pour s'exécuter sur un {{< glossary_tooltip term_id="node" >}} dans votre cluster. Le Pod reste sur ce Nœud jusqu'à ce que le process se termine, l'objet pod soit supprimé, le pod soit *expulsé* par manque de ressources, ou le Nœud soit en échec. +Vous aurez rarement à créer directement des Pods individuels dans Kubernetes--même des Pods à un seul conteneur. Ceci est dû au fait que les Pods sont conçus comme des entités relativement éphémères et jetables. Lorsqu'un Pod est créé (directement par vous ou indirectement par un {{< glossary_tooltip text="_contrôleur_" term_id="controller" >}}), il est programmé pour s'exécuter sur un {{< glossary_tooltip term_id="node" >}} dans votre cluster. Le Pod reste sur ce nœud jusqu'à ce que le process se termine, l'objet pod soit supprimé, le pod soit *expulsé* par manque de ressources, ou le nœud soit en échec. {{< note >}} -Redémarrer un conteneur dans un Pod ne doit pas être confondu avec redémarrer le Pod. Le Pod lui-même ne s'exécute pas, mais est un environnement dans lequel les conteneurs s'exécutent, et persiste jusqu'à ce qu'il soit supprimé. +Redémarrer un conteneur dans un Pod ne doit pas être confondu avec redémarrer un Pod. Un Pod n'est pas un process, mais un environnement pour exécuter un conteneur. Un Pod persiste jusqu'à ce qu'il soit supprimé. {{< /note >}} -Les Pods ne se guérissent pas par eux-mêmes. Si un Pod est programmé sur un Nœud qui échoue, ou si l'opération de programmation elle-même échoue, le Pod est supprimé ; de plus, un Pod ne survivra pas à une expulsion due à un manque de ressources ou une mise en maintenance du Nœud. 
Kubernetes utilise une abstraction de plus haut niveau, appelée un *Controller*, qui s'occupe de gérer les instances de Pods relativement jetables. Ainsi, même s'il est possible d'utiliser des Pods directement, il est beaucoup plus courant dans Kubernetes de gérer vos Pods en utilisant un Controller. Voir [Pods et Controllers](#pods-and-controllers) pour plus d'informations sur la façon dont Kubernetes utilise des Controllers pour implémenter la mise à l'échelle et la guérison des Pods. +Les Pods ne se guérissent pas par eux-mêmes. Si un Pod est programmé sur un Nœud qui échoue, ou si l'opération de programmation elle-même échoue, le Pod est supprimé ; de plus, un Pod ne survivra pas à une expulsion due à un manque de ressources ou une mise en maintenance du Nœud. Kubernetes utilise une abstraction de plus haut niveau, appelée un *contrôleur*, qui s'occupe de gérer les instances de Pods relativement jetables. Ainsi, même s'il est possible d'utiliser des Pods directement, il est beaucoup plus courant dans Kubernetes de gérer vos Pods en utilisant un contrôleur. -### Pods et Controllers +### Pods et contrôleurs -Un Controller peut créer et gérer plusieurs Pods pour vous, s'occupant de la réplication et du déploiement et fournissant des capacités d'auto-guérison au niveau du cluster. Par exemple, si un Nœud échoue, le Controller peut automatiquement remplacer le Pod en programmant un remplaçant identique sur un Nœud différent. +Vous pouvez utiliser des ressources de charges de travail pour créer et gérer plusieurs Pods pour vous. Un contrôleur pour la ressource gère la réplication, +le plan de déploiement et la guérison automatique en cas de problèmes du Pod. Par exemple, si un noeud est en échec, un contrôleur note que les Pods de ce noeud +ont arrêté de fonctionner et créent des Pods pour les remplacer. L'ordonnanceur place le Pod de remplacement sur un noeud en fonctionnement. -Quelques exemples de Controllers qui contiennent un ou plusieurs pods : +Voici quelques exemples de ressources de charges de travail qui gèrent un ou plusieurs Pods : -* [Deployment](/docs/concepts/workloads/controllers/deployment/) -* [StatefulSet](/docs/concepts/workloads/controllers/statefulset/) -* [DaemonSet](/docs/concepts/workloads/controllers/daemonset/) - -En général, les Controllers utilisent des Templates de Pod que vous lui fournissez pour créer les Pods dont il est responsable. +* {{< glossary_tooltip text="Deployment" term_id="deployment" >}} +* {{< glossary_tooltip text="StatefulSet" term_id="statefulset" >}} +* {{< glossary_tooltip text="DaemonSet" term_id="daemonset" >}} ## Templates de Pod -Les Templates de Pod sont des spécifications de pod qui sont inclus dans d'autres objets, comme les -[Replication Controllers](/docs/concepts/workloads/controllers/replicationcontroller/), [Jobs](/docs/concepts/jobs/run-to-completion-finite-workloads/), et -[DaemonSets](/docs/concepts/workloads/controllers/daemonset/). Les Controllers utilisent les Templates de Pod pour créer réellement les pods. -L'exemple ci-dessous est un manifeste simple pour un Pod d'un conteneur affichant un message. +Les Templates de Pod sont des spécifications pour créer des Pods, et sont inclus dans les ressources de charges de travail comme +les [Deployments](/fr/docs/concepts/workloads/controllers/deployment/), les [Jobs](/docs/concepts/jobs/run-to-completion-finite-workloads/) et +les [DaemonSets](/docs/concepts/workloads/controllers/daemonset/). 
+ +Chaque contrôleur pour une ressource de charges de travail utilise le template de pod à l'intérieur de l'objet pour créer les Pods. Le template de pod fait partie de l'état désiré de la ressource de charges de travail que vous avez utilisé pour exécuter votre application. + +L'exemple ci-dessous est un manifest pour un Job simple avec un `template` qui démarre un conteneur. Le conteneur dans ce Pod affiche un message puis se met en pause. ```yaml -apiVersion: v1 -kind: Pod +apiVersion: batch/v1 +kind: Job metadata: - name: myapp-pod - labels: - app: myapp + name: hello spec: - containers: - - name: myapp-container - image: busybox - command: ['sh', '-c', 'echo Hello Kubernetes! && sleep 3600'] + template: + # Ceci est un template de pod + spec: + containers: + - name: hello + image: busybox + command: ['sh', '-c', 'echo "Hello, Kubernetes!" && sleep 3600'] + restartPolicy: OnFailure + # Le template de pod se termine ici ``` -Plutôt que de spécifier tous les états désirés courants de tous les réplicas, les templates de pod sont comme des emporte-pièces. Une fois qu'une pièce a été coupée, la pièce n'a plus de relation avec l'outil. Il n'y a pas de lien qui persiste dans le temps entre le template et le pod. Un changement à venir dans le template ou même le changement pour un nouveau template n'a pas d'effet direct sur les pods déjà créés. De manière similaire, les pods créés par un replication controller peuvent par la suite être modifiés directement. C'est en contraste délibéré avec les pods, qui spécifient l'état désiré courant de tous les conteneurs appartenant au pod. Cette approche simplifie radicalement la sémantique système et augmente la flexibilité de la primitive. + +Modifier le template de pod ou changer pour un nouvau template de pod n'a pas d'effet sur les pods déjà existants. Les Pods ne reçoivent pas une mise à jour +du template directement ; au lieu de cela, un nouveau Pod est créé pour correspondre au nouveau template de pod. + +Par exemple, un contrôleur de Deployment s'assure que les Pods en cours d'exécution correspondent au template de pod en cours. Si le template est mis à jour, +le contrôleur doit supprimer les pods existants et créer de nouveaux Pods avec le nouveau template. Chaque contrôleur de charges de travail implémente ses propres +règles pour gérer les changements du template de Pod. + +Sur les noeuds, le {{< glossary_tooltip term_id="kubelet" text="kubelet" >}} n'observe ou ne gère pas directement les détails concernant les templates de pods et leurs mises à jours ; ces détails sont abstraits. Cette abstraction et cette séparation des préoccupations simplifie la sémantique du système, et rend possible l'extension du comportement du cluster sans changer le code existant. 
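+
+À titre d'esquisse indicative (le Deployment `mon-app` et le conteneur `mon-conteneur` sont des noms hypothétiques), mettre à jour le template de pod d'un Deployment amène son contrôleur à créer de nouveaux Pods avec la nouvelle spécification, sans modifier sur place les Pods existants :
+
+```shell
+# Met à jour l'image déclarée dans le template de pod du Deployment (noms hypothétiques).
+kubectl set image deployment/mon-app mon-conteneur=nginx:1.16.1
+
+# Observe le remplacement progressif des anciens Pods par de nouveaux Pods.
+kubectl get pods --watch
+```
+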
## {{% heading "whatsnext" %}} * En savoir plus sur les [Pods](/docs/concepts/workloads/pods/pod/) +* [The Distributed System Toolkit: Patterns for Composite Containers](https://kubernetes.io/blog/2015/06/the-distributed-system-toolkit-patterns) explique les dispositions courantes pour des Pods avec plusieurs conteneurs * En savoir plus sur le comportement des Pods : * [Terminaison d'un Pod](/docs/concepts/workloads/pods/pod/#termination-of-pods) * [Cycle de vie d'un Pod](/docs/concepts/workloads/pods/pod-lifecycle/) diff --git a/content/fr/docs/concepts/workloads/pods/pod.md b/content/fr/docs/concepts/workloads/pods/pod.md index 4d685cca80763..b989a8fd8d2e8 100644 --- a/content/fr/docs/concepts/workloads/pods/pod.md +++ b/content/fr/docs/concepts/workloads/pods/pod.md @@ -164,7 +164,7 @@ Un exemple de déroulement : 1. Le Pod dans l'API server est mis à jour avec le temps au delà duquel le Pod est considéré "mort" ainsi que la période de grâce. 1. Le Pod est affiché comme "Terminating" dans les listes des commandes client 1. (en même temps que 3) Lorsque Kubelet voit qu'un Pod a été marqué "Terminating", le temps ayant été mis en 2, il commence le processus de suppression du pod. - 1. Si un des conteneurs du Pod a défini un [preStop hook](/docs/concepts/containers/container-lifecycle-hooks/#hook-details), il est exécuté à l'intérieur du conteneur. Si le `preStop` hook est toujours en cours d'exécution à la fin de la période de grâce, l'étape 2 est invoquée avec une courte (2 secondes) période de grâce supplémentaire. + 1. Si un des conteneurs du Pod a défini un [preStop hook](/fr/docs/concepts/containers/container-lifecycle-hooks/#hook-details), il est exécuté à l'intérieur du conteneur. Si le `preStop` hook est toujours en cours d'exécution à la fin de la période de grâce, l'étape 2 est invoquée avec une courte (2 secondes) période de grâce supplémentaire une seule fois. Vous devez modifier `terminationGracePeriodSeconds` si le hook `preStop` a besoin de plus de temps pour se terminer. 1. Le signal TERM est envoyé aux conteneurs. Notez que tous les conteneurs du Pod ne recevront pas le signal TERM en même temps et il peut être nécessaire de définir des `preStop` hook si l'ordre d'arrêt est important. 1. (en même temps que 3) Le Pod est supprimé des listes d'endpoints des services, et n'est plus considéré comme faisant partie des pods en cours d'exécution pour les contrôleurs de réplication. Les Pods s'arrêtant lentement ne peuvent pas continuer à servir du trafic, les load balancers (comme le service proxy) les supprimant de leurs rotations. 1. Lorsque la période de grâce expire, les processus s'exécutant toujours dans le Pod sont tués avec SIGKILL. @@ -186,7 +186,6 @@ Si le master exécute Kubernetes v1.1 ou supérieur, et les nœuds exécutent un Si l'utilisateur appelle `kubectl describe pod FooPodName`, l'utilisateur peut voir la raison pour laquelle le pod est en état "pending". La table d'événements dans la sortie de la commande "describe" indiquera : `Error validating pod "FooPodName"."FooPodNamespace" from api, ignoring: spec.containers[0].securityContext.privileged: forbidden '<*>(0xc2089d3248)true'` - Si le master exécute une version antérieure à v1.1, les pods privilégiés ne peuvent alors pas être créés. Si l'utilisateur tente de créer un pod ayant un conteneur privilégié, l'utilisateur obtiendra l'erreur suivante : `The Pod "FooPodName" is invalid. 
spec.containers[0].securityContext.privileged: forbidden '<*>(0xc20b222db0)true'` @@ -196,4 +195,4 @@ spec.containers[0].securityContext.privileged: forbidden '<*>(0xc20b222db0)true' Le Pod est une ressource au plus haut niveau dans l'API REST Kubernetes. Plus de détails sur l'objet de l'API peuvent être trouvés à : [Objet de l'API Pod](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#pod-v1-core). - +Lorsque vous créez un manifest pour un objet Pod, soyez certain que le nom spécifié est un [nom de sous-domaine DNS](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names) valide. diff --git a/content/fr/docs/reference/glossary/uid.md b/content/fr/docs/reference/glossary/uid.md new file mode 100644 index 0000000000000..80f73c9c5c57b --- /dev/null +++ b/content/fr/docs/reference/glossary/uid.md @@ -0,0 +1,17 @@ +--- +title: UID +id: uid +date: 2018-04-12 +full_link: /docs/concepts/overview/working-with-objects/names +short_description: > + Chaîne de caractères générée par les systèmes Kubernetes pour identifier de manière unique les objets. + +aka: +tags: +- fundamental +--- + Chaîne de caractères générée par les systèmes Kubernetes pour identifier de manière unique les objets. + + + +Chaque objet créé pendant toute la durée de vie d'un cluster Kubernetes possède un UID distinct. Il vise à distinguer les occurrences historiques d'entités similaires. \ No newline at end of file diff --git a/content/fr/docs/reference/kubectl/kubectl.md b/content/fr/docs/reference/kubectl/kubectl.md index 64a3c89ce1c5e..23d788c0c3bae 100755 --- a/content/fr/docs/reference/kubectl/kubectl.md +++ b/content/fr/docs/reference/kubectl/kubectl.md @@ -1,6 +1,6 @@ --- title: kubectl -content_template: templates/tool-reference +content_type: tool-reference description: Référence kubectl notitle: true --- diff --git a/content/fr/docs/setup/learning-environment/minikube.md b/content/fr/docs/setup/learning-environment/minikube.md index 0cf929e394397..77be61831fc14 100644 --- a/content/fr/docs/setup/learning-environment/minikube.md +++ b/content/fr/docs/setup/learning-environment/minikube.md @@ -242,9 +242,9 @@ Voir [DRIVERS](https://minikube.sigs.k8s.io/docs/drivers/) pour plus de détails * vmwarefusion * kvm2 ([installation du pilote](https://minikube.sigs.k8s.io/docs/drivers/#kvm2-driver)) * hyperkit ([installation du pilote](https://minikube.sigs.k8s.io/docs/drivers/#hyperkit-driver)) -* hyperv ([installation du pilote](https://github.com/kubernetes/minikube/blob/master/docs/drivers.md#hyperv-driver)) +* hyperv ([installation du pilote](https://minikube.sigs.k8s.io/docs/drivers/#hyperv-driver)) Notez que l'adresse IP ci-dessous est dynamique et peut changer. Il peut être récupéré avec `minikube ip`. -* vmware ([installation du pilote](https://github.com/kubernetes/minikube/blob/master/docs/drivers.md#vmware-unified-driver)) (VMware unified driver) +* vmware ([installation du pilote](https://minikube.sigs.k8s.io/docs/drivers/#vmware-unified-driver)) (VMware unified driver) * none (Exécute les composants Kubernetes sur l’hôte et non sur une machine virtuelle. Il n'est pas recommandé d'exécuter le pilote none sur des postes de travail personnels. 
L'utilisation de ce pilote nécessite Docker ([docker installer](https://docs.docker.com/install/linux/docker-ce/ubuntu/)) et un environnement Linux) #### Démarrage d'un cluster sur des exécutions de conteneur alternatives diff --git a/content/fr/docs/tasks/configure-pod-container/configure-service-account.md b/content/fr/docs/tasks/configure-pod-container/configure-service-account.md new file mode 100644 index 0000000000000..1147f2234e7ec --- /dev/null +++ b/content/fr/docs/tasks/configure-pod-container/configure-service-account.md @@ -0,0 +1,282 @@ +--- +title: Configurer les comptes de service pour les pods +content_type: task +weight: 90 +--- + + +Un ServiceAccount (compte de service) fournit une identité pour les processus qui s'exécutent dans un Pod. + +*Ceci est une introduction aux comptes de service pour les utilisateurs. Voir aussi [Guide de l'administrateur du cluster des comptes de service](/docs/reference/access-authn-authz/service-accounts-admin/).* + +{{< note >}} +Ce document décrit le comportement des comptes de service dans un cluster mis en place conformément aux recommandations du projet Kubernetes. L'administrateur de votre cluster a peut-être personnalisé le comportement dans votre cluster, dans ce cas cette documentation pourrait être non applicable. +{{< /note >}} + +Lorsque vous (un humain) accédez au cluster (par exemple, en utilisant `kubectl`), vous êtes authentifié par l'apiserver en tant que compte d'utilisateur particulier (actuellement, il s'agit généralement de l'utilisateur `admin`, à moins que votre administrateur de cluster n'ait personnalisé votre cluster). Les processus dans les conteneurs dans les Pods peuvent également contacter l'apiserver. Dans ce cas, ils sont authentifiés en tant que compte de service particulier (par exemple, `default`). + + + + +## {{% heading "prerequisites" %}} + + +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} + + + + + +## Utiliser le compte de service par défaut pour accéder à l'API server. + +Si vous obtenez le JSON ou le YAML brut d'un Pod que vous avez créé (par exemple, `kubectl get pods/<podname> -o yaml`), vous pouvez voir que le champ `spec.serviceAccountName` a été [automatiquement assigné](/docs/user-guide/working-with-resources/#resources-are-automatically-modified). + +Vous pouvez accéder à l'API depuis l'intérieur d'un Pod en utilisant les identifiants de compte de service montés automatiquement, comme décrit dans [Accès au cluster](/docs/user-guide/accessing-the-cluster/#accessing-the-api-from-a-pod). +Les permissions API du compte de service dépendent du [plugin d'autorisation et de la politique](/docs/reference/access-authn-authz/authorization/#authorization-modules) en usage. + +Dans la version 1.6+, vous pouvez choisir de ne pas utiliser le montage automatique des identifiants API pour un compte de service en définissant `automountServiceAccountToken: false` sur le compte de service : + +```yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: build-robot +automountServiceAccountToken: false +... +``` + +Dans la version 1.6+, vous pouvez également choisir de ne pas monter automatiquement les identifiants API pour un Pod particulier : + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: my-pod +spec: + serviceAccountName: build-robot + automountServiceAccountToken: false + ... +``` + +La spec du Pod prévaut sur le compte de service si les deux spécifient la valeur `automountServiceAccountToken`. + +## Utiliser plusieurs comptes de services.
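À titre d'illustration seulement (esquisse hypothétique : le nom de Pod `demo-pod` et l'image `nginx` ci-dessous sont des choix arbitraires, pas des valeurs imposées par cette page), un compte de service supplémentaire et un Pod qui l'utilise via le champ `spec.serviceAccountName` pourraient être déclarés ainsi :

```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: build-robot        # nom repris des exemples de cette page
---
apiVersion: v1
kind: Pod
metadata:
  name: demo-pod           # nom hypothétique
spec:
  serviceAccountName: build-robot   # doit exister dans le même Namespace que le Pod
  containers:
  - name: app
    image: nginx           # image choisie uniquement pour l'exemple
```

Le compte de service référencé doit exister au moment de la création du Pod, faute de quoi la création est rejetée, et il ne peut plus être modifié une fois le Pod créé.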
+ +Chaque Namespace possède une ressource ServiceAccount par défaut appelée `default`. +Vous pouvez lister cette ressource et toutes les autres ressources de ServiceAccount dans le Namespace avec cette commande : + +```shell +kubectl get serviceAccounts +``` +La sortie est comme la suivante : + +``` +NAME SECRETS AGE +default 1 1d +``` + +Vous pouvez créer des objets ServiceAccount supplémentaires comme ceci : + +```shell +kubectl apply -f - < +Annotations: kubernetes.io/service-account.name=build-robot + kubernetes.io/service-account.uid=da68f9c6-9d26-11e7-b84e-002dc52800da + +Type: kubernetes.io/service-account-token + +Data +==== +ca.crt: 1338 bytes +namespace: 7 bytes +token: ... +``` + +{{< note >}} +Le contenu de `token` est éludé ici. +{{< /note >}} + +## Ajouter ImagePullSecrets à un compte de service + +Tout d'abord, créez un imagePullSecret, comme décrit [ici](/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod). +Puis, vérifiez qu'il a été créé. Par exemple : + +```shell +kubectl get secrets myregistrykey +``` + +La sortie est comme la suivante : + +``` +NAME TYPE DATA AGE +myregistrykey   kubernetes.io/.dockerconfigjson   1       1d +``` + +Ensuite, modifiez le compte de service par défaut du Namespace pour utiliser ce Secret comme un `imagePullSecret`. + +```shell +kubectl patch serviceaccount default -p '{"imagePullSecrets": [{"name": "myregistrykey"}]}' +``` + +La version interactive nécessite un traitement manuel : + +```shell +kubectl get serviceaccounts default -o yaml > ./sa.yaml +``` + +La sortie du fichier `sa.yaml` est similaire à celle-ci : + +```shell +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: 2015-08-07T22:02:39Z + name: default + namespace: default + resourceVersion: "243024" + selfLink: /api/v1/namespaces/default/serviceaccounts/default + uid: 052fb0f4-3d50-11e5-b066-42010af0d7b6 +secrets: +- name: default-token-uudge +``` + +En utilisant l'éditeur de votre choix (par exemple `vi`), ouvrez le fichier `sa.yaml`, supprimez la ligne avec la clé `resourceVersion`, ajoutez les lignes avec `imagePullSecrets:` et sauvegardez. + +La sortie du fichier `sa.yaml` est similaire à celle-ci : + +```shell +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: 2015-08-07T22:02:39Z + name: default + namespace: default + selfLink: /api/v1/namespaces/default/serviceaccounts/default + uid: 052fb0f4-3d50-11e5-b066-42010af0d7b6 +secrets: +- name: default-token-uudge +imagePullSecrets: +- name: myregistrykey +``` + +Enfin, remplacez le compte de service par le nouveau fichier `sa.yaml` mis à jour. + +```shell +kubectl replace serviceaccount default -f ./sa.yaml +``` + +Maintenant, tous les nouveaux Pods créés dans le Namespace courant auront ceci ajouté à leurs spécifications : + +```yaml +spec: + imagePullSecrets: + - name: myregistrykey +``` + +## Projection du volume des tokens de compte de service + +{{< feature-state for_k8s_version="v1.12" state="beta" >}} + +{{< note >}} +Ce ServiceAccountTokenVolumeProjection est __beta__ en 1.12 et +activé en passant tous les paramètres suivants au serveur API : + +* `--service-account-issuer` +* `--service-account-signing-key-file` +* `--service-account-api-audiences` + +{{< /note >}} + +Kubelet peut également projeter un token de compte de service dans un Pod. Vous pouvez spécifier les propriétés souhaitées du token, telles que l'audience et la durée de validité. +Ces propriétés ne sont pas configurables sur le compte de service par défaut. 
Le token de compte de service devient également invalide par l'API lorsque le Pod ou le ServiceAccount est supprimé. + +Ce comportement est configuré sur un PodSpec utilisant un type de ProjectedVolume appelé +[ServiceAccountToken](/docs/concepts/storage/volumes/#projected). Pour fournir un +Pod avec un token avec une audience de "vault" et une durée de validité de deux heures, vous devriez configurer ce qui suit dans votre PodSpec : + +{{< codenew file="pods/pod-projected-svc-token.yaml" >}} + +Créez le Pod : + +```shell +kubectl create -f https://k8s.io/examples/pods/pod-projected-svc-token.yaml +``` + +Kubelet demandera et stockera le token au nom du Pod, rendra le token disponible pour le Pod à un chemin d'accès configurable, et rafraîchira le token à l'approche de son expiration. Kubelet fait tourner le token de manière proactive s'il est plus vieux que 80% de son TTL total, ou si le token est plus vieux que 24 heures. + +L'application est responsable du rechargement du token lorsque celui-ci est renouvelé. Un rechargement périodique (par ex. toutes les 5 minutes) est suffisant pour la plupart des cas d'utilisation. diff --git a/content/fr/docs/tasks/tools/install-kubectl.md b/content/fr/docs/tasks/tools/install-kubectl.md index 8d60357aea73b..2a88388374b36 100644 --- a/content/fr/docs/tasks/tools/install-kubectl.md +++ b/content/fr/docs/tasks/tools/install-kubectl.md @@ -121,7 +121,7 @@ kubectl version --client curl -LO https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/darwin/amd64/kubectl ``` -2. Rendrez le binaire kubectl exécutable. +2. Rendez le binaire kubectl exécutable. ``` chmod +x ./kubectl diff --git a/content/fr/examples/pods/pod-projected-svc-token.yaml b/content/fr/examples/pods/pod-projected-svc-token.yaml new file mode 100644 index 0000000000000..985073c8d39dc --- /dev/null +++ b/content/fr/examples/pods/pod-projected-svc-token.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Pod +metadata: + name: nginx +spec: + containers: + - image: nginx + name: nginx + volumeMounts: + - mountPath: /var/run/secrets/tokens + name: vault-token + serviceAccountName: build-robot + volumes: + - name: vault-token + projected: + sources: + - serviceAccountToken: + path: vault-token + expirationSeconds: 7200 + audience: vault diff --git a/content/id/_index.html b/content/id/_index.html index 47d7f71ced6e7..e95d661b76d37 100644 --- a/content/id/_index.html +++ b/content/id/_index.html @@ -4,7 +4,6 @@ cid: home --- -{{< deprecationwarning >}} {{< blocks/section id="oceanNodes" >}} {{% blocks/feature image="flower" %}} @@ -60,4 +59,4 @@

Tantangan yang Dihadapi untuk Melakukan Migrasi 150+ Microservices ke Kubern {{< blocks/kubernetes-features >}} -{{< blocks/case-studies >}} +{{< blocks/case-studies >}} \ No newline at end of file diff --git a/content/id/docs/concepts/_index.md b/content/id/docs/concepts/_index.md index ebc205d84ac1f..33f4ada445742 100644 --- a/content/id/docs/concepts/_index.md +++ b/content/id/docs/concepts/_index.md @@ -49,19 +49,19 @@ untuk penjelasan yang lebih mendetail. Objek mendasar Kubernetes termasuk: -* [Pod](/docs/concepts/workloads/pods/pod-overview/) -* [Service](/docs/concepts/services-networking/service/) -* [Volume](/docs/concepts/storage/volumes/) -* [Namespace](/docs/concepts/overview/working-with-objects/namespaces/) +* [Pod](/id/docs/concepts/workloads/pods/pod-overview/) +* [Service](/id/docs/concepts/services-networking/service/) +* [Volume](/id/docs/concepts/storage/volumes/) +* [Namespace](/id/docs/concepts/overview/working-with-objects/namespaces/) Sebagai tambahan, Kubernetes memiliki beberapa abstraksi yang lebih tinggi yang disebut kontroler. Kontroler merupakan objek mendasar dengan fungsi tambahan, contoh dari kontroler ini adalah: -* [ReplicaSet](/docs/concepts/workloads/controllers/replicaset/) -* [Deployment](/docs/concepts/workloads/controllers/deployment/) -* [StatefulSet](/docs/concepts/workloads/controllers/statefulset/) -* [DaemonSet](/docs/concepts/workloads/controllers/daemonset/) -* [Job](/docs/concepts/workloads/controllers/jobs-run-to-completion/) +* [ReplicaSet](/id/docs/concepts/workloads/controllers/replicaset/) +* [Deployment](/id/docs/concepts/workloads/controllers/deployment/) +* [StatefulSet](/id/docs/concepts/workloads/controllers/statefulset/) +* [DaemonSet](/id/docs/concepts/workloads/controllers/daemonset/) +* [Job](/id/docs/concepts/workloads/controllers/jobs-run-to-completion/) ## *Control Plane* Kubernetes @@ -95,7 +95,7 @@ dengan *node* secara langsung. #### Metadata objek -* [Anotasi](/docs/concepts/overview/working-with-objects/annotations/) +* [Anotasi](/id/docs/concepts/overview/working-with-objects/annotations/) diff --git a/content/id/docs/concepts/architecture/master-node-communication.md b/content/id/docs/concepts/architecture/control-plane-node-communication.md similarity index 60% rename from content/id/docs/concepts/architecture/master-node-communication.md rename to content/id/docs/concepts/architecture/control-plane-node-communication.md index 80644983a42fc..b5381796702be 100644 --- a/content/id/docs/concepts/architecture/master-node-communication.md +++ b/content/id/docs/concepts/architecture/control-plane-node-communication.md @@ -1,12 +1,12 @@ --- -title: Komunikasi Master-Node +title: Komunikasi antara Control Plane dan Node content_type: concept weight: 20 --- -Dokumen ini menjelaskan tentang jalur-jalur komunikasi di antara klaster Kubernetes dan master yang sebenarnya hanya berhubungan dengan apiserver saja. +Dokumen ini menjelaskan tentang jalur-jalur komunikasi di antara klaster Kubernetes dan control plane yang sebenarnya hanya berhubungan dengan apiserver saja. Kenapa ada dokumen ini? Supaya kamu, para pengguna Kubernetes, punya gambaran bagaimana mengatur instalasi untuk memperketat konfigurasi jaringan di dalam klaster. Hal ini cukup penting, karena klaster bisa saja berjalan pada jaringan tak terpercaya (untrusted network), ataupun melalui alamat-alamat IP publik pada penyedia cloud. 
@@ -15,31 +15,24 @@ Hal ini cukup penting, karena klaster bisa saja berjalan pada jaringan tak terpe -## Klaster menuju Master +## Node Menuju Control Plane -Semua jalur komunikasi dari klaster menuju master diterminasi pada apiserver. -Tidak ada komponen apapun di dalam master, selain apiserver, yang terekspos ke luar untuk diakses dari servis remote. -Untuk instalasi klaster pada umumnya, apiserver diatur untuk listen ke koneksi remote melalui port HTTPS (443) yang aman, dengan satu atau beberapa metode [autentikasi](/docs/reference/access-authn-authz/authentication/) client yang telah terpasang. +Kubernetes memiliki sebuah pola API "hub-and-spoke". Semua penggunaan API dari Node (atau Pod dimana Pod-Pod tersebut dijalankan) akan diterminasi pada apiserver (tidak ada satu komponen _control plane_ apa pun yang didesain untuk diekspos pada servis _remote_). +Apiserver dikonfigurasi untuk mendengarkan koneksi aman _remote_ yang pada umumnya terdapat pada porta HTTPS (443) dengan satu atau lebih bentuk [autentikasi](/docs/reference/access-authn-authz/authentication/) klien yang dipasang. Sebaiknya, satu atau beberapa metode [otorisasi](/docs/reference/access-authn-authz/authorization/) juga dipasang, terutama jika kamu memperbolehkan [permintaan anonim (anonymous request)](/docs/reference/access-authn-authz/authentication/#anonymous-requests) ataupun [service account token](/docs/reference/access-authn-authz/authentication/#service-account-tokens). -Node-node seharusnya disediakan dengan public root certificate untuk klaster, sehingga node-node tersebut bisa terhubung secara aman ke apiserver dengan kredensial client yang valid. -Contohnya, untuk instalasi GKE dengan standar konfigurasi, kredensial client harus diberikan kepada kubelet dalam bentuk client certificate. -Lihat [menghidupkan TLS kubelet](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/) untuk menyediakan client certificate untuk kubelet secara otomatis. +Jika diperlukan, Pod-Pod dapat terhubung pada apiserver secara aman dengan menggunakan ServiceAccount. +Dengan ini, Kubernetes memasukkan _public root certificate_ dan _bearer token_ yang valid ke dalam Pod, secara otomatis saat Pod mulai dijalankan. +Kubernetes Service (di dalam semua Namespace) diatur dengan sebuah alamat IP virtual. Semua yang mengakses alamat IP ini akan dialihkan (melalui kube-proxy) menuju _endpoint_ HTTPS dari apiserver. -Jika diperlukan, pod-pod dapat terhubung pada apiserver secara aman dengan menggunakan service account. -Dengan ini, Kubernetes memasukkan public root certificate dan bearer token yang valid ke dalam pod, secara otomatis saat pod mulai dijalankan. -Kubernetes service (di dalam semua namespace) diatur dengan sebuah alamat IP virtual. -Semua yang mengakses alamat IP ini akan dialihkan (melalui kube-proxy) menuju endpoint HTTPS dari apiserver. +Komponen-komponen juga melakukan koneksi pada apiserver klaster melalui porta yang aman. -Komponen-komponen master juga berkomunikasi dengan apiserver melalui port yang aman di dalam klaster. -Akibatnya, untuk konfigurasi yang umum dan standar, semua koneksi dari klaster (node-node dan pod-pod yang berjalan di atas node tersebut) menuju master sudah terhubung dengan aman. -Dan juga, klaster dan master bisa terhubung melalui jaringan publik dan/atau yang tak terpercaya (untrusted). +Akibatnya, untuk konfigurasi yang umum dan standar, semua koneksi dari klaster (node-node dan pod-pod yang berjalan di atas node tersebut) menuju control plane sudah terhubung dengan aman.
+Dan juga, klaster dan control plane bisa terhubung melalui jaringan publik dan/atau yang tak terpercaya (untrusted). -## Master menuju Klaster +## Control Plane menuju Node -Ada dua jalur komunikasi utama dari master (apiserver) menuju klaster. -Pertama, dari apiserver ke process kubelet yang berjalan pada setiap node di dalam klaster. -Kedua, dari apiserver ke setiap node, pod, ataupun service melalui fungsi proxy pada apiserver. +Ada dua jalur komunikasi utama dari _control plane_ (apiserver) menuju klaster. Pertama, dari apiserver ke proses kubelet yang berjalan pada setiap Node di dalam klaster. Kedua, dari apiserver ke setiap Node, Pod, ataupun Service melalui fungsi proksi pada apiserver. ### Apiserver menuju kubelet @@ -67,11 +60,9 @@ Koneksi ini **tidak aman** untuk dilalui pada jaringan publik dan/atau tak terpe ### Tunnel SSH -Kubernetes menyediakan tunnel SSH untuk mengamankan jalur komunikasi Master -> Klaster. +Kubernetes menyediakan tunnel SSH untuk mengamankan jalur komunikasi control plane -> Klaster. Dengan ini, apiserver menginisiasi sebuah tunnel SSH untuk setiap node di dalam klaster (terhubung ke server SSH di port 22) dan membuat semua trafik menuju kubelet, node, pod, atau service dilewatkan melalui tunnel tesebut. Tunnel ini memastikan trafik tidak terekspos keluar jaringan dimana node-node berada. Tunnel SSH saat ini sudah usang (deprecated), jadi sebaiknya jangan digunakan, kecuali kamu tahu pasti apa yang kamu lakukan. Sebuah desain baru untuk mengganti kanal komunikasi ini sedang disiapkan. - - diff --git a/content/id/docs/concepts/architecture/controller.md b/content/id/docs/concepts/architecture/controller.md index a0ff6b9256c85..6cf90cf9e6189 100644 --- a/content/id/docs/concepts/architecture/controller.md +++ b/content/id/docs/concepts/architecture/controller.md @@ -33,7 +33,7 @@ klaster saat ini mendekati keadaan yang diinginkan. Sebuah _controller_ melacak sekurang-kurangnya satu jenis sumber daya dari Kubernetes. -[objek-objek](/docs/concepts/overview/working-with-objects/kubernetes-objects/) ini +[objek-objek](/id/docs/concepts/overview/working-with-objects/kubernetes-objects/) ini memiliki *spec field* yang merepresentasikan keadaan yang diinginkan. Satu atau lebih _controller_ untuk *resource* tersebut bertanggung jawab untuk membuat keadaan sekarang mendekati keadaan yang diinginkan. @@ -174,6 +174,6 @@ khusus itu lakukan. * Silahkan baca tentang [_control plane_ Kubernetes](/docs/concepts/#kubernetes-control-plane) * Temukan beberapa dasar tentang [objek-objek Kubernetes](/docs/concepts/#kubernetes-objects) -* Pelajari lebih lanjut tentang [Kubernetes API](/docs/concepts/overview/kubernetes-api/) -* Apabila kamu ingin membuat _controller_ sendiri, silakan lihat [pola perluasan](/docs/concepts/extend-kubernetes/extend-cluster/#extension-patterns) dalam memperluas Kubernetes. +* Pelajari lebih lanjut tentang [Kubernetes API](/id/docs/concepts/overview/kubernetes-api/) +* Apabila kamu ingin membuat _controller_ sendiri, silakan lihat [pola perluasan](/id/docs/concepts/extend-kubernetes/extend-cluster/#extension-patterns) dalam memperluas Kubernetes. diff --git a/content/id/docs/concepts/architecture/nodes.md b/content/id/docs/concepts/architecture/nodes.md index 8913c9df65e45..ab13cf122a5b6 100644 --- a/content/id/docs/concepts/architecture/nodes.md +++ b/content/id/docs/concepts/architecture/nodes.md @@ -8,8 +8,8 @@ weight: 10 Node merupakan sebuah mesin worker di dalam Kubernetes, yang sebelumnya dinamakan `minion`.
Sebuah node bisa berupa VM ataupun mesin fisik, tergantung dari klaster-nya. -Masing-masing node berisi beberapa servis yang berguna untuk menjalankan banyak [pod](/docs/concepts/workloads/pods/pod/) dan diatur oleh komponen-komponen yang dimiliki oleh master. -Servis-servis di dalam sebuah node terdiri dari [runtime kontainer](/docs/concepts/overview/components/#node-components), kubelet dan kube-proxy. +Masing-masing node berisi beberapa servis yang berguna untuk menjalankan banyak [pod](/id/docs/concepts/workloads/pods/pod/) dan diatur oleh komponen-komponen yang dimiliki oleh master. +Servis-servis di dalam sebuah node terdiri dari [runtime kontainer](/id/docs/concepts/overview/components/#node-components), kubelet dan kube-proxy. Untuk lebih detail, lihat dokumentasi desain arsitektur pada [Node Kubernetes](https://git.k8s.io/community/contributors/design-proposals/architecture/architecture.md#the-kubernetes-node). @@ -67,12 +67,12 @@ Pada kasus tertentu ketika node terputus jaringannya, apiserver tidak dapat berk Keputusan untuk menghilangkan pod tidak dapat diberitahukan pada kubelet, sampai komunikasi dengan apiserver terhubung kembali. Sementara itu, pod-pod akan terus berjalan pada node yang sudah terputus, walaupun mendapati schedule untuk dihilangkan. -Pada versi Kubernetes sebelum 1.5, kontroler node dapat menghilangkan dengan paksa ([force delete](/docs/concepts/workloads/pods/pod/#force-deletion-of-pods)) pod-pod yang terputus dari apiserver. +Pada versi Kubernetes sebelum 1.5, kontroler node dapat menghilangkan dengan paksa ([force delete](/id/docs/concepts/workloads/pods/pod/#force-deletion-of-pods)) pod-pod yang terputus dari apiserver. Namun, pada versi 1.5 dan seterusnya, kontroler node tidak menghilangkan pod dengan paksa, sampai ada konfirmasi bahwa pod tersebut sudah berhenti jalan di dalam klaster. Pada kasus dimana Kubernetes tidak bisa menarik kesimpulan bahwa ada node yang telah meninggalkan klaster, admin klaster mungkin perlu untuk menghilangkan node secara manual. Menghilangkan obyek node dari Kubernetes akan membuat semua pod yang berjalan pada node tersebut dihilangkan oleh apiserver, dan membebaskan nama-namanya agar bisa digunakan kembali. -Pada versi 1.12, fitur `TaintNodesByCondition` telah dipromosikan ke beta, sehingga kontroler lifecycle node secara otomatis membuat [taints](/docs/concepts/configuration/taint-and-toleration/) yang merepresentasikan conditions. +Pada versi 1.12, fitur `TaintNodesByCondition` telah dipromosikan ke beta, sehingga kontroler lifecycle node secara otomatis membuat [taints](/id/docs/concepts/configuration/taint-and-toleration/) yang merepresentasikan conditions. Akibatnya, scheduler menghiraukan conditions ketika mempertimbangkan sebuah Node; scheduler akan melihat pada taints sebuah Node dan tolerations sebuah Pod. Sekarang, para pengguna dapat memilih antara model scheduling yang lama dan model scheduling yang lebih fleksibel. @@ -93,7 +93,7 @@ Informasi ini dikumpulkan oleh Kubelet di dalam node. ## Manajemen -Tidak seperti [pod](/docs/concepts/workloads/pods/pod/) dan [service](/docs/concepts/services-networking/service/), sebuah node tidaklah dibuat dan dikonfigurasi oleh Kubernetes: tapi node dibuat di luar klaster oleh penyedia layanan cloud, seperti Google Compute Engine, atau pool mesin fisik ataupun virtual (VM) yang kamu punya. 
+Tidak seperti [pod](/id/docs/concepts/workloads/pods/pod/) dan [service](/id/docs/concepts/services-networking/service/), sebuah node tidaklah dibuat dan dikonfigurasi oleh Kubernetes: tapi node dibuat di luar klaster oleh penyedia layanan cloud, seperti Google Compute Engine, atau pool mesin fisik ataupun virtual (VM) yang kamu punya. Jadi ketika Kubernetes membuat sebuah node, obyek yang merepresentasikan node tersebut akan dibuat. Setelah pembuatan, Kubernetes memeriksa apakah node tersebut valid atau tidak. Contohnya, jika kamu mencoba untuk membuat node dari konten berikut: @@ -164,7 +164,7 @@ Pada kasus ini, kontroler node berasumsi ada masalah pada jaringan master, dan m Mulai dari Kubernetes 1.6, kontroler node juga bertanggung jawab untuk melakukan eviction pada pod-pod yang berjalan di atas node dengan taints `NoExecute`, ketika pod-pod tersebut sudah tidak lagi tolerate terhadap taints. Sebagai tambahan, hal ini di-nonaktifkan secara default pada fitur alpha, kontroler node bertanggung jawab untuk menambahkan taints yang berhubungan dengan masalah pada node, seperti terputus atau `NotReady`. -Lihat [dokumentasi ini](/docs/concepts/configuration/taint-and-toleration/) untuk bahasan detail tentang taints `NoExecute` dan fitur alpha. +Lihat [dokumentasi ini](/id/docs/concepts/configuration/taint-and-toleration/) untuk bahasan detail tentang taints `NoExecute` dan fitur alpha. Mulai dari versi 1.8, kontroler node bisa diatur untuk bertanggung jawab pada pembuatan taints yang merepresentasikan node condition. Ini merupakan fitur alpha untuk versi 1.8. @@ -218,7 +218,7 @@ Jika kamu melakukan [administrasi node manual](#manual-node-administration), mak Scheduler Kubernetes memastikan kalau ada resource yang cukup untuk menjalankan semua pod di dalam sebuah node. Kubernetes memeriksa jumlah semua request untuk kontainer pada sebuah node tidak lebih besar daripada kapasitas node. -Hal ini termasuk semua kontainer yang dijalankan oleh kubelet. Namun, ini tidak termasuk kontainer-kontainer yang dijalankan secara langsung oleh [runtime kontainer](/docs/concepts/overview/components/#node-components) ataupun process yang ada di luar kontainer. +Hal ini termasuk semua kontainer yang dijalankan oleh kubelet. Namun, ini tidak termasuk kontainer-kontainer yang dijalankan secara langsung oleh [runtime kontainer](/id/docs/concepts/overview/components/#node-components) ataupun process yang ada di luar kontainer. Kalau kamu ingin secara eksplisit menyimpan resource cadangan untuk menjalankan process-process selain Pod, ikut tutorial [menyimpan resource cadangan untuk system daemon](/docs/tasks/administer-cluster/reserve-compute-resources/#system-reserved). diff --git a/content/id/docs/concepts/cluster-administration/addons.md b/content/id/docs/concepts/cluster-administration/addons.md index b404465d8f25e..ca50347492117 100644 --- a/content/id/docs/concepts/cluster-administration/addons.md +++ b/content/id/docs/concepts/cluster-administration/addons.md @@ -32,7 +32,7 @@ Laman ini akan menjabarkan beberapa *add-ons* yang tersedia serta tautan instruk * [Multus](https://github.com/Intel-Corp/multus-cni) merupakan sebuah multi *plugin* agar Kubernetes mendukung multipel jaringan secara bersamaan sehingga dapat menggunakan semua *plugin* CNI (contoh: Calico, Cilium, Contiv, Flannel), ditambah pula dengan SRIOV, DPDK, OVS-DPDK dan VPP pada *workload* Kubernetes. 
* [NSX-T](https://docs.vmware.com/en/VMware-NSX-T/2.0/nsxt_20_ncp_kubernetes.pdf) Container Plug-in (NCP) menyediakan integrasi antara VMware NSX-T dan orkestrator kontainer seperti Kubernetes, termasuk juga integrasi antara NSX-T dan platform CaaS/PaaS berbasis kontainer seperti *Pivotal Container Service* (PKS) dan OpenShift. * [Nuage](https://github.com/nuagenetworks/nuage-kubernetes/blob/v5.1.1-1/docs/kubernetes-1-installation.rst) merupakan platform SDN yang menyediakan *policy-based* jaringan antara Kubernetes Pods dan non-Kubernetes *environment* dengan *monitoring* visibilitas dan keamanan. -* [Romana](http://romana.io) merupakan solusi jaringan *Layer* 3 untuk jaringan pod yang juga mendukung [*NetworkPolicy* API](/docs/concepts/services-networking/network-policies/). Instalasi Kubeadm *add-on* ini tersedia [di sini](https://github.com/romana/romana/tree/master/containerize). +* [Romana](http://romana.io) merupakan solusi jaringan *Layer* 3 untuk jaringan pod yang juga mendukung [*NetworkPolicy* API](/id/docs/concepts/services-networking/network-policies/). Instalasi Kubeadm *add-on* ini tersedia [di sini](https://github.com/romana/romana/tree/master/containerize). * [Weave Net](https://www.weave.works/docs/net/latest/kube-addon/) menyediakan jaringan serta *policy* jaringan, yang akan membawa kedua sisi dari partisi jaringan, serta tidak membutuhkan basis data eksternal. ## _Service Discovery_ diff --git a/content/id/docs/concepts/cluster-administration/certificates.md b/content/id/docs/concepts/cluster-administration/certificates.md index a605a78547931..ee1f91cbeb3fe 100644 --- a/content/id/docs/concepts/cluster-administration/certificates.md +++ b/content/id/docs/concepts/cluster-administration/certificates.md @@ -245,6 +245,6 @@ done. Kamu dapat menggunakan API `Certificate.k8s.io` untuk menyediakan sertifikat x509 yang digunakan untuk autentikasi seperti yang didokumentasikan -[di sini](/docs/tasks/tls/managing-tls-in-a-cluster). +[di sini](/id/docs/tasks/tls/managing-tls-in-a-cluster). diff --git a/content/id/docs/concepts/cluster-administration/cloud-providers.md b/content/id/docs/concepts/cluster-administration/cloud-providers.md index 45820e36609ba..9a32af1eb89f8 100644 --- a/content/id/docs/concepts/cluster-administration/cloud-providers.md +++ b/content/id/docs/concepts/cluster-administration/cloud-providers.md @@ -56,7 +56,7 @@ Bagian ini akan menjelaskan semua konfigurasi yang dapat diatur saat menjalankan Penyedia layanan cloud AWS menggunakan nama DNS privat dari *instance* AWS sebagai nama dari objek Kubernetes Node. ### *Load Balancer* -Kamu dapat mengatur [load balancers eksternal](/docs/tasks/access-application-cluster/create-external-load-balancer/) sehingga dapat menggunakan fitur khusus AWS dengan mengatur anotasi seperti di bawah ini. +Kamu dapat mengatur [load balancers eksternal](/id/docs/tasks/access-application-cluster/create-external-load-balancer/) sehingga dapat menggunakan fitur khusus AWS dengan mengatur anotasi seperti di bawah ini. 
```yaml apiVersion: v1 diff --git a/content/id/docs/concepts/cluster-administration/cluster-administration-overview.md b/content/id/docs/concepts/cluster-administration/cluster-administration-overview.md index b485b5e142d89..b2bd349908b64 100644 --- a/content/id/docs/concepts/cluster-administration/cluster-administration-overview.md +++ b/content/id/docs/concepts/cluster-administration/cluster-administration-overview.md @@ -20,10 +20,10 @@ Lihat panduan di [Persiapan](/docs/setup) untuk mempelajari beberapa contoh tent Sebelum memilih panduan, berikut adalah beberapa hal yang perlu dipertimbangkan: - Apakah kamu hanya ingin mencoba Kubernetes pada komputermu, atau kamu ingin membuat sebuah klaster dengan *high-availability*, *multi-node*? Pilihlah distro yang paling sesuai dengan kebutuhanmu. - - **Jika kamu merencanakan klaster dengan _high-availability_**, pelajari bagaimana cara mengonfigurasi [klaster pada *multiple zone*](/docs/concepts/cluster-administration/federation/). + - **Jika kamu merencanakan klaster dengan _high-availability_**, pelajari bagaimana cara mengonfigurasi [klaster pada *multiple zone*](/id/docs/concepts/cluster-administration/federation/). - Apakah kamu akan menggunakan **Kubernetes klaster di _hosting_**, seperti [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/), atau **_hosting_ sendiri klastermu**? - Apakah klastermu berada pada **_on-premises_**, atau **di cloud (IaaS)**? Kubernetes belum mendukung secara langsung klaster hibrid. Sebagai gantinya, kamu dapat membuat beberapa klaster. - - **Jika kamu ingin mengonfigurasi Kubernetes _on-premises_**, pertimbangkan [model jaringan](/docs/concepts/cluster-administration/networking/) yang paling sesuai. + - **Jika kamu ingin mengonfigurasi Kubernetes _on-premises_**, pertimbangkan [model jaringan](/id/docs/concepts/cluster-administration/networking/) yang paling sesuai. - Apakah kamu ingin menjalankan Kubernetes pada **"bare metal" _hardware_** atau pada **_virtual machines_ (VM)**? - Apakah kamu **hanya ingin mencoba klaster Kubernetes**, atau kamu ingin ikut aktif melakukan **pengembangan kode dari proyek Kubernetes**? Jika jawabannya yang terakhir, pilihlah distro yang aktif dikembangkan. Beberapa distro hanya menggunakan rilis *binary*, namun menawarkan lebih banyak variasi pilihan. - Pastikan kamu paham dan terbiasa dengan beberapa [komponen](/docs/admin/cluster-components/) yang dibutuhkan untuk menjalankan sebuah klaster. @@ -36,13 +36,13 @@ Catatan: Tidak semua distro aktif dikelola. Pilihlah distro yang telah diuji den * Pelajari bagaimana cara [mengatur *node*](/docs/concepts/nodes/node/). -* Pelajari bagaimana cara membuat dan mengatur kuota resource [(*resource quota*)](/docs/concepts/policy/resource-quotas/) untuk *shared* klaster. +* Pelajari bagaimana cara membuat dan mengatur kuota resource [(*resource quota*)](/id/docs/concepts/policy/resource-quotas/) untuk *shared* klaster. ## Mengamankan Klaster -* [Sertifikat (*certificate*)](/docs/concepts/cluster-administration/certificates/) akan menjabarkan langkah-langkah untuk membuat sertifikat menggunakan beberapa *tool chains*. +* [Sertifikat (*certificate*)](/id/docs/concepts/cluster-administration/certificates/) akan menjabarkan langkah-langkah untuk membuat sertifikat menggunakan beberapa *tool chains*. -* [Kubernetes *Container Environment*](/docs/concepts/containers/container-environment-variables/) akan menjelaskan *environment* untuk kontainer yang dikelola oleh Kubelet pada Kubernetes *node*. 
+* [Kubernetes *Container Environment*](/id/docs/concepts/containers/container-environment-variables/) akan menjelaskan *environment* untuk kontainer yang dikelola oleh Kubelet pada Kubernetes *node*. * [Mengontrol Akses ke Kubernetes API](/docs/reference/access-authn-authz/controlling-access/) akan menjabarkan bagaimana cara mengatur izin (*permission*) untuk akun pengguna dan *service account*. @@ -63,9 +63,9 @@ Catatan: Tidak semua distro aktif dikelola. Pilihlah distro yang telah diuji den ## Layanan Tambahan Klaster -* [Integrasi DNS](/docs/concepts/services-networking/dns-pod-service/) akan menjelaskan bagaimana cara *resolve* suatu nama DNS langsung pada *service* Kubernetes. +* [Integrasi DNS](/id/docs/concepts/services-networking/dns-pod-service/) akan menjelaskan bagaimana cara *resolve* suatu nama DNS langsung pada *service* Kubernetes. -* [*Logging* dan *Monitoring* Aktivitas Klaster](/docs/concepts/cluster-administration/logging/) akan menjelaskan bagaimana cara *logging* bekerja di Kubernetes serta bagaimana cara mengimplementasikannya. +* [*Logging* dan *Monitoring* Aktivitas Klaster](/id/docs/concepts/cluster-administration/logging/) akan menjelaskan bagaimana cara *logging* bekerja di Kubernetes serta bagaimana cara mengimplementasikannya. diff --git a/content/id/docs/concepts/cluster-administration/federation.md b/content/id/docs/concepts/cluster-administration/federation.md index 7690a75a82abb..d59da126ad253 100644 --- a/content/id/docs/concepts/cluster-administration/federation.md +++ b/content/id/docs/concepts/cluster-administration/federation.md @@ -106,7 +106,7 @@ Berikut merupakan panduan yang akan menjelaskan masing-masing _resource_ secara * [Namespaces](/docs/tasks/administer-federation/namespaces/) * [ReplicaSets](/docs/tasks/administer-federation/replicaset/) * [Secrets](/docs/tasks/administer-federation/secret/) -* [Services](/docs/concepts/cluster-administration/federation-service-discovery/) +* [Services](/id/docs/concepts/cluster-administration/federation-service-discovery/) [Referensi Dokumentasi API](/docs/reference/federation/) memberikan semua daftar diff --git a/content/id/docs/concepts/cluster-administration/logging.md b/content/id/docs/concepts/cluster-administration/logging.md index 53203777f232d..75f3b971896b1 100644 --- a/content/id/docs/concepts/cluster-administration/logging.md +++ b/content/id/docs/concepts/cluster-administration/logging.md @@ -173,7 +173,7 @@ Menggunakan agen _logging_ di dalam kontainer _sidecar_ dapat berakibat pengguna {{< /note >}} Sebagai contoh, kamu dapat menggunakan [Stackdriver](/docs/tasks/debug-application-cluster/logging-stackdriver/), -yang menggunakan fluentd sebagai agen _logging_. Berikut ini dua _file_ konfigurasi yang dapat kamu pakai untuk mengimplementasikan cara ini. _File_ yang pertama berisi sebuah [ConfigMap](/docs/tasks/configure-pod-container/configure-pod-configmap/) untuk mengonfigurasi fluentd. +yang menggunakan fluentd sebagai agen _logging_. Berikut ini dua _file_ konfigurasi yang dapat kamu pakai untuk mengimplementasikan cara ini. _File_ yang pertama berisi sebuah [ConfigMap](/id/docs/tasks/configure-pod-container/configure-pod-configmap/) untuk mengonfigurasi fluentd. 
{{< codenew file="admin/logging/fluentd-sidecar-config.yaml" >}} diff --git a/content/id/docs/concepts/cluster-administration/manage-deployment.md b/content/id/docs/concepts/cluster-administration/manage-deployment.md index 81c0ba4d08b8f..d67da9c13eb1b 100644 --- a/content/id/docs/concepts/cluster-administration/manage-deployment.md +++ b/content/id/docs/concepts/cluster-administration/manage-deployment.md @@ -6,7 +6,7 @@ weight: 40 -Kamu telah melakukan _deploy_ pada aplikasimu dan mengeksposnya melalui sebuah _service_. Lalu? Kubernetes menyediakan berbagai peralatan untuk membantu mengatur mekanisme _deploy_ aplikasi, termasuk pengaturan kapasitas dan pembaruan. Diantara fitur yang akan didiskusikan lebih mendalam yaitu [berkas konfigurasi](/docs/concepts/configuration/overview/) dan [label](/docs/concepts/overview/working-with-objects/labels/). +Kamu telah melakukan _deploy_ pada aplikasimu dan mengeksposnya melalui sebuah _service_. Lalu? Kubernetes menyediakan berbagai peralatan untuk membantu mengatur mekanisme _deploy_ aplikasi, termasuk pengaturan kapasitas dan pembaruan. Diantara fitur yang akan didiskusikan lebih mendalam yaitu [berkas konfigurasi](/id/docs/concepts/configuration/overview/) dan [label](/id/docs/concepts/overview/working-with-objects/labels/). @@ -290,7 +290,7 @@ my-nginx-2035384211-u3t6x 1/1 Running 0 23m fe Akan muncul semua _pod_ dengan "app=nginx" dan sebuah kolom label tambahan yaitu tier (ditentukan dengan `-L` atau `--label-columns`). -Untuk informasi lebih lanjut, silahkan baca [label](/docs/concepts/overview/working-with-objects/labels/) dan [kubectl label](/docs/reference/generated/kubectl/kubectl-commands/#label). +Untuk informasi lebih lanjut, silahkan baca [label](/id/docs/concepts/overview/working-with-objects/labels/) dan [kubectl label](/docs/reference/generated/kubectl/kubectl-commands/#label). ## Memperbarui anotasi @@ -309,7 +309,7 @@ metadata: ... ``` -Untuk informasi lebih lanjut, silahkan lihat laman [annotations](/docs/concepts/overview/working-with-objects/annotations/) dan [kubectl annotate](/docs/reference/generated/kubectl/kubectl-commands/#annotate). +Untuk informasi lebih lanjut, silahkan lihat laman [annotations](/id/docs/concepts/overview/working-with-objects/annotations/) dan [kubectl annotate](/docs/reference/generated/kubectl/kubectl-commands/#annotate). ## Memperbesar dan memperkecil aplikasi kamu @@ -432,7 +432,7 @@ Untuk memperbarui versi ke 1.9.1, ganti `.spec.template.spec.containers[0].image kubectl edit deployment/my-nginx ``` -Selesai! Deployment akan memperbarui aplikasi nginx yang terdeploy secara berangsur di belakang. Dia akan menjamin hanya ada sekian replika lama yang akan down selagi pembaruan berjalan dan hanya ada sekian replika baru akan dibuat melebihi jumlah pod. Untuk mempelajari lebih lanjut, kunjungi [laman Deployment](/docs/concepts/workloads/controllers/deployment/). +Selesai! Deployment akan memperbarui aplikasi nginx yang terdeploy secara berangsur di belakang. Dia akan menjamin hanya ada sekian replika lama yang akan down selagi pembaruan berjalan dan hanya ada sekian replika baru akan dibuat melebihi jumlah pod. Untuk mempelajari lebih lanjut, kunjungi [laman Deployment](/id/docs/concepts/workloads/controllers/deployment/). @@ -440,6 +440,6 @@ Selesai! 
Deployment akan memperbarui aplikasi nginx yang terdeploy secara berang - [Pelajari tentang bagaimana memakai `kubectl` untuk memeriksa dan _debug_ aplikasi.](/docs/tasks/debug-application-cluster/debug-application-introspection/) -- [Praktik Terbaik dan Tips Konfigurasi](/docs/concepts/configuration/overview/) +- [Praktik Terbaik dan Tips Konfigurasi](/id/docs/concepts/configuration/overview/) diff --git a/content/id/docs/concepts/cluster-administration/networking.md b/content/id/docs/concepts/cluster-administration/networking.md index 038465bcb818c..6bcd78d7ef562 100644 --- a/content/id/docs/concepts/cluster-administration/networking.md +++ b/content/id/docs/concepts/cluster-administration/networking.md @@ -10,10 +10,10 @@ untuk memahami persis bagaimana mengharapkannya bisa bekerja. Ada 4 masalah yang berbeda untuk diatasi: 1. Komunikasi antar kontainer yang sangat erat: hal ini diselesaikan oleh - [Pod](/docs/concepts/workloads/pods/pod/) dan komunikasi `localhost`. + [Pod](/id/docs/concepts/workloads/pods/pod/) dan komunikasi `localhost`. 2. Komunikasi antar Pod: ini adalah fokus utama dari dokumen ini. -3. Komunikasi Pod dengan Service: ini terdapat di [Service](/docs/concepts/services-networking/service/). -4. Komunikasi eksternal dengan Service: ini terdapat di [Service](/docs/concepts/services-networking/service/). +3. Komunikasi Pod dengan Service: ini terdapat di [Service](/id/docs/concepts/services-networking/service/). +4. Komunikasi eksternal dengan Service: ini terdapat di [Service](/id/docs/concepts/services-networking/service/). @@ -213,7 +213,7 @@ Calico juga dapat dijalankan dalam mode penegakan kebijakan bersama dengan solus ### Romana -[Romana](http://romana.io) adalah jaringan sumber terbuka dan solusi otomasi keamanan yang memungkinkan kamu menggunakan Kubernetes tanpa jaringan hamparan. Romana mendukung Kubernetes [Kebijakan Jaringan](/docs/concepts/services-networking/network-policies/) untuk memberikan isolasi di seluruh ruang nama jaringan. +[Romana](http://romana.io) adalah jaringan sumber terbuka dan solusi otomasi keamanan yang memungkinkan kamu menggunakan Kubernetes tanpa jaringan hamparan. Romana mendukung Kubernetes [Kebijakan Jaringan](/id/docs/concepts/services-networking/network-policies/) untuk memberikan isolasi di seluruh ruang nama jaringan. ### Weave Net dari Weaveworks diff --git a/content/id/docs/concepts/cluster-administration/proxies.md b/content/id/docs/concepts/cluster-administration/proxies.md index 5595414aa9896..f3567233e0205 100644 --- a/content/id/docs/concepts/cluster-administration/proxies.md +++ b/content/id/docs/concepts/cluster-administration/proxies.md @@ -14,7 +14,7 @@ Laman ini menjelaskan berbagai proxy yang ada di dalam Kubernetes. Ada beberapa jenis proxy yang akan kamu temui saat menggunakan Kubernetes: -1. [kubectl proxy](/docs/tasks/access-application-cluster/access-cluster/#directly-accessing-the-rest-api): +1. [kubectl proxy](/id/docs/tasks/access-application-cluster/access-cluster/#directly-accessing-the-rest-api): - dijalankan pada desktop pengguna atau di dalam sebuah Pod - melakukan proxy dari alamat localhost ke apiserver Kubernetes @@ -23,7 +23,7 @@ Ada beberapa jenis proxy yang akan kamu temui saat menggunakan Kubernetes - mencari lokasi apiserver - menambahkan header autentikasi -1. [apiserver proxy](/docs/tasks/access-application-cluster/access-cluster/#discovering-builtin-services): +1. 
[apiserver proxy](/id/docs/tasks/access-application-cluster/access-cluster/#discovering-builtin-services): - merupakan sebuah bastion yang ada di dalam apiserver - menghubungkan pengguna di luar klaster ke alamat-alamat IP di dalam klaster yang tidak bisa terjangkau @@ -33,7 +33,7 @@ Ada beberapa jenis proxy yang akan kamu temui saat menggunakan Kubernetes - dapat digunakan untuk menghubungi Node, Pod, atau Service - melakukan load balancing saat digunakan untuk menjangkau sebuah Service -1. [kube proxy](/docs/concepts/services-networking/service/#ips-and-vips): +1. [kube proxy](/id/docs/concepts/services-networking/service/#ips-and-vips): - dijalankan pada setiap Node - melakukan proxy untuk UDP, TCP dan SCTP diff --git a/content/id/docs/concepts/configuration/assign-pod-node.md b/content/id/docs/concepts/configuration/assign-pod-node.md index 8af1abba28642..ee9e8bf2f4a88 100644 --- a/content/id/docs/concepts/configuration/assign-pod-node.md +++ b/content/id/docs/concepts/configuration/assign-pod-node.md @@ -7,7 +7,7 @@ weight: 30 -Kamu dapat memaksa sebuah [pod](/docs/concepts/workloads/pods/pod/) untuk hanya dapat berjalan pada [node](/docs/concepts/architecture/nodes/) tertentu atau mengajukannya agar berjalan pada node tertentu. Ada beberapa cara untuk melakukan hal tersebut. Semua cara yang direkomendasikan adalah dengan menggunakan [_selector_ label](/docs/concepts/overview/working-with-objects/labels/) untuk menetapkan pilihan yang kamu inginkan. Pada umumnya, pembatasan ini tidak dibutuhkan, sebagaimana _scheduler_ akan melakukan penempatan yang proporsional dengan otomatis (seperti contohnya menyebar pod di node-node, tidak menempatkan pod pada node dengan sumber daya yang tidak memadai, dst.) tetapi ada keadaan-keadaan tertentu yang membuat kamu memiliki kendali lebih terhadap node yang menjadi tempat pod dijalankan, contohnya untuk memastikan pod dijalankan pada mesin yang telah terpasang SSD, atau untuk menempatkan pod-pod dari dua servis yang berbeda yang sering berkomunikasi bersamaan ke dalam zona ketersediaan yang sama. +Kamu dapat memaksa sebuah [pod](/id/docs/concepts/workloads/pods/pod/) untuk hanya dapat berjalan pada [node](/id/docs/concepts/architecture/nodes/) tertentu atau mengajukannya agar berjalan pada node tertentu. Ada beberapa cara untuk melakukan hal tersebut. Semua cara yang direkomendasikan adalah dengan menggunakan [_selector_ label](/id/docs/concepts/overview/working-with-objects/labels/) untuk menetapkan pilihan yang kamu inginkan. Pada umumnya, pembatasan ini tidak dibutuhkan, sebagaimana _scheduler_ akan melakukan penempatan yang proporsional dengan otomatis (seperti contohnya menyebar pod di node-node, tidak menempatkan pod pada node dengan sumber daya yang tidak memadai, dst.) tetapi ada keadaan-keadaan tertentu yang membuat kamu memiliki kendali lebih terhadap node yang menjadi tempat pod dijalankan, contohnya untuk memastikan pod dijalankan pada mesin yang telah terpasang SSD, atau untuk menempatkan pod-pod dari dua servis yang berbeda yang sering berkomunikasi bersamaan ke dalam zona ketersediaan yang sama. 
Kamu dapat menemukan semua berkas untuk contoh-contoh berikut pada [dokumentasi yang kami sediakan di sini](https://github.com/kubernetes/website/tree/{{< param "docsbranch" >}}/content/en/docs/concepts/configuration/) @@ -114,7 +114,7 @@ Berikut ini contoh dari pod yang menggunakan afinitas node: Aturan afinitas node tersebut menyatakan pod hanya bisa ditugaskan pada node dengan label yang memiliki kunci `kubernetes.io/e2e-az-name` dan bernilai `e2e-az1` atau `e2e-az2`. Selain itu, dari semua node yang memenuhi kriteria tersebut, mode dengan label dengan kunci `another-node-label-key` and bernilai `another-node-label-value` harus lebih diutamakan. -Kamu dapat meilhat operator `In` digunakan dalam contoh berikut. Sitaksis afinitas node yang baru mendukung operator-operator berikut: `In`, `NotIn`, `Exists`, `DoesNotExist`, `Gt`, `Lt`. Kamu dapat menggunakan `NotIn` dan `DoesNotExist` untuk mewujudkan perilaku node anti-afinitas, atau menggunakan [node taints](/docs/concepts/configuration/taint-and-toleration/) untuk menolak pod dari node tertentu. +Kamu dapat meilhat operator `In` digunakan dalam contoh berikut. Sitaksis afinitas node yang baru mendukung operator-operator berikut: `In`, `NotIn`, `Exists`, `DoesNotExist`, `Gt`, `Lt`. Kamu dapat menggunakan `NotIn` dan `DoesNotExist` untuk mewujudkan perilaku node anti-afinitas, atau menggunakan [node taints](/id/docs/concepts/configuration/taint-and-toleration/) untuk menolak pod dari node tertentu. Jika kamu menyatakan `nodeSelector` dan `nodeAffinity`. *keduanya* harus dipenuhi agar pod dapat dijadwalkan pada node kandidat. @@ -284,7 +284,7 @@ Lihat [tutorial ZooKeeper](/docs/tutorials/stateful-application/zookeeper/#toler Untuk informasi lebih lanjut tentang afinitas/anti-afinitas antar pod, lihat [design doc](https://git.k8s.io/community/contributors/design-proposals/scheduling/podaffinity.md). -Kamu juga dapat mengecek [Taints](/docs/concepts/configuration/taint-and-toleration/), yang memungkinkan sebuah *node* untuk *menolak* sekumpulan pod. +Kamu juga dapat mengecek [Taints](/id/docs/concepts/configuration/taint-and-toleration/), yang memungkinkan sebuah *node* untuk *menolak* sekumpulan pod. ## nodeName diff --git a/content/id/docs/concepts/configuration/manage-compute-resources-container.md b/content/id/docs/concepts/configuration/manage-compute-resources-container.md index 3450bab45989b..600a4cc6cdf07 100644 --- a/content/id/docs/concepts/configuration/manage-compute-resources-container.md +++ b/content/id/docs/concepts/configuration/manage-compute-resources-container.md @@ -10,7 +10,7 @@ feature: -Saat kamu membuat spesifikasi sebuah [Pod](/docs/concepts/workloads/pods/pod/), kamu +Saat kamu membuat spesifikasi sebuah [Pod](/id/docs/concepts/workloads/pods/pod/), kamu dapat secara opsional menentukan seberapa banyak CPU dan memori (RAM) yang dibutuhkan oleh setiap Container. Saat Container-Container menentukan _request_ (permintaan) sumber daya, scheduler dapat membuat keputusan yang lebih baik mengenai Node mana yang akan dipilih @@ -42,8 +42,8 @@ Hal ini berbeda dari sumber daya `memory` dan `cpu` (yang dapat di-_overcommit_) CPU dan memori secara kolektif disebut sebagai _sumber daya komputasi_, atau cukup _sumber daya_ saja. Sumber daya komputasi adalah jumlah yang dapat diminta, dialokasikan, -dan dikonsumsi. Mereka berbeda dengan [sumber daya API](/docs/concepts/overview/kubernetes-api/). -Sumber daya API, seperti Pod dan [Service](/docs/concepts/services-networking/service/) adalah +dan dikonsumsi. 
Mereka berbeda dengan [sumber daya API](/id/docs/concepts/overview/kubernetes-api/). +Sumber daya API, seperti Pod dan [Service](/id/docs/concepts/services-networking/service/) adalah objek-objek yang dapat dibaca dan diubah melalui Kubernetes API Server. ## Request dan Limit Sumber daya dari Pod dan Container @@ -270,7 +270,7 @@ _daemon_ sistem menggunakan sebagian dari sumber daya yang ada. Kolom `allocatab memberikan jumlah sumber daya yang tersedia untuk Pod-Pod. Untuk lebih lanjut, lihat [Sumber daya Node yang dapat dialokasikan](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md). -Fitur [kuota sumber daya](/docs/concepts/policy/resource-quotas/) dapat disetel untuk +Fitur [kuota sumber daya](/id/docs/concepts/policy/resource-quotas/) dapat disetel untuk membatasi jumlah sumber daya yang dapat digunakan. Jika dipakai bersama dengan Namespace, kuota sumber daya dapat mencegah suatu tim menghabiskan semua sumber daya. @@ -489,7 +489,7 @@ Sumber daya yang diperluas pada tingkat Node terikat pada Node. ##### Sumber daya Device Plugin yang dikelola Lihat [Device -Plugin](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) untuk +Plugin](/id/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) untuk cara menyatakan sumber daya _device plugin_ yang dikelola pada setiap node. ##### Sumber daya lainnya diff --git a/content/id/docs/concepts/configuration/organize-cluster-access-kubeconfig.md b/content/id/docs/concepts/configuration/organize-cluster-access-kubeconfig.md index 929c895821fff..caba991a8d95d 100644 --- a/content/id/docs/concepts/configuration/organize-cluster-access-kubeconfig.md +++ b/content/id/docs/concepts/configuration/organize-cluster-access-kubeconfig.md @@ -24,7 +24,7 @@ tanda [`--kubeconfig`](/docs/reference/generated/kubectl/kubectl/). Instruksi langkah demi langkah untuk membuat dan menentukan berkas kubeconfig, bisa mengacu pada [Mengatur Akses Pada Beberapa Klaster] -(/docs/tasks/access-application-cluster/configure-access-multiple-clusters). +(/id/docs/tasks/access-application-cluster/configure-access-multiple-clusters). @@ -103,7 +103,7 @@ kubeconfig: abaikan mereka. Beberapa contoh pengaturan variabel _environment_ `KUBECONFIG`, bisa melihat pada - [pengaturan vaiabel _environment_ KUBECONFIG](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/#set-the-kubeconfig-environment-variable). + [pengaturan vaiabel _environment_ KUBECONFIG](/id/docs/tasks/access-application-cluster/configure-access-multiple-clusters/#set-the-kubeconfig-environment-variable). Sebaliknya, bisa menggunakan berkas kubeconfig _default_, `$HOME/.kube/config`, tanpa melakukan penggabungan. @@ -158,7 +158,7 @@ _absolute path_ akan disimpan secara mutlak. ## {{% heading "whatsnext" %}} -* [Mengatur Akses Pada Beberapa Klaster](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) +* [Mengatur Akses Pada Beberapa Klaster](/id/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) * [`kubectl config`](/docs/reference/generated/kubectl/kubectl-commands#config) diff --git a/content/id/docs/concepts/configuration/overview.md b/content/id/docs/concepts/configuration/overview.md index 76d68658ecaad..67fb2061fe54a 100644 --- a/content/id/docs/concepts/configuration/overview.md +++ b/content/id/docs/concepts/configuration/overview.md @@ -32,14 +32,14 @@ Dokumentasi ini terbuka. 
Jika Anda menemukan sesuatu yang tidak ada dalam daftar ## "Naked" Pods vs ReplicaSets, Deployments, and Jobs -- Jangan gunakan Pods naked (artinya, Pods tidak terikat dengan a [ReplicaSet](/docs/concepts/workloads/controllers/replicaset/) a [Deployment](/docs/concepts/workloads/controllers/deployment/)) jika kamu bisa menghindarinya. Pod naked tidak akan dijadwal ulang jika terjadi kegagalan pada node. +- Jangan gunakan Pods naked (artinya, Pods tidak terikat dengan a [ReplicaSet](/id/docs/concepts/workloads/controllers/replicaset/) a [Deployment](/id/docs/concepts/workloads/controllers/deployment/)) jika kamu bisa menghindarinya. Pod naked tidak akan dijadwal ulang jika terjadi kegagalan pada node. - Deployment, yang keduanya menciptakan ReplicaSet untuk memastikan bahwa jumlah Pod yang diinginkan selalu tersedia, dan menentukan strategi untuk mengganti Pods (seperti [RollingUpdate](/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment)), hampir selalu lebih disukai daripada membuat Pods secara langsung, kecuali untuk beberapa yang eksplisit [`restartPolicy: Never`](/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy) banyak skenario . A [Job](/docs/concepts/workloads/controllers/jobs-run-to-completion/) mungkin juga sesuai. + Deployment, yang keduanya menciptakan ReplicaSet untuk memastikan bahwa jumlah Pod yang diinginkan selalu tersedia, dan menentukan strategi untuk mengganti Pods (seperti [RollingUpdate](/id/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment)), hampir selalu lebih disukai daripada membuat Pods secara langsung, kecuali untuk beberapa yang eksplisit [`restartPolicy: Never`](/id/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy) banyak skenario . A [Job](/id/docs/concepts/workloads/controllers/jobs-run-to-completion/) mungkin juga sesuai. ## Services -- Buat [Service](/docs/concepts/services-networking/service/) sebelum workloads backend terkait (Penyebaran atau ReplicaSets), dan sebelum workloads apa pun yang perlu mengaksesnya. Ketika Kubernetes memulai sebuah container, ia menyediakan environment variabel yang menunjuk ke semua Layanan yang berjalan ketika container itu dimulai. Misalnya, jika Layanan bernama `foo` ada, semua container akan mendapatkan variabel berikut di environment awalnya: +- Buat [Service](/id/docs/concepts/services-networking/service/) sebelum workloads backend terkait (Penyebaran atau ReplicaSets), dan sebelum workloads apa pun yang perlu mengaksesnya. Ketika Kubernetes memulai sebuah container, ia menyediakan environment variabel yang menunjuk ke semua Layanan yang berjalan ketika container itu dimulai. Misalnya, jika Layanan bernama `foo` ada, semua container akan mendapatkan variabel berikut di environment awalnya: ```shell FOO_SERVICE_HOST= @@ -48,26 +48,26 @@ Dokumentasi ini terbuka. Jika Anda menemukan sesuatu yang tidak ada dalam daftar *Ini menunjukan persyaratan pemesanan * - `Service` apa pun yang ingin diakses oleh` Pod` harus dibuat sebelum `Pod` itu sendiri, atau environment variabel tidak akan diisi. DNS tidak memiliki batasan ini. -- Opsional (meskipun sangat disarankan) [cluster add-on](/docs/concepts/cluster-administration/addons/) adalah server DNS. +- Opsional (meskipun sangat disarankan) [cluster add-on](/id/docs/concepts/cluster-administration/addons/) adalah server DNS. Server DNS melihat API Kubernetes untuk `Service` baru dan membuat satu set catatan DNS untuk masing-masing. 
Jika DNS telah diaktifkan di seluruh cluster maka semua `Pods` harus dapat melakukan resolusi nama`Service` secara otomatis. - Jangan tentukan `hostPort` untuk Pod kecuali jika benar-benar diperlukan. Ketika Anda bind Pod ke `hostPort`, hal itu membatasi jumlah tempat Pod dapat dijadwalkan, karena setiap kombinasi <` hostIP`, `hostPort`,` protokol`> harus unik. Jika Anda tidak menentukan `hostIP` dan` protokol` secara eksplisit, Kubernetes akan menggunakan `0.0.0.0` sebagai` hostIP` dan `TCP` sebagai default` protokol`. - Jika kamu hanya perlu akses ke port untuk keperluan debugging, Anda bisa menggunakan [apiserver proxy](/docs/tasks/access-application-cluster/access-cluster/#manually-constructing-apiserver-proxy-urls) atau [`kubectl port-forward`](/docs/tasks/access-application-cluster/port-forward-access-application-cluster/). + Jika kamu hanya perlu akses ke port untuk keperluan debugging, Anda bisa menggunakan [apiserver proxy](/id/docs/tasks/access-application-cluster/access-cluster/#manually-constructing-apiserver-proxy-urls) atau [`kubectl port-forward`](/id/docs/tasks/access-application-cluster/port-forward-access-application-cluster/). - Jika Anda secara eksplisit perlu mengekspos port Pod pada node, pertimbangkan untuk menggunakan [NodePort](/docs/concepts/services-networking/service/#nodeport) Service sebelum beralih ke `hostPort`. + Jika Anda secara eksplisit perlu mengekspos port Pod pada node, pertimbangkan untuk menggunakan [NodePort](/id/docs/concepts/services-networking/service/#nodeport) Service sebelum beralih ke `hostPort`. - Hindari menggunakan `hostNetwork`, untuk alasan yang sama seperti` hostPort`. -- Gunakan [headless Services](/docs/concepts/services-networking/service/#headless- +- Gunakan [headless Services](/id/docs/concepts/services-networking/service/#headless- services) (yang memiliki `ClusterIP` dari` None`) untuk Service discovery yang mudah ketika Anda tidak membutuhkan `kube-proxy` load balancing. ## Menggunakan label -- Deklarasi dan gunakan [labels] (/docs/concepts/overview/working-with-objects/labels/) untuk identifikasi __semantic attributes__ aplikasi atau Deployment kamu, seperti `{ app: myapp, tier: frontend, phase: test, deployment: v3 }`. Kamu dapat menggunakan label ini untuk memilih Pod yang sesuai untuk sumber daya lainnya; misalnya, Service yang memilih semua `tier: frontend` Pods, atau semua komponen` phase: test` dari `app: myapp`. Lihat [guestbook](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/guestbook/) aplikasi untuk contoh-contoh pendekatan ini. +- Deklarasi dan gunakan [labels] (/id/docs/concepts/overview/working-with-objects/labels/) untuk identifikasi __semantic attributes__ aplikasi atau Deployment kamu, seperti `{ app: myapp, tier: frontend, phase: test, deployment: v3 }`. Kamu dapat menggunakan label ini untuk memilih Pod yang sesuai untuk sumber daya lainnya; misalnya, Service yang memilih semua `tier: frontend` Pods, atau semua komponen` phase: test` dari `app: myapp`. Lihat [guestbook](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/guestbook/) aplikasi untuk contoh-contoh pendekatan ini. -Service dapat dibuat untuk menjangkau beberapa Penyebaran dengan menghilangkan label khusus rilis dari pemilihnya. [Deployments](/docs/concepts/workloads/controllers/deployment/) membuatnya mudah untuk memperbarui Service yang sedang berjalan tanpa downtime. +Service dapat dibuat untuk menjangkau beberapa Penyebaran dengan menghilangkan label khusus rilis dari pemilihnya. 
[Deployments](/id/docs/concepts/workloads/controllers/deployment/) membuatnya mudah untuk memperbarui Service yang sedang berjalan tanpa downtime. Keadaan objek yang diinginkan dideskripsikan oleh Deployment, dan jika perubahan terhadap spesifikasi tersebut adalah _applied_, Deployment controller mengubah keadaan aktual ke keadaan yang diinginkan pada tingkat yang terkontrol. @@ -75,7 +75,7 @@ Keadaan objek yang diinginkan dideskripsikan oleh Deployment, dan jika perubahan ## Container Images -Ini [imagePullPolicy](/docs/concepts/containers/images/#updating-images) dan tag dari image mempengaruhi ketika [kubelet](/docs/admin/kubelet/) mencoba menarik image yang ditentukan +Ini [imagePullPolicy](/id/docs/concepts/containers/images/#updating-images) dan tag dari image mempengaruhi ketika [kubelet](/docs/admin/kubelet/) mencoba menarik image yang ditentukan - `imagePullPolicy: IfNotPresent`: image ditarik hanya jika belum ada secara lokal. @@ -105,7 +105,7 @@ Semantik caching dari penyedia gambar yang mendasarinya membuat bahkan `imagePul - Gunakan `kubectl apply -f `. Ini mencari konfigurasi Kubernetes di semua file `.yaml`,` .yml`, dan `.json` di` `dan meneruskannya ke` apply`. -- Gunakan label selector untuk operasi `get` dan` delete` alih-alih nama objek tertentu. Lihat bagian di [label selectors](/docs/concepts/overview/working-with-objects/labels/#label-selectors) dan [using labels effectively](/docs/concepts/cluster-administration/manage-deployment/#using-labels-effectively). +- Gunakan label selector untuk operasi `get` dan` delete` alih-alih nama objek tertentu. Lihat bagian di [label selectors](/id/docs/concepts/overview/working-with-objects/labels/#label-selectors) dan [using labels effectively](/id/docs/concepts/cluster-administration/manage-deployment/#using-labels-effectively). - Gunakan `kubectl run` dan` kubectl expose` untuk dengan cepat membuat Deployment dan Service single-container. Lihat [Use a Service to Access an Application in a Cluster](/docs/tasks/access-application-cluster/service-access-application-cluster/) untuk Contoh. diff --git a/content/id/docs/concepts/configuration/pod-overhead.md b/content/id/docs/concepts/configuration/pod-overhead.md index e59301bb96a66..13db4e32f8f6c 100644 --- a/content/id/docs/concepts/configuration/pod-overhead.md +++ b/content/id/docs/concepts/configuration/pod-overhead.md @@ -22,7 +22,7 @@ _Pod Overhead_ adalah fitur yang berfungsi untuk menghitung sumber daya digunaka Pada Kubernetes, Overhead Pod ditentukan pada [saat admisi](/docs/reference/access-authn-authz/extensible-admission-controllers/#what-are-admission-webhooks) sesuai dengan Overhead yang ditentukan di dalam -[RuntimeClass](/docs/concepts/containers/runtime-class/) milik Pod. +[RuntimeClass](/id/docs/concepts/containers/runtime-class/) milik Pod. Ketika Overhead Pod diaktifkan, Overhead akan dipertimbangkan sebagai tambahan terhadap jumlah permintaan sumber daya Container saat menjadwalkan Pod. 
Begitu pula Kubelet, yang akan memasukkan Overhead Pod saat menentukan ukuran @@ -49,7 +49,7 @@ Lihat [Ringkasan Otorisasi](/docs/reference/access-authn-authz/authorization/) u ## {{% heading "whatsnext" %}} -* [RuntimeClass](/docs/concepts/containers/runtime-class/) +* [RuntimeClass](/id/docs/concepts/containers/runtime-class/) * [Desain PodOverhead](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/20190226-pod-overhead.md) diff --git a/content/id/docs/concepts/configuration/pod-priority-preemption.md b/content/id/docs/concepts/configuration/pod-priority-preemption.md index a0c6035482107..7350470fa390f 100644 --- a/content/id/docs/concepts/configuration/pod-priority-preemption.md +++ b/content/id/docs/concepts/configuration/pod-priority-preemption.md @@ -24,7 +24,7 @@ Versi Kubernetes | Keadaan Priority and Pemindahan | Dihidupkan secara Bawaan 1.11 | beta | ya 1.14 | stable | ya -{{< warning >}}Pada sebuah klaster di mana tidak semua pengguna dipercaya, seorang pengguna yang berniat jahat dapat membuat Pod-pod dengan prioritas paling tinggi, membuat Pod-pod lainnya dipindahkan/tidak dapat dijadwalkan. Untuk mengatasi masalah ini, [ResourceQuota](/docs/concepts/policy/resource-quotas/) ditambahkan untuk mendukung prioritas Pod. Seorang admin dapat membuat ResourceQuota untuk pengguna-pengguna pada tingkat prioritas tertentu, mencegah mereka untuk membuat Pod-pod pada prioritas tinggi. Fitur ini telah beta sejak Kubernetes 1.12. +{{< warning >}}Pada sebuah klaster di mana tidak semua pengguna dipercaya, seorang pengguna yang berniat jahat dapat membuat Pod-pod dengan prioritas paling tinggi, membuat Pod-pod lainnya dipindahkan/tidak dapat dijadwalkan. Untuk mengatasi masalah ini, [ResourceQuota](/id/docs/concepts/policy/resource-quotas/) ditambahkan untuk mendukung prioritas Pod. Seorang admin dapat membuat ResourceQuota untuk pengguna-pengguna pada tingkat prioritas tertentu, mencegah mereka untuk membuat Pod-pod pada prioritas tinggi. Fitur ini telah beta sejak Kubernetes 1.12. {{< /warning >}} @@ -178,11 +178,11 @@ Harap catat bahwa Pod P tidak harus dijadwalkan pada "_nominated_ Node" (Node ya #### Penghentian secara sopan dari korban-korban pemindahan Pod -Saat Pod-pod dipindahkan, korban-korbannya mendapatkan [periode penghentian secara sopan](/docs/concepts/workloads/pods/pod/#penghentian-pod). Mereka memiliki waktu sebanyak itu untuk menyelesaikan pekerjaan merekan dan berhenti. Jika mereka tidak menyelesaikannya sebelum waktu tersebut, mereka akan dihentikan secara paksa. Periode penghentian secara sopan ini membuat sebuah jarak waktu antara saat di mana Scheduler memindahkan Pod-pod dengan waktu saat Pod yang tertunda tersebut (P) dapat dijadwalkan pada Node tersebut (N). Sementara itu, Scheduler akan terus menjadwalkan Pod-pod lain yang tertunda. Oleh karena itu, biasanya ada jarak waktu antara titik di mana Scheduler memindahkan korban-korban dan titik saat Pod P dijadwalkan. Untuk meminimalkan jarak waktu ini, kamu dapat menyetel periode penghentian secara sopan dari Pod-pod dengan prioritas lebih rendah menjadi nol atau sebuah angka yang kecil. +Saat Pod-pod dipindahkan, korban-korbannya mendapatkan [periode penghentian secara sopan](/id/docs/concepts/workloads/pods/pod/#penghentian-pod). Mereka memiliki waktu sebanyak itu untuk menyelesaikan pekerjaan merekan dan berhenti. Jika mereka tidak menyelesaikannya sebelum waktu tersebut, mereka akan dihentikan secara paksa. 
Periode penghentian secara sopan ini membuat sebuah jarak waktu antara saat di mana Scheduler memindahkan Pod-pod dengan waktu saat Pod yang tertunda tersebut (P) dapat dijadwalkan pada Node tersebut (N). Sementara itu, Scheduler akan terus menjadwalkan Pod-pod lain yang tertunda. Oleh karena itu, biasanya ada jarak waktu antara titik di mana Scheduler memindahkan korban-korban dan titik saat Pod P dijadwalkan. Untuk meminimalkan jarak waktu ini, kamu dapat menyetel periode penghentian secara sopan dari Pod-pod dengan prioritas lebih rendah menjadi nol atau sebuah angka yang kecil. #### PodDisruptionBudget didukung, tapi tidak dijamin! -Sebuah [Pod Disruption Budget (PDB)](/docs/concepts/workloads/pods/disruptions/) memungkinkan pemilik-pemilik aplikasi untuk membatasi jumlah Pod-pod dari sebuah aplikasi yang direplikasi yang mati secara bersamaan dikarenakan disrupsi yang disengaja. Kubernetes 1.9 mendukung PDB saat memindahkan Pod-pod, tetapi penghormatan terhadap PDB ini bersifat "usaha terbaik" (_best-effort_). Scheduler akan mencoba mencari korban-korban yang PDB-nya tidak dilanggar oleh pemindahan, tetapi jika tidak ada korban yang ditemukan, pemindahan akan tetap terjadi, dan Pod-pod dengan prioritas lebih rendah akan dihapus/dipindahkan meskipun PDB mereka dilanggar. +Sebuah [Pod Disruption Budget (PDB)](/id/docs/concepts/workloads/pods/disruptions/) memungkinkan pemilik-pemilik aplikasi untuk membatasi jumlah Pod-pod dari sebuah aplikasi yang direplikasi yang mati secara bersamaan dikarenakan disrupsi yang disengaja. Kubernetes 1.9 mendukung PDB saat memindahkan Pod-pod, tetapi penghormatan terhadap PDB ini bersifat "usaha terbaik" (_best-effort_). Scheduler akan mencoba mencari korban-korban yang PDB-nya tidak dilanggar oleh pemindahan, tetapi jika tidak ada korban yang ditemukan, pemindahan akan tetap terjadi, dan Pod-pod dengan prioritas lebih rendah akan dihapus/dipindahkan meskipun PDB mereka dilanggar. #### Afinitas antar-Pod pada Pod-pod dengan prioritas lebih rendah diff --git a/content/id/docs/concepts/configuration/secret.md b/content/id/docs/concepts/configuration/secret.md index a6ca8dca88b62..40875648ff955 100644 --- a/content/id/docs/concepts/configuration/secret.md +++ b/content/id/docs/concepts/configuration/secret.md @@ -49,7 +49,7 @@ Mekanisme otomatisasi pembuatan secret dan penggunaan kredensial API dapat di no atau di-_override_ jika kamu menginginkannya. Meskipun begitu, jika apa yang kamu butuhkan hanyalah mengakses apiserver secara aman, maka mekanisme _default_ inilah yang disarankan. -Baca lebih lanjut dokumentasi [_Service Account_](/docs/tasks/configure-pod-container/configure-service-account/) +Baca lebih lanjut dokumentasi [_Service Account_](/id/docs/tasks/configure-pod-container/configure-service-account/) untuk informasi lebih lanjut mengenai bagaimana cara kerja _Service Account_. ### Membuat Objek Secret Kamu Sendiri @@ -569,7 +569,7 @@ _delay_ propagasi _cache_, dimana _delay_ propagasi _cache_ bergantung pada jeni {{< note >}} Sebuah container menggunakan Secret sebagai -[subPath](/docs/concepts/storage/volumes#using-subpath) dari _volume_ +[subPath](/id/docs/concepts/storage/volumes#using-subpath) dari _volume_ yang di-_mount_ tidak akan menerima perubahan Secret. 
{{< /note >}} @@ -636,7 +636,7 @@ pada Kubelet, sehingga Kubelet dapat mengunduh _image_ dan menempatkannya pada P **Memberikan spesifikasi manual dari sebuah imagePullSecret** -Penggunaan imagePullSecrets dideskripsikan di dalam [dokumentasi _image_](/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod) +Penggunaan imagePullSecrets dideskripsikan di dalam [dokumentasi _image_](/id/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod) ### Mekanisme yang Dapat Diterapkan agar imagePullSecrets dapat Secara Otomatis Digunakan @@ -644,7 +644,7 @@ Kamu dapat secara manual membuat sebuah imagePullSecret, serta merujuk imagePull yang sudah kamu buat dari sebuah serviceAccount. Semua Pod yang dibuat dengan menggunakan serviceAccount tadi atau serviceAccount _default_ akan menerima _field_ imagePullSecret dari serviceAccount yang digunakan. -Bacalah [Cara menambahkan ImagePullSecrets pada sebuah _service account_](/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account) +Bacalah [Cara menambahkan ImagePullSecrets pada sebuah _service account_](/id/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account) untuk informasi lebih detail soal proses yang dijalankan. ### Mekanisme _Mounting_ Otomatis dari Secret yang Sudah Dibuat @@ -985,7 +985,7 @@ hanya boleh dimiliki oleh komponen pada sistem level yang paling _previleged_. Aplikasi yang membutuhkan akses ke API secret harus melakukan _request_ `get` pada secret yang dibutuhkan. Hal ini memungkinkan administrator untuk membatasi -akses pada semua secret dengan tetap memberikan [akses pada instans secret tertentu](/docs/reference/access-authn-authz/rbac/#referring-to-resources) +akses pada semua secret dengan tetap memberikan [akses pada instans secret tertentu](/id/docs/reference/access-authn-authz/rbac/#referring-to-resources) yang dibutuhkan aplikasi. Untuk meningkatkan performa dengan menggunakan iterasi `get`, klien dapat mendesain diff --git a/content/id/docs/concepts/configuration/taint-and-toleration.md b/content/id/docs/concepts/configuration/taint-and-toleration.md index 9a30b48f5b684..723bbd1c9c6dc 100644 --- a/content/id/docs/concepts/configuration/taint-and-toleration.md +++ b/content/id/docs/concepts/configuration/taint-and-toleration.md @@ -6,7 +6,7 @@ weight: 40 -Afinitas Node, seperti yang dideskripsikan [di sini](/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature), +Afinitas Node, seperti yang dideskripsikan [di sini](/id/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature), adalah salah satu properti dari Pod yang menyebabkan pod tersebut memiliki preferensi untuk ditempatkan di sekelompok Node tertentu (preferensi ini dapat berupa _soft constraints_ atau _hard constraints_ yang harus dipenuhi). _Taint_ merupakan kebalikan dari afinitas -- @@ -193,7 +193,7 @@ khusus (misalnya, `kubectl taint nodes nodename special=true:NoSchedule` atau yang sesuai pada _pod_ yang menggunakan _node_ dengan perangkat keras khusus. Seperti halnya pada kebutuhan _dedicated_ _node_, hal ini dapat dilakukan dengan mudah dengan cara menulis [_admission controller_](/docs/reference/access-authn-authz/admission-controllers/) yang -bersifat khusus. Misalnya, kita dapat menggunakan [_Extended Resource_](/docs/concepts/configuration/manage-compute-resources-container/#extended-resources) +bersifat khusus. 
Misalnya, kita dapat menggunakan [_Extended Resource_](/id/docs/concepts/configuration/manage-compute-resources-container/#extended-resources) untuk merepresentasikan perangkat keras khusus, kemudian _taint_ _node_ dengan perangkat keras khusus dengan nama _extended resource_ dan jalankan _admission controller_ [ExtendedResourceToleration](/docs/reference/access-authn-authz/admission-controllers/#extendedresourcetoleration). @@ -244,7 +244,7 @@ dan logika normal untuk melakukan _eviction_ pada _pod_ dari suatu _node_ terten dari _Ready_ yang ada pada _NodeCondition_ dinonaktifkan. {{< note >}} -Untuk menjaga perilaku [_rate limiting_](/docs/concepts/architecture/nodes/) yang +Untuk menjaga perilaku [_rate limiting_](/id/docs/concepts/architecture/nodes/) yang ada pada _eviction_ _pod_ apabila _node_ mengalami masalah, sistem sebenarnya menambahkan _taint_ dalam bentuk _rate limiter_. Hal ini mencegah _eviction_ besar-besaran pada _pod_ pada skenario dimana master menjadi terpisah dari _node_ lainnya. @@ -280,7 +280,7 @@ _node_ apabila salah satu masalah terdeteksi. Kedua _toleration_ _default_ tadi ditambahkan oleh [DefaultTolerationSeconds _admission controller_](https://git.k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds). -_Pod-pod_ pada [DaemonSet](/docs/concepts/workloads/controllers/daemonset/) dibuat dengan _toleration_ +_Pod-pod_ pada [DaemonSet](/id/docs/concepts/workloads/controllers/daemonset/) dibuat dengan _toleration_ `NoExecute` untuk _taint_ tanpa `tolerationSeconds`: * `node.kubernetes.io/unreachable` diff --git a/content/id/docs/concepts/containers/container-environment.md b/content/id/docs/concepts/containers/container-environment.md index affb37100124a..6c0ba354e8e74 100644 --- a/content/id/docs/concepts/containers/container-environment.md +++ b/content/id/docs/concepts/containers/container-environment.md @@ -17,7 +17,7 @@ Laman ini menjelaskan berbagai *resource* yang tersedia di dalam Kontainer pada *Environment* Kontainer pada Kubernetes menyediakan beberapa *resource* penting yang tersedia di dalam Kontainer: -* Sebuah *Filesystem*, yang merupakan kombinasi antara [image](/docs/concepts/containers/images/) dan satu atau banyak [*volumes*](/docs/concepts/storage/volumes/). +* Sebuah *Filesystem*, yang merupakan kombinasi antara [image](/id/docs/concepts/containers/images/) dan satu atau banyak [*volumes*](/id/docs/concepts/storage/volumes/). * Informasi tentang Kontainer tersebut. * Informasi tentang objek-objek lain di dalam klaster. @@ -53,7 +53,7 @@ jika [*addon* DNS](http://releases.k8s.io/{{< param "githubbranch" >}}/cluster/a ## {{% heading "whatsnext" %}} -* Pelajari lebih lanjut tentang [berbagai *hook* pada *lifecycle* Kontainer](/docs/concepts/containers/container-lifecycle-hooks/). +* Pelajari lebih lanjut tentang [berbagai *hook* pada *lifecycle* Kontainer](/id/docs/concepts/containers/container-lifecycle-hooks/). * Dapatkan pengalaman praktis soal [memberikan *handler* untuk *event* dari *lifecycle* Kontainer](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/). diff --git a/content/id/docs/concepts/containers/container-lifecycle-hooks.md b/content/id/docs/concepts/containers/container-lifecycle-hooks.md index a7b5164864241..d45a5ad23e87d 100644 --- a/content/id/docs/concepts/containers/container-lifecycle-hooks.md +++ b/content/id/docs/concepts/containers/container-lifecycle-hooks.md @@ -40,7 +40,7 @@ Hal ini bersifat *blocking*, yang artinya panggilan bersifat sinkron (*synchrono untuk menghapus kontainer tersebut. 
Tidak ada parameter yang diberikan pada *handler*. -Penjelasan yang lebih rinci tentang proses terminasi dapat dilihat pada [Terminasi Pod](/docs/concepts/workloads/pods/pod/#termination-of-pods). +Penjelasan yang lebih rinci tentang proses terminasi dapat dilihat pada [Terminasi Pod](/id/docs/concepts/workloads/pods/pod/#termination-of-pods). ### Implementasi *handler* untuk *hook* @@ -113,7 +113,7 @@ Events: ## {{% heading "whatsnext" %}} -* Pelajari lebih lanjut tentang [*environment* Kontainer](/docs/concepts/containers/container-environment-variables/). +* Pelajari lebih lanjut tentang [*environment* Kontainer](/id/docs/concepts/containers/container-environment-variables/). * Pelajari bagaimana caranya [melakukan *attach handler* pada *event lifecycle* sebuah Kontainer](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/). diff --git a/content/id/docs/concepts/containers/images.md b/content/id/docs/concepts/containers/images.md index 7a5fa281542d1..8fa81801ff00d 100644 --- a/content/id/docs/concepts/containers/images.md +++ b/content/id/docs/concepts/containers/images.md @@ -26,7 +26,7 @@ selalu diunduh, kamu bisa melakukan salah satu dari berikut: - buang `imagePullPolicy` dan juga _tag_ untuk _image_. - aktifkan [AlwaysPullImages](/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages) _admission controller_. -Harap diingat kamu sebaiknya hindari penggunaan _tag_ `:latest`, lihat [panduan konfigurasi](/docs/concepts/configuration/overview/#container-images) untuk informasi lebih lanjut. +Harap diingat kamu sebaiknya hindari penggunaan _tag_ `:latest`, lihat [panduan konfigurasi](/id/docs/concepts/configuration/overview/#container-images) untuk informasi lebih lanjut. ## Membuat Image Multi-arsitektur dengan Manifest @@ -142,7 +142,7 @@ Setelah kamu membuat registri, kamu akan menggunakan kredensial berikut untuk lo * `DOCKER_EMAIL`: `${some-email-address}` Ketika kamu sudah memiliki variabel-variabel di atas, kamu dapat -[mengkonfigurasi sebuah Kubernetes Secret dan menggunakannya untuk _deploy_ sebuah Pod](/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod). +[mengkonfigurasi sebuah Kubernetes Secret dan menggunakannya untuk _deploy_ sebuah Pod](/id/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod). ### Menggunakan IBM Cloud Container Registry IBM Cloud Container Registry menyediakan sebuah registri _image_ privat yang _multi-tenant_, dapat kamu gunakan untuk menyimpan dan membagikan _image-image_ secara aman. Secara _default_, _image-image_ di dalam registri privat kamu akan dipindai (_scan_) oleh Vulnerability Advisor terintegrasi untuk deteksi isu @@ -291,7 +291,7 @@ kubectl create secret docker-registry --docker-server=DOCKER_REGISTRY_SER Jika kamu sudah memiliki berkas kredensial Docker, daripada menggunakan perintah di atas, kamu dapat mengimpor berkas kredensial sebagai Kubernetes Secret. -[Membuat sebuah Secret berbasiskan pada kredensial Docker yang sudah ada](/docs/tasks/configure-pod-container/pull-image-private-registry/#registry-secret-existing-credentials) menjelaskan bagaimana mengatur ini. +[Membuat sebuah Secret berbasiskan pada kredensial Docker yang sudah ada](/id/docs/tasks/configure-pod-container/pull-image-private-registry/#registry-secret-existing-credentials) menjelaskan bagaimana mengatur ini. 
Cara ini berguna khususnya jika kamu menggunakan beberapa registri kontainer privat, perintah `kubectl create secret docker-registry` akan membuat sebuah Secret yang akan hanya bekerja menggunakan satu registri privat. @@ -331,7 +331,7 @@ Cara ini perlu untuk diselesaikan untuk setiap Pod yang mengguunakan registri pr Hanya saja, mengatur _field_ ini dapat diotomasi dengan mengatur imagePullSecrets di dalam sumber daya [serviceAccount](/docs/user-guide/service-accounts). -Periksa [Tambahan ImagePullSecrets untuk sebuah Service Account](/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account) untuk instruksi yang lebih detail. +Periksa [Tambahan ImagePullSecrets untuk sebuah Service Account](/id/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account) untuk instruksi yang lebih detail. Kamu dapat menggunakan cara ini bersama `.docker/config.json` pada setiap Node. Kredensial-kredensial akan dapat di-_merged_. Cara ini akan dapat bekerja pada Google Kubernetes Engine. diff --git a/content/id/docs/concepts/containers/overview.md b/content/id/docs/concepts/containers/overview.md index d31c760ee03a3..715230d14d551 100644 --- a/content/id/docs/concepts/containers/overview.md +++ b/content/id/docs/concepts/containers/overview.md @@ -21,7 +21,7 @@ ini membuat penyebaran lebih mudah di lingkungan cloud atau OS yang berbeda. ## Image-Image Kontainer -[Kontainer image](/docs/concepts/containers/images/) meruapakan paket perangkat lunak +[Kontainer image](/id/docs/concepts/containers/images/) meruapakan paket perangkat lunak yang siap dijalankan, mengandung semua yang diperlukan untuk menjalankan sebuah aplikasi: kode dan setiap *runtime* yang dibutuhkan, *library* dari aplikasi dan sistem, dan nilai *default* untuk penganturan yang penting. diff --git a/content/id/docs/concepts/containers/runtime-class.md b/content/id/docs/concepts/containers/runtime-class.md index 31bd8a25ecbda..73252a03e4af3 100644 --- a/content/id/docs/concepts/containers/runtime-class.md +++ b/content/id/docs/concepts/containers/runtime-class.md @@ -45,7 +45,7 @@ soal bagaimana melakukan konfigurasi untuk implementasi CRI yang kamu miliki. Untuk saat ini, RuntimeClass berasumsi bahwa semua _node_ di dalam klaster punya konfigurasi yang sama (homogen). Jika ada _node_ yang punya konfigurasi berbeda dari yang lain (heterogen), maka perbedaan ini harus diatur secara independen di luar RuntimeClass -melalui fitur _scheduling_ (lihat [Menempatkan Pod pada Node](/docs/concepts/configuration/assign-pod-node/)). +melalui fitur _scheduling_ (lihat [Menempatkan Pod pada Node](/id/docs/concepts/configuration/assign-pod-node/)). {{< /note >}} Seluruh konfigurasi memiliki nama `handler` yang terkait, dijadikan referensi oleh RuntimeClass. @@ -91,7 +91,7 @@ spec: Kubelet akan mendapat instruksi untuk menggunakan RuntimeClass dengan nama yang sudah ditentukan tersebut untuk menjalankan Pod ini. Jika RuntimeClass dengan nama tersebut tidak ditemukan, atau CRI tidak dapat -menjalankan _handler_ yang terkait, maka Pod akan memasuki [tahap](/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) `Failed`. +menjalankan _handler_ yang terkait, maka Pod akan memasuki [tahap](/id/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) `Failed`. Lihat [_event_](/docs/tasks/debug-application-cluster/debug-application-introspection/) untuk mengetahui pesan error yang terkait. 
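For reference, the RuntimeClass object that a Pod's `runtimeClassName` refers to is a small cluster-scoped resource. A minimal sketch for the `node.k8s.io/v1beta1` API of this era follows; the names `myclass` and `myconfiguration` are placeholders, and the handler must match a handler actually configured in the nodes' CRI implementation:

```yaml
# Minimal RuntimeClass sketch (node.k8s.io/v1beta1).
apiVersion: node.k8s.io/v1beta1
kind: RuntimeClass
metadata:
  name: myclass            # value referenced by spec.runtimeClassName in a Pod
handler: myconfiguration   # name of the corresponding CRI handler on the nodes
```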
Jika tidak ada `runtimeClassName` yang ditentukan di dalam Pod, maka RuntimeHandler yang _default_ akan digunakan. diff --git a/content/id/docs/concepts/extend-kubernetes/api-extension/custom-resources.md b/content/id/docs/concepts/extend-kubernetes/api-extension/custom-resources.md index d8be642856c98..3a3ece65b029a 100644 --- a/content/id/docs/concepts/extend-kubernetes/api-extension/custom-resources.md +++ b/content/id/docs/concepts/extend-kubernetes/api-extension/custom-resources.md @@ -14,7 +14,7 @@ _Custom Resource_ adalah ekstensi dari Kubernetes API. Laman ini mendiskusikan k ## _Custom Resource_ -Sebuah sumber daya adalah sebuah *endpoint* pada [Kubernetes API](/docs/reference/using-api/api-overview/) yang menyimpan sebuah koleksi [objek API](/docs/concepts/overview/working-with-objects/kubernetes-objects/) dari sebuah jenis tertentu. Sebagai contoh, sumber daya bawaan Pod mengandung sebuah koleksi objek-objek Pod. +Sebuah sumber daya adalah sebuah *endpoint* pada [Kubernetes API](/docs/reference/using-api/api-overview/) yang menyimpan sebuah koleksi [objek API](/id/docs/concepts/overview/working-with-objects/kubernetes-objects/) dari sebuah jenis tertentu. Sebagai contoh, sumber daya bawaan Pod mengandung sebuah koleksi objek-objek Pod. Sebuah _Custom Resource_ adalah sebuah ekstensi dari Kubernetes API yang tidak seharusnya tersedia pada pemasangan default Kubernetes. Namun, banyak fungsi-fungsi inti Kubernetes yang sekarang dibangun menggunakan _Custom Resource_, membuat Kubernetes lebih modular. @@ -25,7 +25,7 @@ dipasang, pengguna dapat membuat dan mengakses objek-objek _Custom Resource_ men Dengan sendirinya, _Custom Resource_ memungkinkan kamu untuk menyimpan dan mengambil data terstruktur. Ketika kamu menggabungkan sebuah _Custom Resource_ dengan _controller_ khusus, _Custom Resource_ akan memberikan sebuah API deklaratif yang sebenarnya. -Sebuah [API deklaratif](/docs/concepts/overview/working-with-objects/kubernetes-objects/#memahami-konsep-objek-objek-yang-ada-pada-kubernetes) +Sebuah [API deklaratif](/id/docs/concepts/overview/working-with-objects/kubernetes-objects/#memahami-konsep-objek-objek-yang-ada-pada-kubernetes) memungkinkan kamu untuk mendeklarasikan atau menspesifikasikan keadaan dari sumber daya kamu dan mencoba untuk menjaga agar keadaan saat itu tersinkronisasi dengan keadaan yang diinginkan. *Controller* menginterpretasikan data terstruktur sebagai sebuah rekaman dari keadaan yang diinginkan pengguna, dan secara kontinu menjaga keadaan ini. Kamu bisa men-_deploy_ dan memperbaharui sebuah _controller_ khusus pada sebuah klaster yang berjalan, secara independen dari siklus hidup klaster itu sendiri. _Controller_ khusus dapat berfungsi dengan sumber daya jenis apapun, tetapi mereka sangat efektif ketika dikombinasikan dengan _Custom Resource_. [_Operator pattern_](https://coreos.com/blog/introducing-operators.html) mengkombinasikan _Custom Resource_ dan _controller_ khusus. Kamu bisa menggunakan _controller_ khusus untuk menyandi pengetahuan domain untuk aplikasi spesifik menjadi sebuah ekstensi dari Kubernetes API. @@ -40,7 +40,7 @@ Ketika membuat sebuah API baru, pikirkan apakah kamu ingin [mengagregasikan API | Kamu mau tipe baru yang dapat dibaca dan ditulis dengan `kubectl`.| Dukungan `kubectl` tidak diperlukan | | Kamu mau melihat tipe baru pada sebuah Kubernetes UI, seperti dasbor, bersama dengan tipe-tipe bawaan. | Dukungan Kubernetes UI tidak diperlukan. | | Kamu mengembangkan sebuah API baru. 
| Kamu memiliki sebuah program yang melayani API kamu dan dapat berkerja dengan baik. | -| Kamu bersedia menerima pembatasan format yang Kubernetes terapkan pada jalur sumber daya API (Lihat [Ikhtisar API](/docs/concepts/overview/kubernetes-api/).) | Kamu perlu memiliki jalur REST spesifik agar menjadi cocok dengan REST API yang telah didefinisikan. | +| Kamu bersedia menerima pembatasan format yang Kubernetes terapkan pada jalur sumber daya API (Lihat [Ikhtisar API](/id/docs/concepts/overview/kubernetes-api/).) | Kamu perlu memiliki jalur REST spesifik agar menjadi cocok dengan REST API yang telah didefinisikan. | | Sumber daya kamu secara alami mencakup hingga sebuah klaster atau sebuah *namespace* dari sebuah klaster. | Sumber daya yang mencakup klaster atau *namespace* adalah sebuah ketidakcocokan; kamu perlu mengendalikan jalur sumber daya spesifik. | | Kamu ingin menggunakan kembali [dukungan fitur Kubernetes API](#fitur-umum). | Kamu tidak membutuhkan fitur tersebut. | @@ -77,7 +77,7 @@ Gunakan ConfigMap jika salah satu hal berikut berlaku: * Kamu ingin melakukan pembaharuan bergulir lewat Deployment, dll, ketika berkas diperbaharui. {{< note >}} -Gunakan sebuah [Secret](/docs/concepts/configuration/secret/) untuk data sensitif, yang serupa dengan ConfigMap tetapi lebih aman. +Gunakan sebuah [Secret](/id/docs/concepts/configuration/secret/) untuk data sensitif, yang serupa dengan ConfigMap tetapi lebih aman. {{< /note >}} Gunakan sebuah _Custom Resource_ (CRD atau _Aggregated API_) jika kebanyakan dari hal berikut berlaku: @@ -93,11 +93,11 @@ Gunakan sebuah _Custom Resource_ (CRD atau _Aggregated API_) jika kebanyakan dar Kubernetes menyediakan dua cara untuk menambahkan sumber daya ke klaster kamu: - CRD cukup sederhana dan bisa diciptakan tanpa pemrograman apapun. -- [Agregasi API](/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/) membutuhkan pemrograman, tetapi memungkinkan kendali lebih terhadap perilaku API seperti bagaimana data disimpan dan perubahan antar versi API. +- [Agregasi API](/id/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/) membutuhkan pemrograman, tetapi memungkinkan kendali lebih terhadap perilaku API seperti bagaimana data disimpan dan perubahan antar versi API. Kubernetes menyediakan kedua opsi tersebut untuk memenuhi kebutuhan pengguna berbeda, jadi tidak ada kemudahan penggunaan atau fleksibilitas yang dikompromikan. -_Aggregated API_ adalah bawahan dari APIServer yang duduk dibelakang API server utama, yang bertindak sebagai sebuah _proxy_. Pengaturan ini disebut [Agregasi API](/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/) (AA). Untuk pengguna, yang terlihat adalah Kubernetes API yang diperluas. +_Aggregated API_ adalah bawahan dari APIServer yang duduk dibelakang API server utama, yang bertindak sebagai sebuah _proxy_. Pengaturan ini disebut [Agregasi API](/id/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/) (AA). Untuk pengguna, yang terlihat adalah Kubernetes API yang diperluas. CRD memungkinkan pengguna untuk membuat tipe baru sumber daya tanpa menambahkan APIserver lain. Kamu tidak perlu mengerti Agregasi API untuk menggunakan CRD. @@ -115,7 +115,7 @@ Lihat [contoh *controller* khusus](https://github.com/kubernetes/sample-controll Biasanya, tiap sumber daya di API Kubernetes membutuhkan kode yang menangani permintaan REST dan mengatur peyimpanan tetap dari objek-objek. 
Server Kubernetes API utama menangani sumber daya bawaan seperti Pod dan Service, dan juga menangani _Custom Resource_ dalam sebuah cara yang umum melalui [CRD](#customresourcedefinition). -[Lapisan agregasi](/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/) memungkinkan kamu untuk menyediakan implementasi khusus untuk _Custom Resource_ dengan menulis dan men-_deploy_ API server kamu yang berdiri sendiri. API server utama menlimpahkan permintaan kepada kamu untuk _Custom Resource_ yang kamu tangani, membuat mereka tersedia untuk semua kliennya. +[Lapisan agregasi](/id/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/) memungkinkan kamu untuk menyediakan implementasi khusus untuk _Custom Resource_ dengan menulis dan men-_deploy_ API server kamu yang berdiri sendiri. API server utama menlimpahkan permintaan kepada kamu untuk _Custom Resource_ yang kamu tangani, membuat mereka tersedia untuk semua kliennya. ## Memilih sebuah metode untuk menambahkan _Custom Resource_ @@ -216,7 +216,7 @@ Ketika kamu menambahkan sebuah _Custom Resource_, kamu dapat mengaksesnya dengan ## {{% heading "whatsnext" %}} -* Belajar bagaimana untuk [Memperluas Kubernetes API dengan lapisan agregasi](/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/). +* Belajar bagaimana untuk [Memperluas Kubernetes API dengan lapisan agregasi](/id/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/). * Belajar bagaimana untuk [Memperluas Kubernetes API dengan CustomResourceDefinition](/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/). diff --git a/content/id/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md b/content/id/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md index 3bde2909ca101..62f7c8d41d737 100644 --- a/content/id/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md +++ b/content/id/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md @@ -37,7 +37,7 @@ Dalam pendaftaran, _plugin_ perangkat perlu mengirim: * Nama Unix socket-nya. * Versi API Plugin Perangkat yang dipakai. * `ResourceName` yang ingin ditunjukkan. `ResourceName` ini harus mengikuti - [skema penamaan sumber daya ekstensi](/docs/concepts/configuration/manage-compute-resources-container/#extended-resources) + [skema penamaan sumber daya ekstensi](/id/docs/concepts/configuration/manage-compute-resources-container/#extended-resources) sebagai `vendor-domain/tipe-sumber-daya`. (Contohnya, NVIDIA GPU akan dinamai `nvidia.com/gpu`.) diff --git a/content/id/docs/concepts/extend-kubernetes/extend-cluster.md b/content/id/docs/concepts/extend-kubernetes/extend-cluster.md index b7b07b46ff46d..9d80881724397 100644 --- a/content/id/docs/concepts/extend-kubernetes/extend-cluster.md +++ b/content/id/docs/concepts/extend-kubernetes/extend-cluster.md @@ -36,7 +36,7 @@ _Flag-flag_ dan _berkas-berkas konfigurasi_ didokumentasikan di bagian Referensi _Flag-flag_ dan berkas-berkas konfigurasi mungkin tidak selalu dapat diubah pada layanan Kubernetes yang _hosted_ atau pada distribusi dengan instalasi yang dikelola. Ketika mereka dapat diubah, mereka biasanya hanya dapat diubah oleh Administrator Klaster. Dan juga, mereka dapat sewaktu-waktu diubah dalam versi Kubernetes di masa depan, dan menyetel mereka mungkin memerlukan proses pengulangan kembali. Oleh karena itu, mereka harus digunakan hanya ketika tidak ada pilihan lain. 
-*API kebijakan bawaan*, seperti [ResourceQuota](/docs/concepts/policy/resource-quotas/), [PodSecurityPolicy](/docs/concepts/policy/pod-security-policy/), [NetworkPolicy](/docs/concepts/services-networking/network-policies/) dan Role-based Access Control ([RBAC](/docs/reference/access-authn-authz/rbac/)), adalah API bawaan Kubernetes. API biasanya digunakan oleh layanan Kubernetes yang _hosted_ dan diatur oleh instalasi Kubernetes. Mereka bersifat deklaratif dan menggunakan konvensi yang sama dengan sumber daya Kubernetes lainnya seperti pod-pod, jadi konfigurasi klaster baru dapat diulang-ulang dan dapat diatur dengan cara yang sama dengan aplikasi. Dan, ketika mereka stabil, mereka mendapatkan keuntungan dari [kebijakan pendukung yang jelas](/docs/reference/deprecation-policy/) seperti API Kubernetes lainnya. Oleh karena itu, mereka lebih disukai daripada _berkas konfigurasi_ dan _flag-flag_ saat mereka cocok dengan situasi yang dibutuhkan. +*API kebijakan bawaan*, seperti [ResourceQuota](/id/docs/concepts/policy/resource-quotas/), [PodSecurityPolicy](/id/docs/concepts/policy/pod-security-policy/), [NetworkPolicy](/id/docs/concepts/services-networking/network-policies/) dan Role-based Access Control ([RBAC](/id/docs/reference/access-authn-authz/rbac/)), adalah API bawaan Kubernetes. API biasanya digunakan oleh layanan Kubernetes yang _hosted_ dan diatur oleh instalasi Kubernetes. Mereka bersifat deklaratif dan menggunakan konvensi yang sama dengan sumber daya Kubernetes lainnya seperti pod-pod, jadi konfigurasi klaster baru dapat diulang-ulang dan dapat diatur dengan cara yang sama dengan aplikasi. Dan, ketika mereka stabil, mereka mendapatkan keuntungan dari [kebijakan pendukung yang jelas](/docs/reference/deprecation-policy/) seperti API Kubernetes lainnya. Oleh karena itu, mereka lebih disukai daripada _berkas konfigurasi_ dan _flag-flag_ saat mereka cocok dengan situasi yang dibutuhkan. ## Perluasan @@ -107,7 +107,7 @@ Untuk lebih jelasnya tentang Sumber Daya _Custom_, lihat [Panduan Konsep Sumber ### Menggabungkan API Baru dengan Otomasi -Kombinasi antara sebuah API sumber daya _custom_ dan _loop_ kontrol disebut [Pola Operator](/docs/concepts/extend-kubernetes/operator/). Pola Operator digunakan untuk mengelola aplikasi yang spesifik dan biasanya _stateful_. API-API _custom_ dan _loop_ kontrol ini dapat digunakan untuk mengatur sumber daya lainnya, seperti penyimpanan dan kebijakan-kebijakan. +Kombinasi antara sebuah API sumber daya _custom_ dan _loop_ kontrol disebut [Pola Operator](/id/docs/concepts/extend-kubernetes/operator/). Pola Operator digunakan untuk mengelola aplikasi yang spesifik dan biasanya _stateful_. API-API _custom_ dan _loop_ kontrol ini dapat digunakan untuk mengatur sumber daya lainnya, seperti penyimpanan dan kebijakan-kebijakan. 
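As a rough illustration of the custom resource route mentioned above, a CustomResourceDefinition can add a new resource type declaratively, without writing an extra API server. The sketch below uses the placeholder group `stable.example.com` and kind `CronTab`:

```yaml
# Sketch of a CustomResourceDefinition (apiextensions.k8s.io/v1beta1).
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: crontabs.stable.example.com   # must be <plural>.<group>
spec:
  group: stable.example.com
  versions:
    - name: v1
      served: true
      storage: true
  scope: Namespaced
  names:
    plural: crontabs
    singular: crontab
    kind: CronTab
```

Once such a definition is applied, `CronTab` objects can be created, listed, and deleted with `kubectl` like any built-in resource; pairing them with a custom controller gives the Operator pattern described above.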
### Mengubah Sumber Daya Bawaan @@ -173,6 +173,6 @@ Penjadwal juga mendukung [_webhook_](https://github.com/kubernetes/community/blo * [_Plugin_ Jaringan](/docs/concepts/cluster-administration/network-plugins/) * [_Plugin_ Perangkat](/docs/concepts/cluster-administration/device-plugins/) * Pelajari tentang [_Plugin_ kubectl](/docs/tasks/extend-kubectl/kubectl-plugins/) -* Pelajari tentang [Pola Operator](/docs/concepts/extend-kubernetes/operator/) +* Pelajari tentang [Pola Operator](/id/docs/concepts/extend-kubernetes/operator/) diff --git a/content/id/docs/concepts/extend-kubernetes/operator.md b/content/id/docs/concepts/extend-kubernetes/operator.md index 02df63bb79769..315ae35e3da95 100644 --- a/content/id/docs/concepts/extend-kubernetes/operator.md +++ b/content/id/docs/concepts/extend-kubernetes/operator.md @@ -7,7 +7,7 @@ weight: 30 Operator adalah ekstensi perangkat lunak untuk Kubernetes yang memanfaatkan -[_custom resource_](/docs/concepts/extend-kubernetes/api-extension/custom-resources/) +[_custom resource_](/id/docs/concepts/extend-kubernetes/api-extension/custom-resources/) untuk mengelola aplikasi dan komponen-komponennya. Operator mengikuti prinsip Kubernetes, khususnya dalam hal [_control loop_](/docs/concepts/#kubernetes-control-plane). @@ -124,11 +124,9 @@ Kamu juga dapat mengimplementasikan Operator (yaitu, _Controller_) dengan menggunakan bahasa / _runtime_ yang dapat bertindak sebagai [klien dari API Kubernetes](/docs/reference/using-api/client-libraries/). +## {{% heading "whatsnext" %}} - -{{% capture Selanjutnya %}} - -* Memahami lebih lanjut tentang [_custome resources_](/docs/concepts/extend-kubernetes/api-extension/custom-resources/) +* Memahami lebih lanjut tentang [_custome resources_](/id/docs/concepts/extend-kubernetes/api-extension/custom-resources/) * Temukan "ready-made" _operators_ dalam [OperatorHub.io](https://operatorhub.io/) untuk memenuhi use case kamu * Menggunakan perangkat yang ada untuk menulis Operator kamu sendiri, misalnya: diff --git a/content/id/docs/concepts/extend-kubernetes/service-catalog.md b/content/id/docs/concepts/extend-kubernetes/service-catalog.md index efea4eda97e86..cd63a89355aee 100644 --- a/content/id/docs/concepts/extend-kubernetes/service-catalog.md +++ b/content/id/docs/concepts/extend-kubernetes/service-catalog.md @@ -46,7 +46,7 @@ untuk berkomunikasi dengan makelar servis, bertindak sebagai perantara untuk API merundingkan penyediaan awal dan mengambil kredensial untuk aplikasi bisa menggunakan servis terkelola tersebut. Ini terimplementasi sebagai ekstensi API Server dan pengontrol, menggunakan etcd sebagai media penyimpanan. -Ini juga menggunakan [lapisan agregasi](/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/) +Ini juga menggunakan [lapisan agregasi](/id/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/) yang tersedia pada Kubernetes versi 1.7+ untuk menampilkan API-nya.
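To make the aggregation layer mentioned just above a bit more concrete, an extension API server is registered with the main API server through an APIService object. The sketch below is illustrative only; the Service name and namespace are assumptions, and a real registration also needs TLS settings such as `caBundle`:

```yaml
# Sketch: registering an aggregated API server via an APIService object.
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  name: v1beta1.servicecatalog.k8s.io
spec:
  group: servicecatalog.k8s.io
  version: v1beta1
  service:
    name: service-catalog-apiserver   # assumed Service in front of the extension API server
    namespace: catalog                # assumed namespace
  groupPriorityMinimum: 10000
  versionPriority: 15
```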
diff --git a/content/id/docs/concepts/overview/components.md b/content/id/docs/concepts/overview/components.md index 63e7b4b3af9b6..aa2ee52152263 100644 --- a/content/id/docs/concepts/overview/components.md +++ b/content/id/docs/concepts/overview/components.md @@ -120,7 +120,7 @@ Meskipun tidak semua addons dibutuhkan, semua klaster Kubernetes hendakny memiliki DNS klaster. Komponen ini penting karena banyak dibutuhkan oleh komponen lainnya. -[Klaster DNS](/docs/concepts/cluster-administration/addons/) adalah server DNS, selain beberapa server DNS lain yang sudah ada di +[Klaster DNS](/id/docs/concepts/cluster-administration/addons/) adalah server DNS, selain beberapa server DNS lain yang sudah ada di environment kamu, yang berfungsi sebagai catatan DNS bagi Kubernetes services Kontainer yang dimulai oleh kubernetes secara otomatis akan memasukkan server DNS ini @@ -129,7 +129,7 @@ ke dalam mekanisme pencarian DNS yang dimilikinya. ### Web UI (Dasbor) -[Dasbor](/docs/tasks/access-application-cluster/web-ui-dashboard/) adalah antar muka berbasis web multifungsi yang ada pada klaster Kubernetes. +[Dasbor](/id/docs/tasks/access-application-cluster/web-ui-dashboard/) adalah antar muka berbasis web multifungsi yang ada pada klaster Kubernetes. Dasbor ini memungkinkan user melakukan manajemen dan troubleshooting klaster maupun aplikasi yang ada pada klaster itu sendiri. @@ -143,7 +143,7 @@ untuk melakukan pencarian data yang dibutuhkan. ### Cluster-level Logging -[Cluster-level logging](/docs/concepts/cluster-administration/logging/) bertanggung jawab mencatat log kontainer pada +[Cluster-level logging](/id/docs/concepts/cluster-administration/logging/) bertanggung jawab mencatat log kontainer pada penyimpanan log terpusat dengan antar muka yang dapat digunakan untuk melakukan pencarian. diff --git a/content/id/docs/concepts/overview/object-management-kubectl/declarative-config.md b/content/id/docs/concepts/overview/object-management-kubectl/declarative-config.md index 9599feaf24332..46066769d4bd1 100644 --- a/content/id/docs/concepts/overview/object-management-kubectl/declarative-config.md +++ b/content/id/docs/concepts/overview/object-management-kubectl/declarative-config.md @@ -25,8 +25,8 @@ Lihat [Pengelolaan Objek Kubernetes](/docs/concepts/overview/object-management-k Konfigurasi objek secara deklaratif membutuhkan pemahaman yang baik tentang definisi dan konfigurasi objek-objek Kubernetes. 
Jika belum pernah, kamu disarankan untuk membaca terlebih dulu dokumen-dokumen berikut: -- [Pengelolaan Objek Kubernetes Menggunakan Perintah Imperatif](/docs/concepts/overview/object-management-kubectl/imperative-command/) -- [Pengelolaan Objek Kubernetes Menggunakan File Konfigurasi Imperatif](/docs/concepts/overview/object-management-kubectl/imperative-config/) +- [Pengelolaan Objek Kubernetes Menggunakan Perintah Imperatif](/id/docs/concepts/overview/object-management-kubectl/imperative-command/) +- [Pengelolaan Objek Kubernetes Menggunakan File Konfigurasi Imperatif](/id/docs/concepts/overview/object-management-kubectl/imperative-config/) Berikut adalah beberapa defnisi dari istilah-istilah yang digunakan dalam dokumen ini: @@ -862,8 +862,8 @@ template: ## {{% heading "whatsnext" %}} -- [Pengelolaan Objek Kubernetes Menggunakan Perintah Imperatif](/docs/concepts/overview/object-management-kubectl/imperative-command/) -- [Pengelolaan Objek Kubernetes secara Imperatif Menggunakan File Konfigurasi](/docs/concepts/overview/object-management-kubectl/imperative-config/) +- [Pengelolaan Objek Kubernetes Menggunakan Perintah Imperatif](/id/docs/concepts/overview/object-management-kubectl/imperative-command/) +- [Pengelolaan Objek Kubernetes secara Imperatif Menggunakan File Konfigurasi](/id/docs/concepts/overview/object-management-kubectl/imperative-config/) - [Rujukan Perintah Kubectl](/docs/reference/generated/kubectl/kubectl/) - [Rujukan API Kubernetes](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) diff --git a/content/id/docs/concepts/overview/object-management-kubectl/imperative-command.md b/content/id/docs/concepts/overview/object-management-kubectl/imperative-command.md index e77cc9ca6343a..23489efb5940e 100644 --- a/content/id/docs/concepts/overview/object-management-kubectl/imperative-command.md +++ b/content/id/docs/concepts/overview/object-management-kubectl/imperative-command.md @@ -126,8 +126,8 @@ kubectl create --edit -f /tmp/srv.yaml ## {{% heading "whatsnext" %}} -- [Pengelolaan Objek Kubernetes secara Imperatif dengan Menggunakan Konfigurasi Objek](/docs/concepts/overview/object-management-kubectl/imperative-config/) -- [Pengelolaan Objek Kubernetes secara Deklaratif dengan Menggunakan File Konfigurasi](/docs/concepts/overview/object-management-kubectl/declarative-config/) +- [Pengelolaan Objek Kubernetes secara Imperatif dengan Menggunakan Konfigurasi Objek](/id/docs/concepts/overview/object-management-kubectl/imperative-config/) +- [Pengelolaan Objek Kubernetes secara Deklaratif dengan Menggunakan File Konfigurasi](/id/docs/concepts/overview/object-management-kubectl/declarative-config/) - [Rujukan Perintah Kubectl](/docs/reference/generated/kubectl/kubectl/) - [Kubernetes API Reference](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) diff --git a/content/id/docs/concepts/overview/object-management-kubectl/imperative-config.md b/content/id/docs/concepts/overview/object-management-kubectl/imperative-config.md index 7df68f579da59..94f1082e35e39 100644 --- a/content/id/docs/concepts/overview/object-management-kubectl/imperative-config.md +++ b/content/id/docs/concepts/overview/object-management-kubectl/imperative-config.md @@ -108,8 +108,8 @@ template: ## {{% heading "whatsnext" %}} -- [Pengelolaan Objek Kubernetes Menggunakan Perintah Imperatif](/docs/concepts/overview/object-management-kubectl/imperative-command/) -- [Pengelolaan Objek Kubernetes secara Deklaratif dengan Menggunakan File 
Konfigurasi](/docs/concepts/overview/object-management-kubectl/declarative-config/) +- [Pengelolaan Objek Kubernetes Menggunakan Perintah Imperatif](/id/docs/concepts/overview/object-management-kubectl/imperative-command/) +- [Pengelolaan Objek Kubernetes secara Deklaratif dengan Menggunakan File Konfigurasi](/id/docs/concepts/overview/object-management-kubectl/declarative-config/) - [Rujukan Perintah Kubectl](/docs/reference/generated/kubectl/kubectl/) - [Rujukan API Kubernetes](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) diff --git a/content/id/docs/concepts/overview/working-with-objects/annotations.md b/content/id/docs/concepts/overview/working-with-objects/annotations.md index 8a822f255d96c..aaa238add598c 100644 --- a/content/id/docs/concepts/overview/working-with-objects/annotations.md +++ b/content/id/docs/concepts/overview/working-with-objects/annotations.md @@ -80,5 +80,5 @@ Prefiks `kubernetes.io/` dan `k8s.io/` merupakan reservasi dari komponen inti Ku ## {{% heading "whatsnext" %}} -Pelajari lebih lanjut tentang [Label dan Selektor](/docs/concepts/overview/working-with-objects/labels/). +Pelajari lebih lanjut tentang [Label dan Selektor](/id/docs/concepts/overview/working-with-objects/labels/). diff --git a/content/id/docs/concepts/overview/working-with-objects/field-selectors.md b/content/id/docs/concepts/overview/working-with-objects/field-selectors.md index 7cd81495cdc62..e46916ee3df06 100644 --- a/content/id/docs/concepts/overview/working-with-objects/field-selectors.md +++ b/content/id/docs/concepts/overview/working-with-objects/field-selectors.md @@ -3,14 +3,14 @@ title: Selektor Field weight: 60 --- -Selektor *field* memungkinkan kamu untuk [memilih (*select*) *resource* Kubernetes](/docs/concepts/overview/working-with-objects/kubernetes-objects) berdasarkan +Selektor *field* memungkinkan kamu untuk [memilih (*select*) *resource* Kubernetes](/id/docs/concepts/overview/working-with-objects/kubernetes-objects) berdasarkan nilai dari satu atau banyak *field resource*. Di bawah ini merupakan contoh dari beberapa *query* selektor *field*: * `metadata.name=my-service` * `metadata.namespace!=default` * `status.phase=Pending` -Perintah `kubectl` di bawah ini memilih semua Pod dengan *field* [`status.phase`](/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) yang bernilai +Perintah `kubectl` di bawah ini memilih semua Pod dengan *field* [`status.phase`](/id/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) yang bernilai `Running`: ```shell @@ -50,7 +50,7 @@ kubectl get services --field-selector metadata.namespace!=default ## Selektor berantai -Seperti halnya [label](/docs/concepts/overview/working-with-objects/labels) dan selektor-selektor lainnya, kamu dapat membuat selektor *field* berantai +Seperti halnya [label](/id/docs/concepts/overview/working-with-objects/labels) dan selektor-selektor lainnya, kamu dapat membuat selektor *field* berantai (*chained*) dengan *list* yang dipisahkan oleh koma. 
Perintah `kubectl` di bawah ini memilih semua Pod dengan `status.phase` tidak sama dengan `Running` dan *field* `spec.restartPolicy` sama dengan `Always`: diff --git a/content/id/docs/concepts/overview/working-with-objects/kubernetes-objects.md b/content/id/docs/concepts/overview/working-with-objects/kubernetes-objects.md index 57eef5e9c6d46..aa702827b9ad4 100644 --- a/content/id/docs/concepts/overview/working-with-objects/kubernetes-objects.md +++ b/content/id/docs/concepts/overview/working-with-objects/kubernetes-objects.md @@ -30,7 +30,7 @@ memberikan informasi pada sistem Kubernetes mengenai perilaku apakah yang kamu i dengan kata lain ini merupakan definisi _state_ klaster yang kamu inginkan. Untuk menggunakan objek-objek Kubernetes--baik membuat, mengubah, atau menghapus objek-objek tersebut--kamu -harus menggunakan [API Kubernetes](/docs/concepts/overview/kubernetes-api/). +harus menggunakan [API Kubernetes](/id/docs/concepts/overview/kubernetes-api/). Ketika kamu menggunakan perintah `kubectl`, perintah ini akan melakukan _API call_ untuk perintah yang kamu berikan. Kamu juga dapat menggunakan API Kubernetes secara langsung pada program yang kamu miliki menggunakan salah satu [_library_ klien](/docs/reference/using-api/client-libraries/) yang disediakan. @@ -103,7 +103,7 @@ dan format _spec_ untuk _Deployment_ dapat ditemukan ## {{% heading "whatsnext" %}} -* Pelajari lebih lanjut mengenai dasar-dasar penting bagi objek Kubernetes, seperti [Pod](/docs/concepts/workloads/pods/pod-overview/). +* Pelajari lebih lanjut mengenai dasar-dasar penting bagi objek Kubernetes, seperti [Pod](/id/docs/concepts/workloads/pods/pod-overview/). diff --git a/content/id/docs/concepts/overview/working-with-objects/names.md b/content/id/docs/concepts/overview/working-with-objects/names.md index 5527c15b72033..0d6528c41d377 100644 --- a/content/id/docs/concepts/overview/working-with-objects/names.md +++ b/content/id/docs/concepts/overview/working-with-objects/names.md @@ -8,7 +8,7 @@ weight: 20 Seluruh objek di dalam REST API Kubernetes secara jelas ditandai dengan nama dan UID. -Apabila pengguna ingin memberikan atribut tidak unik, Kubernetes menyediakan [label](/docs/user-guide/labels) dan [anotasi](/docs/concepts/overview/working-with-objects/annotations/). +Apabila pengguna ingin memberikan atribut tidak unik, Kubernetes menyediakan [label](/docs/user-guide/labels) dan [anotasi](/id/docs/concepts/overview/working-with-objects/annotations/). Bacalah [dokumentasi desain penanda](https://git.k8s.io/community/contributors/design-proposals/architecture/identifiers.md) agar kamu dapat memahami lebih lanjut sintaks yang digunakan untuk Nama dan UID. diff --git a/content/id/docs/concepts/overview/working-with-objects/namespaces.md b/content/id/docs/concepts/overview/working-with-objects/namespaces.md index 5eb358a17adde..89ffb8ea14503 100644 --- a/content/id/docs/concepts/overview/working-with-objects/namespaces.md +++ b/content/id/docs/concepts/overview/working-with-objects/namespaces.md @@ -19,7 +19,7 @@ Kubernetes mendukung banyak klaster virtual di dalam satu klaster fisik. Klaster *Namespace* menyediakan ruang untuk nama objek. Nama dari *resource* atau objek harus berbeda di dalam sebuah *namespace*, tetapi boleh sama jika berbeda *namespace*. *Namespace* tidak bisa dibuat di dalam *namespace* lain dan setiap *resource* atau objek Kubernetes hanya dapat berada di dalam satu *namespace*. 
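A Namespace itself is one of the simplest API objects; a minimal sketch, with `development` as an example name:

```yaml
# Minimal Namespace manifest.
apiVersion: v1
kind: Namespace
metadata:
  name: development
```

Applying it with `kubectl apply -f` creates an empty namespace that other objects can then be placed in.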
-*Namespace* merupakan cara yang digunakan untuk memisahkan *resource* klaster untuk beberapa pengguna (dengan [*resource quota*](/docs/concepts/policy/resource-quotas/)). +*Namespace* merupakan cara yang digunakan untuk memisahkan *resource* klaster untuk beberapa pengguna (dengan [*resource quota*](/id/docs/concepts/policy/resource-quotas/)). Dalam versi Kubernetes yang akan datang, objek di dalam satu *namespace* akan mempunyai *access control policies* yang sama secara *default*. @@ -74,7 +74,7 @@ kubectl config view | grep namespace: ## Namespace dan DNS -Saat kamu membuat sebuah [Service](/docs/user-guide/services), Kubernetes membuat [Entri DNS](/docs/concepts/services-networking/dns-pod-service/) untuk *service* tersebut. Entri *DNS* ini berformat `..svc.cluster.local`, yang berarti jika sebuah kontainer hanya menggunakan ``, kontainer tersebut akan berkomunikasi dengan *service* yang berada di dalam satu *namespace*. Ini berguna untuk menggunakan konfigurasi yang sama di beberapa *namespace* seperti *Development*, *Staging*, dan *Production*. Jika kamu ingin berkomunikasi antar *namespace*, kamu harus menggunakan seluruh *fully qualified domain name (FQDN)*. +Saat kamu membuat sebuah [Service](/docs/user-guide/services), Kubernetes membuat [Entri DNS](/id/docs/concepts/services-networking/dns-pod-service/) untuk *service* tersebut. Entri *DNS* ini berformat `..svc.cluster.local`, yang berarti jika sebuah kontainer hanya menggunakan ``, kontainer tersebut akan berkomunikasi dengan *service* yang berada di dalam satu *namespace*. Ini berguna untuk menggunakan konfigurasi yang sama di beberapa *namespace* seperti *Development*, *Staging*, dan *Production*. Jika kamu ingin berkomunikasi antar *namespace*, kamu harus menggunakan seluruh *fully qualified domain name (FQDN)*. ## Tidak semua objek di dalam Namespace diff --git a/content/id/docs/concepts/policy/limit-range.md b/content/id/docs/concepts/policy/limit-range.md index 6de9d69dd2d76..106f4c1a84ca2 100644 --- a/content/id/docs/concepts/policy/limit-range.md +++ b/content/id/docs/concepts/policy/limit-range.md @@ -1,6 +1,6 @@ --- title: LimitRange -content_template: templates/concept +content_type: concept weight: 10 --- diff --git a/content/id/docs/concepts/policy/pod-security-policy.md b/content/id/docs/concepts/policy/pod-security-policy.md index 2dbbd53144048..991ebb44aa8b7 100644 --- a/content/id/docs/concepts/policy/pod-security-policy.md +++ b/content/id/docs/concepts/policy/pod-security-policy.md @@ -45,13 +45,13 @@ Sejak API dari Pod Security Policy (`policy/v1beta1/podsecuritypolicy`) diaktifk ## Mengizinkan Kebijakan -Saat sebuah sumber daya PodSecurityPolicy dibuat, ia tidak melakukan apa-apa. Untuk menggunakannya, [Service Account](/docs/tasks/configure-pod-container/configure-service-account/) dari pengguna yang memintanya atau target Pod-nya harus diizinkan terlebih dahulu untuk menggunakan kebijakan tersebut, dengan membolehkan kata kerja `use` terhadap kebijakan tersebut. +Saat sebuah sumber daya PodSecurityPolicy dibuat, ia tidak melakukan apa-apa. Untuk menggunakannya, [Service Account](/id/docs/tasks/configure-pod-container/configure-service-account/) dari pengguna yang memintanya atau target Pod-nya harus diizinkan terlebih dahulu untuk menggunakan kebijakan tersebut, dengan membolehkan kata kerja `use` terhadap kebijakan tersebut. -Kebanyakan Pod Kubernetes tidak dibuat secara langsung oleh pengguna. 
Sebagai gantinya, mereka biasanya dibuat secara tidak langsung sebagai bagian dari sebuah [Deployment](/docs/concepts/workloads/controllers/deployment/), [ReplicaSet](/docs/concepts/workloads/controllers/replicaset/), atau pengontrol yang sudah ditemplat lainnya melalui Controller Manager. Memberikan akses untuk pengontrol terhadap kebijakan tersebut akan mengizinkan akses untuk *semua* Pod yang dibuat oleh pengontrol tersebut, sehingga metode yang lebih baik untuk mengizinkan kebijakan adalah dengan memberikan akses pada Service Account milik Pod (lihat [contohnya](#run-another-pod)). +Kebanyakan Pod Kubernetes tidak dibuat secara langsung oleh pengguna. Sebagai gantinya, mereka biasanya dibuat secara tidak langsung sebagai bagian dari sebuah [Deployment](/id/docs/concepts/workloads/controllers/deployment/), [ReplicaSet](/id/docs/concepts/workloads/controllers/replicaset/), atau pengontrol yang sudah ditemplat lainnya melalui Controller Manager. Memberikan akses untuk pengontrol terhadap kebijakan tersebut akan mengizinkan akses untuk *semua* Pod yang dibuat oleh pengontrol tersebut, sehingga metode yang lebih baik untuk mengizinkan kebijakan adalah dengan memberikan akses pada Service Account milik Pod (lihat [contohnya](#run-another-pod)). ### Melalui RBAC -[RBAC](/docs/reference/access-authn-authz/rbac/) adalah mode otorisasi standar Kubernetes, dan dapat digunakan dengan mudah untuk mengotorisasi penggunaan kebijakan-kebijakan. +[RBAC](/id/docs/reference/access-authn-authz/rbac/) adalah mode otorisasi standar Kubernetes, dan dapat digunakan dengan mudah untuk mengotorisasi penggunaan kebijakan-kebijakan. Pertama-tama, sebuah `Role` atau `ClusterRole` perlu memberikan akses pada kata kerja `use` terhadap kebijakan-kebijakan yang diinginkan. `rules` yang digunakan untuk memberikan akses tersebut terlihat seperti berikut: @@ -103,12 +103,12 @@ Jika sebuah `RoleBinding` (bukan `ClusterRoleBinding`) digunakan, maka ia hanya name: system:authenticated ``` -Untuk lebih banyak contoh pengikatan RBAC, lihat [Contoh Role Binding](/docs/reference/access-authn-authz/rbac#role-binding-examples). +Untuk lebih banyak contoh pengikatan RBAC, lihat [Contoh Role Binding](/id/docs/reference/access-authn-authz/rbac#role-binding-examples). Untuk contoh lengkap untuk mengotorisasi sebuah PodSecurityPolicy, lihat [di bawah](#contoh). ### Mengatasi Masalah -- [Controller Manager](/docs/admin/kube-controller-manager/) harus dijalankan terhadap [port API yang telah diamankan](/docs/reference/access-authn-authz/controlling-access/), dan tidak boleh memiliki izin _superuser_, atau semua permintaan akan melewati modul-modul otentikasi dan otorisasi, semua objek PodSecurityPolicy tidak akan diizinkan, dan semua pengguna dapat membuat Container-container yang _privileged_. Untuk lebih detil tentang mengkonfigurasi otorisasi Controller Manager, lihat [Controller Roles](/docs/reference/access-authn-authz/rbac/#controller-roles). +- [Controller Manager](/docs/admin/kube-controller-manager/) harus dijalankan terhadap [port API yang telah diamankan](/docs/reference/access-authn-authz/controlling-access/), dan tidak boleh memiliki izin _superuser_, atau semua permintaan akan melewati modul-modul otentikasi dan otorisasi, semua objek PodSecurityPolicy tidak akan diizinkan, dan semua pengguna dapat membuat Container-container yang _privileged_. Untuk lebih detil tentang mengkonfigurasi otorisasi Controller Manager, lihat [Controller Roles](/id/docs/reference/access-authn-authz/rbac/#controller-roles). 
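The RBAC passage above can be sketched as a ClusterRole granting the `use` verb on a hypothetical PodSecurityPolicy named `example`, bound to an assumed ServiceAccount `default` in an assumed namespace `demo`:

```yaml
# Sketch only: grants "use" on a hypothetical PodSecurityPolicy named "example".
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp-example-user
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['example']
---
# Bind the role to the ServiceAccount the Pods run as (assumed: default/demo).
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: psp-example-user-demo-default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: psp-example-user
subjects:
- kind: ServiceAccount
  name: default
  namespace: demo
```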
## Urutan Kebijakan @@ -324,7 +324,7 @@ determines if any container in a pod can enable privileged mode. ### Volume dan _file system_ -**Volume** - Menyediakan sebuah daftar putih dari tipe-tipe Volume yang diizinkan. Nilai-nilai yang diizinkan sesuai dengan sumber Volume yang didefinisikan saat membuat sebuah Volume. Untuk daftar lengkap tipe-tipe Volume, lihat [tipe-tipe Volume](/docs/concepts/storage/volumes/#tipe-tipe-volume). Sebagai tambahan, `*` dapat digunakan untuk mengizinkan semua tipe Volume. +**Volume** - Menyediakan sebuah daftar putih dari tipe-tipe Volume yang diizinkan. Nilai-nilai yang diizinkan sesuai dengan sumber Volume yang didefinisikan saat membuat sebuah Volume. Untuk daftar lengkap tipe-tipe Volume, lihat [tipe-tipe Volume](/id/docs/concepts/storage/volumes/#tipe-tipe-volume). Sebagai tambahan, `*` dapat digunakan untuk mengizinkan semua tipe Volume. **Kumpulan Volume-volume minimal yang direkomendasikan** untuk PodSecurityPolicy baru adalah sebagai berikut: diff --git a/content/id/docs/concepts/policy/resource-quotas.md b/content/id/docs/concepts/policy/resource-quotas.md index 47bfa996bb458..c001ef4a40a7a 100644 --- a/content/id/docs/concepts/policy/resource-quotas.md +++ b/content/id/docs/concepts/policy/resource-quotas.md @@ -81,7 +81,7 @@ Berikut jenis-jenis sumber daya yang didukung: ### Resource Quota untuk sumber daya yang diperluas Sebagai tambahan untuk sumber daya yang disebutkan di atas, pada rilis 1.10, dukungan kuota untuk -[sumber daya yang diperluas](/docs/concepts/configuration/manage-compute-resources-container/#extended-resources) ditambahkan. +[sumber daya yang diperluas](/id/docs/concepts/configuration/manage-compute-resources-container/#extended-resources) ditambahkan. Karena _overcommit_ tidak diperbolehkan untuk sumber daya yang diperluas, tidak masuk akal untuk menentukan keduanya; `requests` dan `limits` untuk sumber daya yang diperluas yang sama pada sebuah kuota. Jadi, untuk @@ -98,7 +98,7 @@ Lihat [Melihat dan Menyetel Kuota](#melihat-dan-menyetel-kuota) untuk informasi ## Resource Quota untuk penyimpanan -Kamu dapat membatasi jumlah total [sumber daya penyimpanan](/docs/concepts/storage/persistent-volumes/) yang dapat +Kamu dapat membatasi jumlah total [sumber daya penyimpanan](/id/docs/concepts/storage/persistent-volumes/) yang dapat diminta pada sebuah Namespace. Sebagai tambahan, kamu dapat membatasi penggunaan sumber daya penyimpanan berdasarkan _storage class_ @@ -107,9 +107,9 @@ sumber daya penyimpanan tersebut. | Nama Sumber Daya | Deskripsi | | --------------------- | ----------------------------------------------------------- | | `requests.storage` | Pada seluruh Persistent Volume Claim, jumlah `requests` penyimpanan tidak dapat melebihi nilai ini. | -| `persistentvolumeclaims` | Jumlah kuantitas [Persistent Volume Claim](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) yang dapat ada di dalam sebuah Namespace. | +| `persistentvolumeclaims` | Jumlah kuantitas [Persistent Volume Claim](/id/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) yang dapat ada di dalam sebuah Namespace. | | `.storageclass.storage.k8s.io/requests.storage` | Pada seluruh Persistent Volume Claim yang dikaitkan dengan sebuah nama _storage-class_ (melalui kolom `storageClassName`), jumlah permintaan penyimpanan tidak dapat melebihi nilai ini. 
| -| `.storageclass.storage.k8s.io/persistentvolumeclaims` | Pada seluruh Persistent Volume Claim yang dikaitkan dengan sebuah nama _storage-class_ (melalui kolom `storageClassName`), jumlah kuantitas [Persistent Volume Claim](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) yang dapat ada di dalam sebuah Namespace. | +| `.storageclass.storage.k8s.io/persistentvolumeclaims` | Pada seluruh Persistent Volume Claim yang dikaitkan dengan sebuah nama _storage-class_ (melalui kolom `storageClassName`), jumlah kuantitas [Persistent Volume Claim](/id/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) yang dapat ada di dalam sebuah Namespace. | Sebagai contoh, jika sebuah operator ingin membatasi penyimpanan dengan Storage Class `gold` yang berbeda dengan Storage Class `bronze`, maka operator tersebut dapat menentukan kuota sebagai berikut: @@ -163,7 +163,7 @@ Berikut jenis-jenis yang telah didukung: | Nama Sumber Daya | Deskripsi | | ------------------------------- | ------------------------------------------------- | | `configmaps` | Jumlah total ConfigMap yang dapat berada pada suatu Namespace. | -| `persistentvolumeclaims` | Jumlah total PersistentVolumeClaim[persistent volume claims](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) yang dapat berada pada suatu Namespace. | +| `persistentvolumeclaims` | Jumlah total PersistentVolumeClaim[persistent volume claims](/id/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) yang dapat berada pada suatu Namespace. | | `pods` | Jumlah total Pod yang berada pada kondisi non-terminal yang dapat berada pada suatu Namespace. Sebuah Pod berada kondisi terminal yaitu jika `.status.phase in (Failed, Succeded)` adalah `true`. | | `replicationcontrollers` | Jumlah total ReplicationController yang dapat berada pada suatu Namespace. | | `resourcequotas` | Jumlah total [ResourceQuota](/docs/reference/access-authn-authz/admission-controllers/#resourcequota) yang dapat berada pada suatu Namespace. | @@ -208,7 +208,7 @@ Lingkup `Terminating`, `NotTerminating`, dan `NotBestEffort` membatasi sebuah k {{< feature-state for_k8s_version="1.12" state="beta" >}} -Pod-Pod dapat dibuat dengan sebuah [Priority (prioritas)](/docs/concepts/configuration/pod-priority-preemption/#pod-priority) tertentu. +Pod-Pod dapat dibuat dengan sebuah [Priority (prioritas)](/id/docs/concepts/configuration/pod-priority-preemption/#pod-priority) tertentu. Kamu dapat mengontrol konsumsi sumber daya sistem sebuah Pod berdasarkan Priority Pod tersebut, menggunakan kolom `scopeSelector` pada spesifikasi kuota tersebut. 
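A quota scoped with `scopeSelector`, as just described, might look like the following sketch; the PriorityClass `high`, the namespace, and the limits are assumptions for illustration only:

```yaml
# Sketch: this quota only counts Pods that use the hypothetical PriorityClass "high".
apiVersion: v1
kind: ResourceQuota
metadata:
  name: pods-high-priority
  namespace: demo            # assumed namespace
spec:
  hard:
    pods: "10"
    requests.cpu: "4"
    requests.memory: 8Gi
  scopeSelector:
    matchExpressions:
    - operator: In
      scopeName: PriorityClass
      values: ["high"]
```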
diff --git a/content/id/docs/concepts/scheduling/kube-scheduler.md b/content/id/docs/concepts/scheduling/kube-scheduler.md index f4cd477608ead..6f7efab3d9773 100644 --- a/content/id/docs/concepts/scheduling/kube-scheduler.md +++ b/content/id/docs/concepts/scheduling/kube-scheduler.md @@ -94,10 +94,10 @@ penilaian oleh penjadwal: ## {{% heading "whatsnext" %}} -* Baca tentang [penyetelan performa penjadwal](/docs/concepts/scheduling/scheduler-perf-tuning/) -* Baca tentang [pertimbangan penyebarang topologi pod](/docs/concepts/workloads/pods/pod-topology-spread-constraints/) +* Baca tentang [penyetelan performa penjadwal](/id/docs/concepts/scheduling/scheduler-perf-tuning/) +* Baca tentang [pertimbangan penyebarang topologi pod](/id/docs/concepts/workloads/pods/pod-topology-spread-constraints/) * Baca [referensi dokumentasi](/docs/reference/command-line-tools-reference/kube-scheduler/) untuk _kube-scheduler_ * Pelajari tentang [mengkonfigurasi beberapa penjadwal](/docs/tasks/administer-cluster/configure-multiple-schedulers/) * Pelajari tentang [aturan manajemen topologi](/docs/tasks/administer-cluster/topology-manager/) -* Pelajari tentang [pengeluaran tambahan Pod](/docs/concepts/configuration/pod-overhead/) +* Pelajari tentang [pengeluaran tambahan Pod](/id/docs/concepts/configuration/pod-overhead/) diff --git a/content/id/docs/concepts/scheduling/scheduler-perf-tuning.md b/content/id/docs/concepts/scheduling/scheduler-perf-tuning.md index 0a20d9050adc7..3689ecf7cb6f7 100644 --- a/content/id/docs/concepts/scheduling/scheduler-perf-tuning.md +++ b/content/id/docs/concepts/scheduling/scheduler-perf-tuning.md @@ -8,7 +8,7 @@ weight: 70 {{< feature-state for_k8s_version="v1.14" state="beta" >}} -[kube-scheduler](/docs/concepts/scheduling/kube-scheduler/#kube-scheduler) +[kube-scheduler](/id/docs/concepts/scheduling/kube-scheduler/#kube-scheduler) merupakan penjadwal (_scheduler_) Kubernetes bawaan yang bertanggung jawab terhadap penempatan Pod-Pod pada seluruh Node di dalam sebuah klaster. @@ -66,7 +66,7 @@ Kamu bisa mengatur ambang batas untuk menentukan berapa banyak jumlah Node minim persentase bagian dari seluruh Node di dalam klaster kamu. kube-scheduler akan mengubahnya menjadi bilangan bulat berisi jumlah Node. Saat penjadwalan, jika kube-scheduler mengidentifikasi cukup banyak Node-Node layak untuk melewati jumlah persentase yang diatur, maka kube-scheduler -akan berhenti mencari Node-Node layak dan lanjut ke [fase penskoran] (/docs/concepts/scheduling/kube-scheduler/#kube-scheduler-implementation). +akan berhenti mencari Node-Node layak dan lanjut ke [fase penskoran] (/id/docs/concepts/scheduling/kube-scheduler/#kube-scheduler-implementation). [Bagaimana penjadwal mengecek Node](#bagaimana-penjadwal-mengecek-node) menjelaskan proses ini secara detail. 
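The percentage threshold discussed above is set through the kube-scheduler component configuration. The sketch below assumes the `kubescheduler.config.k8s.io/v1alpha1` API that this generation of the docs refers to; the exact API version and fields may differ in other releases:

```yaml
# Sketch: stop searching for feasible Nodes once 50% of the cluster is found feasible.
apiVersion: kubescheduler.config.k8s.io/v1alpha1
kind: KubeSchedulerConfiguration
algorithmSource:
  provider: DefaultProvider
percentageOfNodesToScore: 50
```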
diff --git a/content/id/docs/concepts/security/overview.md b/content/id/docs/concepts/security/overview.md index caff040bc50cf..bc271e0645bb8 100644 --- a/content/id/docs/concepts/security/overview.md +++ b/content/id/docs/concepts/security/overview.md @@ -107,11 +107,11 @@ Kebanyakan dari saran yang disebut di atas dapat diotomasi di dalam _delivery pi ## {{% heading "whatsnext" %}} -* Pelajari tentang [Network Policy untuk Pod](/docs/concepts/services-networking/network-policies/) +* Pelajari tentang [Network Policy untuk Pod](/id/docs/concepts/services-networking/network-policies/) * Pelajari tentang [mengamankan klaster kamu](/docs/tasks/administer-cluster/securing-a-cluster/) * Pelajari tentang [kontrol akses API](/docs/reference/access-authn-authz/controlling-access/) -* Pelajari tentang [enkripsi data saat transit](/docs/tasks/tls/managing-tls-in-a-cluster/) for the control plane +* Pelajari tentang [enkripsi data saat transit](/id/docs/tasks/tls/managing-tls-in-a-cluster/) for the control plane * Pelajari tentang [enkripsi data saat diam](/docs/tasks/administer-cluster/encrypt-data/) -* Pelajari tentang [Secret (data sensitif) pada Kubernetes](/docs/concepts/configuration/secret/) +* Pelajari tentang [Secret (data sensitif) pada Kubernetes](/id/docs/concepts/configuration/secret/) diff --git a/content/id/docs/concepts/services-networking/connect-applications-service.md b/content/id/docs/concepts/services-networking/connect-applications-service.md index 4bbd0bbf56381..806fff3a46426 100644 --- a/content/id/docs/concepts/services-networking/connect-applications-service.md +++ b/content/id/docs/concepts/services-networking/connect-applications-service.md @@ -47,7 +47,7 @@ kubectl get pods -l run=my-nginx -o yaml | grep podIP Kamu dapat melakukan akses dengan *ssh* ke dalam *node* di dalam klaster dan mengakses IP *Pod* tersebut menggunakan *curl*. Perlu dicatat bahwa kontainer tersebut tidak menggunakan *port* 80 di dalam *node*, atau aturan *NAT* khusus untuk merutekan trafik ke dalam *Pod*. Ini berarti kamu dapat menjalankan banyak *nginx Pod* di *node* yang sama dimana setiap *Pod* dapat menggunakan *containerPort* yang sama, kamu dapat mengakses semua itu dari *Pod* lain ataupun dari *node* di dalam klaster menggunakan IP. Seperti *Docker*, *port* masih dapat di publikasi ke dalam * interface node*, tetapi kebutuhan seperti ini sudah berkurang karena model jaringannya. -Kamu dapat membaca lebih detail [bagaimana kita melakukan ini](/docs/concepts/cluster-administration/networking/#how-to-achieve-this) jika kamu penasaran. +Kamu dapat membaca lebih detail [bagaimana kita melakukan ini](/id/docs/concepts/cluster-administration/networking/#how-to-achieve-this) jika kamu penasaran. ## Membuat Service @@ -107,7 +107,7 @@ NAME ENDPOINTS AGE my-nginx 10.244.2.5:80,10.244.3.4:80 1m ``` -Kamu sekarang dapat melakukan *curl* ke dalam *nginx Service* di `:` dari *node* manapun di klaster. Perlu dicatat bahwa *Service IP* adalah IP virtual, IP tersebut tidak pernah ada di *interface node* manapun. Jika kamu penasaran bagaimana konsep ini bekerja, kamu dapat membaca lebih lanjut tentang [service proxy](/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies). +Kamu sekarang dapat melakukan *curl* ke dalam *nginx Service* di `:` dari *node* manapun di klaster. Perlu dicatat bahwa *Service IP* adalah IP virtual, IP tersebut tidak pernah ada di *interface node* manapun. 
Jika kamu penasaran bagaimana konsep ini bekerja, kamu dapat membaca lebih lanjut tentang [service proxy](/id/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies). ## Mengakses Service @@ -194,7 +194,7 @@ Hingga sekarang kita hanya mengakses *nginx* server dari dalam klaster. Sebelum * *Self signed certificates* untuk *https* (kecuali jika kamu sudah mempunyai *identity certificate*) * Sebuah server *nginx* yang terkonfigurasi untuk menggunakan *certificate* tersebut -* Sebuah [secret](/docs/concepts/configuration/secret/) yang membuat setifikat tersebut dapat diakses oleh *pod* +* Sebuah [secret](/id/docs/concepts/configuration/secret/) yang membuat setifikat tersebut dapat diakses oleh *pod* Kamu dapat melihat semua itu di [contoh nginx https](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/https-nginx/). Contoh ini mengaharuskan kamu melakukan instalasi *go* dan *make*. Jika kamu tidak ingin melakukan instalasi tersebut, ikuti langkah-langkah manualnya nanti, singkatnya: @@ -362,6 +362,6 @@ LoadBalancer Ingress: a320587ffd19711e5a37606cf4a74574-1142138393.us-east-1.el ## {{% heading "whatsnext" %}} -Kubernetes juga mendukung *Federated Service*, yang bisa mempengaruhi banyak klaster dan penyedia layanan *cloud*, untuk meningkatkan ketersediaan, peningkatan toleransi kesalahan, dan pengembangan dari *Service* kamu. Lihat [Panduan Federated Service](/docs/concepts/cluster-administration/federation-service-discovery/) untuk informasi lebih lanjut. +Kubernetes juga mendukung *Federated Service*, yang bisa mempengaruhi banyak klaster dan penyedia layanan *cloud*, untuk meningkatkan ketersediaan, peningkatan toleransi kesalahan, dan pengembangan dari *Service* kamu. Lihat [Panduan Federated Service](/id/docs/concepts/cluster-administration/federation-service-discovery/) untuk informasi lebih lanjut. diff --git a/content/id/docs/concepts/services-networking/dns-pod-service.md b/content/id/docs/concepts/services-networking/dns-pod-service.md index 52ec19a420f64..efdba8d7a13be 100644 --- a/content/id/docs/concepts/services-networking/dns-pod-service.md +++ b/content/id/docs/concepts/services-networking/dns-pod-service.md @@ -50,7 +50,7 @@ menggunakan penjadwalan Round-Robin dari set yang ada. ### SRV _record_ SRV _record_ dibuat untuk port bernama yang merupakan bagian dari Service normal maupun [Headless -Services](/docs/concepts/services-networking/service/#headless-services). +Services](/id/docs/concepts/services-networking/service/#headless-services). Untuk setiap port bernama, SRV _record_ akan memiliki format `_my-port-name._my-port-protocol.my-svc.my-namespace.svc.cluster-domain.example`. Untuk sebuah Service normal, ini akan melakukan resolusi pada nomor port dan diff --git a/content/id/docs/concepts/services-networking/endpoint-slices.md b/content/id/docs/concepts/services-networking/endpoint-slices.md index 224e7b4bbd9e7..1782f4273e740 100644 --- a/content/id/docs/concepts/services-networking/endpoint-slices.md +++ b/content/id/docs/concepts/services-networking/endpoint-slices.md @@ -45,7 +45,7 @@ term_id="selector" >}} dituliskan. EndpointSlice tersebut akan memiliki referensi-referensi menuju Pod manapun yang cocok dengan selektor pada Service tersebut. EndpointSlice mengelompokkan _endpoint_ jaringan berdasarkan kombinasi Service dan Port yang unik. Nama dari sebuah objek EndpointSlice haruslah berupa -[nama subdomain DNS](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names) yang sah. 
+[nama subdomain DNS](/id/docs/concepts/overview/working-with-objects/names#dns-subdomain-names) yang sah. Sebagai contoh, berikut merupakan sampel sumber daya EndpointSlice untuk sebuah Service Kubernetes yang bernama `example`. @@ -180,6 +180,6 @@ bersangkutan. * [Mengaktifkan EndpointSlice](/docs/tasks/administer-cluster/enabling-endpointslices) -* Baca [Menghubungkan Aplikasi dengan Service](/docs/concepts/services-networking/connect-applications-service/) +* Baca [Menghubungkan Aplikasi dengan Service](/id/docs/concepts/services-networking/connect-applications-service/) diff --git a/content/id/docs/concepts/services-networking/ingress-controllers.md b/content/id/docs/concepts/services-networking/ingress-controllers.md index 9491f5dc1c9aa..645f2dbf8dcbd 100644 --- a/content/id/docs/concepts/services-networking/ingress-controllers.md +++ b/content/id/docs/concepts/services-networking/ingress-controllers.md @@ -71,7 +71,7 @@ Pastikan kamu sudah terlebih dahulu memahami dokumentasi kontroler Ingress yang ## {{% heading "whatsnext" %}} -* Pelajari [Ingress](/docs/concepts/services-networking/ingress/) lebih lanjut. +* Pelajari [Ingress](/id/docs/concepts/services-networking/ingress/) lebih lanjut. * [Melakukan konfigurasi Ingress pada Minikube dengan kontroler NGINX](/docs/tasks/access-application-cluster/ingress-minikube) diff --git a/content/id/docs/concepts/services-networking/ingress.md b/content/id/docs/concepts/services-networking/ingress.md index 617581b4214f5..1cc56c5960cc8 100644 --- a/content/id/docs/concepts/services-networking/ingress.md +++ b/content/id/docs/concepts/services-networking/ingress.md @@ -16,8 +16,8 @@ Untuk memudahkan, di awal akan dijelaskan beberapa terminologi yang sering dipak * Node: Sebuah mesin fisik atau virtual yang berada di dalam klaster Kubernetes. * Klaster: Sekelompok node yang merupakan *resource* komputasi primer yang diatur oleh Kubernetes, biasanya diproteksi dari internet dengan menggunakan *firewall*. * *Edge router*: Sebuah *router* mengatur *policy firewall* pada klaster kamu. *Router* ini bisa saja berupa *gateway* yang diatur oleh penyedia layanan *cloud* maupun perangkat keras. -* Jaringan klaster: Seperangkat *links* baik logis maupus fisik, yang memfasilitasi komunikasi di dalam klaster berdasarkan [model jaringan Kubernetes](/docs/concepts/cluster-administration/networking/). -* *Service*: Sebuah [*Service*](/docs/concepts/services-networking/service/) yang mengidentifikasi beberapa *Pod* dengan menggunakan *selector label*. Secara umum, semua *Service* diasumsikan hanya memiliki IP virtual yang hanya dapat diakses dari dalam jaringan klaster. +* Jaringan klaster: Seperangkat *links* baik logis maupus fisik, yang memfasilitasi komunikasi di dalam klaster berdasarkan [model jaringan Kubernetes](/id/docs/concepts/cluster-administration/networking/). +* *Service*: Sebuah [*Service*](/id/docs/concepts/services-networking/service/) yang mengidentifikasi beberapa *Pod* dengan menggunakan *selector label*. Secara umum, semua *Service* diasumsikan hanya memiliki IP virtual yang hanya dapat diakses dari dalam jaringan klaster. ## Apakah *Ingress* itu? @@ -34,11 +34,11 @@ Mekanisme *routing* trafik dikendalikan oleh aturan-aturan yang didefinisikan pa ``` Sebuah *Ingress* dapat dikonfigurasi agar berbagai *Service* memiliki URL yang dapat diakses dari eksternal (luar klaster), melakukan *load balance* pada trafik, terminasi SSL, serta Virtual Host berbasis Nama. 
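Tying back to the EndpointSlice description above, an illustrative object for a hypothetical Service named `example` could look like this sketch; the `discovery.k8s.io/v1beta1` API group is assumed for this release, and the address and node name are placeholders:

```yaml
# Sketch of an EndpointSlice grouping endpoints for a hypothetical Service "example".
apiVersion: discovery.k8s.io/v1beta1
kind: EndpointSlice
metadata:
  name: example-abc
  labels:
    kubernetes.io/service-name: example
addressType: IPv4
ports:
- name: http
  protocol: TCP
  port: 80
endpoints:
- addresses:
  - "10.1.2.3"
  conditions:
    ready: true
  topology:
    kubernetes.io/hostname: node-1
```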
-Sebuah [kontroler Ingress](/docs/concepts/services-networking/ingress-controllers) bertanggung jawab untuk menjalankan fungsi Ingress yaitu sebagai *loadbalancer*, meskipun dapat juga digunakan untuk mengatur *edge router* atau *frontend* tambahan untuk menerima trafik. +Sebuah [kontroler Ingress](/id/docs/concepts/services-networking/ingress-controllers) bertanggung jawab untuk menjalankan fungsi Ingress yaitu sebagai *loadbalancer*, meskipun dapat juga digunakan untuk mengatur *edge router* atau *frontend* tambahan untuk menerima trafik. Sebuah *Ingress* tidak mengekspos sembarang *port* atau protokol. Mengekspos *Service* untuk protokol selain HTTP ke HTTPS internet biasanya dilakukan dengan menggunakan -*service* dengan tipe [Service.Type=NodePort](/docs/concepts/services-networking/service/#nodeport) atau -[Service.Type=LoadBalancer](/docs/concepts/services-networking/service/#loadbalancer). +*service* dengan tipe [Service.Type=NodePort](/id/docs/concepts/services-networking/service/#nodeport) atau +[Service.Type=LoadBalancer](/id/docs/concepts/services-networking/service/#loadbalancer). ## Prasyarat @@ -47,7 +47,7 @@ Sebuah *Ingress* tidak mengekspos sembarang *port* atau protokol. Mengekspos *Se Sebelum kamu mulai menggunakan *Ingress*, ada beberapa hal yang perlu kamu ketahui sebelumnya. *Ingress* merupakan *resource* dengan tipe beta. {{< note >}} -Kamu harus terlebih dahulu memiliki [kontroler Ingress](/docs/concepts/services-networking/ingress-controllers) untuk dapat memenuhi *Ingress*. Membuat sebuah *Ingress* tanpa adanya kontroler *Ingres* tidak akan berdampak apa pun. +Kamu harus terlebih dahulu memiliki [kontroler Ingress](/id/docs/concepts/services-networking/ingress-controllers) untuk dapat memenuhi *Ingress*. Membuat sebuah *Ingress* tanpa adanya kontroler *Ingres* tidak akan berdampak apa pun. {{< /note >}} GCE/Google Kubernetes Engine melakukan deploy kontroler *Ingress* pada *master*. Perhatikan laman berikut @@ -56,7 +56,7 @@ kontroler ini jika kamu menggunakan GCE/GKE. Jika kamu menggunakan *environment* selain GCE/Google Kubernetes Engine, kemungkinan besar kamu harus [melakukan proses deploy kontroler ingress kamu sendiri](https://kubernetes.github.io/ingress-nginx/deploy/). Terdapat beberapa jenis -[kontroler Ingress](/docs/concepts/services-networking/ingress-controllers) yang bisa kamu pilih. +[kontroler Ingress](/id/docs/concepts/services-networking/ingress-controllers) yang bisa kamu pilih. ### Sebelum kamu memulai @@ -89,10 +89,10 @@ spec: ``` Seperti layaknya *resource* Kubernetes yang lain, sebuah Ingress membutuhkan *field* `apiVersion`, `kind`, dan `metadata`. - Untuk informasi umum soal bagaimana cara bekerja dengan menggunakan file konfigurasi, silahkan merujuk pada [melakukan deploy aplikasi](/docs/tasks/run-application/run-stateless-application-deployment/), [konfigurasi kontainer](/docs/tasks/configure-pod-container/configure-pod-configmap/), [mengatur *resource*](/docs/concepts/cluster-administration/manage-deployment/). + Untuk informasi umum soal bagaimana cara bekerja dengan menggunakan file konfigurasi, silahkan merujuk pada [melakukan deploy aplikasi](/docs/tasks/run-application/run-stateless-application-deployment/), [konfigurasi kontainer](/id/docs/tasks/configure-pod-container/configure-pod-configmap/), [mengatur *resource*](/id/docs/concepts/cluster-administration/manage-deployment/). 
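A minimal Ingress illustrating the required fields just described might look like the sketch below; the Service `testsvc`, the path, and the NGINX-specific rewrite annotation are placeholders for illustration rather than part of the original example:

```yaml
# Sketch: one HTTP rule routing /testpath to a hypothetical Service "testsvc".
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: test-ingress
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /   # controller-specific; only meaningful for the NGINX controller
spec:
  rules:
  - http:
      paths:
      - path: /testpath
        backend:
          serviceName: testsvc
          servicePort: 80
```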
Ingress seringkali menggunakan anotasi untuk melakukan konfigurasi beberapa opsi yang ada bergantung pada kontroler Ingress yang digunakan, sebagai contohnya adalah [anotasi rewrite-target](https://github.com/kubernetes/ingress-nginx/blob/master/docs/examples/rewrite/README.md). - [Kontroler Ingress](/docs/concepts/services-networking/ingress-controllers) yang berbeda memiliki jenis anotasi yang berbeda. Pastikan kamu sudah terlebih dahulu memahami dokumentasi + [Kontroler Ingress](/id/docs/concepts/services-networking/ingress-controllers) yang berbeda memiliki jenis anotasi yang berbeda. Pastikan kamu sudah terlebih dahulu memahami dokumentasi kontroler Ingress yang akan kamu pakai untuk mengetahui jenis anotasi apa sajakah yang disediakan. [Spesifikasi](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status) Ingress @@ -111,7 +111,7 @@ Setiap *rule* HTTP mengandung informasi berikut: dan `servicePort`. Baik *host* dan *path* harus sesuai dengan konten dari *request* yang masuk sebelum *loadbalancer* akan mengarahkan trafik pada *service* yang sesuai. * Suatu *backend* adalah kombinasi *service* dan *port* seperti yang dideskripsikan di - [dokumentasi *Service*](/docs/concepts/services-networking/service/). *Request* HTTP (dan HTTPS) yang sesuai dengan + [dokumentasi *Service*](/id/docs/concepts/services-networking/service/). *Request* HTTP (dan HTTPS) yang sesuai dengan *host* dan *path* yang ada pada *rule* akan diteruskan pada *backend* terkait. *Backend default* seringkali dikonfigurasi pada kontroler kontroler Ingress, tugas *backend default* ini adalah @@ -120,7 +120,7 @@ Setiap *rule* HTTP mengandung informasi berikut: ### *Backend Default* Sebuah Ingress yang tidak memiliki *rules* akan mengarahkan semua trafik pada sebuah *backend default*. *Backend default* inilah yang -biasanya bisa dimasukkan sebagai salah satu opsi konfigurasi dari [kontroler Ingress](/docs/concepts/services-networking/ingress-controllers) dan tidak dimasukkan dalam spesifikasi *resource* Ingress. +biasanya bisa dimasukkan sebagai salah satu opsi konfigurasi dari [kontroler Ingress](/id/docs/concepts/services-networking/ingress-controllers) dan tidak dimasukkan dalam spesifikasi *resource* Ingress. Jika tidak ada *host* atau *path* yang sesuai dengan *request* HTTP pada objek Ingress, maka trafik tersebut akan diarahkan pada *backend default*. @@ -218,8 +218,8 @@ Apabila *Ingress* selesai dibuat, maka kamu dapat melihat alamat IP dari berbaga pada kolom `address`. {{< note >}} -Kamu mungkin saja membutuhkan konfigurasi default-http-backend [Service](/docs/concepts/services-networking/service/) -bergantung pada [kontroler Ingress](/docs/concepts/services-networking/ingress-controllers) yang kamu pakai. +Kamu mungkin saja membutuhkan konfigurasi default-http-backend [Service](/id/docs/concepts/services-networking/service/) +bergantung pada [kontroler Ingress](/id/docs/concepts/services-networking/ingress-controllers) yang kamu pakai. {{< /note >}} ### Virtual Host berbasis Nama @@ -291,7 +291,7 @@ spec: ### TLS -Kamu dapat mengamankan *Ingress* yang kamu miliki dengan memberikan spesifikasi [secret](/docs/concepts/configuration/secret) +Kamu dapat mengamankan *Ingress* yang kamu miliki dengan memberikan spesifikasi [secret](/id/docs/concepts/configuration/secret) yang mengandung *private key* dan sertifikat TLS. Saat ini, Ingress hanya memiliki fitur untuk melakukan konfigurasi *single TLS port*, yaitu 443, serta melakukan terminasi TLS. 
Jika *section* TLS pada Ingress memiliki spesifikasi *host* yang berbeda, @@ -448,8 +448,8 @@ Ingress yang ingin diubah. ## Mekanisme *failing* pada beberapa zona *availability* Teknik untuk menyeimbangkan persebaran trafik pada *failure domain* berbeda antar penyedia layanan *cloud*. -Kamu dapat mempelajari dokumentasi yang relevan bagi [kontoler Ingress](/docs/concepts/services-networking/ingress-controllers) -untuk informasi yang lebih detail. Kamu juga dapat mempelajari [dokumentasi federasi](/docs/concepts/cluster-administration/federation/) +Kamu dapat mempelajari dokumentasi yang relevan bagi [kontoler Ingress](/id/docs/concepts/services-networking/ingress-controllers) +untuk informasi yang lebih detail. Kamu juga dapat mempelajari [dokumentasi federasi](/id/docs/concepts/cluster-administration/federation/) untuk informasi lebih detail soal bagaimana melakukan *deploy* untuk federasi klaster. ## Pengembangan selanjutnya @@ -463,8 +463,8 @@ soal perubahan berbagai kontroler. Kamu dapat mengekspos sebuah *Service* dalam berbagai cara, tanpa harus menggunakan *resource* Ingress, dengan menggunakan: -* [Service.Type=LoadBalancer](/docs/concepts/services-networking/service/#loadbalancer) -* [Service.Type=NodePort](/docs/concepts/services-networking/service/#nodeport) +* [Service.Type=LoadBalancer](/id/docs/concepts/services-networking/service/#loadbalancer) +* [Service.Type=NodePort](/id/docs/concepts/services-networking/service/#nodeport) * [Port Proxy](https://git.k8s.io/contrib/for-demos/proxy-to-service) diff --git a/content/id/docs/concepts/services-networking/network-policies.md b/content/id/docs/concepts/services-networking/network-policies.md index 25f42ddb98b1f..fe510b846d8c6 100644 --- a/content/id/docs/concepts/services-networking/network-policies.md +++ b/content/id/docs/concepts/services-networking/network-policies.md @@ -80,7 +80,7 @@ kecuali penyedia jaringan mendukung network policy. **_Field-field_ yang bersifat wajib**: Sama dengan seluruh _config_ Kubernetes lainnya, sebuah `NetworkPolicy` membutuhkan _field-field_ `apiVersion`, `kind`, dan `metadata`. Informasi generik mengenai bagaimana bekerja dengan _file_ `config`, dapat dilihat di -[Konfigurasi Kontainer menggunakan `ConfigMap`](/docs/tasks/configure-pod-container/configure-pod-configmap/), +[Konfigurasi Kontainer menggunakan `ConfigMap`](/id/docs/tasks/configure-pod-container/configure-pod-configmap/), serta [Manajemen Objek](/docs/concepts/overview/object-management-kubectl/overview/). **spec**: `NetworkPolicy` [spec](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status) memiliki semua informasi yang harus diberikan untuk memberikan definisi _network policy_ yang ada pada _namespace_ tertentu. 
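As a sketch of the NetworkPolicy spec being described, the following hypothetical policy admits traffic to Pods labeled `role: db` only from Pods labeled `role: frontend` in the same namespace; all names, labels, and the port are illustrative:

```yaml
# Sketch: restrict ingress to "role: db" Pods to traffic from "role: frontend" Pods.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-frontend-to-db
  namespace: default
spec:
  podSelector:
    matchLabels:
      role: db
  policyTypes:
  - Ingress
  ingress:
  - from:
    - podSelector:
        matchLabels:
          role: frontend
    ports:
    - protocol: TCP
      port: 5432
```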
diff --git a/content/id/docs/concepts/services-networking/service-topology.md b/content/id/docs/concepts/services-networking/service-topology.md index ef15d1ab3da24..05abffa3231f9 100644 --- a/content/id/docs/concepts/services-networking/service-topology.md +++ b/content/id/docs/concepts/services-networking/service-topology.md @@ -186,5 +186,5 @@ spec: * Baca tentang [mengaktifkan topologi Service](/docs/tasks/administer-cluster/enabling-service-topology) -* Baca [menghubungkan aplikasi dengan Service](/docs/concepts/services-networking/connect-applications-service/) +* Baca [menghubungkan aplikasi dengan Service](/id/docs/concepts/services-networking/connect-applications-service/) diff --git a/content/id/docs/concepts/services-networking/service.md b/content/id/docs/concepts/services-networking/service.md index 97626bf9ce2e1..00bf4e624180b 100644 --- a/content/id/docs/concepts/services-networking/service.md +++ b/content/id/docs/concepts/services-networking/service.md @@ -12,9 +12,9 @@ weight: 10 -[`Pod`](/docs/concepts/workloads/pods/pod/) pada Kubernetes bersifat *mortal*. +[`Pod`](/id/docs/concepts/workloads/pods/pod/) pada Kubernetes bersifat *mortal*. Artinya apabila _pod-pod_ tersebut dibuat dan kemudian mati, _pod-pod_ tersebut -tidak akan dihidupkan kembali. [`ReplicaSets`](/docs/concepts/workloads/controllers/replicaset/) secara +tidak akan dihidupkan kembali. [`ReplicaSets`](/id/docs/concepts/workloads/controllers/replicaset/) secara khusus bertugas membuat dan menghapus `Pod` secara dinamsi (misalnya, pada proses *scaling out* atau *scaling in*). Meskipun setiap `Pod` memiliki alamat IP-nya masing-masing, kamu tidak dapat mengandalkan alamat IP yang diberikan pada _pod-pod_ tersebut, karena alamat IP yang diberikan tidak stabil. @@ -26,7 +26,7 @@ Inilah alasan kenapa `Service` ada. Sebuah `Service` pada Kubernetes adalah sebuah abstraksi yang memberikan definisi set logis yang terdiri beberapa `Pod` serta _policy_ bagaimana cara kamu mengakses sekumpulan `Pod` tadi - seringkali disebut sebagai _microservices_. -Set `Pod` yang dirujuk oleh suatu `Service` (biasanya) ditentukan oleh sebuah [`Label Selector`](/docs/concepts/overview/working-with-objects/labels/#label-selectors) +Set `Pod` yang dirujuk oleh suatu `Service` (biasanya) ditentukan oleh sebuah [`Label Selector`](/id/docs/concepts/overview/working-with-objects/labels/#label-selectors) (lihat penjelasan di bawah untuk mengetahui alasan kenapa kamu mungkin saja membutuhkan `Service` tanpa sebuah _selector_). @@ -95,7 +95,7 @@ mereka juga melakukan abstraksi bagi _backend_ lainnya. Misalnya saja: * Kamu ingin memiliki sebuah basis data eksternal di _environment_ _production_ tapi pada tahap _test_, kamu ingin menggunakan basis datamu sendiri. * Kamu ingin merujuk _service_ kamu pada _service_ lainnya yang berada pada - [_Namespace_](/docs/concepts/overview/working-with-objects/namespaces/) yang berbeda atau bahkan klaster yang berbeda. + [_Namespace_](/id/docs/concepts/overview/working-with-objects/namespaces/) yang berbeda atau bahkan klaster yang berbeda. * Kamu melakukan migrasi _workloads_ ke Kubernetes dan beberapa _backend_ yang kamu miliki masih berada di luar klaster Kubernetes. @@ -319,7 +319,7 @@ Meskipun begitu, DNS tidak memiliki keterbatasan ini. ### DNS -Salah satu [_add-on_](/docs/concepts/cluster-administration/addons/) opsional +Salah satu [_add-on_](/id/docs/concepts/cluster-administration/addons/) opsional (meskipun sangat dianjurkan) adalah server DNS. 
Server DNS bertugas untuk mengamati apakah terdapat objek `Service` baru yang dibuat dan kemudian bertugas menyediakan DNS baru untuk _Service_ tersebut. Jika DNS ini diaktifkan untuk seluruh klaster, maka semua `Pod` akan secara otomatis @@ -338,7 +338,7 @@ nomor _port_ yang digunakan oleh _http_. Server DNS Kubernetes adalah satu-satunya cara untuk mengakses _Service_ dengan tipe `ExternalName`. Informasi lebih lanjut tersedia di -[DNS _Pods_ dan _Services_](/docs/concepts/services-networking/dns-pod-service/). +[DNS _Pods_ dan _Services_](/id/docs/concepts/services-networking/dns-pod-service/). ## `Service` _headless_ @@ -745,10 +745,10 @@ dan tidak akan menerima trafik apa pun. Untuk menghasilkan distribusi trafik yang merata, kamu dapat menggunakan _DaemonSet_ atau melakukan spesifikasi -[pod anti-affinity](/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity-beta-feature) +[pod anti-affinity](/id/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity-beta-feature) agar `Pod` tidak di-_assign_ ke _node_ yang sama. -NLB juga dapat digunakan dengan anotasi [internal load balancer](/docs/concepts/services-networking/service/#internal-load-balancer). +NLB juga dapat digunakan dengan anotasi [internal load balancer](/id/docs/concepts/services-networking/service/#internal-load-balancer). Agar trafik klien berhasil mencapai _instances_ dibelakang ELB, _security group_ dari _node_ akan diberikan _rules_ IP sebagai berikut: @@ -1006,7 +1006,7 @@ alternatif penggunaan `Service` untuk HTTP/HTTPS. {{< feature-state for_k8s_version="v1.1" state="stable" >}} -Apabila penyedia layanan _cloud_ yang kamu gunakan mendukung, (misalnya saja, [AWS](/docs/concepts/cluster-administration/cloud-providers/#aws)), +Apabila penyedia layanan _cloud_ yang kamu gunakan mendukung, (misalnya saja, [AWS](/id/docs/concepts/cluster-administration/cloud-providers/#aws)), _Service_ dengan _type_ `LoadBalancer` untuk melakukan konfigurasi _load balancer_ di luar Kubernetes sendiri, serta akan melakukan _forwarding_ koneksi yang memiliki prefiks [protokol PROXY](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt). diff --git a/content/id/docs/concepts/storage/dynamic-provisioning.md b/content/id/docs/concepts/storage/dynamic-provisioning.md index ac206dfacde02..4b9fa6f35c3ab 100644 --- a/content/id/docs/concepts/storage/dynamic-provisioning.md +++ b/content/id/docs/concepts/storage/dynamic-provisioning.md @@ -8,7 +8,7 @@ weight: 40 Penyediaan volume dinamis memungkinkan volume penyimpanan untuk dibuat sesuai permintaan (_on-demand_). Tanpa adanya penyediaan dinamis (_dynamic provisioning_), untuk membuat volume penyimpanan baru, admin klaster secara manual harus -memanggil penyedia layanan cloud atau layanan penyimpanan, dan kemudian membuat [objek PersistentVolume](/docs/concepts/storage/persistent-volumes/) +memanggil penyedia layanan cloud atau layanan penyimpanan, dan kemudian membuat [objek PersistentVolume](/id/docs/concepts/storage/persistent-volumes/) sebagai representasi di Kubernetes. Fitur penyediaan dinamis menghilangkan kebutuhan admin klaster untuk menyediakan penyimpanan sebelumnya (_pre-provision_). Dengan demikian, penyimpanan akan tersedia secara otomatis ketika diminta oleh pengguna. @@ -32,7 +32,7 @@ kumpulan parameter tertentu. Desain ini memastikan bahwa pengguna tidak perlu kh rumitnya mekanisme penyediaan penyimpanan, tapi tetap memiliki kemampuan untuk memilih berbagai macam pilihan penyimpanan. 
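Dynamic provisioning as described above hinges on a StorageClass plus a PersistentVolumeClaim that names it. A sketch, assuming the GCE PD provisioner is available in the cluster, could be:

```yaml
# Sketch: a class backed by the GCE PD provisioner (substitute whatever your cluster supports).
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: fast
provisioner: kubernetes.io/gce-pd
parameters:
  type: pd-ssd
---
# A claim that requests the class by name; creating it triggers on-demand provisioning.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: claim1
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: fast
  resources:
    requests:
      storage: 30Gi
```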
-Info lebih lanjut mengenai _storage class_ dapat dilihat [di sini](/docs/concepts/storage/storage-classes/). +Info lebih lanjut mengenai _storage class_ dapat dilihat [di sini](/id/docs/concepts/storage/storage-classes/). ## Mengaktifkan Penyediaan Dinamis (_Dynamic Provisioning_) @@ -123,6 +123,6 @@ tidak bisa terbuat. Pada klaster [Multi-Zona](/docs/setup/multiple-zones), Pod dapat tersebar di banyak Zona pada sebuah Region. Penyimpanan dengan *backend* Zona-Tunggal seharusnya disediakan pada Zona-Zona dimana Pod dijalankan. Hal ini dapat dicapai dengan mengatur -[Mode Volume Binding](/docs/concepts/storage/storage-classes/#volume-binding-mode). +[Mode Volume Binding](/id/docs/concepts/storage/storage-classes/#volume-binding-mode). diff --git a/content/id/docs/concepts/storage/persistent-volumes.md b/content/id/docs/concepts/storage/persistent-volumes.md index f75941b86ab37..51163d36a9ac9 100644 --- a/content/id/docs/concepts/storage/persistent-volumes.md +++ b/content/id/docs/concepts/storage/persistent-volumes.md @@ -11,7 +11,7 @@ weight: 20 -Dokumen ini menjelaskan kondisi terkini dari `PersistentVolumes` pada Kubernetes. Disarankan telah memiliki familiaritas dengan [volume](/docs/concepts/storage/volumes/). +Dokumen ini menjelaskan kondisi terkini dari `PersistentVolumes` pada Kubernetes. Disarankan telah memiliki familiaritas dengan [volume](/id/docs/concepts/storage/volumes/). @@ -34,7 +34,7 @@ mode akses, tanpa memaparkan detail-detail bagaimana cara volume tersebut diimpl kepada para pengguna. Untuk mengatasi hal ini maka dibutuhkan sumber daya `StorageClass`. -Silakan lihat [panduan mendetail dengan contoh-contoh yang sudah berjalan](/docs/tasks/configure-pod-container/configure-persistent-volume-storage/). +Silakan lihat [panduan mendetail dengan contoh-contoh yang sudah berjalan](/id/docs/tasks/configure-pod-container/configure-persistent-volume-storage/). ## Siklus hidup dari sebuah volume dan klaim @@ -360,7 +360,7 @@ Pada CLI, mode-mode akses tersebut disingkat menjadi: Sebuah PV bisa memiliki sebuah kelas, yang dispesifikasi dalam pengaturan atribut `storageClassName` menjadi nama -[StorageClass](/docs/concepts/storage/storage-classes/). +[StorageClass](/id/docs/concepts/storage/storage-classes/). Sebuah PV dari kelas tertentu hanya dapat terikat dengan PVC yang meminta kelas tersebut. Sebuah PV tanpa `storageClassName` tidak memiliki kelas dan hanya dapat terikat dengan PVC yang tidak meminta kelas tertentu. @@ -412,7 +412,7 @@ akan dihilangkan sepenuhnya pada rilis Kubernetes mendatang. ### Afinitas Node {{< note >}} -Untuk kebanyakan tipe volume, kamu tidak perlu memasang kolom ini. Kolom ini secara otomatis terisi untuk tipe blok volume [AWS EBS](/docs/concepts/storage/volumes/#awselasticblockstore), [GCE PD](/docs/concepts/storage/volumes/#gcepersistentdisk) dan [Azure Disk](/docs/concepts/storage/volumes/#azuredisk). Kamu harus mengaturnya secara eksplisit untuk volume [lokal](/docs/concepts/storage/volumes/#local). +Untuk kebanyakan tipe volume, kamu tidak perlu memasang kolom ini. Kolom ini secara otomatis terisi untuk tipe blok volume [AWS EBS](/id/docs/concepts/storage/volumes/#awselasticblockstore), [GCE PD](/id/docs/concepts/storage/volumes/#gcepersistentdisk) dan [Azure Disk](/id/docs/concepts/storage/volumes/#azuredisk). Kamu harus mengaturnya secara eksplisit untuk volume [lokal](/id/docs/concepts/storage/volumes/#local). 
{{< /note >}} Sebuah PV dapat menspesifikasi [afinitas node](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#volumenodeaffinity-v1-core) untuk mendefinisikan batasan yang membatasi _node_ mana saja yang dapat mengakses volume tersebut. _Pod_ yang menggunakan sebuah PV hanya akan bisa dijadwalkan ke _node_ yang dipilih oleh afinitas _node_. @@ -466,7 +466,7 @@ Klaim, seperti _pod_, bisa meminta sumber daya dengan jumlah tertentu. Pada kas ### _Selector_ -Klaim dapat menspesifikasi [_label selector_](/docs/concepts/overview/working-with-objects/labels/#label-selectors) untuk memilih serangkaian volume lebih jauh. Hanya volume yang cocok labelnya dengan _selector_ yang dapat terikat dengan klaim. _Selector_ dapat terdiri dari dua kolom: +Klaim dapat menspesifikasi [_label selector_](/id/docs/concepts/overview/working-with-objects/labels/#label-selectors) untuk memilih serangkaian volume lebih jauh. Hanya volume yang cocok labelnya dengan _selector_ yang dapat terikat dengan klaim. _Selector_ dapat terdiri dari dua kolom: * `matchLabels` - volume harus memiliki label dengan nilai ini * `matchExpressions` - daftar dari persyaratan yang dibuat dengan menentukan kunci, daftar nilai, dan operator yang menghubungkan kunci dengan nilai. Operator yang valid meliputi In, NotIn, Exists, dan DoesNotExist. @@ -476,7 +476,7 @@ Semua persyaratan tersebut, dari `matchLabels` dan `matchExpressions` akan dilak ### Kelas Sebuah klaim dapat meminta kelas tertentu dengan menspesifikasi nama dari -[StorageClass](/docs/concepts/storage/storage-classes/) +[StorageClass](/id/docs/concepts/storage/storage-classes/) menggunakan atribut `storageClassName`. Hanya PV dari kelas yang diminta, yang memiliki `storageClassName` yang sama dengan PVC, yang dapat terikat dengan PVC. @@ -647,7 +647,7 @@ Hanya volume yang disediakan secara statis yang didukung untuk rilis alfa. Admin {{< feature-state for_k8s_version="v1.12" state="alpha" >}} -Fitur _volume snapshot_ ditambahkan hanya untuk mendukung _CSI Volume Plugins_. Untuk lebih detail, lihat [_volume snapshots_](/docs/concepts/storage/volume-snapshots/). +Fitur _volume snapshot_ ditambahkan hanya untuk mendukung _CSI Volume Plugins_. Untuk lebih detail, lihat [_volume snapshots_](/id/docs/concepts/storage/volume-snapshots/). Untuk mengaktifkan dukungan pemulihan sebuah volume dari sebuah sumber data _volume snapshot_, aktifkan gerbang fitur `VolumeSnapshotDataSource` pada apiserver dan _controller-manager_. diff --git a/content/id/docs/concepts/storage/storage-classes.md b/content/id/docs/concepts/storage/storage-classes.md index 9e0a5b1664843..2897399e80995 100644 --- a/content/id/docs/concepts/storage/storage-classes.md +++ b/content/id/docs/concepts/storage/storage-classes.md @@ -8,8 +8,8 @@ weight: 30 Dokumen ini mendeskripsikan konsep StorageClass yang ada pada Kubernetes. Sebelum lanjut membaca, sangat dianjurkan untuk memiliki pengetahuan terhadap -[volumes](/docs/concepts/storage/volumes/) dan -[peristent volume](/docs/concepts/storage/persistent-volumes) terlebih dahulu. +[volumes](/id/docs/concepts/storage/volumes/) dan +[peristent volume](/id/docs/concepts/storage/persistent-volumes) terlebih dahulu. @@ -40,7 +40,7 @@ dan objek yang sudah dibuat tidak dapat diubah lagi definisinya. 
Administrator dapat memberikan spesifikasi StorageClass _default_ bagi PVC yang tidak membutuhkan kelas tertentu untuk dapat melakukan mekanisme _bind_: -kamu dapat membaca [bagian `PersistentVolumeClaim`](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) +kamu dapat membaca [bagian `PersistentVolumeClaim`](/id/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) untuk penjelasan lebih lanjut. ```yaml @@ -131,7 +131,7 @@ akan gagal apabila salah satu dari keduanya bersifat invalid. ### Mode Volume _Binding_ _Field_ `volumeBindingMode` mengontrol kapan mekanisme [_binding_ volume dan -_provisioning_ dinamis](/docs/concepts/storage/persistent-volumes/#provisioning) +_provisioning_ dinamis](/id/docs/concepts/storage/persistent-volumes/#provisioning) harus dilakukan. Secara _default_, ketika mode `Immediate` yang mengindikasikan @@ -148,11 +148,11 @@ dan _binding_ dari sebuah PersistentVolume hingga sebuah Pod yang menggunakan PersistentVolumeClaim dibuat. PersistentVolume akan dipilih atau di-_provisioning_ sesuai dengan topologi yang dispesifikasikan oleh limitasi yang diberikan oleh mekanisme _scheduling_ Pod. Hal ini termasuk, tetapi tidak hanya terbatas pada, -[persyaratan sumber daya](/docs/concepts/configuration/manage-compute-resources-container), -[_node selector_](/docs/concepts/configuration/assign-pod-node/#nodeselector), +[persyaratan sumber daya](/id/docs/concepts/configuration/manage-compute-resources-container), +[_node selector_](/id/docs/concepts/configuration/assign-pod-node/#nodeselector), [afinitas dan -anti-afinitas Pod](/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity), -serta [_taint_ dan _toleration_](/docs/concepts/configuration/taint-and-toleration). +anti-afinitas Pod](/id/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity), +serta [_taint_ dan _toleration_](/id/docs/concepts/configuration/taint-and-toleration). Beberapa _plugin_ di bawah ini mendukung `WaitForFirstConsumer` dengan _provisioning_ dinamis: @@ -168,7 +168,7 @@ PersistentVolume yang terlebih dahulu dibuat: * [Lokal](#lokal) {{< feature-state state="beta" for_k8s_version="1.14" >}} -[Volume-volume CSI](/docs/concepts/storage/volumes/#csi) juga didukung +[Volume-volume CSI](/id/docs/concepts/storage/volumes/#csi) juga didukung dengan adanya _provisioning_ dinamis serta PV yang telah terlebih dahulu dibuat, meskipun demikian, akan lebih baik apabila kamu melihat dokumentasi untuk driver spesifik CSI untuk melihat topologi _key_ yang didukung @@ -634,8 +634,8 @@ parameters: di dalam grup sumber daya yang sama dengan klaster, serta `skuName` dan `location` akan diabaikan. Selama _provision_, sebuah secret dibuat untuk menyimpan _credentials_. Jika klaster -menggunakan konsep [RBAC](/docs/reference/access-authn-authz/rbac/) dan -[_Roles_ Controller](/docs/reference/access-authn-authz/rbac/#controller-roles), +menggunakan konsep [RBAC](/id/docs/reference/access-authn-authz/rbac/) dan +[_Roles_ Controller](/id/docs/reference/access-authn-authz/rbac/#controller-roles), menambahkan kapabilitas `create` untuk sumber daya `secret` bagi clusterrole `system:controller:persistent-volume-binder`. 
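For the `volumeBindingMode` discussion above, a StorageClass for pre-created local volumes that delays binding until a consuming Pod is scheduled might look like this sketch; `local-storage` is a placeholder name:

```yaml
# Sketch: delay volume binding until Pod scheduling, as used for local volumes.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-storage
provisioner: kubernetes.io/no-provisioner   # local volumes have no dynamic provisioner
volumeBindingMode: WaitForFirstConsumer
```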
diff --git a/content/id/docs/concepts/storage/volume-pvc-datasource.md b/content/id/docs/concepts/storage/volume-pvc-datasource.md index 4a5f5d8c8c8df..481e74c9761d1 100644 --- a/content/id/docs/concepts/storage/volume-pvc-datasource.md +++ b/content/id/docs/concepts/storage/volume-pvc-datasource.md @@ -7,7 +7,7 @@ weight: 30 {{< feature-state for_k8s_version="v1.16" state="beta" >}} -Dokumen ini mendeskripsikan konsep pengklonaan Volume CSI yang telah tersedia di dalam Kubernetes. Pengetahuan tentang [Volume](/docs/concepts/storage/volumes) disarankan. +Dokumen ini mendeskripsikan konsep pengklonaan Volume CSI yang telah tersedia di dalam Kubernetes. Pengetahuan tentang [Volume](/id/docs/concepts/storage/volumes) disarankan. diff --git a/content/id/docs/concepts/storage/volume-snapshot-classes.md b/content/id/docs/concepts/storage/volume-snapshot-classes.md index 0414a9d7deabc..fff7de9baabe2 100644 --- a/content/id/docs/concepts/storage/volume-snapshot-classes.md +++ b/content/id/docs/concepts/storage/volume-snapshot-classes.md @@ -7,8 +7,8 @@ weight: 30 Laman ini menjelaskan tentang konsep VolumeSnapshotClass pada Kubernetes. Sebelum melanjutkan, -sangat disarankan untuk membaca [_snapshot_ volume](/docs/concepts/storage/volume-snapshots/) -dan [kelas penyimpanan (_storage class_)](/docs/concepts/storage/storage-classes) terlebih dahulu. +sangat disarankan untuk membaca [_snapshot_ volume](/id/docs/concepts/storage/volume-snapshots/) +dan [kelas penyimpanan (_storage class_)](/id/docs/concepts/storage/storage-classes) terlebih dahulu. diff --git a/content/id/docs/concepts/storage/volume-snapshots.md b/content/id/docs/concepts/storage/volume-snapshots.md index 39ab3d31aa264..5ddfc2aaa616b 100644 --- a/content/id/docs/concepts/storage/volume-snapshots.md +++ b/content/id/docs/concepts/storage/volume-snapshots.md @@ -7,7 +7,7 @@ weight: 20 {{< feature-state for_k8s_version="v1.12" state="alpha" >}} -Laman ini menjelaskan tentang fitur VolumeSnapshot pada Kubernetes. Sebelum lanjut membaca, sangat disarankan untuk memahami [PersistentVolume](/docs/concepts/storage/persistent-volumes/) terlebih dahulu. +Laman ini menjelaskan tentang fitur VolumeSnapshot pada Kubernetes. Sebelum lanjut membaca, sangat disarankan untuk memahami [PersistentVolume](/id/docs/concepts/storage/persistent-volumes/) terlebih dahulu. @@ -48,7 +48,7 @@ Seorang adminstrator klaster membuat beberapa VolumeSnapshotContent, yang masing #### Dinamis Ketika VolumeSnapshotContent yang dibuat oleh administrator tidak ada yang sesuai dengan VolumeSnapshot yang dibuat pengguna, klaster bisa saja mencoba untuk menyediakan sebuah VolumeSnapshot secara dinamis, khususnya untuk objek VolumeSnapshot. -Proses penyediaan ini berdasarkan VolumeSnapshotClasses: VolumeSnapshot harus meminta sebuah [VolumeSnapshotClass](/docs/concepts/storage/volume-snapshot-classes/) +Proses penyediaan ini berdasarkan VolumeSnapshotClasses: VolumeSnapshot harus meminta sebuah [VolumeSnapshotClass](/id/docs/concepts/storage/volume-snapshot-classes/) dan administrator harus membuat serta mengatur _class_ tersebut supaya penyediaan dinamis bisa terjadi. ### Ikatan (_Binding_) @@ -93,7 +93,7 @@ spec: ### _Class_ Suatu VolumeSnapshotContent dapat memiliki suatu _class_, yang didapat dengan mengatur atribut -`snapshotClassName` dengan nama dari [VolumeSnapshotClass](/docs/concepts/storage/volume-snapshot-classes/). +`snapshotClassName` dengan nama dari [VolumeSnapshotClass](/id/docs/concepts/storage/volume-snapshot-classes/). 
VolumeSnapshotContent dari _class_ tertentu hanya dapat terikat (_bound_) dengan VolumeSnapshot yang "meminta" _class_ tersebut. VolumeSnapshotContent tanpa `snapshotClassName` tidak memiliki _class_ dan hanya dapat terikat (_bound_) dengan VolumeSnapshot yang "meminta" untuk tidak menggunakan _class_. @@ -117,7 +117,7 @@ spec: ### _Class_ Suatu VolumeSnapshot dapat meminta sebuah _class_ tertentu dengan mengatur nama dari -[VolumeSnapshotClass](/docs/concepts/storage/volume-snapshot-classes/) +[VolumeSnapshotClass](/id/docs/concepts/storage/volume-snapshot-classes/) menggunakan atribut `snapshotClassName`. Hanya VolumeSnapshotContent dari _class_ yang diminta, memiliki `snapshotClassName` yang sama dengan VolumeSnapshot, dapat terikat (_bound_) dengan VolumeSnapshot tersebut. @@ -127,6 +127,6 @@ dengan VolumeSnapshot, dapat terikat (_bound_) dengan VolumeSnapshot tersebut. Kamu dapat menyediakan sebuah volume baru, yang telah terisi dengan data dari suatu _snapshot_, dengan menggunakan _field_ `dataSource` pada objek PersistentVolumeClaim. -Untuk detailnya bisa dilihat pada [VolumeSnapshot and Mengembalikan Volume dari _Snapshot_](/docs/concepts/storage/persistent-volumes/#volume-snapshot-and-restore-volume-from-snapshot-support). +Untuk detailnya bisa dilihat pada [VolumeSnapshot and Mengembalikan Volume dari _Snapshot_](/id/docs/concepts/storage/persistent-volumes/#volume-snapshot-and-restore-volume-from-snapshot-support). diff --git a/content/id/docs/concepts/storage/volumes.md b/content/id/docs/concepts/storage/volumes.md index 679de8c865e23..8d593f1ebae88 100644 --- a/content/id/docs/concepts/storage/volumes.md +++ b/content/id/docs/concepts/storage/volumes.md @@ -185,7 +185,7 @@ Pada saat fitur migrasi CSI untuk Cinder diaktifkan, fitur ini akan menterjemahk ### configMap {#configmap} -Sumber daya [`configMap`](/docs/tasks/configure-pod-container/configure-pod-configmap/) memungkinkan kamu untuk menyuntikkan data konfigurasi ke dalam Pod. +Sumber daya [`configMap`](/id/docs/tasks/configure-pod-container/configure-pod-configmap/) memungkinkan kamu untuk menyuntikkan data konfigurasi ke dalam Pod. Data yang ditaruh di dalam sebuah objek `ConfigMap` dapat dirujuk dalam sebuah Volume dengan tipe `configMap` dan kemudian digunakan oleh aplikasi/container yang berjalan di dalam sebuah Pod. Saat mereferensikan sebuah objek `configMap`, kamu tinggal memasukkan nama ConfigMap tersebut ke dalam rincian Volume yang bersangkutan. Kamu juga dapat mengganti _path_ spesifik yang akan digunakan pada ConfigMap. Misalnya, untuk menambatkan ConfigMap `log-config` pada Pod yang diberi nama `configmap-pod`, kamu dapat menggunakan YAML ini: @@ -215,7 +215,7 @@ ConfigMap `log-config` ditambatkan sebagai sebuah Volume, dan semua isinya yang Perlu dicatat bahwa _path_ tersebut berasal dari isian `mountPath` pada Volume, dan `path` yang ditunjuk dengan `key` bernama `log_level`. {{< caution >}} -Kamu harus membuat sebuah [ConfigMap](/docs/tasks/configure-pod-container/configure-pod-configmap/) sebelum kamu dapat menggunakannya. +Kamu harus membuat sebuah [ConfigMap](/id/docs/tasks/configure-pod-container/configure-pod-configmap/) sebelum kamu dapat menggunakannya. {{< /caution >}} {{< note >}} @@ -346,7 +346,7 @@ Fitur [Regional Persistent Disks](https://cloud.google.com/compute/docs/disks/#r #### Menyediakan sebuah Regional PD PersistentVolume Secara Manual -Penyediaan secara dinamis mungkin dilakukan dengan sebuah [StorageClass untuk GCE PD](/docs/concepts/storage/storage-classes/#gce). 
+Penyediaan secara dinamis mungkin dilakukan dengan sebuah [StorageClass untuk GCE PD](/id/docs/concepts/storage/storage-classes/#gce). Sebelum membuat sebuah PersistentVolume, kamu harus membuat PD-nya: ```shell @@ -533,7 +533,7 @@ Kolom `nodeAffinity` ada PersistentVolue dibutuhkan saat menggunakan Volume `loc Kolom `volumeMode` pada PersistentVolume sekarang dapat disetel menjadi "Block" (menggantikan nilai bawaan "Filesystem") untuk membuka Volume `local` tersebut sebagai media penyimpanan blok mentah. Hal ini membutuhkan diaktifkannya _Alpha feature gate_ `BlockVolume`. -Saat menggunakan Volume `local`, disarankan untuk membuat sebuah StorageClass dengan `volumeBindingMode` yang disetel menjadi `WaitForFirstConsumer`. Lihat[contohnya](/docs/concepts/storage/storage-classes/#local). Menunda pengikatan Volume memastikan bahwa keputusan pengikatan PersistentVolumeClaim juga akan dievaluasi terhadap batasan-batasan Node yang berlaku pada Pod, seperti kebutuhan sumber daya Node, `nodeSelector`, `podAffinity`, dan `podAntiAffinity`. +Saat menggunakan Volume `local`, disarankan untuk membuat sebuah StorageClass dengan `volumeBindingMode` yang disetel menjadi `WaitForFirstConsumer`. Lihat[contohnya](/id/docs/concepts/storage/storage-classes/#local). Menunda pengikatan Volume memastikan bahwa keputusan pengikatan PersistentVolumeClaim juga akan dievaluasi terhadap batasan-batasan Node yang berlaku pada Pod, seperti kebutuhan sumber daya Node, `nodeSelector`, `podAffinity`, dan `podAntiAffinity`. Sebuah penyedia statis eksternal dapat berjalan secara terpisah untuk memperbaik pengaturan siklus hidup Volume `local`. Perlu dicatat bahwa penyedia ini belum mendukung _dynamic provisioning_. Untuk contoh bagaimana menjalankan penyedia Volume `local` eksternal, lihat [petunjuk penggunaannya](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner). @@ -554,9 +554,9 @@ Lihat [contoh NFS](https://github.com/kubernetes/examples/tree/{{< param "github ### persistentVolumeClaim {#persistentvolumeclaim} -Sebuah Volume `persistentVolumeClaim` digunakan untuk menambatkan sebuah [PersistentVolume](/docs/concepts/storage/persistent-volumes/) ke dalam sebuag Pod. PersistentVolume adalah sebuah cara bagi pengguna untuk "mengklaim" penyimpanan yang _durable_ (seperti sebuah GCE PD atau sebuah volume iSCSI) tanpa mengetahui detil lingkungan _cloud_ yang bersangkutan. +Sebuah Volume `persistentVolumeClaim` digunakan untuk menambatkan sebuah [PersistentVolume](/id/docs/concepts/storage/persistent-volumes/) ke dalam sebuag Pod. PersistentVolume adalah sebuah cara bagi pengguna untuk "mengklaim" penyimpanan yang _durable_ (seperti sebuah GCE PD atau sebuah volume iSCSI) tanpa mengetahui detil lingkungan _cloud_ yang bersangkutan. -Lihat [contoh PersistentVolumes](/docs/concepts/storage/persistent-volumes/) untuk lebih lanjut. +Lihat [contoh PersistentVolumes](/id/docs/concepts/storage/persistent-volumes/) untuk lebih lanjut. ### projected {#projected} @@ -742,7 +742,7 @@ Lihat [contoh RBD](https://github.com/kubernetes/examples/tree/{{< param "github ### scaleIO {#scaleio} -ScaleIO adalah _platform_ penyimpanan berbasis perangkat lunak yang dapat menggunakan perangkat keras yang sudah tersedia untuk membuat klaster-klaster media penyimpanan terhubung jaringan yang _scalable_. 
_Plugin_ Volume `scaleIO` memungkinkan Pod-pod yang di-_deploy_ untuk mengakses Volume-volume ScaleIO yang telah tersedia (atau dapat menyediakan volume-volume untuk PersistentVolumeClaim secara dinamis, lihat [Persistent Volume ScaleIO](/docs/concepts/storage/persistent-volumes/#scaleio)). +ScaleIO adalah _platform_ penyimpanan berbasis perangkat lunak yang dapat menggunakan perangkat keras yang sudah tersedia untuk membuat klaster-klaster media penyimpanan terhubung jaringan yang _scalable_. _Plugin_ Volume `scaleIO` memungkinkan Pod-pod yang di-_deploy_ untuk mengakses Volume-volume ScaleIO yang telah tersedia (atau dapat menyediakan volume-volume untuk PersistentVolumeClaim secara dinamis, lihat [Persistent Volume ScaleIO](/id/docs/concepts/storage/persistent-volumes/#scaleio)). {{< caution >}} Kamu harus memiliki klaster ScaleIO yang berjalan dengan volume-volume yang sudah dibuat sebelum kamu dapat menggunakannya. @@ -1033,7 +1033,7 @@ Dimulai pada versi 1.11, CSI memperkenalkan dukungak untuk volume blok _raw_, ya Dukungan untuk volume blok CSI bersifat _feature-gate_, tapi secara bawaan diaktifkan. Kedua _feature-gate_ yang harus diaktifkan adalah `BlockVolume` dan `CSIBlockVolume`. -Pelajari cara [menyiapkan PV/PVC dengan dukungan volume blok _raw_](/docs/concepts/storage/persistent-volumes/#raw-block-volume-support). +Pelajari cara [menyiapkan PV/PVC dengan dukungan volume blok _raw_](/id/docs/concepts/storage/persistent-volumes/#raw-block-volume-support). #### Volume CSI Sementara diff --git a/content/id/docs/concepts/workloads/controllers/cron-jobs.md b/content/id/docs/concepts/workloads/controllers/cron-jobs.md index 29fde331ead67..ca5df2d86d7c5 100644 --- a/content/id/docs/concepts/workloads/controllers/cron-jobs.md +++ b/content/id/docs/concepts/workloads/controllers/cron-jobs.md @@ -6,7 +6,7 @@ weight: 80 -Suatu CronJob menciptakan [Job](/docs/concepts/workloads/controllers/jobs-run-to-completion/) yang dijadwalkan berdasarkan waktu tertentu. +Suatu CronJob menciptakan [Job](/id/docs/concepts/workloads/controllers/jobs-run-to-completion/) yang dijadwalkan berdasarkan waktu tertentu. Satu objek CronJob sepadan dengan satu baris pada _file_ _crontab_ (_cron table_). CronJob tersebut menjalankan suatu pekerjaan secara berkala pada waktu tertentu, dituliskan dalam format [Cron](https://en.wikipedia.org/wiki/Cron). @@ -15,7 +15,7 @@ pada waktu tertentu, dituliskan dalam format [Cron](https://en.wikipedia.org/wik Seluruh waktu `schedule:` pada _**CronJob**_ mengikuti zona waktu dari _master_ di mana Job diinisiasi. {{< /note >}} -Untuk panduan dalam berkreasi dengan _cron job_, dan contoh _spec file_ untuk suatu _cron job_, lihat [Menjalankan otomasi _task_ dengan _cron job_](/docs/tasks/job/automated-tasks-with-cron-jobs). +Untuk panduan dalam berkreasi dengan _cron job_, dan contoh _spec file_ untuk suatu _cron job_, lihat [Menjalankan otomasi _task_ dengan _cron job_](/id/docs/tasks/job/automated-tasks-with-cron-jobs). diff --git a/content/id/docs/concepts/workloads/controllers/daemonset.md b/content/id/docs/concepts/workloads/controllers/daemonset.md index baa79aa3f27d4..0b1c0e71e92f1 100644 --- a/content/id/docs/concepts/workloads/controllers/daemonset.md +++ b/content/id/docs/concepts/workloads/controllers/daemonset.md @@ -48,7 +48,7 @@ kubectl apply -f https://k8s.io/examples/controllers/daemonset.yaml Seperti semua konfigurasi Kubernetes lainnya, DaemonSet membutuhkan _field_ `apiVersion`, `kind`, dan `metadata`. 
Untuk informasi umum tentang berkas konfigurasi, lihat dokumen [men-_deploy_ aplikasi](/docs/user-guide/deploying-applications/), -[pengaturan kontainer](/docs/tasks/), dan [pengelolaan objek dengan kubectl](/docs/concepts/overview/working-with-objects/object-management/). +[pengaturan kontainer](/docs/tasks/), dan [pengelolaan objek dengan kubectl](/id/docs/concepts/overview/working-with-objects/object-management/). DaemonSet juga membutuhkan bagian [`.spec`](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). @@ -61,7 +61,7 @@ DaemonSet juga membutuhkan bagian [`.spec`](https://git.k8s.io/community/contrib Selain _field_ wajib untuk Pod, templat Pod di DaemonSet harus menspesifikasikan label yang sesuai (lihat [selektor Pod](#selektor-pod)). -Templat Pod di DaemonSet harus memiliki [`RestartPolicy`](/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy) +Templat Pod di DaemonSet harus memiliki [`RestartPolicy`](/id/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy) yang bernilai `Always`, atau tidak dispesifikasikan, sehingga _default_ menjadi `Always`. DaemonSet dengan nilai `Always` membuat Pod akan selalu di-_restart_ saat kontainer keluar/berhenti atau terjadi _crash_. @@ -77,7 +77,7 @@ Mengubah selektor Pod dapat menyebabkan Pod _orphan_ yang tidak disengaja, dan m Objek `.spec.selector` memiliki dua _field_: -* `matchLabels` - bekerja seperti `.spec.selector` pada [ReplicationController](/docs/concepts/workloads/controllers/replicationcontroller/). +* `matchLabels` - bekerja seperti `.spec.selector` pada [ReplicationController](/id/docs/concepts/workloads/controllers/replicationcontroller/). * `matchExpressions` - bisa digunakan untuk membuat selektor yang lebih canggih dengan mendefinisikan _key_, daftar _value_ dan operator yang menyatakan hubungan antara _key_ dan _value_. @@ -97,8 +97,8 @@ membuat Pod dengan nilai yang berbeda di sebuah Node untuk _testing_. Jika kamu menspesifikasikan `.spec.template.spec.nodeSelector`, maka _controller_ DaemonSet akan membuat Pod pada Node yang cocok dengan [selektor -Node](/docs/concepts/configuration/assign-pod-node/). Demikian juga, jika kamu menspesifikasikan `.spec.template.spec.affinity`, -maka _controller_ DaemonSet akan membuat Pod pada Node yang cocok dengan [Node affinity](/docs/concepts/configuration/assign-pod-node/). +Node](/id/docs/concepts/configuration/assign-pod-node/). Demikian juga, jika kamu menspesifikasikan `.spec.template.spec.affinity`, +maka _controller_ DaemonSet akan membuat Pod pada Node yang cocok dengan [Node affinity](/id/docs/concepts/configuration/assign-pod-node/). Jika kamu tidak menspesifikasikan sama sekali, maka _controller_ DaemonSet akan membuat Pod pada semua Node. @@ -116,7 +116,7 @@ mendatangkan masalah-masalah berikut: * Inkonsistensi perilaku Pod: Pod normal yang menunggu dijadwalkan akan dibuat dalam keadaan `Pending`, tapi Pod DaemonSet tidak seperti itu. Ini membingungkan untuk pengguna. - * [Pod preemption](/docs/concepts/configuration/pod-priority-preemption/) + * [Pod preemption](/id/docs/concepts/configuration/pod-priority-preemption/) ditangani oleh _default scheduler_. Ketika _preemption_ dinyalakan, _controller_ DaemonSet akan membuat keputusan penjadwalan tanpa memperhitungkan prioritas Pod dan _preemption_. @@ -148,7 +148,7 @@ mengabaikan Node `unschedulable` ketika menjadwalkan Pod DaemonSet. 
### _Taint_ dan _Toleration_ Meskipun Pod Daemon menghormati -[taint dan toleration](/docs/concepts/configuration/taint-and-toleration), +[taint dan toleration](/id/docs/concepts/configuration/taint-and-toleration), _toleration_ berikut ini akan otomatis ditambahkan ke Pod DaemonSet sesuai dengan fitur yang bersangkutan. @@ -170,7 +170,7 @@ Beberapa pola yang mungkin digunakan untuk berkomunikasi dengan Pod dalam Daemon - **Push**: Pod dalam DaemonSet diatur untuk mengirim pembaruan status ke servis lain, contohnya _stats database_. Pod ini tidak memiliki klien. - **IP Node dan Konvensi Port**: Pod dalam DaemonSet dapat menggunakan `hostPort`, sehingga Pod dapat diakses menggunakan IP Node. Klien tahu daftar IP Node dengan suatu cara, dan tahu port berdasarkan konvensi. -- **DNS**: Buat [headless service](/docs/concepts/services-networking/service/#headless-services) dengan Pod selektor yang sama, +- **DNS**: Buat [headless service](/id/docs/concepts/services-networking/service/#headless-services) dengan Pod selektor yang sama, dan temukan DaemonSet menggunakan _resource_ `endpoints` atau mengambil beberapa A _record_ dari DNS. - **Service**: Buat Servis dengan Pod selektor yang sama, dan gunakan Servis untuk mengakses _daemon_ pada Node random. (Tidak ada cara mengakses spesifik Node) @@ -223,7 +223,7 @@ _bootstrapping_ klaster. ### Deployment -DaemonSet mirip dengan [Deployment](/docs/concepts/workloads/controllers/deployment/) sebab mereka +DaemonSet mirip dengan [Deployment](/id/docs/concepts/workloads/controllers/deployment/) sebab mereka sama-sama membuat Pod, dan Pod yang mereka buat punya proses yang seharusnya tidak berhenti (e.g. peladen web, peladen penyimpanan) diff --git a/content/id/docs/concepts/workloads/controllers/deployment.md b/content/id/docs/concepts/workloads/controllers/deployment.md index 045c04e59b2b6..8eae6c579feec 100644 --- a/content/id/docs/concepts/workloads/controllers/deployment.md +++ b/content/id/docs/concepts/workloads/controllers/deployment.md @@ -51,14 +51,14 @@ Dalam contoh ini: Dalam kasus ini, kamu hanya perlu memilih sebuah label yang didefinisikan pada templat Pod (`app: nginx`). Namun, aturan pemilihan yang lebih canggih mungkin dilakukan asal templat Pod-nya memenuhi aturan. {{< note >}} - Kolom `matchLabels` berbentuk pasangan {key,value}. Sebuah {key,value} dalam _map_ `matchLabels` ekuivalen dengan + Kolom `matchLabels` berbentuk pasangan {key,value}. Sebuah {key,value} dalam _map_ `matchLabels` ekuivalen dengan elemen pada `matchExpressions`, yang mana kolom key adalah "key", operator adalah "In", dan larik values hanya berisi "value". Semua prasyarat dari `matchLabels` maupun `matchExpressions` harus dipenuhi agar dapat dicocokkan. {{< /note >}} * Kolom `template` berisi sub kolom berikut: * Pod dilabeli `app: nginx` dengan kolom `labels`. - * Spesifikasi templat Pod atau kolom `.template.spec` menandakan bahwa Pod mennjalankan satu kontainer `nginx`, + * Spesifikasi templat Pod atau kolom `.template.spec` menandakan bahwa Pod mennjalankan satu kontainer `nginx`, yang menjalankan image `nginx` [Docker Hub](https://hub.docker.com/) dengan versi 1.7.9. * Membuat satu kontainer bernama `nginx` sesuai kolom `name`. @@ -123,8 +123,8 @@ Dalam contoh ini: ReplicaSet yang dibuat menjamin bahwa ada tiga Pod `nginx`. {{< note >}} - Kamu harus memasukkan selektor dan label templat Pod yang benar pada Deployment (dalam kasus ini, `app: nginx`). 
- Jangan membuat label atau selektor yang beririsan dengan kontroler lain (termasuk Deployment dan StatefulSet lainnya). Kubernetes tidak akan mencegah adanya label yang beririsan. + Kamu harus memasukkan selektor dan label templat Pod yang benar pada Deployment (dalam kasus ini, `app: nginx`). + Jangan membuat label atau selektor yang beririsan dengan kontroler lain (termasuk Deployment dan StatefulSet lainnya). Kubernetes tidak akan mencegah adanya label yang beririsan. Namun, jika beberapa kontroler memiliki selektor yang beririsan, kontroler itu mungkin akan konflik dan berjalan dengan tidak semestinya. {{< /note >}} @@ -144,7 +144,7 @@ Label ini menjamin anak-anak ReplicaSet milik Deployment tidak tumpang tindih. D Rilis Deployment hanya dapat dipicu oleh perubahan templat Pod Deployment (yaitu, `.spec.template`), contohnya perubahan kolom label atau image container. Yang lain, seperti replika, tidak akan memicu rilis. {{< /note >}} -Ikuti langkah-langkah berikut untuk membarui Deployment: +Ikuti langkah-langkah berikut untuk membarui Deployment: 1. Ganti Pod nginx menjadi image `nginx:1.9.1` dari image `nginx:1.7.9`. @@ -191,7 +191,7 @@ Untuk menampilkan detail lain dari Deployment yang terbaru: nginx-deployment 3 3 3 3 36s ``` -* Jalankan `kubectl get rs` to see that the Deployment updated the Pods dengan membuat ReplicaSet baru dan +* Jalankan `kubectl get rs` to see that the Deployment updated the Pods dengan membuat ReplicaSet baru dan menggandakannya menjadi 3 replika, sembari menghapus ReplicaSet menjadi 0 replika. ```shell @@ -228,7 +228,7 @@ menggandakannya menjadi 3 replika, sembari menghapus ReplicaSet menjadi 0 replik Umumnya, dia memastikan paling banyak ada 125% jumlah Pod yang diinginkan menyala (25% tambahan maksimal). Misalnya, jika kamu lihat Deployment diatas lebih jauh, kamu akan melihat bahwa pertama-tama dia membuat Pod baru, - kemudian menghapus beberapa Pod lama, dan membuat yang baru. Dia tidak akan menghapus Pod lama sampai ada cukup + kemudian menghapus beberapa Pod lama, dan membuat yang baru. Dia tidak akan menghapus Pod lama sampai ada cukup Pod baru menyala, dan pula tidak membuat Pod baru sampai ada cukup Pod lama telah mati. Dia memastikan paling sedikit 2 Pod menyala dan paling banyak total 4 Pod menyala. @@ -236,7 +236,7 @@ menggandakannya menjadi 3 replika, sembari menghapus ReplicaSet menjadi 0 replik ```shell kubectl describe deployments ``` - Keluaran akan tampil seperti berikut: + Keluaran akan tampil seperti berikut: ``` Name: nginx-deployment Namespace: default @@ -277,15 +277,15 @@ menggandakannya menjadi 3 replika, sembari menghapus ReplicaSet menjadi 0 replik ``` Disini bisa dilihat ketika pertama Deployment dibuat, dia membuat ReplicaSet (nginx-deployment-2035384211) dan langsung menggandakannya menjadi 3 replika. Saat Deployment diperbarui, dia membuat ReplicaSet baru - (nginx-deployment-1564180365) dan menambah 1 replika kemudian mengecilkan ReplicaSet lama menjadi 2, + (nginx-deployment-1564180365) dan menambah 1 replika kemudian mengecilkan ReplicaSet lama menjadi 2, sehingga paling sedikit 2 Pod menyala dan paling banyak 4 Pod dibuat setiap saat. Dia kemudian lanjut menaik-turunkan - ReplicaSet baru dan ReplicaSet lama, dengan strategi pembaruan rolling yang sama. + ReplicaSet baru dan ReplicaSet lama, dengan strategi pembaruan rolling yang sama. Terakhir, kamu akan dapat 3 replika di ReplicaSet baru telah menyala, dan ReplicaSet lama akan hilang (berisi 0). 
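To make the rolling-update mechanics above easier to follow, here is a minimal sketch of a Deployment that states the default update budget explicitly. The `nginx-deployment` name and `nginx:1.9.1` image mirror the example discussed on this page, while the explicit `strategy` block is an illustrative addition rather than part of the original manifest.

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 25%        # at most ~125% of the desired Pods may exist during the update
      maxUnavailable: 25%  # at least ~75% of the desired Pods stay available
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.9.1   # changing this field (or other template fields) triggers a new rollout
        ports:
        - containerPort: 80
```

With these values the controller keeps roughly between 75% and 125% of the desired Pod count running while it scales the new ReplicaSet up and the old one down, which is the behaviour described above.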
### Perpanjangan (alias banyak pembaruan secara langsung) -Setiap kali Deployment baru is teramati oleh Deployment kontroler, ReplicaSet dibuat untuk membangkitkan Pod sesuai keinginan. -Jika Deployment diperbarui, ReplicaSet yang terkait Pod dengan label `.spec.selector` yang cocok, +Setiap kali Deployment baru is teramati oleh Deployment kontroler, ReplicaSet dibuat untuk membangkitkan Pod sesuai keinginan. +Jika Deployment diperbarui, ReplicaSet yang terkait Pod dengan label `.spec.selector` yang cocok, namun kolom `.spec.template` pada templat tidak cocok akan dihapus. Kemudian, ReplicaSet baru akan digandakan sebanyak `.spec.replicas` dan semua ReplicaSet lama dihapus. @@ -294,7 +294,7 @@ tiap perubahan dan memulai penggandaan. Lalu, dia akan mengganti ReplicaSet yang -- mereka ditambahkan ke dalam daftar ReplicaSet lama dan akan mulai dihapus. Contohnya, ketika kamu membuat Deployment untuk membangkitkan 5 replika `nginx:1.7.9`, -kemudian membarui Deployment dengan versi `nginx:1.9.1` ketika ada 3 replika `nginx:1.7.9` yang dibuat. +kemudian membarui Deployment dengan versi `nginx:1.9.1` ketika ada 3 replika `nginx:1.7.9` yang dibuat. Dalam kasus ini, Deployment akan segera menghapus 3 replika Pod `nginx:1.7.9` yang telah dibuat, dan mulai membuat Pod `nginx:1.9.1`. Dia tidak akan menunggu kelima replika `nginx:1.7.9` selesai baru menjalankan perubahan. @@ -310,8 +310,8 @@ Pada versi API `apps/v1`, selektor label Deployment tidak bisa diubah ketika sel * Penambahan selektor mensyaratkan label templat Pod di spek Deployment untuk diganti dengan label baru juga. Jika tidak, galat validasi akan muncul. Perubahan haruslah tidak tumpang-tindih, dengan kata lain selektor baru tidak mencakup ReplicaSet dan Pod yang dibuat dengan selektor lama. Sehingga, semua ReplicaSet lama akan menggantung sedangkan ReplicaSet baru tetap dibuat. * Pengubahan selektor mengubah nilai pada kunci selektor -- menghasilkan perilaku yang sama dengan penambahan. -* Penghapusan selektor menghilangkan kunci yang ada pada selektor Deployment -- tidak mensyaratkan perubahan apapun pada label templat Pod. -ReplicaSet yang ada tidak menggantung dan ReplicaSet baru tidak dibuat. +* Penghapusan selektor menghilangkan kunci yang ada pada selektor Deployment -- tidak mensyaratkan perubahan apapun pada label templat Pod. +ReplicaSet yang ada tidak menggantung dan ReplicaSet baru tidak dibuat. Tapi perhatikan bahwa label yang dihapus masih ada pada Pod dan ReplicaSet masing-masing. ## Membalikkan Deployment @@ -321,10 +321,10 @@ Umumnya, semua riwayat rilis Deployment disimpan oleh sistem sehingga kamu dapat (kamu dapat mengubahnya dengan mengubah batas riwayat revisi). {{< note >}} -Revisi Deployment dibuat saat rilis Deployment dipicu. Ini berarti revisi baru dibuat jika dan hanya jika -templat Pod Deployment (`.spec.template`) berubah, misalnya jika kamu membarui label atau image kontainer pada templat. -Pembaruan lain, seperti penggantian skala Deployment, tidak membuat revisi Deployment, jadi kamu dapat memfasilitasi -penggantian skala secara manual atau otomatis secara simultan. Artinya saat kamu membalikkan ke versi sebelumnya, +Revisi Deployment dibuat saat rilis Deployment dipicu. Ini berarti revisi baru dibuat jika dan hanya jika +templat Pod Deployment (`.spec.template`) berubah, misalnya jika kamu membarui label atau image kontainer pada templat. 
+Pembaruan lain, seperti penggantian skala Deployment, tidak membuat revisi Deployment, jadi kamu dapat memfasilitasi +penggantian skala secara manual atau otomatis secara simultan. Artinya saat kamu membalikkan ke versi sebelumnya, hanya bagian templat Pod Deployment yang dibalikkan. {{< /note >}} @@ -350,7 +350,7 @@ hanya bagian templat Pod Deployment yang dibalikkan. Waiting for rollout to finish: 1 out of 3 new replicas have been updated... ``` -* Tekan Ctrl-C untuk menghentikan pemeriksaan status rilis di atas. Untuk info lebih lanjut +* Tekan Ctrl-C untuk menghentikan pemeriksaan status rilis di atas. Untuk info lebih lanjut tentang rilis tersendat, [baca disini](#status-deployment). * Kamu lihat bahwa jumlah replika lama (`nginx-deployment-1564180365` dan `nginx-deployment-2035384211`) adalah 2, dan replika baru (nginx-deployment-3066724191) adalah 1. @@ -383,17 +383,17 @@ tentang rilis tersendat, [baca disini](#status-deployment). ``` {{< note >}} - Controller Deployment menghentikan rilis yang buruk secara otomatis dan juga berhenti meningkatkan ReplicaSet baru. + Controller Deployment menghentikan rilis yang buruk secara otomatis dan juga berhenti meningkatkan ReplicaSet baru. Ini tergantung pada parameter rollingUpdate (secara khusus `maxUnavailable`) yang dimasukkan. Kubernetes umumnya mengatur jumlahnya menjadi 25%. {{< /note >}} -* Tampilkan deskripsi Deployment: +* Tampilkan deskripsi Deployment: ```shell kubectl describe deployment ``` - Keluaran akan tampil seperti berikut: + Keluaran akan tampil seperti berikut: ``` Name: nginx-deployment Namespace: default @@ -440,11 +440,11 @@ tentang rilis tersendat, [baca disini](#status-deployment). Ikuti langkah-langkah berikut untuk mengecek riwayat rilis: -1. Pertama, cek revisi Deployment sekarang: +1. Pertama, cek revisi Deployment sekarang: ```shell kubectl rollout history deployment.v1.apps/nginx-deployment ``` - Keluaran akan tampil seperti berikut: + Keluaran akan tampil seperti berikut: ``` deployments "nginx-deployment" REVISION CHANGE-CAUSE @@ -464,7 +464,7 @@ Ikuti langkah-langkah berikut untuk mengecek riwayat rilis: kubectl rollout history deployment.v1.apps/nginx-deployment --revision=2 ``` - Keluaran akan tampil seperti berikut: + Keluaran akan tampil seperti berikut: ``` deployments "nginx-deployment" revision 2 Labels: app=nginx @@ -489,7 +489,7 @@ Ikuti langkah-langkah berikut untuk membalikkan Deployment dari versi sekarang k kubectl rollout undo deployment.v1.apps/nginx-deployment ``` - Keluaran akan tampil seperti berikut: + Keluaran akan tampil seperti berikut: ``` deployment.apps/nginx-deployment ``` @@ -499,7 +499,7 @@ Ikuti langkah-langkah berikut untuk membalikkan Deployment dari versi sekarang k kubectl rollout undo deployment.v1.apps/nginx-deployment --to-revision=2 ``` - Keluaran akan tampil seperti berikut: + Keluaran akan tampil seperti berikut: ``` deployment.apps/nginx-deployment ``` @@ -514,16 +514,16 @@ Ikuti langkah-langkah berikut untuk membalikkan Deployment dari versi sekarang k kubectl get deployment nginx-deployment ``` - Keluaran akan tampil seperti berikut: + Keluaran akan tampil seperti berikut: ``` NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE nginx-deployment 3 3 3 3 30m ``` -3. Tampilkan deskripsi Deployment: +3. 
Tampilkan deskripsi Deployment: ```shell kubectl describe deployment nginx-deployment ``` - Keluaran akan tampil seperti berikut: + Keluaran akan tampil seperti berikut: ``` Name: nginx-deployment Namespace: default @@ -594,9 +594,9 @@ deployment.apps/nginx-deployment scaled ### Pengaturan skala proporsional -Deployment RollingUpdate mendukung beberapa versi aplikasi berjalan secara bersamaan. Ketika kamu atau autoscaler -mengubah skala Deployment RollingUpdate yang ada di tengah rilis (yang sedang berjalan maupun terjeda), -kontroler Deployment menyeimbangkan replika tambahan dalam ReplicaSet aktif (ReplicaSet dengan Pod) untuk mencegah resiko. +Deployment RollingUpdate mendukung beberapa versi aplikasi berjalan secara bersamaan. Ketika kamu atau autoscaler +mengubah skala Deployment RollingUpdate yang ada di tengah rilis (yang sedang berjalan maupun terjeda), +kontroler Deployment menyeimbangkan replika tambahan dalam ReplicaSet aktif (ReplicaSet dengan Pod) untuk mencegah resiko. Ini disebut *pengaturan skala proporsional*. Sebagai contoh, kamu menjalankan Deployment dengan 10 replika, [maxSurge](#max-surge)=3, dan [maxUnavailable](#max-unavailable)=2. @@ -636,20 +636,20 @@ persyaratan `maxUnavailable` yang disebut di atas. Cek status rilis: * Kemudian, permintaan peningkatan untuk Deployment akan masuk. Autoscaler menambah replika Deployment menjadi 15. Controller Deployment perlu menentukan dimana 5 replika ini ditambahkan. Jika kamu memakai -pengaturan skala proporsional, kelima replika akan ditambahkan ke ReplicaSet baru. Dengan pengaturan skala proporsional, +pengaturan skala proporsional, kelima replika akan ditambahkan ke ReplicaSet baru. Dengan pengaturan skala proporsional, kamu menyebarkan replika tambahan ke semua ReplicaSet. Proporsi terbesar ada pada ReplicaSet dengan -replika terbanyak dan proporsi yang lebih kecil untuk replika dengan ReplicaSet yang lebih sedikit. +replika terbanyak dan proporsi yang lebih kecil untuk replika dengan ReplicaSet yang lebih sedikit. Sisanya akan diberikan ReplicaSet dengan replika terbanyak. ReplicaSet tanpa replika tidak akan ditingkatkan. -Dalam kasus kita di atas, 3 replika ditambahkan ke ReplicaSet lama dan 2 replika ditambahkan ke ReplicaSet baru. -Proses rilis akan segera memindahkan semua ReplicaSet baru, dengan asumsi semua replika dalam kondisi sehat. -Untuk memastikannya, jalankan: +Dalam kasus kita di atas, 3 replika ditambahkan ke ReplicaSet lama dan 2 replika ditambahkan ke ReplicaSet baru. +Proses rilis akan segera memindahkan semua ReplicaSet baru, dengan asumsi semua replika dalam kondisi sehat. +Untuk memastikannya, jalankan: ```shell kubectl get deploy ``` -Keluaran akan tampil seperti berikut: +Keluaran akan tampil seperti berikut: ``` NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE nginx-deployment 15 18 7 8 7m @@ -668,7 +668,7 @@ nginx-deployment-618515232 11 11 11 7m ## Menjeda dan Melanjutkan Deployment -Kamu dapat menjeda Deployment sebelum memicu satu atau lebih pembaruan kemudian meneruskannya. +Kamu dapat menjeda Deployment sebelum memicu satu atau lebih pembaruan kemudian meneruskannya. Hal ini memungkinkanmu menerapkan beberapa perbaikan selama selang jeda tanpa melakukan rilis yang tidak perlu. 
* Sebagai contoh, Deployment yang baru dibuat: @@ -743,7 +743,7 @@ Hal ini memungkinkanmu menerapkan beberapa perbaikan selama selang jeda tanpa me deployment.apps/nginx-deployment resource requirements updated ``` - The state awal Deployment sebelum jeda akan melanjutkan fungsinya, tapi perubahan + The state awal Deployment sebelum jeda akan melanjutkan fungsinya, tapi perubahan Deployment tidak akan berefek apapun selama Deployment masih terjeda. * Kemudian, mulai kembali Deployment dan perhatikan ReplicaSet baru akan muncul dengan semua perubahan baru: @@ -795,7 +795,7 @@ Kamu tidak bisa membalikkan Deployment yang terjeda sampai dia diteruskan. ## Status Deployment -Deployment melalui berbagai state dalam daur hidupnya. Dia dapat [berlangsung](#deployment-berlangsung) selagi merilis ReplicaSet baru, bisa juga [selesai](#deployment-selesai), +Deployment melalui berbagai state dalam daur hidupnya. Dia dapat [berlangsung](#deployment-berlangsung) selagi merilis ReplicaSet baru, bisa juga [selesai](#deployment-selesai), atau juga [gagal](#deployment-gagal). ### Deployment Berlangsung @@ -817,7 +817,7 @@ Kubernetes menandai Deployment sebagai _complete_ saat memiliki karakteristik be * Semua replika terkait Deployment dapat diakses. * Tidak ada replika lama untuk Deployment yang berjalan. -Kamu dapat mengecek apakah Deployment telah selesai dengan `kubectl rollout status`. +Kamu dapat mengecek apakah Deployment telah selesai dengan `kubectl rollout status`. Jika rilis selesai, `kubectl rollout status` akan mengembalikan nilai balik nol. ```shell @@ -833,7 +833,7 @@ $ echo $? ### Deployment Gagal -Deployment-mu bisa saja terhenti saat mencoba deploy ReplicaSet terbaru tanpa pernah selesai. +Deployment-mu bisa saja terhenti saat mencoba deploy ReplicaSet terbaru tanpa pernah selesai. Ini dapat terjadi karena faktor berikut: * Kuota tidak mencukupi @@ -868,7 +868,7 @@ berikut ke `.status.conditions` milik Deployment: Lihat [konvensi Kubernetes API](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties) untuk info lebih lanjut tentang kondisi status. {{< note >}} -Kubernetes tidak melakukan apapun pada Deployment yang tersendat selain melaporkannya sebagai `Reason=ProgressDeadlineExceeded`. +Kubernetes tidak melakukan apapun pada Deployment yang tersendat selain melaporkannya sebagai `Reason=ProgressDeadlineExceeded`. Orkestrator yang lebih tinggi dapat memanfaatkannya untuk melakukan tindak lanjut. Misalnya, mengembalikan Deployment ke versi sebelumnya. {{< /note >}} @@ -877,7 +877,7 @@ Jika Deployment terjeda, Kubernetes tidak akan mengecek kemajuan pada selang itu Kamu dapat menjeda Deployment di tengah rilis dan melanjutkannya dengan aman tanpa memicu kondisi saat tenggat telah lewat. {{< /note >}} -Kamu dapat mengalami galat sejenak pada Deployment disebabkan timeout yang dipasang terlalu kecil atau +Kamu dapat mengalami galat sejenak pada Deployment disebabkan timeout yang dipasang terlalu kecil atau hal-hal lain yang terjadi sementara. Misalnya, kamu punya kuota yang tidak mencukupi. Jika kamu mendeskripsikan Deployment kamu akan menjumpai pada bagian ini: @@ -937,7 +937,7 @@ Conditions: ReplicaFailure True FailedCreate ``` -Kamu dapat menangani isu keterbatasan kuota dengan menurunkan jumlah Deployment, bisa dengan menghapus kontrolers +Kamu dapat menangani isu keterbatasan kuota dengan menurunkan jumlah Deployment, bisa dengan menghapus kontrolers yang sedang berjalan, atau dengan meningkatkan kuota pada namespace. 
Jika kuota tersedia, kemudian kontroler Deployment akan dapat menyelesaikan rilis Deployment. Kamu akan melihat bahwa status Deployment berubah menjadi kondisi sukses (`Status=True` dan `Reason=NewReplicaSetAvailable`). @@ -951,7 +951,7 @@ Conditions: `Type=Available` dengan `Status=True` artinya Deployment-mu punya ketersediaan minimum. Ketersediaan minimum diatur oleh parameter yang dibuat pada strategi deployment. `Type=Progressing` dengan `Status=True` berarti Deployment -sedang dalam rilis dan masih berjalan atau sudah selesai berjalan dan jumlah minimum replika tersedia +sedang dalam rilis dan masih berjalan atau sudah selesai berjalan dan jumlah minimum replika tersedia (lihat bagian Alasan untuk kondisi tertentu - dalam kasus ini `Reason=NewReplicaSetAvailable` berarti Deployment telah selesai). Kamu dapat mengecek apakah Deployment gagal berkembang dengan perintah `kubectl rollout status`. `kubectl rollout status` @@ -974,7 +974,7 @@ Semua aksi yang dapat diterapkan pada Deployment yang selesai berjalan juga pada ## Kebijakan Pembersihan -Kamu dapat mengisi kolom `.spec.revisionHistoryLimit` di Deployment untuk menentukan banyak ReplicaSet +Kamu dapat mengisi kolom `.spec.revisionHistoryLimit` di Deployment untuk menentukan banyak ReplicaSet pada Deployment yang ingin dipertahankan. Sisanya akan di garbage-collected di balik layar. Umumnya, nilai kolom berisi 10. {{< note >}} @@ -984,7 +984,7 @@ sehingga Deployment tidak akan dapat dikembalikan. ## Deployment Canary -Jika kamu ingin merilis ke sebagian pengguna atau server menggunakan Deployment, +Jika kamu ingin merilis ke sebagian pengguna atau server menggunakan Deployment, kamu dapat membuat beberapa Deployment, satu tiap rilis, dengan mengikuti pola canary yang didesripsikan pada [mengelola sumber daya](/id/docs/concepts/cluster-administration/manage-deployment/#deploy-dengan-canary). @@ -1002,7 +1002,7 @@ Dalam `.spec` hanya ada kolom `.spec.template` dan `.spec.selector` yang wajib d `.spec.template` adalah [templat Pod](/id/docs/concepts/workloads/pods/pod-overview/#templat-pod). Dia memiliki skema yang sama dengan [Pod](/id/docs/concepts/workloads/pods/pod/). Bedanya dia bersarang dan tidak punya `apiVersion` atau `kind`. -Selain kolom wajib untuk Pod, templat Pod pada Deployment harus menentukan label dan aturan menjalankan ulang yang tepat. +Selain kolom wajib untuk Pod, templat Pod pada Deployment harus menentukan label dan aturan menjalankan ulang yang tepat. Untuk label, pastikaan tidak bertumpang tindih dengan kontroler lainnya. Lihat [selektor](#selektor)). [`.spec.template.spec.restartPolicy`](/id/docs/concepts/workloads/pods/pod-lifecycle/#aturan-menjalankan-ulang) hanya boleh berisi `Always`, @@ -1019,21 +1019,21 @@ untuk Pod yang dituju oleh Deployment ini. `.spec.selector` harus sesuai `.spec.template.metadata.labels`, atau akan ditolak oleh API. -Di versi API `apps/v1`, `.spec.selector` dan `.metadata.labels` tidak berisi `.spec.template.metadata.labels` jika tidak disetel. +Di versi API `apps/v1`, `.spec.selector` dan `.metadata.labels` tidak berisi `.spec.template.metadata.labels` jika tidak disetel. Jadi mereka harus disetel secara eksplisit. Perhatikan juga `.spec.selector` tidak dapat diubah setelah Deployment dibuat pada `apps/v1`. Deployment dapat mematikan Pod yang labelnya cocok dengan selektor jika templatnya berbeda -dari `.spec.template` atau total jumlah Pod melebihi `.spec.replicas`. Dia akan membuat Pod baru +dari `.spec.template` atau total jumlah Pod melebihi `.spec.replicas`. 
Dia akan membuat Pod baru dengan `.spec.template` jika jumlah Pod kurang dari yang diinginkan. {{< note >}} -Kamu sebaiknya tidak membuat Pod lain yang labelnya cocok dengan selektor ini, baik secara langsung, -melalui Deployment lain, atau membuat kontroler lain seperti ReplicaSet atau ReplicationController. -Kalau kamu melakukannya, Deployment pertama akan mengira dia yang membuat Pod-pod ini. +Kamu sebaiknya tidak membuat Pod lain yang labelnya cocok dengan selektor ini, baik secara langsung, +melalui Deployment lain, atau membuat kontroler lain seperti ReplicaSet atau ReplicationController. +Kalau kamu melakukannya, Deployment pertama akan mengira dia yang membuat Pod-pod ini. Kubernetes tidak akan mencegahmu melakukannya. {{< /note >}} -Jika kamu punya beberapa kontroler dengan selektor bertindihan, mereka akan saling bertikai +Jika kamu punya beberapa kontroler dengan selektor bertindihan, mereka akan saling bertikai dan tidak akan berjalan semestinya. ### Strategi @@ -1047,65 +1047,65 @@ Semua Pod yang ada dimatikan sebelum yang baru dibuat ketika nilai `.spec.strate #### Membarui Deployment secara Bergulir -Deployment membarui Pod secara [bergulir](/id/docs/tasks/run-application/rolling-update-replication-controller/) +Deployment membarui Pod secara bergulir saat `.spec.strategy.type==RollingUpdate`. Kamu dapat menentukan `maxUnavailable` dan `maxSurge` untuk mengatur proses pembaruan bergulir. ##### Ketidaktersediaan Maksimum -`.spec.strategy.rollingUpdate.maxUnavailable` adalah kolom opsional yang mengatur jumlah Pod maksimal -yang tidak tersedia selama proses pembaruan. Nilainya bisa berupa angka mutlak (contohnya 5) -atau persentase dari Pod yang diinginkan (contohnya 10%). Angka mutlak dihitung berdasarkan persentase -dengan pembulatan ke bawah. Nilai tidak bisa nol jika `.spec.strategy.rollingUpdate.maxSurge` juga nol. +`.spec.strategy.rollingUpdate.maxUnavailable` adalah kolom opsional yang mengatur jumlah Pod maksimal +yang tidak tersedia selama proses pembaruan. Nilainya bisa berupa angka mutlak (contohnya 5) +atau persentase dari Pod yang diinginkan (contohnya 10%). Angka mutlak dihitung berdasarkan persentase +dengan pembulatan ke bawah. Nilai tidak bisa nol jika `.spec.strategy.rollingUpdate.maxSurge` juga nol. Nilai bawaannya yaitu 25%. -Sebagai contoh, ketika nilai berisi 30%, ReplicaSet lama dapat segera diperkecil menjadi 70% dari Pod -yang diinginkan saat pembaruan bergulir dimulai. Seketika Pod baru siap, ReplicaSet lama dapat lebih diperkecil lagi, -diikuti dengan pembesaran ReplicaSet, menjamin total jumlah Pod yang siap kapanpun ketika pembaruan +Sebagai contoh, ketika nilai berisi 30%, ReplicaSet lama dapat segera diperkecil menjadi 70% dari Pod +yang diinginkan saat pembaruan bergulir dimulai. Seketika Pod baru siap, ReplicaSet lama dapat lebih diperkecil lagi, +diikuti dengan pembesaran ReplicaSet, menjamin total jumlah Pod yang siap kapanpun ketika pembaruan paling sedikit 70% dari Pod yang diinginkan. ##### Kelebihan Maksimum -`.spec.strategy.rollingUpdate.maxSurge` adalah kolom opsional yang mengatur jumlah Pod maksimal yang -dapat dibuat melebihi jumlah Pod yang diinginkan. Nilainya bisa berupa angka mutlak (contohnya 5) atau persentase -dari Pod yang diinginkan (contohnya 10%). Nilai tidak bisa nol jika `MaxUnavailable` juga nol. Angka mutlak +`.spec.strategy.rollingUpdate.maxSurge` adalah kolom opsional yang mengatur jumlah Pod maksimal yang +dapat dibuat melebihi jumlah Pod yang diinginkan. 
Nilainya bisa berupa angka mutlak (contohnya 5) atau persentase +dari Pod yang diinginkan (contohnya 10%). Nilai tidak bisa nol jika `MaxUnavailable` juga nol. Angka mutlak dihitung berdasarkan persentase dengan pembulatan ke bawah. Nilai bawaannya yaitu 25%. -Sebagai contoh, ketika nilai berisi 30%, ReplicaSet baru dapat segera diperbesar saat pembaruan bergulir dimulai, -sehingga total jumlah Pod yang baru dan lama tidak melebihi 130% dari Pod yang diinginkan. -Saat Pod lama dimatikan, ReplicaSet baru dapat lebih diperbesar lagi, menjamin total jumlah Pod yang siap +Sebagai contoh, ketika nilai berisi 30%, ReplicaSet baru dapat segera diperbesar saat pembaruan bergulir dimulai, +sehingga total jumlah Pod yang baru dan lama tidak melebihi 130% dari Pod yang diinginkan. +Saat Pod lama dimatikan, ReplicaSet baru dapat lebih diperbesar lagi, menjamin total jumlah Pod yang siap kapanpun ketika pembaruan paling banyak 130% dari Pod yang diinginkan. ### Tenggat Kemajuan dalam Detik -`.spec.progressDeadlineSeconds` adalah kolom opsional yang mengatur lama tunggu dalam dalam detik untuk Deployment-mu berjalan -sebelum sistem melaporkan lagi bahwa Deployment [gagal](#deployment-gagal) - ditunjukkan dengan kondisi `Type=Progressing`, `Status=False`, -dan `Reason=ProgressDeadlineExceeded` pada status sumber daya. Controller Deployment akan tetap mencoba ulang Deployment. -Nantinya begitu pengembalian otomatis diimplementasikan, kontroler Deployment akan membalikkan Deployment segera +`.spec.progressDeadlineSeconds` adalah kolom opsional yang mengatur lama tunggu dalam dalam detik untuk Deployment-mu berjalan +sebelum sistem melaporkan lagi bahwa Deployment [gagal](#deployment-gagal) - ditunjukkan dengan kondisi `Type=Progressing`, `Status=False`, +dan `Reason=ProgressDeadlineExceeded` pada status sumber daya. Controller Deployment akan tetap mencoba ulang Deployment. +Nantinya begitu pengembalian otomatis diimplementasikan, kontroler Deployment akan membalikkan Deployment segera saat dia menjumpai kondisi tersebut. Jika ditentukan, kolom ini harus lebih besar dari `.spec.minReadySeconds`. ### Lama Minimum untuk Siap dalam Detik -`.spec.minReadySeconds` adalah kolom opsional yang mengatur lama minimal sebuah Pod yang baru dibuat +`.spec.minReadySeconds` adalah kolom opsional yang mengatur lama minimal sebuah Pod yang baru dibuat seharusnya siap tanpa ada kontainer yang rusak, untuk dianggap tersedia, dalam detik. -Nilai bawaannya yaitu 0 (Pod akan dianggap tersedia segera ketika siap). Untuk mempelajari lebih lanjut +Nilai bawaannya yaitu 0 (Pod akan dianggap tersedia segera ketika siap). Untuk mempelajari lebih lanjut kapan Pod dianggap siap, lihat [Pemeriksaan Kontainer](/id/docs/concepts/workloads/pods/pod-lifecycle/#pemeriksaan-kontainer). ### Kembali Ke -Kolom `.spec.rollbackTo` telah ditinggalkan pada versi API `extensions/v1beta1` dan `apps/v1beta1`, dan sudah tidak didukung mulai versi API `apps/v1beta2`. +Kolom `.spec.rollbackTo` telah ditinggalkan pada versi API `extensions/v1beta1` dan `apps/v1beta1`, dan sudah tidak didukung mulai versi API `apps/v1beta2`. Sebagai gantinya, disarankan untuk menggunakan `kubectl rollout undo` sebagaimana diperkenalkan dalam [Kembali ke Revisi Sebelumnya](#kembali-ke-revisi-sebelumnya). ### Batas Riwayat Revisi Riwayat revisi Deployment disimpan dalam ReplicaSet yang dia kendalikan. -`.spec.revisionHistoryLimit` adalah kolom opsional yang mengatur jumlah ReplicaSet lama yang dipertahankan -untuk memungkinkan pengembalian. 
ReplicaSet lama ini mengambil sumber daya dari `etcd` dan memunculkan keluaran -dari `kubectl get rs`. Konfigurasi tiap revisi Deployment disimpan pada ReplicaSet-nya; sehingga, begitu ReplicaSet lama dihapus, -kamu tidak mampu lagi membalikkan revisi Deployment-nya. Umumnya, 10 ReplicaSet lama akan dipertahankan, +`.spec.revisionHistoryLimit` adalah kolom opsional yang mengatur jumlah ReplicaSet lama yang dipertahankan +untuk memungkinkan pengembalian. ReplicaSet lama ini mengambil sumber daya dari `etcd` dan memunculkan keluaran +dari `kubectl get rs`. Konfigurasi tiap revisi Deployment disimpan pada ReplicaSet-nya; sehingga, begitu ReplicaSet lama dihapus, +kamu tidak mampu lagi membalikkan revisi Deployment-nya. Umumnya, 10 ReplicaSet lama akan dipertahankan, namun nilai idealnya tergantung pada frekuensi dan stabilitas Deployment-deployment baru. Lebih spesifik, mengisi kolom dengan nol berarti semua ReplicaSet lama dengan 0 replika akan dibersihkan. @@ -1114,7 +1114,7 @@ Dalam kasus ini, rilis Deployment baru tidak dapat dibalikkan, sebab riwayat rev ### Terjeda `.spec.paused` adalah kolom boolean opsional untuk menjeda dan melanjutkan Deployment. Perbedaan antara Deployment yang terjeda -dan yang tidak hanyalah perubahan apapun pada PodTemplateSpec Deployment terjeda tidak akan memicu rilis baru selama masih terjeda. +dan yang tidak hanyalah perubahan apapun pada PodTemplateSpec Deployment terjeda tidak akan memicu rilis baru selama masih terjeda. Deployment umumnya tidak terjeda saat dibuat. ## Alternatif untuk Deployment @@ -1122,7 +1122,6 @@ Deployment umumnya tidak terjeda saat dibuat. ### kubectl rolling update [`kubectl rolling update`](/id/docs/reference/generated/kubectl/kubectl-commands#rolling-update) membarui Pod dan ReplicationController -dengan cara yang serupa. Namun, Deployments lebih disarankan karena deklaratif, berjalan di sisi server, dan punya fitur tambahan, +dengan cara yang serupa. Namun, Deployments lebih disarankan karena deklaratif, berjalan di sisi server, dan punya fitur tambahan, seperti pembalikkan ke revisi manapun sebelumnya bahkan setelah pembaruan rolling selesais. - diff --git a/content/id/docs/concepts/workloads/controllers/jobs-run-to-completion.md b/content/id/docs/concepts/workloads/controllers/jobs-run-to-completion.md index 4aca03535f9ba..5f4720646be0b 100644 --- a/content/id/docs/concepts/workloads/controllers/jobs-run-to-completion.md +++ b/content/id/docs/concepts/workloads/controllers/jobs-run-to-completion.md @@ -119,14 +119,14 @@ Sebuah Job juga membutuhkan sebuah [bagian `.spec`](https://git.k8s.io/community _Field_ `.spec.template` merupakan satu-satunya _field_ wajib pada `.spec`. -_Field_ `.spec.template` merupakan sebuah [templat Pod](/docs/concepts/workloads/pods/pod-overview/#pod-templates). _Field_ ini memiliki skema yang sama dengan yang ada pada [Pod](/docs/user-guide/pods), +_Field_ `.spec.template` merupakan sebuah [templat Pod](/id/docs/concepts/workloads/pods/pod-overview/#pod-templates). _Field_ ini memiliki skema yang sama dengan yang ada pada [Pod](/docs/user-guide/pods), kecuali _field_ ini bersifat _nested_ dan tidak memiliki _field_ `apiVersion` atau _field_ `kind`. Sebagai tambahan dari _field_ wajib pada sebuah Job, sebuah tempat pod pada Job haruslah menspesifikasikan label yang sesuai (perhatikan [selektor pod](#pod-selektor)) dan sebuah mekanisme _restart_ yang sesuai. 
-Hanya sebuah [`RestartPolicy`](/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy) yang sesuai dengan `Never` atau `OnFailure` yang bersifat valid. +Hanya sebuah [`RestartPolicy`](/id/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy) yang sesuai dengan `Never` atau `OnFailure` yang bersifat valid. ### Selektor Pod @@ -194,7 +194,7 @@ Jika hal ini terjadi, dan `.spec.template.spec.restartPolicy = "OnFailure"`, mak akan tetap ada di dalam node, tetapi Container tersebut akan dijalankan kembali. Dengan demikian, program kamu harus dapat mengatasi kasus dimana program tersebut di-_restart_ secara lokal, atau jika tidak maka spesifikasikan `.spec.template.spec.restartPolicy = "Never"`. Perhatikan -[_lifecycle_ pod](/docs/concepts/workloads/pods/pod-lifecycle/#example-states) untuk informasi lebih lanjut mengenai `restartPolicy`. +[_lifecycle_ pod](/id/docs/concepts/workloads/pods/pod-lifecycle/#example-states) untuk informasi lebih lanjut mengenai `restartPolicy`. Sebuah Pod juga dapat gagal secara menyeluruh, untuk beberapa alasan yang mungkin, misalnya saja, ketika Pod tersebut dipindahkan dari Node (ketika Node diperbarui, di-_restart_, dihapus, dsb.), atau @@ -288,7 +288,7 @@ Pastikan kamu telah menspesifikasikan nilai tersebut pada level yang dibutuhkan. Job yang sudah selesai biasanya tidak lagi dibutuhkan di dalam sistem. Tetap menjaga keberadaan objek-objek tersebut di dalam sistem akan memberikan tekanan tambahan pada API server. Jika sebuah Job yang diatur secara langsung oleh _controller_ dengan level yang lebih tinggi, seperti -[CronJob](/docs/concepts/workloads/controllers/cron-jobs/), maka Job ini dapat +[CronJob](/id/docs/concepts/workloads/controllers/cron-jobs/), maka Job ini dapat di-_clean up_ oleh CronJob berdasarkan _policy_ berbasis kapasitas yang dispesifikasikan. ### Mekanisme TTL untuk Job yang Telah Selesai Dijalankan @@ -298,7 +298,7 @@ di-_clean up_ oleh CronJob berdasarkan _policy_ berbasis kapasitas yang dispesif Salah satu cara untuk melakukan _clean up_ Job yang telah selesai dijalankan (baik dengan status `Complete` atau `Failed`) secara otomatis adalah dengan menerapkan mekanisme TTL yang disediakan oleh -[_controller_ TTL](/docs/concepts/workloads/controllers/ttlafterfinished/) untuk +[_controller_ TTL](/id/docs/concepts/workloads/controllers/ttlafterfinished/) untuk sumber daya yang telah selesai digunakan, dengan cara menspesifikasikan _field_ `.spec.ttlSecondsAfterFinished` dari Job tersebut. @@ -334,7 +334,7 @@ maka Job ini tidak akan dihapus oleh _controller_ TTL setelah Job ini selesai di Perhatikan bahwa mekanisme TTL ini merupakan fitur alpha, dengan gerbang fitur `TTLAfterFinished`. Untuk informasi lebih lanjut, kamu dapat membaca dokumentasi untuk -[_controller_ TTL](/docs/concepts/workloads/controllers/ttlafterfinished/) untuk +[_controller_ TTL](/id/docs/concepts/workloads/controllers/ttlafterfinished/) untuk sumber daya yang telah selesai dijalankan. ## Pola Job @@ -478,7 +478,7 @@ Job merupakan komplemen dari [Replication Controller](/docs/user-guide/replicati Sebuah Replication Controller mengatur Pod yang diharapkan untuk tidak dihentikan (misalnya, _web server_), dan sebuah Job mengatur Pod yang diharapkan untuk berhenti (misalnya, _batch task_). 
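As a hedged illustration of the Job template and restart-policy requirements discussed above, a minimal spec might look like the following; the `pi` name, `perl` image, and the `backoffLimit` value are assumptions for the sketch, not details taken from this page.

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: pi                 # hypothetical name
spec:
  backoffLimit: 4          # assumed value; retries before the Job is marked failed
  template:
    spec:
      restartPolicy: Never   # only Never or OnFailure is valid in a Job's Pod template
      containers:
      - name: pi
        image: perl
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
```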
-Seperti yang sudah dibahas pada [_Lifecycle_ Pod](/docs/concepts/workloads/pods/pod-lifecycle/), `Job` *hanya* pantas +Seperti yang sudah dibahas pada [_Lifecycle_ Pod](/id/docs/concepts/workloads/pods/pod-lifecycle/), `Job` *hanya* pantas digunakan untuk Pod dengan `RestartPolicy` yang sama dengan `OnFailure` atau `Never`. (Perhatikan bahwa: Jika `RestartPolicy` tidak dispesifikasikan, nilai defaultnya adalah `Always`.) @@ -499,7 +499,7 @@ dari sebuah Job, tetapi kontrol secara mutlak atas Pod yang dibuat serta tugas y ## CronJob {#cron-jobs} -Kamu dapat menggunakan [`CronJob`](/docs/concepts/workloads/controllers/cron-jobs/) untuk membuat Job yang akan +Kamu dapat menggunakan [`CronJob`](/id/docs/concepts/workloads/controllers/cron-jobs/) untuk membuat Job yang akan dijalankan pada waktu/tanggal yang spesifik, mirip dengan perangkat lunak `cron` yang ada pada Unix. diff --git a/content/id/docs/concepts/workloads/controllers/replicaset.md b/content/id/docs/concepts/workloads/controllers/replicaset.md index c0c3a83d51958..57b1124208a91 100644 --- a/content/id/docs/concepts/workloads/controllers/replicaset.md +++ b/content/id/docs/concepts/workloads/controllers/replicaset.md @@ -197,7 +197,7 @@ Untuk _field_ [_restart policy_](/docs/concepts/workloads/Pods/pod-lifecycle/#re ### Selektor Pod -_Field_ `.spec.selector` adalah sebuah [selektor labe](/docs/concepts/overview/working-with-objects/labels/). Seperti yang telah dibahas [sebelumnya](#how-a-replicaset-works), _field_ ini adalah label yang digunakan untuk mengidentifikasi Pod yang memungkinkan untuk diakuisisi. Pada contoh `frontend.yaml`, selektornya adalah: +_Field_ `.spec.selector` adalah sebuah [selektor labe](/id/docs/concepts/overview/working-with-objects/labels/). Seperti yang telah dibahas [sebelumnya](#how-a-replicaset-works), _field_ ini adalah label yang digunakan untuk mengidentifikasi Pod yang memungkinkan untuk diakuisisi. Pada contoh `frontend.yaml`, selektornya adalah: ```shell matchLabels: tier: frontend @@ -219,7 +219,7 @@ Jika nilai `.spec.replicas` tidak ditentukan maka akan diatur ke nilai _default_ ### Menghapus ReplicaSet dan Pod-nya -Untuk menghapus sebuah ReplicaSet beserta dengan Pod-nya, gunakan [`kubectl delete`](/docs/reference/generated/kubectl/kubectl-commands#delete). [_Garbage collector_](/docs/concepts/workloads/controllers/garbage-collection/) secara otomatis akan menghapus semua Pod dependen secara _default_. +Untuk menghapus sebuah ReplicaSet beserta dengan Pod-nya, gunakan [`kubectl delete`](/docs/reference/generated/kubectl/kubectl-commands#delete). [_Garbage collector_](/id/docs/concepts/workloads/controllers/garbage-collection/) secara otomatis akan menghapus semua Pod dependen secara _default_. Ketika menggunakan REST API atau _library_ `client-go`, kamu harus mengatur nilai `propagationPolicy` menjadi `Background` atau `Foreground` pada opsi -d. Sebagai contoh: @@ -243,7 +243,7 @@ curl -X DELETE 'localhost:8080/apis/extensions/v1beta1/namespaces/default/repli ``` Ketika ReplicaSet yang asli telah dihapus, kamu dapat membuat ReplicaSet baru untuk menggantikannya. Selama _field_ `.spec.selector` yang lama dan baru memilki nilai yang sama, maka ReplicaSet baru akan mengadopsi Pod lama namun tidak serta merta membuat Pod yang sudah ada sama dan sesuai dengan templat Pod yang baru. -Untuk memperbarui Pod dengan _spec_ baru dapat menggunakan [Deployment](/docs/concepts/workloads/controllers/deployment/#creating-a-deployment) karena ReplicaSet tidak mendukung pembaruan secara langsung. 
+Untuk memperbarui Pod dengan _spec_ baru dapat menggunakan [Deployment](/id/docs/concepts/workloads/controllers/deployment/#creating-a-deployment) karena ReplicaSet tidak mendukung pembaruan secara langsung. ### Mengisolasi Pod dari ReplicaSet @@ -275,7 +275,7 @@ kubectl autoscale rs frontend --max=10 ### Deployment (direkomendasikan) -[`Deployment`](/docs/concepts/workloads/controllers/deployment/) adalah sebuah objek yang bisa memiliki ReplicaSet dan memperbarui ReplicaSet dan Pod-nya melalui _rolling update_ deklaratif dan _server-side_. +[`Deployment`](/id/docs/concepts/workloads/controllers/deployment/) adalah sebuah objek yang bisa memiliki ReplicaSet dan memperbarui ReplicaSet dan Pod-nya melalui _rolling update_ deklaratif dan _server-side_. Walaupun ReplicaSet dapat digunakan secara independen, seringkali ReplicaSet digunakan oleh Deployments sebagai mekanisme untuk mengorkestrasi pembuatan, penghapusan dan pembaruan Pod. Ketika kamu menggunakan Deployments kamu tidak perlu khawatir akan pengaturan dari ReplicaSet yang dibuat. Deployments memiliki dan mengatur ReplicaSet-nya sendiri. Maka dari itu penggunaan Deployments direkomendasikan jika kamu menginginkan ReplicaSet. @@ -289,9 +289,9 @@ Gunakan [`Job`](/docs/concepts/jobs/run-to-completion-finite-workloads/) alih-al ### DaemonSet -Gunakan [`DaemonSet`](/docs/concepts/workloads/controllers/daemonset/) alih-alih ReplicaSet untuk Pod yang menyediakan fungsi pada level mesin, seperti _monitoring_ mesin atau _logging_ mesin. Pod ini memiliki waktu hidup yang bergantung terhadap waktu hidup mesin: Pod perlu untuk berjalan pada mesin sebelum Pod lain dijalankan, dan aman untuk diterminasi ketika mesin siap untuk di-_reboot_ atau dimatikan. +Gunakan [`DaemonSet`](/id/docs/concepts/workloads/controllers/daemonset/) alih-alih ReplicaSet untuk Pod yang menyediakan fungsi pada level mesin, seperti _monitoring_ mesin atau _logging_ mesin. Pod ini memiliki waktu hidup yang bergantung terhadap waktu hidup mesin: Pod perlu untuk berjalan pada mesin sebelum Pod lain dijalankan, dan aman untuk diterminasi ketika mesin siap untuk di-_reboot_ atau dimatikan. ### ReplicationController -ReplicaSet adalah suksesor dari [_ReplicationControllers_](/docs/concepts/workloads/controllers/replicationcontroller/). Keduanya memenuhi tujuan yang sama dan memiliki perilaku yang serupa, kecuali bahwa ReplicationController tidak mendukung kebutuhan selektor _set-based_ seperti yang dijelaskan pada [panduan penggunaan label](/docs/concepts/overview/working-with-objects/labels/#label-selectors). Pada kasus tersebut, ReplicaSet lebih direkomendasikan dibandingkan ReplicationController. +ReplicaSet adalah suksesor dari [_ReplicationControllers_](/id/docs/concepts/workloads/controllers/replicationcontroller/). Keduanya memenuhi tujuan yang sama dan memiliki perilaku yang serupa, kecuali bahwa ReplicationController tidak mendukung kebutuhan selektor _set-based_ seperti yang dijelaskan pada [panduan penggunaan label](/id/docs/concepts/overview/working-with-objects/labels/#label-selectors). Pada kasus tersebut, ReplicaSet lebih direkomendasikan dibandingkan ReplicationController. 
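For comparison with the `matchLabels` selector quoted from `frontend.yaml`, here is a minimal sketch of a ReplicaSet that also adds a set-based `matchExpressions` requirement, the selector style that ReplicationController does not support; the guestbook image and label values are illustrative.

```yaml
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: frontend
  labels:
    app: guestbook
    tier: frontend
spec:
  replicas: 3
  selector:
    matchLabels:
      tier: frontend
    matchExpressions:                              # set-based requirement (ANDed with matchLabels)
    - {key: tier, operator: In, values: [frontend]}
  template:
    metadata:
      labels:
        tier: frontend                             # must satisfy the selector above
    spec:
      containers:
      - name: php-redis
        image: gcr.io/google_samples/gb-frontend:v3   # illustrative image
```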
diff --git a/content/id/docs/concepts/workloads/controllers/replicationcontroller.md b/content/id/docs/concepts/workloads/controllers/replicationcontroller.md index f828ff9c64f96..48ec718a6df67 100644 --- a/content/id/docs/concepts/workloads/controllers/replicationcontroller.md +++ b/content/id/docs/concepts/workloads/controllers/replicationcontroller.md @@ -13,7 +13,7 @@ weight: 20 {{< note >}} -[`Deployment`](/docs/concepts/workloads/controllers/deployment/) yang mengonfigurasi [`ReplicaSet`](/docs/concepts/workloads/controllers/replicaset/) sekarang menjadi cara yang direkomendasikan untuk melakukan replikasi. +[`Deployment`](/id/docs/concepts/workloads/controllers/deployment/) yang mengonfigurasi [`ReplicaSet`](/id/docs/concepts/workloads/controllers/replicaset/) sekarang menjadi cara yang direkomendasikan untuk melakukan replikasi. {{< /note >}} Sebuah _ReplicationController_ memastikan bahwa terdapat sejumlah Pod yang sedang berjalan dalam suatu waktu tertentu. Dengan kata lain, ReplicationController memastikan bahwa sebuah Pod atau sebuah kumpulan Pod yang homogen selalu berjalan dan tersedia. @@ -101,7 +101,7 @@ Pada perintah di atas, selektor yang dimaksud adalah selektor yang sama dengan y Seperti semua konfigurasi Kubernetes lainnya, sebuah ReplicationController membutuhkan _field_ `apiVersion`, `kind`, dan `metadata`. -Untuk informasi umum mengenai berkas konfigurasi, kamu dapat melihat [pengaturan objek](/docs/concepts/overview/working-with-objects/object-management/). +Untuk informasi umum mengenai berkas konfigurasi, kamu dapat melihat [pengaturan objek](/id/docs/concepts/overview/working-with-objects/object-management/). Sebuah ReplicationController juga membutuhkan [bagian `.spec`](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). @@ -109,11 +109,11 @@ Sebuah ReplicationController juga membutuhkan [bagian `.spec`](https://git.k8s.i `.spec.template` adalah satu-satunya _field_ yang diwajibkan pada `.spec`. -`.spec.template` adalah sebuah [templat Pod](/docs/concepts/workloads/pods/pod-overview/#pod-templates). Ia memiliki skema yang sama persis dengan sebuah [Pod](/docs/concepts/workloads/pods/pod/), namun dapat berbentuk _nested_ dan tidak memiliki _field_ `apiVersion` ataupun `kind`. +`.spec.template` adalah sebuah [templat Pod](/id/docs/concepts/workloads/pods/pod-overview/#pod-templates). Ia memiliki skema yang sama persis dengan sebuah [Pod](/id/docs/concepts/workloads/pods/pod/), namun dapat berbentuk _nested_ dan tidak memiliki _field_ `apiVersion` ataupun `kind`. Selain _field-field_ yang diwajibkan untuk sebuah Pod, templat Pod pada ReplicationController harus menentukan label dan kebijakan pengulangan kembali yang tepat. Untuk label, pastikan untuk tidak tumpang tindih dengan kontroler lain. Lihat [selektor pod](#selektor-pod). -Nilai yang diperbolehkan untuk [`.spec.template.spec.restartPolicy`](/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy) hanyalah `Always`, yaitu nilai bawaan jika tidak ditentukan. +Nilai yang diperbolehkan untuk [`.spec.template.spec.restartPolicy`](/id/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy) hanyalah `Always`, yaitu nilai bawaan jika tidak ditentukan. Untuk pengulangan kembali dari sebuah kontainer lokal, ReplicationController mendelegasikannya ke agen pada Node, contohnya [Kubelet](/docs/admin/kubelet/) atau Docker. @@ -123,7 +123,7 @@ ReplicationController itu sendiri dapat memiliki label (`.metadata.labels`). 
Bia ### Selektor Pod -_Field_ `.spec.selector` adalah sebuah [selektor label](/docs/concepts/overview/working-with-objects/labels/#label-selectors). Sebuah ReplicationController mengatur semua Pod dengan label yang sesuai dengan nilai selektor tersebut. Ia tidak membedakan antara Pod yang ia buat atau hapus atau Pod yang dibuat atau dihapus oleh orang atau proses lain. Hal ini memungkinkan ReplicationController untuk digantikan tanpa memengaruhi Pod-Pod yang sedang berjalan. +_Field_ `.spec.selector` adalah sebuah [selektor label](/id/docs/concepts/overview/working-with-objects/labels/#label-selectors). Sebuah ReplicationController mengatur semua Pod dengan label yang sesuai dengan nilai selektor tersebut. Ia tidak membedakan antara Pod yang ia buat atau hapus atau Pod yang dibuat atau dihapus oleh orang atau proses lain. Hal ini memungkinkan ReplicationController untuk digantikan tanpa memengaruhi Pod-Pod yang sedang berjalan. Jika ditentukan, `.spec.template.metadata.labels` harus memiliki nilai yang sama dengan `.spec.selector`, atau akan ditolak oleh API. Jika `.spec.selector` tidak ditentukan, maka akan menggunakan nilai bawaan yaitu `.spec.template.metadata.labels`. @@ -216,13 +216,13 @@ ReplicationController adalah sebuah sumber daya _top-level_ pada REST API Kubern ### ReplicaSet -[`ReplicaSet`](/docs/concepts/workloads/controllers/replicaset/) adalah kelanjutan dari ReplicationController yang mendukung selektor [selektor label _set-based_](/docs/concepts/overview/working-with-objects/labels/#set-based-requirement) yang baru. Umumnya digunakan oleh [`Deployment`](/docs/concepts/workloads/controllers/deployment/) sebagai mekanisme untuk mengorkestrasi pembuatan, penghapusan, dan pembaruan Pod. +[`ReplicaSet`](/id/docs/concepts/workloads/controllers/replicaset/) adalah kelanjutan dari ReplicationController yang mendukung selektor [selektor label _set-based_](/id/docs/concepts/overview/working-with-objects/labels/#set-based-requirement) yang baru. Umumnya digunakan oleh [`Deployment`](/id/docs/concepts/workloads/controllers/deployment/) sebagai mekanisme untuk mengorkestrasi pembuatan, penghapusan, dan pembaruan Pod. Perhatikan bahwa kami merekomendasikan untuk menggunakan Deployment sebagai ganti dari menggunakan ReplicaSet secara langsung, kecuali jika kamu membutuhkan orkestrasi pembaruan khusus atau tidak membutuhkan pembaruan sama sekali. ### Deployment (Direkomendasikan) -[`Deployment`](/docs/concepts/workloads/controllers/deployment/) adalah objek API tingkat tinggi yang memperbarui ReplicaSet dan Pod-Pod di bawahnya yang mirip dengan cara kerja `kubectl rolling-update`. Deployment direkomendasikan jika kamu menginginkan fungsionalitas dari pembaruan bergulir ini, karena tidak seperti `kubectl rolling-update`, Deployment memiliki sifat deklaratif, _server-side_, dan memiliki beberapa fitur tambahan lainnya. +[`Deployment`](/id/docs/concepts/workloads/controllers/deployment/) adalah objek API tingkat tinggi yang memperbarui ReplicaSet dan Pod-Pod di bawahnya yang mirip dengan cara kerja `kubectl rolling-update`. Deployment direkomendasikan jika kamu menginginkan fungsionalitas dari pembaruan bergulir ini, karena tidak seperti `kubectl rolling-update`, Deployment memiliki sifat deklaratif, _server-side_, dan memiliki beberapa fitur tambahan lainnya. 
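A minimal sketch tying together the ReplicationController fields described above (`.spec.template`, `.spec.selector`, and the `Always` restart policy); the `nginx` name and image are illustrative placeholders.

```yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: nginx
spec:
  replicas: 3
  selector:              # defaults to .spec.template.metadata.labels when omitted
    app: nginx
  template:
    metadata:
      labels:
        app: nginx       # must match .spec.selector
    spec:
      restartPolicy: Always   # the only allowed value (and the default) for an RC's Pods
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
```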
### Pod sederhana @@ -234,7 +234,7 @@ Gunakan [`Job`](/docs/concepts/jobs/run-to-completion-finite-workloads/) sebagai ### DaemonSet -Gunakan [`DaemonSet`](/docs/concepts/workloads/controllers/daemonset/) sebagai ganti ReplicationController untuk Pod-Pod yang menyediakan fungsi pada level mesin, seperti pengamatan mesin atau pencatatan mesin. Pod-Pod ini memiliki waktu hidup yang bergantung dengan waktu hidup mesin: Pod butuh untuk dijalankan di mesin sebelum Pod-Pod lainnya dimulai, dan aman untuk diterminasi ketika mesin sudah siap untuk dinyalakan ulang atau dimatikan. +Gunakan [`DaemonSet`](/id/docs/concepts/workloads/controllers/daemonset/) sebagai ganti ReplicationController untuk Pod-Pod yang menyediakan fungsi pada level mesin, seperti pengamatan mesin atau pencatatan mesin. Pod-Pod ini memiliki waktu hidup yang bergantung dengan waktu hidup mesin: Pod butuh untuk dijalankan di mesin sebelum Pod-Pod lainnya dimulai, dan aman untuk diterminasi ketika mesin sudah siap untuk dinyalakan ulang atau dimatikan. ## Informasi lanjutan diff --git a/content/id/docs/concepts/workloads/controllers/statefulset.md b/content/id/docs/concepts/workloads/controllers/statefulset.md index 9d12de91ddc47..aa99acd6e6abb 100644 --- a/content/id/docs/concepts/workloads/controllers/statefulset.md +++ b/content/id/docs/concepts/workloads/controllers/statefulset.md @@ -31,8 +31,8 @@ Stabil dalam poin-poin di atas memiliki arti yang sama dengan persisten pada Pod saat dilakukan _(re)scheduling_. Jika suatu aplikasi tidak membutuhkan identitas yang stabil atau _deployment_ yang memiliki urutan, penghapusan, atau mekanisme _scaling_, kamu harus melakukan _deploy_ aplikasi dengan _controller_ yang menyediakan -replika _stateless_. _Controller_ seperti [Deployment](/docs/concepts/workloads/controllers/deployment/) atau -[ReplicaSet](/docs/concepts/workloads/controllers/replicaset/) akan lebih sesuai dengan kebutuhan kamu. +replika _stateless_. _Controller_ seperti [Deployment](/id/docs/concepts/workloads/controllers/deployment/) atau +[ReplicaSet](/id/docs/concepts/workloads/controllers/replicaset/) akan lebih sesuai dengan kebutuhan kamu. ## Keterbatasan @@ -40,7 +40,7 @@ replika _stateless_. _Controller_ seperti [Deployment](/docs/concepts/workloads pada Kubernetes rilis sebelum versi 1.5. * Penyimpanan untuk sebuah Pod harus terlebih dahulu di-_provision_ dengan menggunakan sebuah [Provisioner PersistentVolume](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/persistent-volume-provisioning/README.md) berdasarkan `storage class` yang dispesifikasikan, atau sudah ditentukan sebelumnya oleh administrator. * Menghapus dan/atau _scaling_ sebuah StatefulSet *tidak akan* menghapus volume yang berkaitan dengan StatefulSet tersebut. Hal ini dilakukan untuk menjamin data yang disimpan, yang secara umum dinilai lebih berhaga dibandingkan dengan mekanisme penghapusan data secara otomatis pada sumber daya terkait. -* StatefulSet saat ini membutuhkan sebuah [Headless Service](/docs/concepts/services-networking/service/#headless-services) yang nantinya akan bertanggung jawab terhadap pada identitas jaringan pada Pod. Kamulah yang bertanggung jawab untuk membuat Service tersebut. +* StatefulSet saat ini membutuhkan sebuah [Headless Service](/id/docs/concepts/services-networking/service/#headless-services) yang nantinya akan bertanggung jawab terhadap pada identitas jaringan pada Pod. Kamulah yang bertanggung jawab untuk membuat Service tersebut. 
* StatefulSet tidak menjamin terminasi Pod ketika sebuah StatefulSet dihapus. Untuk mendapatkan terminasi Pod yang terurut dan _graceful_ pada StatefulSet, kita dapat melakukan _scale down_ Pod ke 0 sebelum penghapusan. * Ketika menggunakan [Rolling Update](#mekanisme-strategi-update-rolling-update) dengan [Kebijakan Manajemen Pod](#kebijakan-manajemen-pod) (`OrderedReady`) secara default, @@ -52,7 +52,7 @@ Contoh di bawah ini akna menunjukkan komponen-komponen penyusun StatefulSet. * Sebuah Service Headless, dengan nama nginx, digunakan untuk mengontrol domain jaringan. * StatefulSet, dengan nama web, memiliki Spek yang mengindikasikan terdapat 3 replika Container yang akan dihidupkan pada Pod yang unik. -* _Field_ `volumeClaimTemplates` akan menyediakan penyimpanan stabil menggunakan [PersistentVolume](/docs/concepts/storage/persistent-volumes/) yang di-_provision_ oleh sebuah Provisioner PersistentVolume. +* _Field_ `volumeClaimTemplates` akan menyediakan penyimpanan stabil menggunakan [PersistentVolume](/id/docs/concepts/storage/persistent-volumes/) yang di-_provision_ oleh sebuah Provisioner PersistentVolume. ```yaml apiVersion: v1 @@ -124,7 +124,7 @@ Setiap Pod di dalam StatefulSet memiliki _hostname_ diturunkan dari nama Satetul serta ordinal Pod tersebut. Pola pada _hostname_ yang terbentuk adalah `$(statefulset name)-$(ordinal)`. Contoh di atas akan menghasilkan tiga Pod dengan nama `web-0,web-1,web-2`. -Sebuah StatefulSet dapat menggunakan sebuah [Service Headless](/docs/concepts/services-networking/service/#headless-services) +Sebuah StatefulSet dapat menggunakan sebuah [Service Headless](/id/docs/concepts/services-networking/service/#headless-services) untuk mengontrol domain dari Pod yang ada. Domain yang diatur oleh Service ini memiliki format: `$(service name).$(namespace).svc.cluster.local`, dimana "cluster.local" merupakan domain klaster. @@ -133,7 +133,7 @@ Seiring dibuatnya setiap Pod, Pod tersebut akan memiliki subdomain DNS-nya sendi _field_ `serviceName` pada StatefulSet. Seperti sudah disebutkan di dalam bagian [keterbatasan](#keterbatasan), kamulah yang bertanggung jawab -untuk membuat [Service Headless](/docs/concepts/services-networking/service/#headless-services) +untuk membuat [Service Headless](/id/docs/concepts/services-networking/service/#headless-services) yang bertanggung jawab terhadap identitas jaringan pada Pod. Di sini terdapat beberapa contoh penggunaan Domain Klaster, nama Service, @@ -147,12 +147,12 @@ Domain Klaster | Service (ns/nama) | StatefulSet (ns/nama) | Domain StatefulSet {{< note >}} Domain klaster akan diatur menjadi `cluster.local` kecuali -[nilainya dikonfigurasi](/docs/concepts/services-networking/dns-pod-service/). +[nilainya dikonfigurasi](/id/docs/concepts/services-networking/dns-pod-service/). {{< /note >}} ### Penyimpanan Stabil -Kubernetes membuat sebuah [PersistentVolume](/docs/concepts/storage/persistent-volumes/) untuk setiap +Kubernetes membuat sebuah [PersistentVolume](/id/docs/concepts/storage/persistent-volumes/) untuk setiap VolumeClaimTemplate. Pada contoh nginx di atas, setiap Pod akan menerima sebuah PersistentVolume dengan StorageClass `my-storage-class` dan penyimpanan senilai 1 Gib yang sudah di-_provisioning_. Jika tidak ada StorageClass yang dispesifikasikan, maka StorageClass _default_ akan digunakan. 
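As a sketch of the `volumeClaimTemplates` stanza that the provisioning behaviour above refers to (the claim name `www` is an assumption; the StorageClass and size mirror the nginx example):

```yaml
volumeClaimTemplates:
- metadata:
    name: www                            # each replica gets its own claim, e.g. www-web-0
  spec:
    accessModes: ["ReadWriteOnce"]
    storageClassName: my-storage-class   # omit this field to fall back to the default StorageClass
    resources:
      requests:
        storage: 1Gi
```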
Ketika sebuah Pod dilakukan _(re)schedule_ diff --git a/content/id/docs/concepts/workloads/controllers/ttlafterfinished.md b/content/id/docs/concepts/workloads/controllers/ttlafterfinished.md index f2c232faf2b74..0e1b36ccc5f6f 100644 --- a/content/id/docs/concepts/workloads/controllers/ttlafterfinished.md +++ b/content/id/docs/concepts/workloads/controllers/ttlafterfinished.md @@ -10,7 +10,7 @@ weight: 65 Pengendali TTL menyediakan mekanisme TTL yang membatasi umur dari suatu objek sumber daya yang telah selesai digunakan. Pengendali TTL untuk saat ini hanya menangani -[Jobs](/docs/concepts/workloads/controllers/jobs-run-to-completion/), +[Jobs](/id/docs/concepts/workloads/controllers/jobs-run-to-completion/), dan nantinya bisa saja digunakan untuk sumber daya lain yang telah selesai digunakan misalnya saja Pod atau sumber daya khusus (_custom resource_) lainnya. @@ -32,7 +32,7 @@ Pengendali TTL untuk saat ini hanya mendukung Job. Sebuah operator klaster dapat menggunakan fitur ini untuk membersihkan Job yang telah dieksekusi (baik `Complete` atau `Failed`) secara otomatis dengan menentukan _field_ `.spec.ttlSecondsAfterFinished` pada Job, seperti yang tertera di -[contoh](/docs/concepts/workloads/controllers/jobs-run-to-completion/#clean-up-finished-jobs-automatically). +[contoh](/id/docs/concepts/workloads/controllers/jobs-run-to-completion/#clean-up-finished-jobs-automatically). Pengendali TTL akan berasumsi bahwa sebuah sumber daya dapat dihapus apabila TTL dari sumber daya tersebut telah habis. Proses dihapusnya sumber daya ini dilakukan secara berantai, dimana sumber daya lain yang @@ -83,7 +83,7 @@ Perhatikan bahwa hal ini dapat terjadi apabila TTL diaktifkan dengan nilai selai ## {{% heading "whatsnext" %}} -[Membersikan Job secara Otomatis](/docs/concepts/workloads/controllers/jobs-run-to-completion/#clean-up-finished-jobs-automatically) +[Membersikan Job secara Otomatis](/id/docs/concepts/workloads/controllers/jobs-run-to-completion/#clean-up-finished-jobs-automatically) [Dokumentasi Rancangan](https://github.com/kubernetes/enhancements/blob/master/keps/sig-apps/0026-ttl-after-finish.md) diff --git a/content/id/docs/concepts/workloads/pods/disruptions.md b/content/id/docs/concepts/workloads/pods/disruptions.md index 1adde6c949280..7a09eed3a502f 100644 --- a/content/id/docs/concepts/workloads/pods/disruptions.md +++ b/content/id/docs/concepts/workloads/pods/disruptions.md @@ -79,7 +79,7 @@ Jumlah Pod yang "diharapkan" dihitung dari `.spec.replicas` dari pengendali Pod PDB tidak dapat mencegah [disrupsi yang tidak disengaja](#disrupsi-yang-disengaja-dan-tidak-disengaja), tapi disrupsi ini akan dihitung terhadap bujet PDB. -Pod yang dihapus atau tidak tersetia dikarenakan pembaruan bertahap juga dihitung terhadap bujet PDB, tetapi pengendali (seperti Deployment dan StatefulSet) tidak dibatasi oleh PDB ketika melakukan pembaruan bertahap; Penanganan kerusakan saat pembaruan aplikasi dikonfigurasikan pada spesifikasi pengendali. (Pelajari tentang [memperbarui sebuah Deployment](/docs/concepts/workloads/controllers/deployment/#updating-a-deployment).) +Pod yang dihapus atau tidak tersetia dikarenakan pembaruan bertahap juga dihitung terhadap bujet PDB, tetapi pengendali (seperti Deployment dan StatefulSet) tidak dibatasi oleh PDB ketika melakukan pembaruan bertahap; Penanganan kerusakan saat pembaruan aplikasi dikonfigurasikan pada spesifikasi pengendali. (Pelajari tentang [memperbarui sebuah Deployment](/id/docs/concepts/workloads/controllers/deployment/#updating-a-deployment).) 
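A minimal PodDisruptionBudget sketch for the budget being discussed; the name, label, and `minAvailable` value are illustrative assumptions, and `policy/v1beta1` is the API version in use for this documentation era:

```yaml
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: web-pdb              # hypothetical name
spec:
  minAvailable: 2            # voluntary evictions are refused if fewer than 2 matching Pods would remain
  selector:
    matchLabels:
      app: web               # assumed label on the Pods this budget protects
```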
Saat sebuah Pod diusir menggunakan _eviction API_, Pod tersebut akan dihapus secara _graceful_ (lihat `terminationGracePeriodSeconds` pada [PodSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#Podspec-v1-core).)) diff --git a/content/id/docs/concepts/workloads/pods/ephemeral-containers.md b/content/id/docs/concepts/workloads/pods/ephemeral-containers.md index 45154caf25836..e952bdd19bec5 100644 --- a/content/id/docs/concepts/workloads/pods/ephemeral-containers.md +++ b/content/id/docs/concepts/workloads/pods/ephemeral-containers.md @@ -80,7 +80,7 @@ pun, sehingga sulit untuk memecahkan masalah _image distroless_ dengan menggunakan `kubectl exec` saja. Saat menggunakan kontainer sementara, akan sangat membantu untuk mengaktifkan -[_process namespace sharing_](/docs/tasks/configure-pod-container/share-process-namespace/) +[_process namespace sharing_](/id/docs/tasks/configure-pod-container/share-process-namespace/) sehingga kamu dapat melihat proses pada kontainer lain. ### Contoh diff --git a/content/id/docs/concepts/workloads/pods/init-containers.md b/content/id/docs/concepts/workloads/pods/init-containers.md index 91807fdaf6bd0..9cd208fbc8b83 100644 --- a/content/id/docs/concepts/workloads/pods/init-containers.md +++ b/content/id/docs/concepts/workloads/pods/init-containers.md @@ -14,7 +14,7 @@ Fitur ini telah keluar dari trek Beta sejak versi 1.6. Init Container dapat disp ## Memahami Init Container -Sebuah [Pod](/docs/concepts/workloads/pods/pod-overview/) dapat memiliki beberapa Container yang berjalan di dalamnya, dan dapat juga memiliki satu atau lebih Init Container, yang akan berjalan sebelum Container aplikasi dijalankan. +Sebuah [Pod](/id/docs/concepts/workloads/pods/pod-overview/) dapat memiliki beberapa Container yang berjalan di dalamnya, dan dapat juga memiliki satu atau lebih Init Container, yang akan berjalan sebelum Container aplikasi dijalankan. Init Container sama saja seperti Container biasa, kecuali: @@ -59,7 +59,7 @@ Berikut beberapa contoh kasus penggunaan Init Container: * Mengklon sebuah _git repository_ ke dalam sebuah _volume_. * Menaruh nilai-nilai tertentu ke dalam sebuah _file_ konfigurasi dan menjalankan peralatan _template_ untuk membuat _file_ konfigurasi secara dinamis untuk Container aplikasi utama. Misalnya, untuk menaruh nilai POD_IP ke dalam sebuah konfigurasi dan membuat konfigurasi aplikasi utama menggunakan Jinja. -Contoh-contoh penggunaan yang lebih detail dapat dilihat pada [dokumentasi StatefulSet](/docs/concepts/workloads/controllers/statefulset/) dan [petunjuk Produksi Pod](/docs/tasks/configure-pod-container/configure-pod-initialization/). +Contoh-contoh penggunaan yang lebih detail dapat dilihat pada [dokumentasi StatefulSet](/id/docs/concepts/workloads/controllers/statefulset/) dan [petunjuk Produksi Pod](/docs/tasks/configure-pod-container/configure-pod-initialization/). 
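A sketch of the "clone a git repository into a volume" use case listed above; the Pod name, the `alpine/git` image, and the repository URL are assumptions chosen for illustration:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: app-with-cloned-content       # hypothetical name
spec:
  initContainers:
  - name: clone-repo                  # runs to completion before the app container starts
    image: alpine/git                 # assumed image whose entrypoint is `git`
    args: ["clone", "--depth=1", "https://github.com/kubernetes/examples.git", "/repo"]
    volumeMounts:
    - name: repo
      mountPath: /repo
  containers:
  - name: app
    image: nginx
    volumeMounts:
    - name: repo
      mountPath: /usr/share/nginx/html
  volumes:
  - name: repo
    emptyDir: {}                      # shared scratch volume that lives as long as the Pod
```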
### Menggunakan Init Container diff --git a/content/id/docs/concepts/workloads/pods/pod-lifecycle.md b/content/id/docs/concepts/workloads/pods/pod-lifecycle.md index 8dac6706a7f7c..fdb3e7b71cc29 100644 --- a/content/id/docs/concepts/workloads/pods/pod-lifecycle.md +++ b/content/id/docs/concepts/workloads/pods/pod-lifecycle.md @@ -52,7 +52,7 @@ Suatu Pod memiliki sebuah PodStatus, yang merupakan _array_ dari [PodConditions] * `PodScheduled`: Pod telah dijadwalkan masuk ke node; * `Ready`: Pod sudah mampu menerima _request_ masuk dan seharusnya sudah ditambahkan ke daftar pembagian beban kerja untuk servis yang sama; - * `Initialized`: Semua [init containers](/docs/concepts/workloads/pods/init-containers) telah berjalan sempurna. + * `Initialized`: Semua [init containers](/id/docs/concepts/workloads/pods/init-containers) telah berjalan sempurna. * `Unschedulable`: _scheduler_ belum dapat menjadwalkan Pod saat ini, sebagai contoh karena kekurangan _resources_ atau ada batasan-batasan lain. * `ContainersReady`: Semua kontainer di dalam Pod telah siap. @@ -191,7 +191,7 @@ status: ... ``` -Kondisi Pod yang baru harus memenuhi [format label](/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) pada Kubernetes. +Kondisi Pod yang baru harus memenuhi [format label](/id/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) pada Kubernetes. Sejak perintah `kubectl patch` belum mendukung perubahan status objek, kondisi Pod yang baru harus mengubah melalui aksi `PATCH` dengan menggunakan salah satu dari [KubeClient _libraries_](/docs/reference/using-api/client-libraries/). @@ -232,13 +232,13 @@ Tiga tipe pengontrol yang tersedia yaitu: sebagai contoh, penghitungan dalam jumlah banyak. Jobs hanyak cocok untuk Pod dengan `restartPolicy` yang bernilai OnFailure atau Never. -- Menggunakan sebuah [ReplicationController](/docs/concepts/workloads/controllers/replicationcontroller/), - [ReplicaSet](/docs/concepts/workloads/controllers/replicaset/), atau - [Deployment](/docs/concepts/workloads/controllers/deployment/) untuk Pod yang tidak diharapkan untuk berakhir, +- Menggunakan sebuah [ReplicationController](/id/docs/concepts/workloads/controllers/replicationcontroller/), + [ReplicaSet](/id/docs/concepts/workloads/controllers/replicaset/), atau + [Deployment](/id/docs/concepts/workloads/controllers/deployment/) untuk Pod yang tidak diharapkan untuk berakhir, sebagai contoh, _web servers_. ReplicationControllers hanya cocok digunakan pada Pod dengan `restartPolicy` yang bernilai Always. -- Menggunakan sebuah [DaemonSet](/docs/concepts/workloads/controllers/daemonset/) untuk Pod yang akan berjalan +- Menggunakan sebuah [DaemonSet](/id/docs/concepts/workloads/controllers/daemonset/) untuk Pod yang akan berjalan hanya satu untuk setiap mesin, karena menyediakan servis yang spesifik untuk suatu mesin. @@ -346,7 +346,7 @@ spec: * Dapatkan pengalaman langsung mengenai [pengaturan _liveness_ dan _readiness probes_](/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/). -* Pelajari lebih lanjut mengenai [_lifecycle hooks_ pada kontainer](/docs/concepts/containers/container-lifecycle-hooks/). +* Pelajari lebih lanjut mengenai [_lifecycle hooks_ pada kontainer](/id/docs/concepts/containers/container-lifecycle-hooks/). 
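The restart policy and the probes discussed above come together in a single Pod spec. A minimal sketch, where the image and the `/healthz` path are assumptions about the application rather than requirements:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: probe-demo               # hypothetical name
spec:
  restartPolicy: Always          # the default; OnFailure and Never are the other options
  containers:
  - name: app
    image: nginx
    livenessProbe:               # a failing liveness probe makes the kubelet restart the container
      httpGet:
        path: /healthz           # assumed health endpoint exposed by the application
        port: 80
      initialDelaySeconds: 3
      periodSeconds: 10
    readinessProbe:              # a failing readiness probe only removes the Pod from Service endpoints
      httpGet:
        path: /
        port: 80
      periodSeconds: 5
```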
diff --git a/content/id/docs/concepts/workloads/pods/pod-overview.md b/content/id/docs/concepts/workloads/pods/pod-overview.md index 0e9593e0d1cf9..f427358999bbb 100644 --- a/content/id/docs/concepts/workloads/pods/pod-overview.md +++ b/content/id/docs/concepts/workloads/pods/pod-overview.md @@ -47,7 +47,7 @@ Setiap *Pod* diberikan sebuah alamat *IP* unik. Setiap kontainer di dalam *Pod* #### Penyimpanan -*Pod* dapat menentukan penyimpanan bersama yaitu *volumes*. Semua kontainer di dalam *Pod* dapat mengakses *volumes* ini, mengizinkan kontainer untuk berbagi data. *Volumes* juga memungkinkan data di *Pod* untuk bertahan jika salah satu kontainer perlu melakukan proses *restart*. Lihat *[Volumes](/docs/concepts/storage/volumes/)* untuk informasi lebih lanjut bagaimana Kubernetes mengimplementasikan penyimpanan di dalam *Pod*. +*Pod* dapat menentukan penyimpanan bersama yaitu *volumes*. Semua kontainer di dalam *Pod* dapat mengakses *volumes* ini, mengizinkan kontainer untuk berbagi data. *Volumes* juga memungkinkan data di *Pod* untuk bertahan jika salah satu kontainer perlu melakukan proses *restart*. Lihat *[Volumes](/id/docs/concepts/storage/volumes/)* untuk informasi lebih lanjut bagaimana Kubernetes mengimplementasikan penyimpanan di dalam *Pod*. ## Bekerja dengan Pod @@ -66,16 +66,16 @@ Kontroler dapat membuat dan mengelola banyak *Pod* untuk kamu, menangani replika Beberapa contoh kontroler yang berisi satu atau lebih *Pod* meliputi: -* [Deployment](/docs/concepts/workloads/controllers/deployment/) -* [StatefulSet](/docs/concepts/workloads/controllers/statefulset/) -* [DaemonSet](/docs/concepts/workloads/controllers/daemonset/) +* [Deployment](/id/docs/concepts/workloads/controllers/deployment/) +* [StatefulSet](/id/docs/concepts/workloads/controllers/statefulset/) +* [DaemonSet](/id/docs/concepts/workloads/controllers/daemonset/) Secara umum, kontroler menggunakan templat *Pod* yang kamu sediakan untuk membuat *Pod*. ## Templat Pod Templat *Pod* adalah spesifikasi dari *Pod* yang termasuk di dalam objek lain seperti -[Replication Controllers](/docs/concepts/workloads/controllers/replicationcontroller/), [Jobs](/docs/concepts/jobs/run-to-completion-finite-workloads/), dan [DaemonSets](/docs/concepts/workloads/controllers/daemonset/). Kontroler menggunakan templat *Pod* untuk membuat *Pod*. +[Replication Controllers](/id/docs/concepts/workloads/controllers/replicationcontroller/), [Jobs](/docs/concepts/jobs/run-to-completion-finite-workloads/), dan [DaemonSets](/id/docs/concepts/workloads/controllers/daemonset/). Kontroler menggunakan templat *Pod* untuk membuat *Pod*. Contoh di bawah merupakan manifestasi sederhana untuk *Pod* yang berisi kontainer yang membuat sebuah pesan. 
@@ -102,6 +102,6 @@ Perubahan yang terjadi pada templat atau berganti ke templat yang baru tidak mem ## {{% heading "whatsnext" %}} * Pelajari lebih lanjut tentang perilaku *Pod*: - * [Terminasi Pod](/docs/concepts/workloads/pods/pod/#termination-of-pods) - * [Lifecycle Pod](/docs/concepts/workloads/pods/pod-lifecycle/) + * [Terminasi Pod](/id/docs/concepts/workloads/pods/pod/#termination-of-pods) + * [Lifecycle Pod](/id/docs/concepts/workloads/pods/pod-lifecycle/) diff --git a/content/id/docs/concepts/workloads/pods/pod-topology-spread-constraints.md b/content/id/docs/concepts/workloads/pods/pod-topology-spread-constraints.md index e723edee9c7ad..f1d970a473614 100644 --- a/content/id/docs/concepts/workloads/pods/pod-topology-spread-constraints.md +++ b/content/id/docs/concepts/workloads/pods/pod-topology-spread-constraints.md @@ -1,10 +1,10 @@ --- title: Batasan Persebaran Topologi Pod -content_template: templates/concept +content_type: concept weight: 50 --- -{{% capture overview %}} + {{< feature-state for_k8s_version="v1.18" state="beta" >}} @@ -14,9 +14,9 @@ pada klaster yang ditetapkan sebagai _failure-domains_, seperti wilayah, zona, N topologi yang ditentukan oleh pengguna. Ini akan membantu untuk mencapai ketersediaan yang tinggi dan juga penggunaan sumber daya yang efisien. -{{% /capture %}} -{{% capture body %}} + + ## Persyaratan @@ -287,4 +287,4 @@ Pada versi 1.18, dimana fitur ini masih Beta, beberapa limitasi yang sudah diket - Pengurangan jumlah Deployment akan membuat ketidakseimbangan pada persebaran Pod. - Pod yang cocok pada _tainted_ Node akan dihargai. Lihat [Issue 80921](https://github.com/kubernetes/kubernetes/issues/80921) -{{% /capture %}} + diff --git a/content/id/docs/concepts/workloads/pods/pod.md b/content/id/docs/concepts/workloads/pods/pod.md index 3838ec56b5374..e25a3a9104caa 100644 --- a/content/id/docs/concepts/workloads/pods/pod.md +++ b/content/id/docs/concepts/workloads/pods/pod.md @@ -39,7 +39,7 @@ dan bisa saling berkomunikasi melalui `localhost`. Komunikasi tersebut mengunaka standar _inter-process communications_ (IPC) seperti SystemV semaphores atau POSIX shared memory. Kontainer pada Pod yang berbeda memiliki alamat IP yang berbeda dan tidak dapat berkomunikasi menggunakan IPC tanpa -[pengaturan khusus](/docs/concepts/policy/pod-security-policy/). Kontainer ini +[pengaturan khusus](/id/docs/concepts/policy/pod-security-policy/). Kontainer ini biasa berkomunikasi dengan yang lain menggunakan alamat IP setiap Pod. Aplikasi dalam suatu Pod juga memiliki akses ke {{< glossary_tooltip text="ruang penyimpanan" term_id="volume" >}} bersama, @@ -51,14 +51,14 @@ gabungan dari kontainer Docker yang berbagi _namespace_ dan ruang penyimpanan _f Layaknya aplikasi dengan kontainer, Pod dianggap sebagai entitas yang relatif tidak kekal (tidak bertahan lama). Seperti yang didiskusikan dalam -[siklus hidup Pod](/docs/concepts/workloads/pods/pod-lifecycle/), Pod dibuat, diberikan +[siklus hidup Pod](/id/docs/concepts/workloads/pods/pod-lifecycle/), Pod dibuat, diberikan ID unik (UID), dan dijadwalkan pada suatu mesin dan akan tetap disana hingga dihentikan (bergantung pada aturan _restart_) atau dihapus. Jika {{< glossary_tooltip text="mesin" term_id="node" >}} mati, maka semua Pod pada mesin tersebut akan dijadwalkan untuk dihapus, namun setelah suatu batas waktu. 
Suatu Pod tertentu (sesuai dengan ID unik) tidak akan dijadwalkan ulang ke mesin baru, namun akan digantikan oleh Pod yang identik, bahkan jika dibutuhkan bisa dengan nama yang sama, tapi dengan ID unik yang baru -(baca [_replication controller_](/docs/concepts/workloads/controllers/replicationcontroller/) +(baca [_replication controller_](/id/docs/concepts/workloads/controllers/replicationcontroller/) untuk info lebih lanjut) Ketika sesuatu dikatakan memiliki umur yang sama dengan Pod, misalnya saja ruang penyimpanan, @@ -96,7 +96,7 @@ dan Pod lain dalam jaringan yang sama. Kontainer dalam suatu Pod melihat _hostname_ sistem sebagai sesuatu yang sama dengan konfigurasi `name` pada Pod. Informasi lebih lanjut terdapat dibagian -[jaringan](/docs/concepts/cluster-administration/networking/). +[jaringan](/id/docs/concepts/cluster-administration/networking/). Sebagai tambahan dalam mendefinisikan kontainer aplikasi yang berjalan dalam Pod, Pod memberikan sepaket sistem penyimpanan bersama. Sistem penyimpanan memungkinkan @@ -153,10 +153,10 @@ kasus mesin sedang dalam pemeliharaan. Secara umum, pengguna tidak seharusnya butuh membuat Pod secara langsung. Mereka seharusnya selalu menggunakan pengontrol, sekalipun untuk yang tunggal, misalnya, -[_Deployment_](/docs/concepts/workloads/controllers/deployment/). Pengontrol +[_Deployment_](/id/docs/concepts/workloads/controllers/deployment/). Pengontrol menyediakan penyembuhan diri dengan ruang lingkup kelompok, begitu juga dengan pengelolaan replikasi dan penluncuran. -Pengontrol seperti [_StatefulSet_](/docs/concepts/workloads/controllers/statefulset.md) +Pengontrol seperti [_StatefulSet_](/id/docs/concepts/workloads/controllers/statefulset.md) bisa memberikan dukungan terhadap Pod yang _stateful_. Penggunaan API kolektif sebagai _user-facing primitive_ utama adalah hal yang @@ -202,7 +202,7 @@ bersama dengan masa tenggang. 1. (bersamaan dengan poin 3) Ketika Kubelet melihat Pod sudah ditandai sebagai "Terminating" karena waktu pada poin 2 sudah diatur, ini memulai proses penghentian Pod 1. Jika salah satu kontainer pada Pod memiliki - [preStop _hook_](/docs/concepts/containers/container-lifecycle-hooks/#hook-details), + [preStop _hook_](/id/docs/concepts/containers/container-lifecycle-hooks/#hook-details), maka akan dipanggil di dalam kontainer. Jika `preStop` _hook_ masih berjalan setelah masa tenggang habis, langkah 2 akan dipanggil dengan tambahan masa tenggang yang sedikit, 2 detik. @@ -223,7 +223,7 @@ Secara _default_, semua penghapusan akan berjalan normal selama 30 detik. Perint `kubectl delete` mendukung opsi `--grace-period=` yang akan memperbolehkan pengguna untuk menimpa nilai awal dan memberikan nilai sesuai keinginan pengguna. Nilai `0` akan membuat Pod -[dihapus paksa](/docs/concepts/workloads/pods/pod/#force-deletion-of-pods). +[dihapus paksa](/id/docs/concepts/workloads/pods/pod/#force-deletion-of-pods). Kamu harus memberikan opsi tambahan `--force` bersamaan dengan `--grace-period=0` untuk melakukan penghapusan paksa. @@ -243,7 +243,7 @@ dokumentasi untuk [penghentian Pod dari StatefulSet](/docs/tasks/run-application ## Hak istimewa untuk kontainer pada Pod Setiap kontainer dalam Pod dapat mengaktifkan hak istimewa (mode _privileged_), dengan menggunakan tanda -`privileged` pada [konteks keamanan](/docs/tasks/configure-pod-container/security-context/) +`privileged` pada [konteks keamanan](/id/docs/tasks/configure-pod-container/security-context/) pada spesifikasi kontainer. 
Ini akan berguna untuk kontainer yang ingin menggunakan kapabilitas Linux seperti memanipulasi jaringan dan mengakses perangkat. Proses dalam kontainer mendapatkan hak istimewa yang hampir sama dengan proses di luar kontainer. diff --git a/content/id/docs/concepts/workloads/pods/podpreset.md b/content/id/docs/concepts/workloads/pods/podpreset.md index 2fc1b8598b458..9b899c4687d82 100644 --- a/content/id/docs/concepts/workloads/pods/podpreset.md +++ b/content/id/docs/concepts/workloads/pods/podpreset.md @@ -57,6 +57,6 @@ Dalam rangka untuk menggunakan Pod Preset di dalam klaster kamu, kamu harus mema ## {{% heading "whatsnext" %}} - * [Memasukkan data ke dalam sebuah Pod dengan PodPreset](/docs/concepts/workloads/pods/pod/#injecting-data-into-a-pod-using-podpreset.md) + * [Memasukkan data ke dalam sebuah Pod dengan PodPreset](/id/docs/concepts/workloads/pods/pod/#injecting-data-into-a-pod-using-podpreset.md) diff --git a/content/id/docs/contribute/participate/_index.md b/content/id/docs/contribute/participate/_index.md new file mode 100644 index 0000000000000..72c561432b5b9 --- /dev/null +++ b/content/id/docs/contribute/participate/_index.md @@ -0,0 +1,116 @@ +--- +title: Berpartisipasi dalam SIG Docs +content_type: concept +weight: 60 +card: + name: contribute + weight: 60 +--- + + + +SIG Docs merupakan salah satu +[kelompok peminatan khusus (_special interest groups_)](https://github.com/kubernetes/community/blob/master/sig-list.md) +dalam proyek Kubernetes, yang berfokus pada penulisan, pembaruan, dan pemeliharaan +dokumentasi untuk Kubernetes secara keseluruhan. Lihatlah +[SIG Docs dari repositori github komunitas](https://github.com/kubernetes/community/tree/master/sig-docs) +untuk informasi lebih lanjut tentang SIG. + +SIG Docs menerima konten dan ulasan dari semua kontributor. Siapa pun dapat membuka +_pull request_ (PR), dan siapa pun boleh mengajukan isu tentang konten atau komen +pada _pull request_ yang sedang berjalan. + +Kamu juga bisa menjadi [anggota (_member_)](/id/docs/contribute/participating/roles-and-responsibilities/#anggota), +[pengulas (_reviewer_](/id/docs/contribute/participating/roles-and-responsibilities/#pengulas), atau [pemberi persetujuan (_approver_)](/id/docs/contribute/participating/roles-and-responsibilities/#approvers). Peran tersebut membutuhkan +akses dan mensyaratkan tanggung jawab tertentu untuk menyetujui dan melakukan perubahan. +Lihatlah [keanggotaan-komunitas (_community-membership_)](https://github.com/kubernetes/community/blob/master/community-membership.md) +untuk informasi lebih lanjut tentang cara kerja keanggotaan dalam komunitas Kubernetes. + +Selebihnya dari dokumen ini akan menguraikan beberapa cara unik dari fungsi peranan tersebut dalam +SIG Docs, yang bertanggung jawab untuk memelihara salah satu aspek yang paling berhadapan dengan publik +dalam Kubernetes - situs web dan dokumentasi dari Kubernetes. + + + + +## Ketua umum (_chairperson_) SIG Docs {#ketua-umum-sig-docs} + +Setiap SIG, termasuk SIG Docs, memilih satu atau lebih anggota SIG untuk bertindak sebagai +ketua umum. Mereka merupakan kontak utama antara SIG Docs dan bagian lain dari +organisasi Kubernetes. Mereka membutuhkan pengetahuan yang luas tentang struktur +proyek Kubernetes secara keseluruhan dan bagaimana SIG Docs bekerja di dalamnya. Lihatlah +[Kepemimpinan (_leadership_)](https://github.com/kubernetes/community/tree/master/sig-docs#leadership) +untuk daftar ketua umum yang sekarang. 
+ +## Tim dan automasi dalam SIG Docs + +Automasi dalam SIG Docs bergantung pada dua mekanisme berbeda: +Tim GitHub dan berkas OWNERS. + +### Tim GitHub + +Terdapat dua kategori tim dalam SIG Docs [tim (_teams_)](https://github.com/orgs/kubernetes/teams?query=sig-docs) dalam GitHub: + +- `@sig-docs-{language}-owners` merupakan pemberi persetujuan (_approver_) dan pemimpin (_lead_) +- `@sig-docs-{language}-reviewers` merupakan pengulas (_reviewer_) + +Setiap tim dapat direferensikan dengan `@name` mereka dalam komen GitHub untuk berkomunikasi dengan setiap orang di dalam grup. + +Terkadang tim Prow dan GitHub tumpang tindih (_overlap_) tanpa kecocokan sama persis. Untuk penugasan masalah, _pull request_, dan untuk mendukung persetujuan PR, +otomatisasi menggunakan informasi dari berkas `OWNERS`. + + +### Berkas OWNERS dan bagian yang utama (_front-matter_) + +Proyek Kubernetes menggunakan perangkat otomatisasi yang disebut prow untuk melakukan automatisasi +yang terkait dengan isu dan _pull request_ dalam GitHub. +[Repositori situs web Kubernetes](https://github.com/kubernetes/website) menggunakan +dua buah [prow _plugin_](https://github.com/kubernetes/test-infra/tree/master/prow/plugins): + +- blunderbuss +- approve + +Kedua _plugin_ menggunakan berkas +[OWNERS](https://github.com/kubernetes/website/blob/master/OWNERS) dan +[OWNERS_ALIASES](https://github.com/kubernetes/website/blob/master/OWNERS_ALIASES) +dalam level teratas dari repositori GitHub `kubernetes/website` untuk mengontrol +bagaimana prow bekerja di dalam repositori. + +Berkas OWNERS berisi daftar orang-orang yang menjadi pengulas dan pemberi persetujuan di dalam SIG Docs. +Berkas OWNERS juga bisa terdapat di dalam subdirektori, dan dapat menimpa peranan karena +dapat bertindak sebagai pengulas atau pemberi persetujuan berkas untuk subdirektori itu dan +apa saja yang ada di dalamnya. Untuk informasi lebih lanjut tentang berkas OWNERS pada umumnya, lihatlah +[OWNERS](https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md). + +Selanjutnya, berkas _markdown_ individu dapat menyimpan daftar pengulas dan pemberi persetujuan +pada bagian yang utama, baik dengan menyimpan daftar nama pengguna individu GitHub atau grup GitHub. + +Kombinasi dari berkas OWNERS dan bagian yang utama dalam berkas _markdown_ menentukan +saran kepada pemilik PR yang didapat dari sistem otomatis tentang siapa yang akan meminta ulasan teknis +dan ulasan editorial untuk PR mereka. + +## Cara menggabungkan pekerjaan + +Ketika _pull request_ digabungkan ke cabang (_branch_) yang digunakan untuk mempublikasikan konten, konten itu dipublikasikan di http://kubernetes.io. Untuk memastikan bahwa +kualitas konten yang kita terbitkan bermutu tinggi, kita membatasi penggabungan _pull request_ bagi para pemberi persetujuan +SIG Docs. Beginilah cara kerjanya. + +- Ketika _pull request_ memiliki label `lgtm` dan `approve`, tidak memiliki label `hold`, + dan telah lulus semua tes, _pull request_ akan digabungkan secara otomatis. +- Anggota organisasi Kubernetes dan pemberi persetujuan SIG Docs dapat menambahkan komen + untuk mencegah penggabungan otomatis dari _pull request_ yang diberikan (dengan menambahkan komen `/hold` + atau menahan komen `/lgtm`). +- Setiap anggota Kubernetes dapat menambahkan label `lgtm` dengan menambahkan komen `lgtm` +- Hanya pemberi persetujuan SIG Docs yang bisa menggabungkan _pull request_ + dengan menambahkan komen `/approve`. 
Beberapa pemberi persetujuan juga dapat melakukan + tugas tambahan seperti [PR _Wrangler_](/id/docs/contribute/advanced#menjadi-pr-wrangler-untuk-seminggu) atau + [Ketua Umum SIG Docs](#ketua-umum-sig-docs). + + +## {{% heading "whatsnext" %}} + +Untuk informasi lebih lanjut tentang cara berkontribusi pada dokumentasi Kubernetes, lihatlah: + +- [Berkontribusi konten baru](/id/docs/contribute/overview/) +- [Mengulas konten](/id/docs/contribute/review/reviewing-prs) +- [Panduan gaya dokumentasi](/id/docs/contribute/style/) diff --git a/content/id/docs/contribute/suggesting-improvements.md b/content/id/docs/contribute/suggesting-improvements.md new file mode 100644 index 0000000000000..588bebeb6d011 --- /dev/null +++ b/content/id/docs/contribute/suggesting-improvements.md @@ -0,0 +1,65 @@ +--- +title: Menyarankan peningkatan kualitas konten +slug: suggest-improvements +content_type: concept +weight: 10 +card: + name: contribute + weight: 20 +--- + + + +Jika kamu menemukan masalah pada dokumentasi Kubernetes, atau mempunyai ide untuk +konten baru, maka silakan untuk membuat isu pada Github. Kamu hanya membutuhkan +sebuah [akun Github](https://github.com/join) dan sebuah _web browser_. + +Pada kebanyakan kasus, pekerjaan dalam dokumentasi Kubernetes diawali dengan sebuah +isu pada Github. Kontributor Kubernetes akan mengkaji, mengkategorisasi dan menandai isu +sesuai kebutuhan. Selanjutnya, kamu atau anggota lain dari komunitas Kubernetes dapat membuat +_pull request_ dengan perubahan yang akan menyelesaikan masalahnya. + + + +## Membuka sebuah issue + +Jika kamu mau menyarankan peningkatan kualitas pada konten yang sudah ada, atau menemukan kesalahan, +maka silakan membuka sebuah isu. + +1. Turun ke bagian bawah dari suatu halaman dan klik pada tombol **Buat Isu**. Ini akan +mengantarmu pada halaman Github isu dengan beberapa tajuk yang telah diisi. +2. Deskripsikan isu atau saran untuk peningkatan kualitas. Sediakan detail sebanyak mungkin yang kamu bisa. +3. Klik **Submit new issue** + +Setelah dikirim, cek isu yang kamu buat secara berkala atau hidupkan notifikasi Github. +Pengulas (_reviewer_) atau anggota komunitas lainnya mungkin akan menanyakan pertanyaan +sebelum mereka mengambil suatu tindakan terhadap isumu. + +## Menyarankan konten baru + +Jika kamu memiliki ide untuk konten baru, tapi kamu tidak yakin dimana mengutarakannya, +kamu tetap dapat membuat sebuah isu. Antara lain: + +- Pilih halaman pada bagian yang menurutmu konten tersebut berhubungan dan klik **Buat Isu**. +- Pergi ke [Github](https://github.com/kubernetes/website/issues/new/) dan langsung membuat isu. + +## Bagaimana cara membuat isu yang bagus + +Perhatikan hal berikut ketika membuat sebuah isu: + +- Memberikan deskripsi isu yang jelas. Deskripsikan apa yang memang kurang, tertinggal, + salah atau konten mana yang memerlukan peningkatan kualitas. +- Jelaskan dampak spesifik dari isu terhadap pengguna. +- Batasi cakupan dari sebuah isu menjadi ukuran pekerjaan yang masuk akal. + Untuk masalah dengan cakupan yang besar, pecah isu itu menjadi beberapa isu lebih kecil. + Misal, "Membenahi dokumentasi keamanan" masih sangat luas cakupannya, tapi "Penambahan + detail pada topik 'Pembatasan akses jaringan'" adalah lebih spesifik untuk dikerjakan. +- Mencari isu yang sudah ada untuk melihat apakah ada sesuatu yang berhubungan atau + mirip dengan isu yang baru. 
+- Jika isu yang baru berhubungan dengan isu lain atau _pull request_, tambahkan rujukan + dengan menuliskan URL lengkap atau dengan nomor isu atau _pull request_ yang diawali dengan + karakter `#`. Contohnya, `Diajukan oleh #987654`. +- Mengikuti [Kode Etik Komunitas](/id/community/code-of-conduct/). Menghargai kontributor lain. + Misalnya, "Dokumentasi ini sangat jelek" adalah contoh yang tidak membantu dan juga bukan + masukan yang sopan. + diff --git a/content/id/docs/reference/access-authn-authz/rbac.md b/content/id/docs/reference/access-authn-authz/rbac.md index 27d060329f3e8..49aa20ed6ecbf 100644 --- a/content/id/docs/reference/access-authn-authz/rbac.md +++ b/content/id/docs/reference/access-authn-authz/rbac.md @@ -1,16 +1,16 @@ --- title: Menggunakan Otorisasi RBAC -content_template: templates/concept +content_type: concept aliases: [../../../rbac/] weight: 70 --- -{{% capture overview %}} + Kontrol akses berbasis peran (RBAC) adalah metode pengaturan akses ke sumber daya komputer atau jaringan berdasarkan peran pengguna individu dalam organisasi kamu. -{{% /capture %}} -{{% capture body %}} + + Otorisasi RBAC menggunakan `rbac.authorization.k8s.io` kelompok API untuk mengendalikan keputusan otorisasi, memungkinkan kamu untuk mengkonfigurasi kebijakan secara dinamis melalui API Kubernetes. @@ -24,7 +24,7 @@ kube-apiserver --authorization-mode=Example,RBAC --other-options --more-options ## Objek API {#api-overview} API RBAC mendeklarasikan empat jenis objek Kubernetes: Role, ClusterRole, -RoleBinding and ClusterRoleBinding. kamu bisa [mendeskripsikan beberapa objek](/docs/concepts/overview/working-with-objects/kubernetes-objects/#understanding-kubernetes-objects), atau mengubahnya menggunakan alat seperti `kubectl`, seperti objek Kubernetes lain. +RoleBinding and ClusterRoleBinding. kamu bisa [mendeskripsikan beberapa objek](/id/docs/concepts/overview/working-with-objects/kubernetes-objects/#understanding-kubernetes-objects), atau mengubahnya menggunakan alat seperti `kubectl`, seperti objek Kubernetes lain. {{< caution >}} Objek-objek ini, dengan disengaja, memaksakan pembatasan akses. Jika kamu melakukan perubahan @@ -100,7 +100,7 @@ rules: verbs: ["get", "watch", "list"] ``` -Nama objek Role dan ClusterRole harus menggunakan [nama _path segment_](/docs/concepts/overview/working-with-objects/names#path-segment-names) yang valid. +Nama objek Role dan ClusterRole harus menggunakan [nama _path segment_](/id/docs/concepts/overview/working-with-objects/names#path-segment-names) yang valid. ### RoleBinding dan ClusterRoleBinding @@ -116,7 +116,7 @@ Jika kamu ingin memasangkan ClusterRole ke semua Namespace di klaster kamu, kamu ClusterRoleBinding. Nama objek RoleBinding atau ClusterRoleBinding harus valid menggunakan -[nama _path segment_](/docs/concepts/overview/working-with-objects/names#path-segment-names) yang valid. +[nama _path segment_](/id/docs/concepts/overview/working-with-objects/names#path-segment-names) yang valid. #### Contoh RoleBinding @@ -456,7 +456,7 @@ Di Kubernetes, modul otentikasi menyediakan informasi grup. Grup, seperti halnya pengguna, direpresentasikan sebagai string, dan string tersebut tidak memiliki format tertentu, selain awalan `system:` yang sudah direservasi. -[ServiceAccount](/docs/tasks/configure-pod-container/configure-service-account/) memiliki nama yang diawali dengan `system:serviceaccount:`, dan menjadi milik grup yang diawali dengan nama `system:serviceaccounts:`. 
+[ServiceAccount](/id/docs/tasks/configure-pod-container/configure-service-account/) memiliki nama yang diawali dengan `system:serviceaccount:`, dan menjadi milik grup yang diawali dengan nama `system:serviceaccounts:`. {{< note >}} - `system:serviceaccount:` (tunggal) adalah awalan untuk ServiceAccount _username_. @@ -1077,7 +1077,7 @@ In order from most secure to least secure, the approaches are: --namespace=my-namespace ``` - Many [add-ons](/docs/concepts/cluster-administration/addons/) run as the + Many [add-ons](/id/docs/concepts/cluster-administration/addons/) run as the "default" service account in the `kube-system` namespace. To allow those add-ons to run with super-user access, grant cluster-admin permissions to the "default" service account in the `kube-system` namespace. @@ -1192,4 +1192,4 @@ kubectl create clusterrolebinding permissive-binding \ After you have transitioned to use RBAC, you should adjust the access controls for your cluster to ensure that these meet your information security needs. -{{% /capture %}} + diff --git a/content/id/docs/reference/kubectl/cheatsheet.md b/content/id/docs/reference/kubectl/cheatsheet.md index 9afe9990644ca..671ac6b77a297 100644 --- a/content/id/docs/reference/kubectl/cheatsheet.md +++ b/content/id/docs/reference/kubectl/cheatsheet.md @@ -319,8 +319,8 @@ kubectl taint nodes foo dedicated=special-user:NoSchedule ### Berbagai Tipe Sumber Daya -Mendapatkan seluruh daftar tipe sumber daya yang didukung lengkap dengan singkatan pendeknya, [grup API](/docs/concepts/overview/kubernetes-api/#api-groups), -apakah sumber daya merupakan sumber daya yang berada di dalam Namespace atau tidak, serta [Kind](/docs/concepts/overview/working-with-objects/kubernetes-objects): +Mendapatkan seluruh daftar tipe sumber daya yang didukung lengkap dengan singkatan pendeknya, [grup API](/id/docs/concepts/overview/kubernetes-api/#api-groups), +apakah sumber daya merupakan sumber daya yang berada di dalam Namespace atau tidak, serta [Kind](/id/docs/concepts/overview/working-with-objects/kubernetes-objects): ```bash kubectl api-resources diff --git a/content/id/docs/setup/best-practices/multiple-zones.md b/content/id/docs/setup/best-practices/multiple-zones.md index 2727db559daab..e2e314eb632be 100644 --- a/content/id/docs/setup/best-practices/multiple-zones.md +++ b/content/id/docs/setup/best-practices/multiple-zones.md @@ -1,16 +1,16 @@ --- title: Menjalankan klaster dalam beberapa zona weight: 10 -content_template: templates/concept +content_type: concept --- -{{% capture overview %}} + Laman ini menjelaskan tentang bagaimana menjalankan sebuah klaster dalam beberapa zona. -{{% /capture %}} -{{% capture body %}} + + ## Pendahuluan @@ -398,4 +398,4 @@ KUBERNETES_PROVIDER=aws KUBE_USE_EXISTING_MASTER=true KUBE_AWS_ZONE=us-west-2b k KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2a kubernetes/cluster/kube-down.sh ``` -{{% /capture %}} + diff --git a/content/id/docs/setup/production-environment/container-runtimes.md b/content/id/docs/setup/production-environment/container-runtimes.md new file mode 100644 index 0000000000000..bca967593c0f8 --- /dev/null +++ b/content/id/docs/setup/production-environment/container-runtimes.md @@ -0,0 +1,414 @@ +--- +title: Runtime Container +content_type: concept +weight: 10 +--- + +{{< feature-state for_k8s_version="v1.6" state="stable" >}} + +Untuk menjalankan Container di Pod, Kubernetes menggunakan _runtime_ Container (Container runtimes). Berikut ini adalah +petunjuk instalasi untuk berbagai macam _runtime_. 
+ + + + + {{< caution >}} +Sebuah kekurangan ditemukan dalam cara `runc` menangani pendeskripsi berkas (_file_) sistem ketika menjalankan Container. +Container yang berbahaya dapat menggunakan kekurangan ini untuk menimpa konten biner `runc` dan +akibatnya Container tersebut dapat menjalankan perintah yang sewenang-wenang pada sistem host dari Container tersebut. + +Silakan merujuk pada [CVE-2019-5736](https://access.redhat.com/security/cve/cve-2019-5736) untuk informasi lebih lanjut tentang masalah ini. +{{< /caution >}} + +### Penerapan + +{{< note >}} +Dokumen ini ditulis untuk pengguna yang memasang CRI (Container Runtime Interface) pada sistem operasi Linux. Untuk sistem operasi yang lain, +silakan cari dokumentasi khusus untuk platform kamu. + +{{< /note >}} + +Kamu harus menjalankan semua perintah dalam panduan ini sebagai `root`. Sebagai contoh, awali perintah +dengan `sudo`, atau masuk sebagai `root` dan kemudian baru menjalankan perintah sebagai pengguna `root`. + +### _Driver_ cgroup + +Ketika systemd dipilih sebagai sistem init untuk sebuah distribusi Linux, proses init menghasilkan +dan menggunakan grup kontrol root (`cgroup`) dan proses ini akan bertindak sebagai manajer cgroup. Systemd memiliki integrasi yang ketat +dengan cgroup dan akan mengalokasikan cgroups untuk setiap proses. Kamu dapat mengonfigurasi +_runtime_ Container dan kubelet untuk menggunakan `cgroupfs`. Menggunakan `cgroupfs` bersama dengan systemd berarti +akan ada dua manajer cgroup yang berbeda. + +Cgroup digunakan untuk membatasi sumber daya yang dialokasikan untuk proses. +Sebuah manajer cgroup tunggal akan menyederhanakan pandangan tentang sumber daya apa yang sedang dialokasikan +dan secara bawaan (_default_) akan memiliki pandangan yang lebih konsisten tentang sumber daya yang tersedia dan yang sedang digunakan. Ketika kita memiliki +dua manajer maka kita pun akan memiliki dua pandangan berbeda tentang sumber daya tersebut. Kita telah melihat kasus di lapangan +di mana Node yang dikonfigurasi menggunakan `cgroupfs` untuk kubelet dan Docker, dan `systemd` +untuk semua sisa proses yang berjalan pada Node maka Node tersebut akan menjadi tidak stabil di bawah tekanan sumber daya. + +Mengubah aturan sedemikian rupa sehingga _runtime_ Container dan kubelet kamu menggunakan `systemd` sebagai _driver_ cgroup +akan menstabilkan sistem. Silakan perhatikan opsi `native.cgroupdriver=systemd` dalam pengaturan Docker di bawah ini. + +{{< caution >}} +Mengubah driver cgroup dari Node yang telah bergabung ke dalam sebuah Cluster sangat tidak direkomendasikan. +Jika kubelet telah membuat Pod menggunakan semantik dari sebuah _driver_ cgroup, mengubah _runtime_ Container +ke _driver_ cgroup yang lain dapat mengakibatkan kesalahan pada saat percobaan untuk membuat kembali PodSandbox +untuk Pod yang sudah ada. Menjalankan ulang (_restart_) kubelet mungkin tidak menyelesaikan kesalahan tersebut. Rekomendasi yang dianjurkan +adalah untuk menguras Node dari beban kerjanya, menghapusnya dari Cluster dan menggabungkannya kembali. + +{{< /caution >}} + +## Docker + +Pada setiap mesin kamu, mari menginstal Docker. +Versi yang direkomendasikan adalah 19.03.11, tetapi versi 1.13.1, 17.03, 17.06, 17.09, 18.06 dan 18.09 juga diketahui bekerja dengan baik. +Jagalah versi Docker pada versi terbaru yang sudah terverifikasi pada catatan rilis Kubernetes.
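The `native.cgroupdriver=systemd` option mentioned above is set through Docker's daemon configuration. A minimal sketch, assuming the common `/etc/docker/daemon.json` location; the logging and storage keys are typical companions rather than requirements:

```shell
# Point Docker at the systemd cgroup driver
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2"
}
EOF

# Reload unit files and restart Docker so the new driver takes effect
systemctl daemon-reload
systemctl restart docker
```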
+ +Gunakan perintah berikut untuk menginstal Docker pada sistem kamu: + +{{< tabs name="tab-cri-docker-installation" >}} +{{% tab name="Ubuntu 16.04+" %}} + +```shell +# (Menginstal Docker CE) +## Mengatur repositori: +### Menginstal packet untuk mengijinkan apt untuk menggunakan repositori melalui HTTPS +apt-get update && apt-get install -y \ + apt-transport-https ca-certificates curl software-properties-common gnupg2 +``` + +```shell +# Menambahkan key GPG resmi dari Docker: +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - +``` + +```shell +# Menambahkan repositori apt dari Docker: +add-apt-repository \ + "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) \ + stable" +``` + +```shell +# Menginstal Docker CE +apt-get update && apt-get install -y \ + containerd.io=1.2.13-2 \ + docker-ce=5:19.03.11~3-0~ubuntu-$(lsb_release -cs) \ + docker-ce-cli=5:19.03.11~3-0~ubuntu-$(lsb_release -cs) +``` + +```shell +# Mengatur daemon Docker +cat > /etc/docker/daemon.json < /etc/docker/daemon.json <}} + +Jika kamu menginginkan layanan Docker berjalan dari saat memulai pertama (_boot_), jalankan perintah ini: + +```shell +sudo systemctl enable docker +``` + +Silahkan merujuk pada [Panduan resmi instalasi Docker](https://docs.docker.com/engine/installation/) +untuk informasi lebih lanjut. + +## CRI-O + +Bagian ini mencakup langkah-langkah yang diperlukan untuk menginstal `CRI-O` sebagai _runtime_ CRI. + +Gunakan perintah-perinath berikut untuk menginstal CRI-O pada sistem kamu: + +{{< note >}} +Versi mayor dan minor dari CRI-O harus sesuai dengan versi mayor dan minor dari Kubernetes. +Untuk informasi lebih lanjut, lihatlah [Matriks kompatibilitas CRI-O](https://github.com/cri-o/cri-o). +{{< /note >}} + +### Prasyarat + +```shell +modprobe overlay +modprobe br_netfilter + +# Mengatur parameter sysctl yang diperlukan, dimana ini akan bernilai tetap setiap kali penjalanan ulang. 
+cat > /etc/sysctl.d/99-kubernetes-cri.conf <}} +{{% tab name="Debian" %}} + +```shell +# Debian Unstable/Sid +echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_Unstable/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list +wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Debian_Unstable/Release.key -O- | sudo apt-key add - +``` + +```shell +# Debian Testing +echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_Testing/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list +wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Debian_Testing/Release.key -O- | sudo apt-key add - +``` + +```shell +# Debian 10 +echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list +wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Debian_10/Release.key -O- | sudo apt-key add - +``` + +```shell +# Raspbian 10 +echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Raspbian_10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list +wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Raspbian_10/Release.key -O- | sudo apt-key add - +``` + +dan kemudian install CRI-O: +```shell +sudo apt-get install cri-o-1.17 +``` + +{{% /tab %}} + +{{% tab name="Ubuntu 18.04, 19.04 and 19.10" %}} + +```shell +# Mengatur repositori paket +. /etc/os-release +sudo sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/x${NAME}_${VERSION_ID}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list" +wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/x${NAME}_${VERSION_ID}/Release.key -O- | sudo apt-key add - +sudo apt-get update +``` + +```shell +# Menginstal CRI-O +sudo apt-get install cri-o-1.17 +``` +{{% /tab %}} + +{{% tab name="CentOS/RHEL 7.4+" %}} + +```shell +# Menginstal prasyarat +curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/CentOS_7/devel:kubic:libcontainers:stable.repo +curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable:cri-o:{{< skew latestVersion >}}.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:{{< skew latestVersion >}}/CentOS_7/devel:kubic:libcontainers:stable:cri-o:{{< skew latestVersion >}}.repo +``` + +```shell +# Menginstal CRI-O +yum install -y cri-o +``` +{{% /tab %}} + +{{% tab name="openSUSE Tumbleweed" %}} + +```shell +sudo zypper install cri-o +``` +{{% /tab %}} +{{< /tabs >}} + +### Memulai CRI-O + +```shell +systemctl daemon-reload +systemctl start crio +``` + +Silahkan merujuk pada [Panduan instalasi CRI-O](https://github.com/kubernetes-sigs/cri-o#getting-started) +untuk informasi lanjut. + +## Containerd + +Bagian ini berisi langkah-langkah yang diperlukan untuk menggunakan `containerd` sebagai _runtime_ CRI. 
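Before installing containerd, the kernel needs the `overlay` and `br_netfilter` modules plus a few bridge and forwarding sysctls, just as for the runtimes above. A sketch of these prerequisite settings, assuming the conventional file names used elsewhere on this page:

```shell
# Load the kernel modules needed for overlay storage and bridged traffic filtering
modprobe overlay
modprobe br_netfilter

# Persist the modules across reboots
cat > /etc/modules-load.d/containerd.conf <<EOF
overlay
br_netfilter
EOF

# Let iptables see bridged traffic and enable forwarding, then apply without a reboot
cat > /etc/sysctl.d/99-kubernetes-cri.conf <<EOF
net.bridge.bridge-nf-call-iptables  = 1
net.ipv4.ip_forward                 = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sysctl --system
```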
+ +Gunakan perintah-perintah berikut untuk menginstal containerd pada sistem kamu: + + +### Prasyarat + +```shell +cat > /etc/modules-load.d/containerd.conf < /etc/sysctl.d/99-kubernetes-cri.conf <}} +{{% tab name="Ubuntu 16.04" %}} + +```shell +# (Meninstal containerd) +## Mengatur repositori paket +### Install packages to allow apt to use a repository over HTTPS +apt-get update && apt-get install -y apt-transport-https ca-certificates curl software-properties-common +``` + +```shell +## Menambahkan key GPG resmi dari Docker: +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - +``` + +```shell +## Mengatur repositori paket Docker +add-apt-repository \ + "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) \ + stable" +``` + +```shell +## Menginstal containerd +apt-get update && apt-get install -y containerd.io +``` + +```shell +# Mengonfigure containerd +mkdir -p /etc/containerd +containerd config default > /etc/containerd/config.toml +``` + +```shell +# Menjalankan ulang containerd +systemctl restart containerd +``` +{{% /tab %}} +{{% tab name="CentOS/RHEL 7.4+" %}} + +```shell +# (Menginstal containerd) +## Mengatur repositori +### Menginstal paket prasyarat +yum install -y yum-utils device-mapper-persistent-data lvm2 +``` + +```shell +## Menambahkan repositori Docker +yum-config-manager \ + --add-repo \ + https://download.docker.com/linux/centos/docker-ce.repo +``` + +```shell +## Menginstal containerd +yum update -y && yum install -y containerd.io +``` + +```shell +## Mengonfigurasi containerd +mkdir -p /etc/containerd +containerd config default > /etc/containerd/config.toml +``` + +```shell +# Menjalankan ulang containerd +systemctl restart containerd +``` +{{% /tab %}} +{{< /tabs >}} + +### systemd + +Untuk menggunakan driver cgroup `systemd`, atur `plugins.cri.systemd_cgroup = true` pada `/etc/containerd/config.toml`. +Ketika menggunakan kubeadm, konfigurasikan secara manual +[driver cgroup untuk kubelet](/id/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#mengonfigurasi-cgroup-untuk-kubelet-pada-node-control-plane) + +## _Runtime_ CRI yang lainnya: Frakti + +Silahkan lihat [Panduan cepat memulai Frakti](https://github.com/kubernetes/frakti#quickstart) untuk informasi lebih lanjut. 
+ + diff --git a/content/id/docs/setup/production-environment/tools/_index.md b/content/id/docs/setup/production-environment/tools/_index.md new file mode 100644 index 0000000000000..fc98544230da1 --- /dev/null +++ b/content/id/docs/setup/production-environment/tools/_index.md @@ -0,0 +1,4 @@ +--- +title: Menginstal Kubernetes dengan perkakas penyebaran +weight: 30 +--- diff --git a/content/id/docs/setup/production-environment/tools/kubeadm/_index.md b/content/id/docs/setup/production-environment/tools/kubeadm/_index.md new file mode 100644 index 0000000000000..f88a749c9b54c --- /dev/null +++ b/content/id/docs/setup/production-environment/tools/kubeadm/_index.md @@ -0,0 +1,4 @@ +--- +title: "Menyiapkan klaster dengan kubeadm" +weight: 10 +--- diff --git a/content/id/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md b/content/id/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md index 4b7b15e91c5be..8a345296a394a 100644 --- a/content/id/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md +++ b/content/id/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md @@ -1,10 +1,10 @@ --- title: Membuat sebuah klaster dengan control-plane tunggal menggunakan kubeadm -content_template: templates/task +content_type: task weight: 30 --- -{{% capture overview %}} + Perkakas `kubeadm` membantu kamu membuat sebuah klaster Kubernetes minimum yang layak dan sesuai dengan _best practice_. Bahkan, kamu dapat menggunakan `kubeadm` untuk membuat sebuah klaster yang lolos [uji Kubernetes Conformance](https://kubernetes.io/blog/2017/10/software-conformance-certification). `kubeadm` juga mendukung fungsi siklus hidup (_lifecycle_) @@ -22,9 +22,10 @@ server di _cloud_, sebuah Raspberry Pi, dan lain-lain. Baik itu men-_deploy_ pad _cloud_ ataupun _on-premise_, kamu dapat mengintegrasikan `kubeadm` pada sistem _provisioning_ seperti Ansible atau Terraform. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + Untuk mengikuti panduan ini, kamu membutuhkan: @@ -51,9 +52,9 @@ sedikit seiring dengan berevolusinya kubeadm, namun secara umum implementasinya Semua perintah di dalam `kubeadm alpha`, sesuai definisi, didukung pada level _alpha_. {{< /note >}} -{{% /capture %}} -{{% capture steps %}} + + ## Tujuan @@ -65,7 +66,7 @@ Semua perintah di dalam `kubeadm alpha`, sesuai definisi, didukung pada level _a ### Menginstal kubeadm pada hos -Lihat ["Menginstal kubeadm"](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/). +Lihat ["Menginstal kubeadm"](/id/docs/setup/production-environment/tools/kubeadm/install-kubeadm/). {{< note >}} Jika kamu sudah menginstal kubeadm sebelumnya, jalankan `apt-get update && @@ -93,7 +94,7 @@ yang spesifik pada penyedia tertentu. Lihat [Menginstal _add-on_ jaringan Pod](# 3. (Opsional) Sejak versi 1.14, `kubeadm` mencoba untuk mendeteksi _runtime_ kontainer pada Linux dengan menggunakan daftar _domain socket path_ yang umum diketahui. Untuk menggunakan _runtime_ kontainer yang berbeda atau jika ada lebih dari satu yang terpasang pada Node yang digunakan, tentukan argumen `--cri-socket` -pada `kubeadm init`. Lihat [Menginstal _runtime_](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#installing-runtime). +pada `kubeadm init`. Lihat [Menginstal _runtime_](/id/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#installing-runtime). 4. 
(Opsional) Kecuali ditentukan sebelumnya, `kubeadm` akan menggunakan antarmuka jaringan yang diasosiasikan dengan _default gateway_ untuk mengatur alamat _advertise_ untuk API Server pada Node _control-plane_ ini. Untuk menggunakan antarmuka jaringan yang berbeda, tentukan argumen `--apiserver-advertise-address=` @@ -261,7 +262,7 @@ DNS klaster (CoreDNS) tidak akan menyala sebelum jaringan dipasangkan.** `--pod-network-cidr`, atau sebagai penggantinya pada YAML _plugin_ jaringan kamu). - Secara bawaan, `kubeadm` mengatur klastermu untuk menggunakan dan melaksanakan penggunaan - [RBAC](/docs/reference/access-authn-authz/rbac/) (_role based access control_). + [RBAC](/id/docs/reference/access-authn-authz/rbac/) (_role based access control_). Pastikan _plugin_ jaringan Pod mendukung RBAC, dan begitu juga seluruh manifes yang kamu gunakan untuk men-_deploy_-nya. @@ -559,9 +560,9 @@ Lihat dokumentasi referensi [`kubeadm reset`](/docs/reference/setup-tools/kubead untuk informasi lebih lanjut mengenai sub-perintah ini dan opsinya. -{{% /capture %}} -{{% capture discussion %}} + + ## Selanjutnya @@ -570,14 +571,14 @@ opsinya. untuk detail mengenai pembaruan klaster menggunakan `kubeadm`. * Pelajari penggunaan `kubeadm` lebih lanjut pada [dokumentasi referensi kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm) * Pelajari lebih lanjut mengenai [konsep-konsep](/docs/concepts/) Kubernetes dan [`kubectl`](/docs/user-guide/kubectl-overview/). -* Lihat halaman [Cluster Networking](/docs/concepts/cluster-administration/networking/) untuk daftar +* Lihat halaman [Cluster Networking](/id/docs/concepts/cluster-administration/networking/) untuk daftar _add-on_ jaringan Pod yang lebih banyak. -* Lihat [daftar _add-on_](/docs/concepts/cluster-administration/addons/) untuk +* Lihat [daftar _add-on_](/id/docs/concepts/cluster-administration/addons/) untuk mengeksplor _add-on_ lainnya, termasuk perkakas untuk _logging_, _monitoring_, _network policy_, visualisasi & pengendalian klaster Kubernetes. * Atur bagaimana klaster mengelola log untuk peristiwa-peristiwa klaster dan dari aplikasi-aplikasi yang berjalan pada Pod. - Lihat [Arsitektur Logging](/docs/concepts/cluster-administration/logging/) untuk + Lihat [Arsitektur Logging](/id/docs/concepts/cluster-administration/logging/) untuk gambaran umum tentang hal-hal yang terlibat. ### Umpan balik @@ -601,7 +602,7 @@ Karena kita tidak dapat memprediksi masa depan, CLI kubeadm v{{< skew latestVers Sumber daya ini menyediakan informasi lebih lanjut mengenai _version skew_ yang didukung antara kubelet dan _control plane_, serta komponen Kubernetes lainnya: * [Kebijakan versi and version-skew Kubernetes](/docs/setup/release/version-skew-policy/) -* [Panduan instalasi](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#installing-kubeadm-kubelet-and-kubectl) spesifik untuk kubeadm +* [Panduan instalasi](/id/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#installing-kubeadm-kubelet-and-kubectl) spesifik untuk kubeadm ## Keterbatasan @@ -635,4 +636,4 @@ mendukung platform pilihanmu. Jika kamu menemui kesulitan dengan kubeadm, silakan merujuk pada [dokumen penyelesaian masalah](/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/). 
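As an illustration of the flags discussed on this page (`--pod-network-cidr`, `--apiserver-advertise-address`, and `--cri-socket`), a single `kubeadm init` invocation might combine them as sketched below. The CIDR, address, and socket path are placeholder values, not values taken from this document, and should be replaced to match your Pod network plugin, network interface, and container runtime.

```shell
# Illustrative sketch: combine the kubeadm init flags described above.
# Replace the CIDR, address, and socket path with values for your environment.
sudo kubeadm init \
  --pod-network-cidr=192.168.0.0/16 \
  --apiserver-advertise-address=10.0.0.4 \
  --cri-socket=/run/containerd/containerd.sock
```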
-{{% /capture %}} + diff --git a/content/id/docs/setup/production-environment/tools/kubeadm/high-availability.md b/content/id/docs/setup/production-environment/tools/kubeadm/high-availability.md new file mode 100644 index 0000000000000..afe6d1e6b8f5f --- /dev/null +++ b/content/id/docs/setup/production-environment/tools/kubeadm/high-availability.md @@ -0,0 +1,364 @@ +--- +title: Membangun Klaster dengan Ketersediaan Tinggi menggunakan kubeadm +content_type: task +weight: 60 +--- + + + +Laman ini menjelaskan dua pendekatan yang berbeda untuk membuat klaster Kubernetes dengan ketersediaan tinggi menggunakan kubeadm: + +- Dengan Node _control plane_ yang bertumpuk (_stacked_). Pendekatan ini membutuhkan sumber daya infrastruktur yang lebih sedikit. Anggota-anggota etcd dan Node _control plane_ diletakkan pada tempat yang sama (_co-located_). +- Dengan klaster etcd eksternal. Pendekatan ini membutuhkan lebih banyak sumber daya infrastruktur. Node _control plane_ dan anggota etcd berada pada tempat yang berbeda. + +Sebelum memulai, kamu harus memikirkan dengan matang pendekatan mana yang paling sesuai untuk kebutuhan aplikasi dan _environment_-mu. [Topik perbandingan berikut](/id/docs/setup/production-environment/tools/kubeadm/ha-topology/) menguraikan kelebihan dan kekurangan dari masing-masing pendekatan. + +Jika kamu menghadapi masalah dalam pembuatan klaster dengan ketersediaan tinggi, silakan berikan umpan balik +pada [pelacak isu](https://github.com/kubernetes/kubeadm/issues/new) kubeadm. + +Lihat juga [dokumentasi pembaruan](/id/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-15). + +{{< caution >}} +Laman ini tidak menunjukkan cara untuk menjalankan klastermu pada penyedia layanan cloud. Pada _environment_ cloud, kedua pendekatan yang didokumentasikan di sini tidak akan bekerja untuk objek Service dengan tipe LoadBalancer maupun PersistentVolume dinamis. +{{< /caution >}} + + + +## {{% heading "prerequisites" %}} + + +Untuk kedua metode kamu membutuhkan infrastruktur seperti berikut: + +- Tiga mesin yang memenuhi [kebutuhan minimum kubeadm](/id/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#sebelum-mulai) untuk + Node _control plane_ +- Tiga mesin yang memenuhi [kebutuhan minimum kubeadm](/id/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#sebelum-mulai) untuk Node _worker_ +- Konektivitas internet pada seluruh mesin di dalam klaster (baik jaringan publik maupun jaringan pribadi) +- Hak akses sudo pada seluruh mesin +- Akses SSH dari satu perangkat ke seluruh Node pada sistem +- Perkakas `kubeadm` dan `kubelet` diinstal pada seluruh mesin. Perkakas `kubectl` bersifat opsional. + +Untuk klaster etcd eksternal saja, kamu juga membutuhkan: + +- Tiga mesin tambahan untuk anggota-anggota etcd + + + + + +## Langkah pertama untuk kedua metode + +### Membuat _load balancer_ untuk kube-apiserver + +{{< note >}} +Akan ada banyak konfigurasi untuk _load balancer_. Contoh berikut ini hanyalah salah satu +opsi. Kebutuhan klastermu mungkin membutuhkan konfigurasi berbeda. +{{< /note >}} + +1. Buat sebuah _load balancer_ kube-apiserver dengan sebuah nama yang yang akan mengubah ke dalam bentuk DNS. + + - Pada _environment_ cloud kamu harus meletakkan Node _control plane_ di belakang _load balancer_ yang meneruskan TCP. _Load balancer_ ini mendistribusikan trafik ke seluruh Node _control plane_ pada daftar tujuan. _Health check_ untuk + apiserver adalah pengujian TCP pada porta yang didengarkan oleh kube-apiserver + (nilai semula `:6443`). 
+ + - Tidak direkomendasikan untuk menggunakan alamat IP secara langsung pada _environment_ cloud. + + - _Load balancer_ harus dapat berkomunikasi dengan seluruh Node _control plane_ + pada porta yang digunakan apiserver. _Load balancer_ tersebut juga harus mengizinkan trafik masuk pada porta yang didengarkannya. + + - Pastikan alamat _load balancer_ sesuai + dengan alamat `ControlPlaneEndpoint` pada kubeadm. + + - Baca panduan [Opsi untuk _Software Load Balancing_](https://github.com/kubernetes/kubeadm/blob/master/id/docs/ha-considerations.md#options-for-software-load-balancing) + untuk detail lebih lanjut. + +2. Tambahkan Node _control plane_ pertama pada _load balancer_ dan lakukan pengujian koneksi: + + ```sh + nc -v LOAD_BALANCER_IP PORT + ``` + + - Kegalatan koneksi yang ditolak memang diantisipasi karena apiserver belum + berjalan. Namun jika mendapat _timeout_, berarti _load balancer_ tidak dapat berkomunikasi + dengan Node _control plane_. Jika terjadi _timeout_, lakukan pengaturan ulang pada _load balancer_ agar dapat berkomunikasi dengan Node _control plane_. + +3. Tambahkan Node _control plane_ lainnya pada grup tujuan _load balancer_. + +## Node _control plane_ dan etcd bertumpuk (_stacked_) + +### Langkah-langkah untuk Node _control plane_ pertama + +1. Inisialisasi _control plane_: + + ```sh + sudo kubeadm init --control-plane-endpoint "LOAD_BALANCER_DNS:LOAD_BALANCER_PORT" --upload-certs + ``` + + - Kamu bisa menggunakan opsi `--kubernetes-version` untuk mengatur versi Kubernetes yang akan digunakan. + Direkomendasikan untuk menggunakan versi kubeadm, kubelet, kubectl, dan Kubernetes yang sama. + + - Opsi `--control-plane-endpoint` harus diatur menuju alamat atau DNS dan porta dari _load balancer_. + + - Opsi `--upload-certs` digunakan untuk mengunggah sertifikat-sertifikat yang harus dibagikan ke seluruh + Node _control plane_ pada klaster. Jika sebaliknya, kamu memilih untuk menyalin sertifikat ke + seluruh Node _control plane_ sendiri atau menggunakan perkakas automasi, silakan hapus opsi ini dan merujuk ke bagian [Distribusi sertifikat manual](#distribusi-sertifikat-manual) di bawah. + + {{< note >}} + Opsi `--config` dan `--certificate-key` pada `kubeadm init` tidak dapat digunakan secara bersamaan, maka dari itu jika kamu ingin menggunakan + [konfigurasi kubeadm](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2) + kamu harus menambahkan _field_ `certificateKey` pada lokasi pengaturan yang sesuai + (berada di bawah `InitConfiguration` dan `JoinConfiguration: controlPlane`). + {{< /note >}} + + {{< note >}} + Beberapa _plugin_ jaringan CNI membutuhkan pengaturan tambahan, seperti menentukan CIDR IP untuk Pod, meski beberapa lainnya tidak. + Lihat [dokumentasi jaringan CNI](/id/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#jaringan-pod). + Untuk menambahkan CIDR Pod, tambahkan opsi `--pod-network-cidr`, atau jika kamu menggunakan berkas konfigurasi kubeadm + pasang _field_ `podSubnet` di bawah objek `networking` dari `ClusterConfiguration`. + {{< /note >}} + + - Keluaran yang dihasilkan terlihat seperti berikut ini: + + ```sh + ... 
+ You can now join any number of control-plane node by running the following command on each as a root: + kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 --control-plane --certificate-key f8902e114ef118304e561c3ecd4d0b543adc226b7a07f675f56564185ffe0c07 + + Please note that the certificate-key gives access to cluster sensitive data, keep it secret! + As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use kubeadm init phase upload-certs to reload certs afterward. + + Then you can join any number of worker nodes by running the following on each as root: + kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 + ``` + + - Salin keluaran ini pada sebuah berkas teks. Kamu akan membutuhkannya nanti untuk menggabungkan Node _control plane_ dan _worker_ ke klaster. + - Ketika opsi `--upload-certs` digunakan dengan `kubeadm init`, sertifikat dari _control plane_ utama + akan dienkripsi dan diunggah ke Secret `kubeadm-certs`. + - Untuk mengunggah ulang sertifikat dan membuat kunci dekripsi baru, gunakan perintah berikut pada Node _control plane_ + yang sudah tergabung pada klaster: + + ```sh + sudo kubeadm init phase upload-certs --upload-certs + ``` + + - Kamu juga dapat menentukan `--certificate-key` _custom_ pada saat `init` yang nanti dapat digunakan pada saat `join`. + Untuk membuat kunci tersebut kamu dapat menggunakan perintah berikut: + + ```sh + kubeadm alpha certs certificate-key + ``` + + {{< note >}} + Secret `kubeadm-certs` dan kunci dekripsi akan kadaluarsa setelah dua jam. + {{< /note >}} + + {{< caution >}} + Seperti yang tertera pada keluaran perintah, kunci sertifikat memberikan akses ke data klaster yang bersifat sensitif, jaga kerahasiaannya! + {{< /caution >}} + +2. Pasang _plugin_ CNI pilihanmu: + [Ikuti petunjuk berikut](/id/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#jaringan-pod) + untuk menginstal penyedia CNI. Pastikan konfigurasinya sesuai dengan CIDR Pod yang ditentukan pada berkas konfigurasi kubeadm jika diterapkan. + + Pada contoh berikut kami menggunakan Weave Net: + + ```sh + kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" + ``` + +3. Tulis perintah berikut dan saksikan Pod komponen-komponen _control plane_ mulai dinyalakan: + + ```sh + kubectl get pod -n kube-system -w + ``` + +### Langkah-langkah selanjutnya untuk Node _control plane_ + +{{< note >}} +Sejak kubeadm versi 1.15 kamu dapat menggabungkan beberapa Node _control plane_ secara bersamaan. +Pada versi sebelumnya, kamu harus menggabungkan Node _control plane_ baru secara berurutan, setelah +Node pertama selesai diinisialisasi. +{{< /note >}} + +Untuk setiap Node _control plane_ kamu harus: + +1. Mengeksekusi perintah untuk bergabung yang sebelumnya diberikan pada keluaran `kubeadm init` pada Node pertama. + Perintah tersebut terlihat seperti ini: + + ```sh + sudo kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 --control-plane --certificate-key f8902e114ef118304e561c3ecd4d0b543adc226b7a07f675f56564185ffe0c07 + ``` + + - Opsi `--control-plane` menunjukkan `kubeadm join` untuk membuat _control plane_ baru. 
+ - Opsi `--certificate-key ...` akan membuat sertifikat _control plane_ diunduh + dari Secret `kubeadm-certs` pada klaster dan didekripsi menggunakan kunci yang diberikan. + +## Node etcd eksternal + +Membangun sebuah klaster dengan Node etcd eksternal memiliki prosedur yang mirip dengan etcd bertumpuk +dengan pengecualian yaitu kamu harus setup etcd terlebih dulu, dan kamu harus memberikan informasi etcd +pada berkas konfigurasi kubeadm. + +### Memasang klaster etcd + +1. Ikuti [petunjuk berikut](/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm/) untuk membangun klaster etcd. + +2. Lakukan pengaturan SSH seperti yang dijelaskan [di sini](#distribusi-sertifikat-manual). + +3. Salin berkas-berkas berikut dari Node etcd manapun pada klaster ke Node _control plane_ pertama: + + ```sh + export CONTROL_PLANE="ubuntu@10.0.0.7" + scp /etc/kubernetes/pki/etcd/ca.crt "${CONTROL_PLANE}": + scp /etc/kubernetes/pki/apiserver-etcd-client.crt "${CONTROL_PLANE}": + scp /etc/kubernetes/pki/apiserver-etcd-client.key "${CONTROL_PLANE}": + ``` + + - Ganti nilai `CONTROL_PLANE` dengan `user@host` dari mesin _control plane_ pertama. + +### Mengatur Node _control plane_ pertama + +1. Buat sebuah berkas bernama `kubeadm-config.yaml` dengan konten sebagai berikut: + + apiVersion: kubeadm.k8s.io/v1beta2 + kind: ClusterConfiguration + kubernetesVersion: stable + controlPlaneEndpoint: "LOAD_BALANCER_DNS:LOAD_BALANCER_PORT" + etcd: + external: + endpoints: + - https://ETCD_0_IP:2379 + - https://ETCD_1_IP:2379 + - https://ETCD_2_IP:2379 + caFile: /etc/kubernetes/pki/etcd/ca.crt + certFile: /etc/kubernetes/pki/apiserver-etcd-client.crt + keyFile: /etc/kubernetes/pki/apiserver-etcd-client.key + + {{< note >}} + Perbedaan antara etcd bertumpuk dan etcd eksternal yaitu etcd eksternal membutuhkan + sebuah berkas konfigurasi dengan _endpoint_ etcd di bawah objek `external`untuk `etcd`. + Pada kasus ini topologi etcd bertumpuk dikelola secara otomatis. + {{< /note >}} + + - Ganti variabel-variabel berikut pada templat konfigurasi dengan nilai yang sesuai untuk klastermu: + + - `LOAD_BALANCER_DNS` + - `LOAD_BALANCER_PORT` + - `ETCD_0_IP` + - `ETCD_1_IP` + - `ETCD_2_IP` + +Langkah-langkah berikut sama dengan pengaturan pada etcd bertumpuk: + +1. Jalankan `sudo kubeadm init --config kubeadm-config.yaml --upload-certs` pada Node ini. + +2. Tulis perintah untuk bergabung yang didapat dari keluaran ke dalam sebuah berkas teks untuk digunakan nanti. + +3. Pasang _plugin_ CNI pilihanmu. Contoh berikut ini untuk Weave Net: + + ```sh + kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" + ``` + +### Langkah selanjutnya untuk Node _control plane_ lainnya + +Langkah-langkah selanjutnya sama untuk pengaturan etcd bertumpuk: + +- Pastikan Node _control plane_ pertama sudah diinisialisasi dengan sempurna. +- Gabungkan setiap Node _control plane_ dengan perintah untuk bergabung yang kamu simpan dalam berkas teks. Direkomendasikan untuk +menggabungkan Node _control plane_ satu persatu. +- Jangan lupakan bahwa kunci dekripsi dari `--certificate-key` akan kadaluarsa setelah dua jam, pada pengaturan semula. 
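Because the `kubeadm-certs` Secret and its decryption key expire after two hours, a control plane node that joins later than that needs the certificates re-uploaded first. The sketch below simply combines the two commands already shown on this page; the endpoint, token, hash, and key are placeholders.

```shell
# On a control plane node that is already part of the cluster:
# re-upload the shared certificates and print a fresh certificate key.
sudo kubeadm init phase upload-certs --upload-certs

# On the node that is joining later (placeholders for endpoint, token, hash, key):
sudo kubeadm join LOAD_BALANCER_DNS:LOAD_BALANCER_PORT \
  --token <token> \
  --discovery-token-ca-cert-hash sha256:<hash> \
  --control-plane \
  --certificate-key <key-printed-by-upload-certs>
```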
+ +## Tugas-tugas umum setelah menyiapkan _control plane_ + +### Menginstal _worker_ + +Node _worker_ bisa digabungkan ke klaster menggunakan perintah yang kamu simpan sebelumnya +dari keluaran perintah `kubeadm init`: + +```sh +sudo kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 +``` + +## Distribusi sertifikat manual + +Jika kamu memilih untuk tidak menggunakan `kubeadm init` dengan opsi `--upload-certs` berarti kamu harus +menyalin sertifikat dari Node _control plane_ utama secara manual ke +Node _control plane_ yang akan bergabung. + +Ada beberapa cara untuk melakukan hal ini. Pada contoh berikut ini kami menggunakan `ssh` dan `scp`: + +SSH dibutuhkan jika kamu ingin mengendalikan seluruh Node dari satu mesin. + +1. Nyalakan ssh-agent pada perangkat utamamu yang memiliki akses ke seluruh Node pada + sistem: + + ``` + eval $(ssh-agent) + ``` + +2. Tambahkan identitas SSH milikmu ke dalam sesi: + + ``` + ssh-add ~/.ssh/path_to_private_key + ``` + +3. Lakukan SSH secara bergantian ke setiap Node untuk memastikan koneksi bekerja dengan baik. + + - Ketika kamu melakukan SSH ke Node, pastikan untuk menambahkan opsi `-A`: + + ``` + ssh -A 10.0.0.7 + ``` + + - Jika kamu menggunakan sudo pada Node, pastikan kamu menyimpan _environment_ yang ada sehingga penerusan SSH + dapat bekerja dengan baik: + + ``` + sudo -E -s + ``` + +4. Setelah mengatur SSH pada seluruh Node kamu harus menjalankan skrip berikut pada Node _control plane_ pertama setelah + menjalankan `kubeadm init`. Skrip ini akan menyalin sertifikat dari Node _control plane_ pertama ke Node + _control plane_ lainnya: + + Pada contoh berikut, ganti `CONTROL_PLANE_IPS` dengan alamat IP dari + Node _control plane_ lainnya. + ```sh + USER=ubuntu # dapat disesuaikan + CONTROL_PLANE_IPS="10.0.0.7 10.0.0.8" + for host in ${CONTROL_PLANE_IPS}; do + scp /etc/kubernetes/pki/ca.crt "${USER}"@$host: + scp /etc/kubernetes/pki/ca.key "${USER}"@$host: + scp /etc/kubernetes/pki/sa.key "${USER}"@$host: + scp /etc/kubernetes/pki/sa.pub "${USER}"@$host: + scp /etc/kubernetes/pki/front-proxy-ca.crt "${USER}"@$host: + scp /etc/kubernetes/pki/front-proxy-ca.key "${USER}"@$host: + scp /etc/kubernetes/pki/etcd/ca.crt "${USER}"@$host:etcd-ca.crt + # Kutip baris berikut jika kamu menggunakan etcd eksternal + scp /etc/kubernetes/pki/etcd/ca.key "${USER}"@$host:etcd-ca.key + done + ``` + + {{< caution >}} + Salinlah hanya sertifikat yang berada pada daftar di atas saja. Perkakas kubeadm akan mengambil alih pembuatan sertifikat lainnya + dengan SANs yang dibutuhkan untuk Node _control plane_ yang akan bergabung. Jika kamu menyalin seluruh sertifikat tanpa sengaja, + pembuatan Node tambahan dapat gagal akibat tidak adanya SANs yang dibutuhkan. + {{< /caution >}} + +5. Lalu, pada setiap Node _control plane_ yang bergabung kamu harus menjalankan skrip berikut sebelum menjalankan `kubeadm join`. 
+ Skrip ini akan memindahkan sertifikat yang telah disalin sebelumnya dari direktori _home_ ke `/etc/kubernetes/pki`: + + ```sh + USER=ubuntu # dapat disesuaikan + mkdir -p /etc/kubernetes/pki/etcd + mv /home/${USER}/ca.crt /etc/kubernetes/pki/ + mv /home/${USER}/ca.key /etc/kubernetes/pki/ + mv /home/${USER}/sa.pub /etc/kubernetes/pki/ + mv /home/${USER}/sa.key /etc/kubernetes/pki/ + mv /home/${USER}/front-proxy-ca.crt /etc/kubernetes/pki/ + mv /home/${USER}/front-proxy-ca.key /etc/kubernetes/pki/ + mv /home/${USER}/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt + # Kutip baris berikut jika kamu menggunakan etcd eksternal + mv /home/${USER}/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key + ``` + diff --git a/content/id/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md b/content/id/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md index c0a9721d81b2b..adcf73db774a1 100644 --- a/content/id/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md +++ b/content/id/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md @@ -11,7 +11,7 @@ card: Laman ini menunjukkan cara untuk menginstal `kubeadm`. -Untuk informasi mengenai cara membuat sebuah klaster dengan kubeadm setelah kamu melakukan proses instalasi ini, lihat laman [Menggunakan kubeadm untuk Membuat Sebuah Klaster](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/). +Untuk informasi mengenai cara membuat sebuah klaster dengan kubeadm setelah kamu melakukan proses instalasi ini, lihat laman [Menggunakan kubeadm untuk Membuat Sebuah Klaster](/id/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/). @@ -132,14 +132,14 @@ Jika ditemukan selain dari kedua _runtime_ Container tersebut, kubeadm akan berh Komponen kubelet berintegrasi dengan Docker melalui implementasi CRI `dockershim` bawaannya. -Lihat [_runtime_ Container](/docs/setup/production-environment/container-runtimes/) +Lihat [_runtime_ Container](/id/docs/setup/production-environment/container-runtimes/) untuk informasi lebih lanjut. {{% /tab %}} {{% tab name="sistem operasi lainnya" %}} Secara bawaan, kubeadm menggunakan {{< glossary_tooltip term_id="docker" >}} sebagai _runtime_ Container. Komponen kubelet berintegrasi dengan Docker melalui implementasi CRI `dockershim` bawaannya. -Lihat [_runtime_ Container](/docs/setup/production-environment/container-runtimes/) +Lihat [_runtime_ Container](/id/docs/setup/production-environment/container-runtimes/) untuk informasi lebih lanjut. {{% /tab %}} {{< /tabs >}} @@ -174,7 +174,7 @@ Hal ini karena kubeadm dan Kubernetes membutuhkan Untuk informasi lebih lanjut mengenai _version skew_, lihat: * [Kebijakan _version-skew_ dan versi Kubernetes](/docs/setup/release/version-skew-policy/) -* [Kebijakan _version skew_](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#version-skew-policy) yang spesifik untuk kubeadm +* [Kebijakan _version skew_](/id/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#version-skew-policy) yang spesifik untuk kubeadm {{< tabs name="k8s_install" >}} {{% tab name="Ubuntu, Debian atau HypriotOS" %}} @@ -264,7 +264,7 @@ systemctl enable --now kubelet Sekarang kubelet akan melakukan _restart_ setiap beberapa detik, sambil menunggu dalam kondisi _crashloop_ sampai kubeadm memberikan instruksi yang harus dilakukan. 
-## Mengonfigurasi _driver_ cgroup yang digunakan oleh kubelet pada Node _control-plane_ +## Mengonfigurasi _driver_ cgroup yang digunakan oleh kubelet pada Node _control-plane_ {#mengonfigurasi-cgroup-untuk-kubelet-pada-node-control-plane} Ketika menggunakan Docker, kubeadm akan mendeteksi secara otomatis _driver_ cgroup untuk kubelet dan mengaturnya pada berkas `/var/lib/kubelet/config.yaml` pada saat _runtime_. @@ -304,4 +304,4 @@ Jika kamu menemui kesulitan dengan kubeadm, silakan merujuk pada [dokumen penyel ## {{% heading "whatsnext" %}} -* [Menggunakan kubeadm untuk Membuat Sebuah Klaster](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) +* [Menggunakan kubeadm untuk Membuat Sebuah Klaster](/id/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) diff --git a/content/id/docs/tasks/access-application-cluster/access-cluster.md b/content/id/docs/tasks/access-application-cluster/access-cluster.md index 148f402402d85..6a575ad8f151d 100644 --- a/content/id/docs/tasks/access-application-cluster/access-cluster.md +++ b/content/id/docs/tasks/access-application-cluster/access-cluster.md @@ -178,7 +178,7 @@ Saat mengakses API dari Pod, pencarian dan autentikasi ke apiserver agak berbeda Cara yang disarankan untuk menemukan apiserver di dalam Pod adalah dengan nama DNS `kubernetes.default.svc`, yang akan mengubah kedalam bentuk Service IP yang pada gilirannya akan dialihkan ke apiserver. -Cara yang disarankan untuk mengautentikasi ke apiserver adalah dengan kredensial [akun servis](/docs/tasks/configure-pod-container/configure-service-account/). +Cara yang disarankan untuk mengautentikasi ke apiserver adalah dengan kredensial [akun servis](/id/docs/tasks/configure-pod-container/configure-service-account/). Oleh kube-system, Pod dikaitkan dengan sebuah akun servis (_service account_), dan sebuah kredensial (token) untuk akun servis (_service account_) tersebut ditempatkan ke pohon sistem berkas (_file system tree_) dari setiap Container di dalam Pod tersebut, di `/var/run/secrets/kubernetes.io/serviceaccount/token`. @@ -317,7 +317,7 @@ Ada beberapa proksi berbeda yang mungkin kamu temui saat menggunakan Kubernetes: - dapat digunakan untuk menjangkau Node, Pod, atau Service - melakukan _load balancing_ saat digunakan untuk menjangkau sebuah Service -1. [kube-proxy](/docs/concepts/services-networking/service/#ips-and-vips): +1. 
[kube-proxy](/id/docs/concepts/services-networking/service/#ips-and-vips): - berjalan di setiap Node - memproksi UDP dan TCP diff --git a/content/id/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md b/content/id/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md index b2b80aacba7a2..8775823304f18 100644 --- a/content/id/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md +++ b/content/id/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md @@ -320,7 +320,7 @@ contexts: ``` Untuk informasi lebih tentang bagaimana berkas Kubeconfig tergabung, lihat -[Mengatur Akses Cluster Menggunakan Berkas Kubeconfig](/docs/concepts/configuration/organize-cluster-access-kubeconfig/) +[Mengatur Akses Cluster Menggunakan Berkas Kubeconfig](/id/docs/concepts/configuration/organize-cluster-access-kubeconfig/) ## Jelajahi direktori $HOME/.kube @@ -372,7 +372,7 @@ $Env:KUBECONFIG=$ENV:KUBECONFIG_SAVED ## {{% heading "whatsnext" %}} -* [Mengatur Akses Cluster Menggunakan Berkas Kubeconfig](/docs/concepts/configuration/organize-cluster-access-kubeconfig/) +* [Mengatur Akses Cluster Menggunakan Berkas Kubeconfig](/id/docs/concepts/configuration/organize-cluster-access-kubeconfig/) * [kubectl config](/docs/reference/generated/kubectl/kubectl-commands#config) diff --git a/content/id/docs/tasks/access-application-cluster/create-external-load-balancer.md b/content/id/docs/tasks/access-application-cluster/create-external-load-balancer.md index 0d306a559a037..d6d04df2add94 100644 --- a/content/id/docs/tasks/access-application-cluster/create-external-load-balancer.md +++ b/content/id/docs/tasks/access-application-cluster/create-external-load-balancer.md @@ -1,11 +1,11 @@ --- title: Membuat Load Balancer Eksternal -content_template: templates/task +content_type: task weight: 80 --- -{{% capture overview %}} + Laman ini menjelaskan bagaimana membuat _Load Balancer_ Eksternal. @@ -19,22 +19,23 @@ _asalkan klaster kamu beroperasi pada lingkungan yang mendukung dan terkonfigura Untuk informasi mengenai penyediaan dan penggunaan sumber daya Ingress yang dapat memberikan servis URL yang dapat dijangkau secara eksternal, penyeimbang beban lalu lintas, terminasi SSL, dll., -silahkan cek dokumentasi [Ingress](/docs/concepts/services-networking/ingress/) +silahkan cek dokumentasi [Ingress](/id/docs/concepts/services-networking/ingress/) -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Berkas konfigurasi Untuk membuat _load balancer_ eksternal, tambahkan baris di bawah ini ke -[berkas konfigurasi Service](/docs/concepts/services-networking/service/#loadbalancer) kamu: +[berkas konfigurasi Service](/id/docs/concepts/services-networking/service/#loadbalancer) kamu: ```yaml type: LoadBalancer @@ -193,4 +194,4 @@ Sekali _load balancer_ eksternal menyediakan bobot, fungsionalitas ini dapat dit Pod internal ke lalu lintas Pod harus berperilaku sama seperti Service ClusterIP, dengan probabilitas yang sama pada seluruh Pod. 
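To make the `type: LoadBalancer` snippet above concrete, here is a minimal, illustrative Service manifest applied via `kubectl`. The Service name, selector, and ports are hypothetical and should be adapted to your application; the cluster must be running on a provider that supports external load balancers for an address to be provisioned.

```shell
# Illustrative sketch: a Service of type LoadBalancer for a hypothetical
# application labelled app=example. Name, selector, and ports are placeholders.
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  name: example-service
spec:
  selector:
    app: example
  ports:
    - port: 80
      targetPort: 8080
  type: LoadBalancer
EOF
```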
-{{% /capture %}} + diff --git a/content/id/docs/tasks/access-application-cluster/list-all-running-container-images.md b/content/id/docs/tasks/access-application-cluster/list-all-running-container-images.md new file mode 100644 index 0000000000000..f2140e527698a --- /dev/null +++ b/content/id/docs/tasks/access-application-cluster/list-all-running-container-images.md @@ -0,0 +1,129 @@ +--- +title: Membuat Daftar Semua Image Container yang Berjalan dalam Klaster +content_type: task +weight: 100 +--- + + + +Laman ini menunjukkan cara menggunakan kubectl untuk membuat daftar semua _image_ Container +untuk Pod yang berjalan dalam sebuah klaster. + + + +## {{% heading "prerequisites" %}} + + +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} + + + + + +Dalam latihan ini kamu akan menggunakan kubectl untuk mengambil semua Pod yang +berjalan dalam sebuah klaster, dan mengubah format keluarannya untuk melihat daftar +Container untuk masing-masing Pod. + +## Membuat daftar semua _image_ Container pada semua Namespace + +- Silakan ambil semua Pod dalam Namespace dengan menggunakan perintah `kubectl get pods --all-namespaces` +- Silakan format keluarannya agar hanya menyertakan daftar nama _image_ dari Container + dengan menggunakan perintah `-o jsonpath={..image}`. Perintah ini akan mem-_parsing field_ + `image` dari keluaran json yang dihasilkan. + - Silakan lihat [referensi jsonpath](/docs/user-guide/jsonpath/) + untuk informasi lebih lanjut tentang cara menggunakan `jsonpath`. +- Silakan format keluaran dengan menggunakan peralatan standar: `tr`, `sort`, `uniq` + - Gunakan `tr` untuk mengganti spasi dengan garis baru + - Gunakan `sort` untuk menyortir hasil + - Gunakan `uniq` untuk mengumpulkan jumlah _image_ + +```sh +kubectl get pods --all-namespaces -o jsonpath="{..image}" |\ +tr -s '[[:space:]]' '\n' |\ +sort |\ +uniq -c +``` + +Perintah di atas secara berulang akan mengembalikan semua _field_ bernama `image` +dari semua poin yang dikembalikan. + +Sebagai pilihan, dimungkinkan juga untuk menggunakan jalur (_path_) absolut ke _field image_ +di dalam Pod. Hal ini memastikan _field_ yang diambil benar +bahkan ketika nama _field_ tersebut diulangi, +misalnya banyak _field_ disebut dengan `name` dalam sebuah poin yang diberikan: + +```sh +kubectl get pods --all-namespaces -o jsonpath="{.items[*].spec.containers[*].image}" +``` + +`Jsonpath` dapat diartikan sebagai berikut: + +- `.items[*]`: untuk setiap nilai yang dihasilkan +- `.spec`: untuk mendapatkan spesifikasi +- `.containers[*]`: untuk setiap Container +- `.image`: untuk mendapatkan _image_ + +{{< note >}} +Pada saat mengambil sebuah Pod berdasarkan namanya, misalnya `kubectl get pod nginx`, +bagian `.items[*]` dari jalur harus dihilangkan karena hanya akan menghasilkan sebuah Pod +sebagai keluarannya, bukan daftar dari semua Pod. + +{{< /note >}} + +## Membuat daftar _image_ Container berdasarkan Pod + +Format dapat dikontrol lebih lanjut dengan menggunakan operasi `range` untuk +melakukan iterasi untuk setiap elemen secara individual. + +```sh +kubectl get pods --all-namespaces -o=jsonpath='{range .items[*]}{"\n"}{.metadata.name}{":\t"}{range .spec.containers[*]}{.image}{", "}{end}{end}' |\ +sort +``` + +## Membuat daftar _image_ yang difilter berdasarkan label dari Pod + +Untuk menargetkan hanya Pod yang cocok dengan label tertentu saja, gunakan tanda -l. Filter +dibawah ini akan menghasilkan Pod dengan label yang cocok dengan `app=nginx`. 
+ +```sh +kubectl get pods --all-namespaces -o=jsonpath="{..image}" -l app=nginx +``` + +## Membuat daftar _image_ Container yang difilter berdasarkan Namespace Pod + +Untuk hanya menargetkan Pod pada Namespace tertentu, gunakankan tanda Namespace. Filter +dibawah ini hanya menyaring Pod pada Namespace `kube-system`. + +```sh +kubectl get pods --namespace kube-system -o jsonpath="{..image}" +``` + +## Membuat daftar _image_ Container dengan menggunakan go-template sebagai alternatif dari jsonpath + +Sebagai alternatif untuk `jsonpath`, kubectl mendukung penggunaan [go-template](https://golang.org/pkg/text/template/) +untuk memformat keluaran seperti berikut: + + +```sh +kubectl get pods --all-namespaces -o go-template --template="{{range .items}}{{range .spec.containers}}{{.image}} {{end}}{{end}}" +``` + + + + + + + + + +## {{% heading "whatsnext" %}} + + +### Referensi + +* Referensi panduan [Jsonpath](/docs/user-guide/jsonpath/). +* Referensi panduan [Go template](https://golang.org/pkg/text/template/). + + + + diff --git a/content/id/docs/tasks/access-application-cluster/web-ui-dashboard.md b/content/id/docs/tasks/access-application-cluster/web-ui-dashboard.md index a83605db40aab..99d23c823deda 100644 --- a/content/id/docs/tasks/access-application-cluster/web-ui-dashboard.md +++ b/content/id/docs/tasks/access-application-cluster/web-ui-dashboard.md @@ -69,17 +69,17 @@ Tekan tombol **CREATE** di pojok kanan atas di laman apapun untuk memulai. _Deploy wizard_ meminta kamu untuk menyediakan informasi sebagai berikut: -- **App name** (wajib): Nama dari aplikasi kamu. Sebuah [label](/docs/concepts/overview/working-with-objects/labels/) dengan nama tersebut akan ditambahkan ke Deployment dan Service, jika ada, akan di-_deploy_. +- **App name** (wajib): Nama dari aplikasi kamu. Sebuah [label](/id/docs/concepts/overview/working-with-objects/labels/) dengan nama tersebut akan ditambahkan ke Deployment dan Service, jika ada, akan di-_deploy_. Nama aplikasi harus unik di dalam [Namespace](/docs/tasks/administer-cluster/namespaces/) Kubernetes yang kamu pilih. Nama tersebut harus dimulai dengan huruf kecil, dan diakhiri dengan huruf kecil atau angka, dan hanya berisi huruf kecil, angka dan tanda hubung (-). Nama tersebut juga dibatasi hanya 24 karakter. Spasi di depan dan belakang nama tersebut diabaikan. -- **Container image** (wajib): Tautan publik dari sebuah [_image_](/docs/concepts/containers/images/) kontainer Docker pada _registry_ apapun, atau sebuah _image_ privat (biasanya di-_hosting_ di Google Container Registry atau Docker Hub). Spesifikasi _image_ kontainer tersebut harus diakhiri dengan titik dua. +- **Container image** (wajib): Tautan publik dari sebuah [_image_](/id/docs/concepts/containers/images/) kontainer Docker pada _registry_ apapun, atau sebuah _image_ privat (biasanya di-_hosting_ di Google Container Registry atau Docker Hub). Spesifikasi _image_ kontainer tersebut harus diakhiri dengan titik dua. - **Number of pods** (wajib): Berapa banyak Pod yang kamu inginkan untuk men-_deploy_ aplikasimu. Nilainya haruslah sebuah bilangan bulat positif. - Sebuah [Deployment](/docs/concepts/workloads/controllers/deployment/) akan terbuat untuk mempertahankan jumlah Pod di klaster kamu. + Sebuah [Deployment](/id/docs/concepts/workloads/controllers/deployment/) akan terbuat untuk mempertahankan jumlah Pod di klaster kamu. 
-- **Service** (opsional): Untuk beberapa aplikasi (misalnya aplikasi _frontend_) kamu mungkin akan mengekspos sebuah [Service](/docs/concepts/services-networking/service/) ke alamat IP publik yang mungkin berada diluar klaster kamu(Service eksternal). Untuk Service eksternal, kamu mungkin perlu membuka lebih dari satu porta jaringan untuk mengeksposnya. Lihat lebih lanjut [di sini](/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/). +- **Service** (opsional): Untuk beberapa aplikasi (misalnya aplikasi _frontend_) kamu mungkin akan mengekspos sebuah [Service](/id/docs/concepts/services-networking/service/) ke alamat IP publik yang mungkin berada diluar klaster kamu(Service eksternal). Untuk Service eksternal, kamu mungkin perlu membuka lebih dari satu porta jaringan untuk mengeksposnya. Lihat lebih lanjut [di sini](/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/). Service lainnya yang hanya dapat diakses dari dalam klaster disebut Service internal. @@ -87,9 +87,9 @@ _Deploy wizard_ meminta kamu untuk menyediakan informasi sebagai berikut: Jika membutuhkan, kamu dapat membuka bagian **Advanced options** di mana kamu dapat menyetel lebih banyak pengaturan: -- **Description**: Tels yang kamu masukkan ke sini akan ditambahkan sebagai sebuah [anotasi](/docs/concepts/overview/working-with-objects/annotations/) ke Deployment dan akan ditampilkan di detail aplikasi. +- **Description**: Tels yang kamu masukkan ke sini akan ditambahkan sebagai sebuah [anotasi](/id/docs/concepts/overview/working-with-objects/annotations/) ke Deployment dan akan ditampilkan di detail aplikasi. -- **Labels**: [Label-label](/docs/concepts/overview/working-with-objects/labels/) bawaan yang akan digunakan untuk aplikasi kamu adalah `name` dan `version` aplikasi. Kamu dapat menentukan label lain untuk diterapkan ke Deployment, Service (jika ada), dan Pod, seperti `release`, `environment`, `tier`, `partition`, dan `track` rilis. +- **Labels**: [Label-label](/id/docs/concepts/overview/working-with-objects/labels/) bawaan yang akan digunakan untuk aplikasi kamu adalah `name` dan `version` aplikasi. Kamu dapat menentukan label lain untuk diterapkan ke Deployment, Service (jika ada), dan Pod, seperti `release`, `environment`, `tier`, `partition`, dan `track` rilis. Contoh: @@ -107,9 +107,9 @@ track=stable Jika pembuatan Namespace berhasil, Namespace tersebut akan dipilih secara bawaan. Jika pembuatannya gagal, maka Namespace yang pertama akan terpilih. -- **_Image Pull Secret_**: Jika kamu menggunakan _image_ kontainer Docker yang privat, mungkin diperlukan kredensial [_pull secret_](/docs/concepts/configuration/secret/). +- **_Image Pull Secret_**: Jika kamu menggunakan _image_ kontainer Docker yang privat, mungkin diperlukan kredensial [_pull secret_](/id/docs/concepts/configuration/secret/). - Dashboard menampilkan semua _secret_ yang tersedia dengan daftar _dropdown_, dan mengizinkan kamu untuk membuat _secret_ baru. Nama _secret_ tersebut harus mengikuti aturan Nama DNS, misalnya `new.image-pull.secret`. Isi dari sebuah _secret_ harus dienkode dalam bentuk _base64_ dan ditentukan dalam sebuah berkas [`.dockercfg`](/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod). Nama kredensial dapat berisi maksimal 253 karakter. + Dashboard menampilkan semua _secret_ yang tersedia dengan daftar _dropdown_, dan mengizinkan kamu untuk membuat _secret_ baru. Nama _secret_ tersebut harus mengikuti aturan Nama DNS, misalnya `new.image-pull.secret`. 
Isi dari sebuah _secret_ harus dienkode dalam bentuk _base64_ dan ditentukan dalam sebuah berkas [`.dockercfg`](/id/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod). Nama kredensial dapat berisi maksimal 253 karakter. Jika pembuatan _image pull secret_ berhasil, _image pull secret_ tersebut akan terpilih secara bawaan. Jika gagal, maka tidak ada _secret_ yang dipilih. @@ -123,7 +123,7 @@ track=stable ### Menggungah berkas YAML atau JSON -Kubernetes mendukung pengaturan deklaratif. Dengan cara ini, semua pengaturan disimpan dalam bentuk berkas YAML atau JSON menggunakan skema sumber daya [[API](/docs/concepts/overview/kubernetes-api/). +Kubernetes mendukung pengaturan deklaratif. Dengan cara ini, semua pengaturan disimpan dalam bentuk berkas YAML atau JSON menggunakan skema sumber daya [[API](/id/docs/concepts/overview/kubernetes-api/). Sebagai alternatif untuk menentukan detail aplikasi di _deploy wizard_, kamu dapat menentukan sendiri detail aplikasi kamu dalam berkas YAML atau JSON, dan mengunggah berkas tersebut menggunakan Dashboard. diff --git a/content/id/docs/tasks/administer-cluster/highly-available-master.md b/content/id/docs/tasks/administer-cluster/highly-available-master.md new file mode 100644 index 0000000000000..0b2ebea7fe1ca --- /dev/null +++ b/content/id/docs/tasks/administer-cluster/highly-available-master.md @@ -0,0 +1,177 @@ +--- +title: Mengatur Control Plane Kubernetes dengan Ketersediaan Tinggi (High-Availability) +content_type: task +--- + + + +{{< feature-state for_k8s_version="v1.5" state="alpha" >}} + +Kamu dapat mereplikasi _control plane_ Kubernetes dalam skrip `kube-up` atau `kube-down` untuk Google Compute Engine (GCE). +Dokumen ini menjelaskan cara menggunakan skrip kube-up/down untuk mengelola _control plane_ dengan ketersedian tinggi atau _high_availability_ (HA) dan bagaimana _control plane_ HA diimplementasikan untuk digunakan dalam GCE. + + + +## {{% heading "prerequisites" %}} + + +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} + + + + + +## Memulai klaster yang kompatibel dengan HA + +Untuk membuat klaster yang kompatibel dengan HA, kamu harus mengatur tanda ini pada skrip `kube-up`: + +* `MULTIZONE=true` - untuk mencegah penghapusan replika _control plane_ kubelet dari zona yang berbeda dengan zona bawaan server. +Ini diperlukan jika kamu ingin menjalankan replika _control plane_ pada zona berbeda, dimana hal ini disarankan. + +* `ENABLE_ETCD_QUORUM_READ=true` - untuk memastikan bahwa pembacaan dari semua server API akan mengembalikan data terbaru. +Jika `true`, bacaan akan diarahkan ke replika pemimpin dari etcd. +Menetapkan nilai ini menjadi `true` bersifat opsional: pembacaan akan lebih dapat diandalkan tetapi juga akan menjadi lebih lambat. + +Sebagai pilihan, kamu dapat menentukan zona GCE tempat dimana replika _control plane_ pertama akan dibuat. +Atur tanda berikut: + +* `KUBE_GCE_ZONE=zone` - zona tempat di mana replika _control plane_ pertama akan berjalan. + +Berikut ini contoh perintah untuk mengatur klaster yang kompatibel dengan HA pada zona GCE europe-west1-b: + +```shell +MULTIZONE=true KUBE_GCE_ZONE=europe-west1-b ENABLE_ETCD_QUORUM_READS=true ./cluster/kube-up.sh +``` + +Perhatikan bahwa perintah di atas digunakan untuk membuat klaster dengan sebuah _control plane_; +Namun, kamu bisa menambahkan replika _control plane_ baru ke klaster dengan perintah berikutnya. 
+ + +## Menambahkan replika _control plane_ yang baru + +Setelah kamu membuat klaster yang kompatibel dengan HA, kamu bisa menambahkan replika _control plane_ ke sana. +Kamu bisa menambahkan replika _control plane_ dengan menggunakan skrip `kube-up` dengan tanda berikut ini: + +* `KUBE_REPLICATE_EXISTING_MASTER=true` - untuk membuat replika dari _control plane_ yang sudah ada. + +* `KUBE_GCE_ZONE=zone` - zona di mana replika _control plane_ itu berjalan. +Region ini harus sama dengan region dari zona replika yang lain. + +Kamu tidak perlu mengatur tanda `MULTIZONE` atau `ENABLE_ETCD_QUORUM_READS`, +karena tanda itu diturunkan pada saat kamu memulai klaster yang kompatible dengan HA. + +Berikut ini contoh perintah untuk mereplikasi _control plane_ pada klaster sebelumnya yang kompatibel dengan HA: + +```shell +KUBE_GCE_ZONE=europe-west1-c KUBE_REPLICATE_EXISTING_MASTER=true ./cluster/kube-up.sh +``` + +## Menghapus replika _control plane_ + +Kamu dapat menghapus replika _control plane_ dari klaster HA dengan menggunakan skrip `kube-down` dengan tanda berikut: + +* `KUBE_DELETE_NODES=false` - untuk mencegah penghapusan kubelet. + +* `KUBE_GCE_ZONE=zone` - zona di mana replika _control plane_ akan dihapus. + +* `KUBE_REPLICA_NAME=replica_name` - (opsional) nama replika _control plane_ yang akan dihapus. +Jika kosong: replika mana saja dari zona yang diberikan akan dihapus. + +Berikut ini contoh perintah untuk menghapus replika _control plane_ dari klaster HA yang sudah ada sebelumnya: + +```shell +KUBE_DELETE_NODES=false KUBE_GCE_ZONE=europe-west1-c ./cluster/kube-down.sh +``` + +## Mengatasi replika _control plane_ yang gagal + +Jika salah satu replika _control plane_ di klaster HA kamu gagal, +praktek terbaik adalah menghapus replika dari klaster kamu dan menambahkan replika baru pada zona yang sama. +Berikut ini contoh perintah yang menunjukkan proses tersebut: + +1. Menghapus replika yang gagal: + +```shell +KUBE_DELETE_NODES=false KUBE_GCE_ZONE=replica_zone KUBE_REPLICA_NAME=replica_name ./cluster/kube-down.sh +``` + +2. Menambahkan replika baru untuk menggantikan replika yang lama + +```shell +KUBE_GCE_ZONE=replica-zone KUBE_REPLICATE_EXISTING_MASTER=true ./cluster/kube-up.sh +``` + +## Praktek terbaik untuk mereplikasi _control plane_ untuk klaster HA + +* Usahakan untuk menempatkan replika _control plane_ pada zona yang berbeda. Pada saat terjadi kegagalan zona, semua _control plane_ yang ditempatkan dalam zona tersebut akan gagal pula. +Untuk bertahan dari kegagalan pada sebuah zona, tempatkan juga Node pada beberapa zona yang lain +(Lihatlah [multi-zona](/id/docs/setup/best-practices/multiple-zones/) untuk lebih detail). + +* Jangan gunakan klaster dengan dua replika _control plane_. Konsensus pada klaster dengan dua replika membutuhkan kedua replika tersebut berjalan pada saat mengubah keadaan yang persisten. +Akibatnya, kedua replika tersebut diperlukan dan kegagalan salah satu replika mana pun mengubah klaster dalam status kegagalan mayoritas. +Dengan demikian klaster dengan dua replika lebih buruk, dalam hal HA, daripada klaster dengan replika tunggal. + +* Ketika kamu menambahkan sebuah replika _control plane_, status klaster (etcd) disalin ke sebuah _instance_ baru. +Jika klaster itu besar, mungkin butuh waktu yang lama untuk menduplikasi keadaannya. 
+Operasi ini dapat dipercepat dengan memigrasi direktori data etcd, seperti yang dijelaskan [di sini](https://coreos.com/etcd/docs/latest/admin_guide.html#member-migration) +(Kami sedang mempertimbangkan untuk menambahkan dukungan untuk migrasi direktori data etcd di masa mendatang). + + + + + +## Catatan implementasi + +![ha-master-gce](/images/docs/ha-master-gce.png) + +### Ikhtisar + +Setiap replika _control plane_ akan menjalankan komponen berikut dalam mode berikut: + +* _instance_ etcd: semua _instance_ akan dikelompokkan bersama menggunakan konsensus; + +* server API : setiap server akan berbicara dengan lokal etcd - semua server API pada cluster akan tersedia; + +* pengontrol (_controller_), penjadwal (_scheduler_), dan _scaler_ klaster automatis: akan menggunakan mekanisme sewa - dimana hanya satu _instance_ dari masing-masing mereka yang akan aktif dalam klaster; + +* manajer tambahan (_add-on_): setiap manajer akan bekerja secara independen untuk mencoba menjaga tambahan dalam sinkronisasi. + +Selain itu, akan ada penyeimbang beban (_load balancer_) di depan server API yang akan mengarahkan lalu lintas eksternal dan internal menuju mereka. + + +### Penyeimbang Beban + +Saat memulai replika _control plane_ kedua, penyeimbang beban yang berisi dua replika akan dibuat +dan alamat IP dari replika pertama akan dipromosikan ke alamat IP penyeimbang beban. +Demikian pula, setelah penghapusan replika _control plane_ kedua yang dimulai dari paling akhir, penyeimbang beban akan dihapus dan alamat IP-nya akan diberikan ke replika terakhir yang ada. +Mohon perhatikan bahwa pembuatan dan penghapusan penyeimbang beban adalah operasi yang rumit dan mungkin perlu beberapa waktu (~20 menit) untuk dipropagasikan. + + +### Service _control plane_ & kubelet + +Daripada sistem mencoba untuk menjaga daftar terbaru dari apiserver Kubernetes yang ada dalam Service Kubernetes, +sistem akan mengarahkan semua lalu lintas ke IP eksternal: + +* dalam klaster dengan satu _control plane_, IP diarahkan ke _control plane_ tunggal. + +* dalam klaster dengan multiple _control plane_, IP diarahkan ke penyeimbang beban yang ada di depan _control plane_. + +Demikian pula, IP eksternal akan digunakan oleh kubelet untuk berkomunikasi dengan _control plane_. + + +### Sertifikat _control plane_ + +Kubernetes menghasilkan sertifikat TLS _control plane_ untuk IP publik eksternal dan IP lokal untuk setiap replika. +Tidak ada sertifikat untuk IP publik sementara (_ephemeral_) dari replika; +Untuk mengakses replika melalui IP publik sementara, kamu harus melewatkan verifikasi TLS. + +### Pengklasteran etcd + +Untuk mengizinkan pengelompokkan etcd, porta yang diperlukan untuk berkomunikasi antara _instance_ etcd akan dibuka (untuk komunikasi dalam klaster). +Untuk membuat penyebaran itu aman, komunikasi antara _instance_ etcd diotorisasi menggunakan SSL. + +## Bacaan tambahan + +[Dokumen desain - Penyebaran master HA automatis](https://git.k8s.io/community/contributors/design-proposals/cluster-lifecycle/ha_master.md) + + diff --git a/content/id/docs/tasks/administer-cluster/namespaces.md b/content/id/docs/tasks/administer-cluster/namespaces.md new file mode 100644 index 0000000000000..409e42dd7046d --- /dev/null +++ b/content/id/docs/tasks/administer-cluster/namespaces.md @@ -0,0 +1,302 @@ +--- +title: Membagi sebuah Klaster dengan Namespace +content_type: task +--- + + +Laman ini menunjukkan bagaimana cara melihat, menggunakan dan menghapus {{< glossary_tooltip text="namespaces" term_id="namespace" >}}. 
Laman ini juga menunjukkan bagaimana cara menggunakan Namespace Kubernetes namespaces untuk membagi klaster kamu. + + +## {{% heading "prerequisites" %}} + +* Memiliki [Klaster Kubernetes](/id/docs/setup/). +* Memiliki pemahaman dasar [_Pod_](/id/docs/concepts/workloads/pods/pod/), [_Service_](/id/docs/concepts/services-networking/service/), dan [_Deployment_](/id/docs/concepts/workloads/controllers/deployment/) dalam Kubernetes. + + + + +## Melihat Namespace + +1. Untuk melihat Namespace yang ada saat ini pada sebuah klaster anda bisa menggunakan: + +```shell +kubectl get namespaces +``` +``` +NAME STATUS AGE +default Active 11d +kube-system Active 11d +kube-public Active 11d +``` + +Kubernetes mulai dengan tiga Namespace pertama: + + * `default` Namespace bawaan untuk objek-objek yang belum terkait dengan Namespace lain + * `kube-system` Namespace untuk objek-objek yang dibuat oleh sistem Kubernetes + * `kube-public` Namespace ini dibuat secara otomatis dan dapat dibaca oleh seluruh pengguna (termasuk yang tidak terotentikasi). Namespace ini sering dicadangkan untuk kepentingan klaster, untuk kasus dimana beberapa sumber daya seharusnya dapat terlihat dan dapat terlihat secara publik di seluruh klaster. Aspek publik pada Namespace ini hanya sebuah konvensi bukan suatu kebutuhan. + +Kamu bisa mendapat ringkasan Namespace tertentu dengan menggunakan: + +```shell +kubectl get namespaces +``` + +Atau kamu bisa mendapatkan informasi detail menggunakan: + +```shell +kubectl describe namespaces +``` +``` +Name: default +Labels: +Annotations: +Status: Active + +No resource quota. + +Resource Limits + Type Resource Min Max Default + ---- -------- --- --- --- + Container cpu - - 100m +``` + +Sebagai catatan, detail diatas menunjukkan baik kuota sumber daya (jika ada) dan juga jangkauan batas sumber daya. + +Kuota sumber daya melacak penggunaan total sumber daya didalam Namespace dan mengijinkan operator-operator klaster mendefinisikan batas atas penggunaan sumber daya yang dapat di gunakan sebuah Namespace. + +Jangkauan batas mendefinisikan pertimbangan min/maks jumlah sumber daya yang dapat di gunakan oleh sebuah entitas dalam sebuah Namespace. + +Lihatlah [Kontrol Admisi: Rentang Batas](https://git.k8s.io/community/contributors/design-proposals/resource-management/admission_control_limit_range.md) + +Sebuah Namespace dapat berada dalam salah satu dari dua buah fase: + + * `Active` Namespace sedang digunakan + * `Terminating` Namespace sedang dihapus dan tidak dapat digunakan untuk objek-objek baru + +Lihat [dokumentasi desain](https://git.k8s.io/community/contributors/design-proposals/architecture/namespaces.md#phases) untuk detil lebih lanjut. + +## Membuat sebuah Namespace baru + +{{< note >}} + Hindari membuat Namespace dengan awalan `kube-`, karena awalan ini dicadangkan untuk Namespace dari sistem Kubernetes. +{{< /note >}} + +1. Buat berkas YAML baru dengan nama `my-namespace.yaml` dengan isi berikut ini: + + ```yaml + apiVersion: v1 + kind: Namespace + metadata: + name: + ``` + Then run: + + ``` + kubectl create -f ./my-namespace.yaml + ``` + +2. Sebagai alternatif, kamu bisa membuat Namespace menggunakan perintah dibawah ini: + + ``` + kubectl create namespace + ``` + +Nama Namespace kamu harus merupakan +[Label DNS](/docs/concepts/overview/working-with-objects/names#dns-label-names) yang valid. + +Ada kolom opsional `finalizers`, yang memungkinkan _observables_ untuk membersihkan sumber daya ketika Namespace dihapus. 
Ingat bahwa jika kamu memberikan finalizer yang tidak ada, Namespace akan dibuat tapi akan berhenti pada status `Terminating` jika pengguna mencoba untuk menghapusnya. + +Informasi lebih lanjut mengenai `finalizers` bisa dibaca pada [dokumentasi desain](https://git.k8s.io/community/contributors/design-proposals/architecture/namespaces.md#finalizers) dari Namespace. + +## Menghapus Namespace + +Hapus Namespace dengan + +```shell +kubectl delete namespaces +``` + +{{< warning >}} +Ini akan menghapus semua hal yang ada dalam Namespace! +{{< /warning >}} + +Proses penghapusan ini asinkron, jadi untuk beberapa waktu kamu akan melihat Namespace dalam status `Terminating`. + +## Membagi klaster kamu menggunakan Namespace Kubernetes + +1. Pahami Namespace bawaan + + Secara bawaan, sebuah klaster Kubernetes akan membuat Namespace bawaan ketika menyediakan klaster untuk menampung Pod, Service, dan Deployment yang digunakan oleh klaster. + + Dengan asumsi kamu memiliki klaster baru, kamu bisa mengecek Namespace yang tersedia dengan melakukan hal berikut: + + ```shell + kubectl get namespaces + ``` + ``` + NAME STATUS AGE + default Active 13m + ``` + +2. Membuat Namespace baru + + Untuk latihan ini, kita akan membuat dua Namespace Kubernetes tambahan untuk menyimpan konten kita + + Dalam sebuah skenario dimana sebuah organisasi menggunakan klaster Kubernetes yang digunakan bersama untuk penggunaan pengembangan dan produksi: + + Tim pengembang ingin mengelola ruang di dalam klaster dimana mereka bisa melihat daftar Pod, Service, dan Deployment yang digunakan untuk membangun dan menjalankan apliksi mereka. Di ruang ini sumber daya akan datang dan pergi, dan pembatasan yang tidak ketat mengenai siapa yang bisa atau tidak bisa memodifikasi sumber daya untuk mendukung pengembangan secara gesit (_agile_). + + Tim operasi ingin mengelola ruang didalam klaster dimana mereka bisa memaksakan prosedur ketat mengenai siapa yang bisa atau tidak bisa melakukan manipulasi pada kumpulan Pod, Layanan, dan Deployment yang berjalan pada situs produksi. + + Satu pola yang bisa diikuti organisasi ini adalah dengan membagi klaster Kubernetes menjadi dua Namespace: `development` dan `production` + + Mari kita buat dua Namespace untuk menyimpan hasil kerja kita. + + Buat Namespace `development` menggunakan kubectl: + + ```shell + kubectl create -f https://k8s.io/examples/admin/namespace-dev.json + ``` + + Kemudian mari kita buat Namespace `production` menggunakan kubectl: + + ```shell + kubectl create -f https://k8s.io/examples/admin/namespace-prod.json + ``` + + Untuk memastikan apa yang kita lakukan benar, lihat seluruh Namespace dalam klaster. + + ```shell + kubectl get namespaces --show-labels + ``` + ``` + NAME STATUS AGE LABELS + default Active 32m + development Active 29s name=development + production Active 23s name=production + ``` + +3. Buat pod pada setiap Namespace + + Sebuah Namespace Kubernetes memberikan batasan untuk Pod, Service, dan Deployment dalam klaster. + + Pengguna yang berinteraksi dengan salah satu Namespace tidak melihat konten di dalam Namespace lain + + Untuk menunjukkan hal ini, mari kita jalankan Deployment dan Pod sederhana di dalam Namespace `development`. 
+ + ```shell + kubectl create deployment snowflake --image=k8s.gcr.io/serve_hostname -n=development + kubectl scale deployment snowflake --replicas=2 -n=development + ``` + Kita baru aja membuat sebuah Deployment yang memiliki ukuran replika dua yang menjalankan Pod dengan nama `snowflake` dengan sebuah Container dasar yang hanya melayani _hostname_. + + + ```shell + kubectl get deployment -n=development + ``` + ``` + NAME READY UP-TO-DATE AVAILABLE AGE + snowflake 2/2 2 2 2m + ``` + ```shell + kubectl get pods -l app=snowflake -n=development + ``` + ``` + NAME READY STATUS RESTARTS AGE + snowflake-3968820950-9dgr8 1/1 Running 0 2m + snowflake-3968820950-vgc4n 1/1 Running 0 2m + ``` + + Dan ini merupakan sesuatu yang bagus, dimana pengembang bisa melakukan hal yang ingin mereka lakukan tanpa harus khawatir hal itu akan mempengaruhi konten pada namespace `production`. + + Mari kita pindah ke Namespace `production` dan menujukkan bagaimana sumber daya di satu Namespace disembunyikan dari yang lain + + Namespace `production` seharusnya kosong, dan perintah berikut ini seharusnya tidak menghasilkan apapun. + + ```shell + kubectl get deployment -n=production + kubectl get pods -n=production + ``` + + `Production` Namespace ingin menjalankan `cattle`, mari kita buat beberapa Pod `cattle`. + + ```shell + kubectl create deployment cattle --image=k8s.gcr.io/serve_hostname -n=production + kubectl scale deployment cattle --replicas=5 -n=production + + kubectl get deployment -n=production + ``` + ``` + NAME READY UP-TO-DATE AVAILABLE AGE + cattle 5/5 5 5 10s + ``` + + ```shell + kubectl get pods -l app=cattle -n=production + ``` + ``` + NAME READY STATUS RESTARTS AGE + cattle-2263376956-41xy6 1/1 Running 0 34s + cattle-2263376956-kw466 1/1 Running 0 34s + cattle-2263376956-n4v97 1/1 Running 0 34s + cattle-2263376956-p5p3i 1/1 Running 0 34s + cattle-2263376956-sxpth 1/1 Running 0 34s + ``` + +Sampai sini, seharusnya sudah jelas bahwa sumber daya yang dibuat pengguna pada sebuah Namespace disembunyikan dari Namespace lainnya. + +Seiring dengan evolusi dukungan kebijakan di Kubernetes, kami akan memperluas skenario ini untuk menunjukkan bagaimana kamu bisa menyediakan aturan otorisasi yang berbeda untuk tiap Namespace. + + + + +## Memahami motivasi penggunaan Namespace + +Sebuah klaster tunggal umumnya bisa memenuhi kebutuhan pengguna yang berbeda atau kelompok pengguna (itulah sebabnya disebut 'komunitas pengguna'). + +Namespace Kubernetes membantu proyek-proyek, tim-tim dan pelanggan yang berbeda untuk berbagi klaster Kubernetes. + +Ini dilakukan dengan menyediakan hal berikut: + +1. Cakupan untuk [Names](/id/docs/concepts/overview/working-with-objects/names/). +2. Sebuah mekanisme untuk memasang otorisasi dan kebijakan untuk bagian dari klaster. + +Penggunaan Namespace berbeda merupakan hal opsional. + +Tiap komunitas pengguna ingin bisa bekerja secara terisolasi dari komunitas lainnya. + +Tiap komunitas pengguna memiliki hal berikut sendiri: + +1. sumber daya (Pod, Service, ReplicationController, dll.) +2. kebijakan (siapa yang bisa atau tidak bisa melakukan hal tertentu dalam komunitasnya) +3. batasan (komunitas ini diberi kuota sekian, dll.) + +Seorang operator klaster dapat membuat sebuah Namespace untuk tiap komunitas user yang unik. + +Namespace tersebut memberikan cakupan yang unik untuk: + +1. penamaan sumber daya (untuk menghindari benturan penamaan dasar) +2. pendelegasian otoritas pengelolaan untuk pengguna yang dapat dipercaya +3. 
kemampuan untuk membatasi konsumsi sumber daya komunitas
+
+Contoh penggunaan mencakup:
+
+1. Sebagai operator klaster, aku ingin mendukung beberapa komunitas pengguna dalam sebuah klaster.
+2. Sebagai operator klaster, aku ingin mendelegasikan otoritas untuk mempartisi klaster ke pengguna terpercaya di komunitasnya.
+3. Sebagai operator klaster, aku ingin membatasi jumlah sumber daya yang bisa dikonsumsi komunitas dalam rangka membatasi dampak ke komunitas lain yang menggunakan klaster yang sama.
+4. Sebagai pengguna klaster, aku ingin berinteraksi dengan sumber daya yang berkaitan dengan komunitas pengguna saya secara terisolasi dari apa yang dilakukan komunitas lain di klaster yang sama.
+
+## Memahami Namespace dan DNS
+
+Ketika kamu membuat sebuah [Service](/docs/concepts/services-networking/service/), akan terbentuk [entri DNS](/id/docs/concepts/services-networking/dns-pod-service/) untuk Service tersebut.
+Entri DNS ini dalam bentuk `<nama-service>.<nama-namespace>.svc.cluster.local`, yang berarti jika sebuah Container hanya menggunakan `<nama-service>` maka dia akan me-_resolve_ ke Service yang lokal dalam Namespace yang sama. Ini berguna untuk menggunakan konfigurasi yang sama pada Namespace yang berbeda seperti _Development_, _Staging_ dan _Production_. Jika kamu ingin menjangkau antar Namespace, kamu harus menggunakan _fully qualified domain name_ (FQDN).
+
+
+
+## {{% heading "whatsnext" %}}
+
+* Pelajari lebih lanjut mengenai [pengaturan preferensi Namespace](/id/docs/concepts/overview/working-with-objects/namespaces/#pengaturan-preferensi-namespace).
+* Pelajari lebih lanjut mengenai [pengaturan namespace untuk sebuah permintaan](/id/docs/concepts/overview/working-with-objects/namespaces/#pengaturan-namespace-untuk-sebuah-permintaan).
+* Baca [desain Namespace](https://github.com/kubernetes/community/blob/{{< param "githubbranch" >}}/contributors/design-proposals/architecture/namespaces.md).
+
+
diff --git a/content/id/docs/tasks/administer-cluster/network-policy-provider/calico-network-policy.md b/content/id/docs/tasks/administer-cluster/network-policy-provider/calico-network-policy.md
new file mode 100644
index 0000000000000..9eb79e7676942
--- /dev/null
+++ b/content/id/docs/tasks/administer-cluster/network-policy-provider/calico-network-policy.md
@@ -0,0 +1,52 @@
+---
+title: Menggunakan Calico untuk NetworkPolicy
+content_type: task
+weight: 10
+---
+
+
+Laman ini menunjukkan beberapa cara cepat untuk membuat klaster Calico pada Kubernetes.
+
+
+## {{% heading "prerequisites" %}}
+
+Putuskan apakah kamu ingin menggelar (_deploy_) sebuah klaster di [_cloud_](#membuat-klaster-calico-menggunakan-google-kubernetes-engine-gke) atau di [lokal](#membuat-klaster-calico-dengan-kubeadm).
+
+
+
+## Membuat klaster Calico dengan menggunakan _Google Kubernetes Engine_ (GKE) {#membuat-klaster-calico-menggunakan-google-kubernetes-engine-gke}
+
+**Prasyarat**: [gcloud](https://cloud.google.com/sdk/docs/quickstarts).
+
+1. Untuk meluncurkan klaster GKE dengan Calico, cukup sertakan opsi `--enable-network-policy`.
+
+    **Sintaksis**
+    ```shell
+    gcloud container clusters create [CLUSTER_NAME] --enable-network-policy
+    ```
+
+    **Contoh**
+    ```shell
+    gcloud container clusters create my-calico-cluster --enable-network-policy
+    ```
+
+2. Untuk memverifikasi penggelaran, gunakanlah perintah berikut ini.
+
+    ```shell
+    kubectl get pods --namespace=kube-system
+    ```
+
+    Pod Calico dimulai dengan kata `calico`. Periksa untuk memastikan bahwa statusnya `Running`.
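Sebagai ilustrasi tambahan (bukan langkah yang wajib), kamu dapat menyaring keluaran tersebut agar hanya menampilkan Pod Calico, misalnya dengan `grep`:

```shell
# Menampilkan hanya Pod di kube-system yang namanya mengandung kata "calico"
kubectl get pods --namespace=kube-system | grep calico
```

Seluruh Pod yang tampil diharapkan berada pada status `Running`.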
+
+## Membuat klaster lokal Calico dengan kubeadm {#membuat-klaster-calico-dengan-kubeadm}
+
+Untuk membuat satu klaster Calico dengan _host_ tunggal dalam waktu lima belas menit dengan menggunakan kubeadm, silakan merujuk pada
+
+[Memulai cepat Calico](https://docs.projectcalico.org/latest/getting-started/kubernetes/).
+
+
+## {{% heading "whatsnext" %}}
+
+Setelah klaster kamu berjalan, kamu dapat mengikuti [Mendeklarasikan Kebijakan Jaringan](/id/docs/tasks/administer-cluster/declare-network-policy/) untuk mencoba NetworkPolicy Kubernetes.
+
+
diff --git a/content/id/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity.md b/content/id/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity.md
new file mode 100644
index 0000000000000..a60d862fd26c2
--- /dev/null
+++ b/content/id/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity.md
@@ -0,0 +1,119 @@
+---
+title: Menempatkan Pod pada Node Menggunakan Afinitas Node
+min-kubernetes-server-version: v1.10
+content_type: task
+weight: 120
+---
+
+
+Dokumen ini menunjukkan cara menempatkan Pod Kubernetes pada sebuah Node menggunakan
+Afinitas Node di dalam klaster Kubernetes.
+
+
+## {{% heading "prerequisites" %}}
+
+
+{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
+
+
+
+
+
+## Menambahkan sebuah Label pada sebuah Node
+
+1. Tampilkan Node-Node yang ada pada klaster kamu, beserta label yang dimilikinya:
+
+    ```shell
+    kubectl get nodes --show-labels
+    ```
+    Keluaran dari perintah tersebut akan berupa:
+
+    ```shell
+    NAME      STATUS    ROLES    AGE     VERSION   LABELS
+    worker0   Ready     <none>   1d      v1.13.0   ...,kubernetes.io/hostname=worker0
+    worker1   Ready     <none>   1d      v1.13.0   ...,kubernetes.io/hostname=worker1
+    worker2   Ready     <none>   1d      v1.13.0   ...,kubernetes.io/hostname=worker2
+    ```
+1. Pilih salah satu dari Node yang ada dan tambahkan label pada Node tersebut.
+
+    ```shell
+    kubectl label nodes <nama-node> disktype=ssd
+    ```
+    dimana `<nama-node>` merupakan nama dari Node yang kamu pilih.
+
+1. Verifikasi bahwa Node yang kamu pilih sudah memiliki label `disktype=ssd`:
+
+    ```shell
+    kubectl get nodes --show-labels
+    ```
+
+    Keluaran dari perintah tersebut akan berupa:
+
+    ```
+    NAME      STATUS    ROLES    AGE     VERSION   LABELS
+    worker0   Ready     <none>   1d      v1.13.0   ...,disktype=ssd,kubernetes.io/hostname=worker0
+    worker1   Ready     <none>   1d      v1.13.0   ...,kubernetes.io/hostname=worker1
+    worker2   Ready     <none>   1d      v1.13.0   ...,kubernetes.io/hostname=worker2
+    ```
+
+    Pada keluaran dari perintah di atas, kamu dapat melihat bahwa Node `worker0`
+    memiliki label `disktype=ssd`.
+
+## Menjadwalkan Pod menggunakan Afinitas Node
+
+Konfigurasi ini menunjukkan sebuah Pod yang memiliki afinitas node `requiredDuringSchedulingIgnoredDuringExecution`, `disktype: ssd`.
+Dengan kata lain, Pod hanya akan dijadwalkan pada Node yang memiliki label `disktype=ssd`.
+
+{{< codenew file="pods/pod-nginx-required-affinity.yaml" >}}
+
+1. Terapkan konfigurasi berikut untuk membuat sebuah Pod yang akan dijadwalkan pada Node yang kamu pilih:
+
+    ```shell
+    kubectl apply -f https://k8s.io/examples/pods/pod-nginx-required-affinity.yaml
+    ```
+
+1.
Verifikasi apakah Pod yang kamu pilih sudah dijalankan pada Node yang kamu pilih: + + ```shell + kubectl get pods --output=wide + ``` + + Keluaran dari perintah tersebut akan berupa: + + ``` + NAME READY STATUS RESTARTS AGE IP NODE + nginx 1/1 Running 0 13s 10.200.0.4 worker0 + ``` + +## Jadwalkan Pod menggunakan Afinitas Node yang Dipilih + +Konfigurasi ini memberikan deskripsi sebuah Pod yang memiliki afinitas Node `preferredDuringSchedulingIgnoredDuringExecution`,`disktype: ssd`. +Artinya Pod akan diutamakan dijalankan pada Node yang memiliki label `disktype=ssd`. + +{{< codenew file="pods/pod-nginx-preferred-affinity.yaml" >}} + +1. Terapkan konfigurasi berikut untuk membuat sebuah Pod yang akan dijadwalkan pada Node yang kamu pilih: + + ```shell + kubectl apply -f https://k8s.io/examples/pods/pod-nginx-preferred-affinity.yaml + ``` + +1. Verifikasi apakah Pod yang kamu pilih sudah dijalankan pada Node yang kamu pilih: + + ```shell + kubectl get pods --output=wide + ``` + + Keluaran dari perintah tersebut akan berupa: + + ``` + NAME READY STATUS RESTARTS AGE IP NODE + nginx 1/1 Running 0 13s 10.200.0.4 worker0 + ``` + + + +## {{% heading "whatsnext" %}} + +Pelajari lebih lanjut mengenai +[Afinitas Node](/id/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity). diff --git a/content/id/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md b/content/id/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md index 1d36713f7f4b2..934f6178cd9e5 100644 --- a/content/id/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md +++ b/content/id/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md @@ -1,10 +1,10 @@ --- title: Mengatur Probe Liveness, Readiness dan Startup -content_template: templates/task +content_type: task weight: 110 --- -{{% capture overview %}} + Laman ini memperlihatkan bagaimana cara untuk mengatur _probe liveness_, _readiness_, dan _startup_ untuk Container. @@ -26,15 +26,16 @@ berhasil, kamu harus memastikan _probe_ tersebut tidak mengganggu _startup_ dari Mekanisme ini dapat digunakan untuk mengadopsi pemeriksaan _liveness_ pada saat memulai Container yang lambat, untuk menghindari Container dimatikan oleh kubelet sebelum Container mulai dan berjalan. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Mendefinisikan perintah liveness @@ -358,9 +359,10 @@ Untuk _probe_ TCP, kubelet membuat koneksi _probe_ pada Node, tidak pada Pod, ya kamu tidak menggunakan nama Service di dalam parameter `host` karena kubelet tidak bisa me-_resolve_-nya. -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Pelajari lebih lanjut tentang [Probe Container](/id/docs/concepts/workloads/pods/pod-lifecycle/#container-probes). 
@@ -371,4 +373,4 @@ Kamu juga dapat membaca rujukan API untuk: * [Container](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#container-v1-core) * [Probe](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#probe-v1-core) -{{% /capture %}} + diff --git a/content/id/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md b/content/id/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md index 22e13e2c8ac87..1d41c53e4c118 100644 --- a/content/id/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md +++ b/content/id/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md @@ -1,10 +1,10 @@ --- title: Mengatur Pod untuk Penyimpanan dengan PersistentVolume -content_template: templates/task +content_type: task weight: 60 --- -{{% capture overview %}} + Laman ini akan menjelaskan bagaimana kamu dapat mengatur sebuah Pod dengan menggunakan {{< glossary_tooltip text="PersistentVolumeClaim" term_id="persistent-volume-claim" >}} @@ -19,9 +19,10 @@ PersistentVolumeClaim yang secara otomatis terikat dengan PersistentVolume yang 3. Kamu membuat sebuah Pod yang menggunakan PersistentVolumeClaim di atas untuk penyimpanan. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + * Kamu membutuhkan sebuah klaster Kubernetes yang hanya memiliki satu Node, dan {{< glossary_tooltip text="kubectl" term_id="kubectl" >}} @@ -32,9 +33,9 @@ tidak memiliki sebuah klaster dengan Node tunggal, kamu dapat membuatnya dengan * Familiar dengan materi di [Persistent Volumes](/id/docs/concepts/storage/persistent-volumes/). -{{% /capture %}} -{{% capture steps %}} + + ## Membuat sebuah berkas index.html di dalam Node kamu @@ -235,10 +236,10 @@ sudo rmdir /mnt/data Sekarang kamu dapat menutup _shell_ Node kamu. -{{% /capture %}} -{{% capture discussion %}} + + ## Kontrol akses @@ -266,10 +267,11 @@ Ketika sebuah Pod mengkonsumsi PersistentVolume, GID yang terkait dengan Persist tidak ada di dalam sumberdaya Pod itu sendiri. {{< /note >}} -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Belajar lebih lanjut tentang [PersistentVolume](/id/docs/concepts/storage/persistent-volumes/). * Baca [dokumen perancangan Penyimpanan _Persistent_](https://git.k8s.io/community/contributors/design-proposals/storage/persistent-storage.md). @@ -281,4 +283,4 @@ tidak ada di dalam sumberdaya Pod itu sendiri. * [PersistentVolumeClaim](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumeclaim-v1-core) * [PersistentVolumeClaimSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumeclaimspec-v1-core) -{{% /capture %}} + diff --git a/content/id/docs/tasks/configure-pod-container/configure-pod-configmap.md b/content/id/docs/tasks/configure-pod-container/configure-pod-configmap.md index e5175ccf0e1df..bfdad56610635 100644 --- a/content/id/docs/tasks/configure-pod-container/configure-pod-configmap.md +++ b/content/id/docs/tasks/configure-pod-container/configure-pod-configmap.md @@ -35,7 +35,7 @@ kubectl create configmap di mana \ merupakan nama yang ingin kamu berikan pada ConfigMap tersebut dan \ adalah direktori, berkas, atau nilai harfiah yang digunakan sebagai sumber data. Nama dari sebuah objek ConfigMap haruslah berupa -[nama subdomain DNS](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names) yang sah. 
+[nama subdomain DNS](/id/docs/concepts/overview/working-with-objects/names#dns-subdomain-names) yang sah. Ketika kamu membuat ConfigMap dari sebuah berkas, secara bawaan, _basename_ dari berkas tersebut akan menjadi kunci pada \, dan isi dari berkas tersebut akan menjadi nilai dari kunci tersebut. @@ -615,14 +615,14 @@ Seperti sebelumnya, semua berkas yang sebelumnya berada pada direktori `/etc/con ### Memproyeksikan kunci ke jalur dan perizinan berkas tertentu Kamu dapat memproyeksikan kunci ke jalur dan perizinan tertentu pada setiap -berkas. Panduan pengguna [Secret](/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod) menjelaskan mengenai sintaks-sintaksnya. +berkas. Panduan pengguna [Secret](/id/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod) menjelaskan mengenai sintaks-sintaksnya. ### ConfigMap yang dipasang akan diperbarui secara otomatis Ketika sebuah ConfigMap yang sudah dipasang pada sebuah volume diperbarui, kunci-kunci yang diproyeksikan akan turut diperbarui. Kubelet akan memeriksa apakah ConfigMap yang dipasang merupakan yang terbaru pada sinkronisasi berkala. Namun, ConfigMap menggunakan _cache_ lokal berbasis ttl (_time-to-live_) miliknya untuk mendapatkan nilai dari ConfigMap saat ini. Hasilnya, keseluruhan penundaan dari saat ketika ConfigMap diperbarui sampai saat ketika kunci-kunci baru diproyeksikan ke pada Pod bisa selama periode sinkronisasi kubelet (secara bawaan selama 1 menit) + ttl dari _cache_ ConfigMap (secara bawaan selama 1 menit) pada kubelet. Kamu dapat memicu pembaruan langsung dengan memperbarui salah satu dari anotasi Pod. {{< note >}} -Kontainer yang menggunakan ConfigMap sebagai volume [subPath](/docs/concepts/storage/volumes/#using-subpath) tidak akan menerima pembaruan ConfigMap. +Kontainer yang menggunakan ConfigMap sebagai volume [subPath](/id/docs/concepts/storage/volumes/#using-subpath) tidak akan menerima pembaruan ConfigMap. {{< /note >}} @@ -631,10 +631,10 @@ Kontainer yang menggunakan ConfigMap sebagai volume [subPath](/docs/concepts/sto ## Memahami ConfigMap dan Pod -Sumber daya API ConfigMap menyimpan data konfigurasi sebagai pasangan kunci-nilai. Data tersebut dapat dikonsumsi oleh Pod atau sebagai penyedia konfigurasi untuk komponen-komponen sistem seperti kontroler. ConfigMap mirip dengan [Secret](/docs/concepts/configuration/secret/), tetapi ConfigMap dimaksudkan untuk mengolah tulisan yang tidak memiliki informasi yang sensitif. Baik pengguna maupun komponen sistem dapat menyimpan data konfigurasi pada ConfigMap. +Sumber daya API ConfigMap menyimpan data konfigurasi sebagai pasangan kunci-nilai. Data tersebut dapat dikonsumsi oleh Pod atau sebagai penyedia konfigurasi untuk komponen-komponen sistem seperti kontroler. ConfigMap mirip dengan [Secret](/id/docs/concepts/configuration/secret/), tetapi ConfigMap dimaksudkan untuk mengolah tulisan yang tidak memiliki informasi yang sensitif. Baik pengguna maupun komponen sistem dapat menyimpan data konfigurasi pada ConfigMap. {{< note >}} -ConfigMap harus mereferensikan berkas-berkas properti, bukan menggantikannya. Anggaplah ConfigMap sebagai sesuatu yang merepresentasikan direktori `/etc` beserta isinya pada Linux. Sebagai contoh, jika kamu membuat sebuah [Volume Kubernetes](/docs/concepts/storage/volumes/) dari ConfigMap, tiap butir data pada ConfigMap direpresentasikan sebagai sebuah berkas pada volume. +ConfigMap harus mereferensikan berkas-berkas properti, bukan menggantikannya. 
Anggaplah ConfigMap sebagai sesuatu yang merepresentasikan direktori `/etc` beserta isinya pada Linux. Sebagai contoh, jika kamu membuat sebuah [Volume Kubernetes](/id/docs/concepts/storage/volumes/) dari ConfigMap, tiap butir data pada ConfigMap direpresentasikan sebagai sebuah berkas pada volume. {{< /note >}} Kolom `data` pada ConfigMap berisi data konfigurasi. Seperti pada contoh di bawah, hal ini bisa berupa sesuatu yang sederhana -- seperti properti individual yang ditentukan menggunakan `--from-literal` -- atau sesuatu yang kompleks -- seperti berkas konfigurasi atau _blob_ JSON yang ditentukan dengan `--from-file`. diff --git a/content/id/docs/tasks/configure-pod-container/configure-service-account.md b/content/id/docs/tasks/configure-pod-container/configure-service-account.md index 73c0946b8f468..4a4d5999db1e5 100644 --- a/content/id/docs/tasks/configure-pod-container/configure-service-account.md +++ b/content/id/docs/tasks/configure-pod-container/configure-service-account.md @@ -84,7 +84,7 @@ metadata: EOF ``` -Nama dari objek ServiceAccount haruslah sebuah [nama subdomain DNS](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names) yang valid. +Nama dari objek ServiceAccount haruslah sebuah [nama subdomain DNS](/id/docs/concepts/overview/working-with-objects/names#dns-subdomain-names) yang valid. Jika kamu mendapatkan objek ServiceAccount secara komplit, seperti ini: @@ -108,7 +108,7 @@ secrets: maka kamu dapat melihat bahwa _token_ telah dibuat secara otomatis dan dirujuk oleh ServiceAccount. -Kamu dapat menggunakan _plugin_ otorisasi untuk [mengatur hak akses dari ServiceAccount](/docs/reference/access-authn-authz/rbac/#service-account-permissions). +Kamu dapat menggunakan _plugin_ otorisasi untuk [mengatur hak akses dari ServiceAccount](/id/docs/reference/access-authn-authz/rbac/#service-account-permissions). Untuk menggunakan ServiceAccount selain nilai standar, atur _field_ `spec.serviceAccountName` dari Pod menjadi nama dari ServiceAccount yang hendak kamu gunakan. @@ -280,7 +280,7 @@ ServiceAccountTokenVolumeProjection masih dalam tahap __beta__ untuk versi 1.12 Kubelet juga dapat memproyeksikan _token_ ServiceAccount ke Pod. Kamu dapat menentukan properti yang diinginkan dari _token_ seperti target pengguna dan durasi validitas. Properti tersebut tidak dapat diubah pada _token_ ServiceAccount standar. _Token_ ServiceAccount juga akan menjadi tidak valid terhadap API ketika Pod atau ServiceAccount dihapus. -Perilaku ini diatur pada PodSpec menggunakan tipe ProjectedVolume yaitu [ServiceAccountToken](/docs/concepts/storage/volumes/#projected). Untuk memungkinkan Pod dengan _token_ dengan pengguna bertipe _"vault"_ dan durasi validitas selama dua jam, kamu harus mengubah bagian ini pada PodSpec: +Perilaku ini diatur pada PodSpec menggunakan tipe ProjectedVolume yaitu [ServiceAccountToken](/id/docs/concepts/storage/volumes/#projected). 
Untuk memungkinkan Pod dengan _token_ dengan pengguna bertipe _"vault"_ dan durasi validitas selama dua jam, kamu harus mengubah bagian ini pada PodSpec: {{< codenew file="pods/pod-projected-svc-token.yaml" >}} diff --git a/content/id/docs/tasks/configure-pod-container/pull-image-private-registry.md b/content/id/docs/tasks/configure-pod-container/pull-image-private-registry.md index 2158afcf35bdb..50aad8de9a15a 100644 --- a/content/id/docs/tasks/configure-pod-container/pull-image-private-registry.md +++ b/content/id/docs/tasks/configure-pod-container/pull-image-private-registry.md @@ -206,7 +206,7 @@ kubectl get pod private-reg * Pelajari lebih lanjut tentang [Secret](/id/docs/concepts/configuration/secret/). * Pelajari lebih lanjut tentang [menggunakan register pribadi](/id/docs/concepts/containers/images/#menggunakan-register-privat). -* Pelajari lebih lanjut tentang [menambahkan Secret untuk menarik _image_ ke dalam sebuah akun service](/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account). +* Pelajari lebih lanjut tentang [menambahkan Secret untuk menarik _image_ ke dalam sebuah akun service](/id/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account). * Lihatlah [kubectl create secret docker-registry](/docs/reference/generated/kubectl/kubectl-commands/#-em-secret-docker-registry-em-). * Lihatlah [Secret](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#secret-v1-core). * Lihatlah bidang `imagePullSecrets` dari [PodSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core). diff --git a/content/id/docs/tasks/configure-pod-container/security-context.md b/content/id/docs/tasks/configure-pod-container/security-context.md index 6ea554f2d66d7..d190468399cf1 100644 --- a/content/id/docs/tasks/configure-pod-container/security-context.md +++ b/content/id/docs/tasks/configure-pod-container/security-context.md @@ -1,10 +1,10 @@ --- title: Mengonfigurasi Konteks Keamanan untuk Pod atau Container -content_template: templates/task +content_type: task weight: 80 --- -{{% capture overview %}} + Konteks keamanan (_security context_) menentukan wewenang (_privilege_) dan aturan kontrol akses untuk sebuah Pod atau Container. 
Aturan konteks keamanan meliputi hal-hal berikut ini namun tidak terbatas pada hal-hal tersebut: @@ -31,15 +31,16 @@ Poin-poin di atas bukanlah sekumpulan lengkap dari aturan konteks keamanan - sil Untuk informasi lebih lanjut tentang mekanisme keamanan pada Linux, silahkan lihat [ikhtisar fitur keamanan pada Kernel Linux](https://www.linux.com/learn/overview-linux-kernel-security-features) -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Mengatur konteks keamanan untuk Pod @@ -401,16 +402,17 @@ kubectl delete pod security-context-demo-3 kubectl delete pod security-context-demo-4 ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [PodSecurityContext](/docs/reference/generated/kubernetes-api/{{}}/#podsecuritycontext-v1-core) * [SecurityContext](/docs/reference/generated/kubernetes-api/{{}}/#securitycontext-v1-core) * [Menyetel Docker dengan peningkatan keamanan terbaru](https://opensource.com/business/15/3/docker-security-tuning) * [Dokumen desain konteks keamanan](https://git.k8s.io/community/contributors/design-proposals/auth/security_context.md) * [Dokumen desain manajemen kepemilikan](https://git.k8s.io/community/contributors/design-proposals/storage/volume-ownership-management.md) -* [Kebijakan keamanan Pod](/docs/concepts/policy/pod-security-policy/) +* [Kebijakan keamanan Pod](/id/docs/concepts/policy/pod-security-policy/) * [Dokumen desain AllowPrivilegeEscalation](https://git.k8s.io/community/contributors/design-proposals/auth/no-new-privs.md) -{{% /capture %}} + diff --git a/content/id/docs/tasks/configure-pod-container/share-process-namespace.md b/content/id/docs/tasks/configure-pod-container/share-process-namespace.md new file mode 100644 index 0000000000000..9b32d74b3cdf6 --- /dev/null +++ b/content/id/docs/tasks/configure-pod-container/share-process-namespace.md @@ -0,0 +1,118 @@ +--- +title: Pembagian Namespace Proses antar Container pada sebuah Pod +min-kubernetes-server-version: v1.10 +content_type: task +weight: 160 +--- + + + +{{< feature-state state="stable" for_k8s_version="v1.17" >}} + +Dokumen ini akan menjelaskan menkanisme konfigurasi pembagian namespace +process dalam sebuah Pod. Ketika pembagian _namespace_ proses diaktifkan untuk sebuah Pod, +proses yang ada di dalam Container akan bersifat transparan pada semua Container +yang terdapat di dalam Pod tersebut. + +Kamu dapat mengaktifkan fitur ini untuk melakukan konfigurasi kontainer yang saling terhubung, +misalnya saja kontainer _sidecar_ yang bertugas dalam urusan log, atau untuk melakukan +proses pemecahan masalah (_troubleshoot_) image kontainer yang tidak memiliki utilitas _debugging_ seperti shell. + + + +## {{% heading "prerequisites" %}} + + +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} + + + + + +## Mengatur sebuah Pod + +Pembagian _namespace_ proses (_Process Namespace Sharing_) diaktifkan menggunakan _field_ `shareProcessNamespace` +`v1.PodSpec`. Sebagai contoh: + +{{< codenew file="pods/share-process-namespace.yaml" >}} + +1. Buatlah sebuah Pod `nginx` di dalam klaster kamu: + + ```shell + kubectl apply -f https://k8s.io/examples/pods/share-process-namespace.yaml + ``` + +2. 
Tempelkan kontainer `shell` dan jalankan perintah `ps`: + + ```shell + kubectl attach -it nginx -c shell + ``` + + Jika kamu tidak melihat _prompt_ perintah, kamu dapat menekan tombol enter: + + ``` + / # ps ax + PID USER TIME COMMAND + 1 root 0:00 /pause + 8 root 0:00 nginx: master process nginx -g daemon off; + 14 101 0:00 nginx: worker process + 15 root 0:00 sh + 21 root 0:00 ps ax + ``` + +Kamu dapat memberikan sinyal pada kontainer lain. Misalnya saja, mengirim sinyal `SIGHUP` pada +nginx untuk menjalankan ulang proses worker. Hal ini membutuhkan kapabilitas `SYS_PTRACE`. + +``` +/ # kill -HUP 8 +/ # ps ax +PID USER TIME COMMAND + 1 root 0:00 /pause + 8 root 0:00 nginx: master process nginx -g daemon off; + 15 root 0:00 sh + 22 101 0:00 nginx: worker process + 23 root 0:00 ps ax +``` + +Hal ini juga merupakan alasan mengapa kita dapat mengakses kontainer lain menggunakan +tautan (_link_) `/proc/$pid/root`. + +``` +/ # head /proc/8/root/etc/nginx/nginx.conf + +user nginx; +worker_processes 1; + +error_log /var/log/nginx/error.log warn; +pid /var/run/nginx.pid; + + +events { + worker_connections 1024; +``` + + + + + +## Memahami Pembagian Namespace Process + +Pod berbagi banyak sumber daya yang ada sehingga memungkinkan adanya pembagian _namespace_ +proses. Beberapa _image_ kontainer bisa jadi terisolasi dari kontainer lainnya, +meskipun begitu, memahami beberapa perbedaan berikut juga merupakan hal yang +penting untuk diketahui: + +1. **Proses kontainer tidak lagi memiliki PID 1.** Beberapa image kontainer akan menolak + untuk dijalankan (contohnya, kontainer yang menggunakan `systemd`) atau menjalankan + perintah seperti `kill -HUP 1` untuk memberikan sinyal pada proses kontainer. Di dalam Pod dengan + sebuah namespace process terbagi, sinyal `kill -HUP 1` akan diberikan pada _sandbox_ Pod. + (`/pause` pada contoh di atas.) + +2. **Proses-proses yang ada akan transparan pada kontainer lain di dalam Pod.** Hal ini termasuk + informasi pada `/proc`, seperti kata sandi yang diberikan sebagai argumen atau _environment variable_. + Hal ini hanya dilindungi oleh perizinan reguler Unix. + +3. **Berkas sistem (_filesystem_) kontainer bersifat transparan pada kontainer lain di dalam Pod melalui link + `/proc/$pid/root`.** Hal ini memungkinkan proses _debugging_ menjadi lebih mudah, meskipun begitu hal ini + juga berarti kata kunci (_secret_) yang ada di dalam _filesystem_ juga hanya dilindungi oleh perizinan _filesystem_ saja. + diff --git a/content/id/docs/tasks/debug-application-cluster/get-shell-running-container.md b/content/id/docs/tasks/debug-application-cluster/get-shell-running-container.md index 39d2317de55d6..e15a8a4df6532 100644 --- a/content/id/docs/tasks/debug-application-cluster/get-shell-running-container.md +++ b/content/id/docs/tasks/debug-application-cluster/get-shell-running-container.md @@ -1,24 +1,25 @@ --- title: Mendapatkan Shell Untuk Masuk ke Container yang Sedang Berjalan -content_template: templates/task +content_type: task --- -{{% capture overview %}} + Laman ini menunjukkan bagaimana cara menggunakan `kubectl exec` untuk mendapatkan _shell_ untuk masuk ke dalam Container yang sedang berjalan. 
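Sebagai gambaran singkat, bentuk dasar perintahnya kurang lebih seperti berikut (nama Pod `shell-demo` dipakai kembali pada langkah-langkah selanjutnya di laman ini):

```shell
# Membuka shell interaktif di dalam Container pada Pod shell-demo
kubectl exec -it shell-demo -- /bin/bash
```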
-{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Mendapatkan sebuah _shell_ untuk masuk ke sebuah Container @@ -118,9 +119,9 @@ kubectl exec shell-demo ls / kubectl exec shell-demo cat /proc/1/mounts ``` -{{% /capture %}} -{{% capture discussion %}} + + ## Membuka sebuah _shell_ ketika sebuah Pod memiliki lebih dari satu Container @@ -134,14 +135,15 @@ _shell_ ke Container dengan nama main-app. kubectl exec -it my-pod --container main-app -- /bin/bash ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * [kubectl exec](/docs/reference/generated/kubectl/kubectl-commands/#exec) -{{% /capture %}} + diff --git a/content/id/docs/tasks/inject-data-application/define-command-argument-container.md b/content/id/docs/tasks/inject-data-application/define-command-argument-container.md index 28a3a1d7e9764..9f2cd7a7aefc8 100644 --- a/content/id/docs/tasks/inject-data-application/define-command-argument-container.md +++ b/content/id/docs/tasks/inject-data-application/define-command-argument-container.md @@ -1,26 +1,27 @@ --- title: Mendefinisikan Perintah dan Argumen untuk sebuah Kontainer -content_template: templates/task +content_type: task weight: 10 --- -{{% capture overview %}} + Laman ini menunjukkan bagaimana cara mendefinisikan perintah-perintah dan argumen-argumen saat kamu menjalankan Container dalam sebuah {{< glossary_tooltip term_id="Pod" >}}. -{{% /capture %}} -{{% capture prerequisites %}} + +## {{% heading "prerequisites" %}} + {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -{{% /capture %}} -{{% capture steps %}} + + ## Mendefinisikan sebuah perintah dan argumen-argumen saat kamu membuat sebuah Pod @@ -145,12 +146,13 @@ Berikut ini beberapa contoh: | `[/ep-1]` | `[foo bar]` | `[/ep-2]` | `[zoo boo]` | `[ep-2 zoo boo]` | -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Pelajari lebih lanjut tentang [mengatur Pod and Container](/id/docs/tasks/). * Pelajari lebih lanjut tentang [menjalankan perintah di dalam sebuah Container](/id/docs/tasks/debug-application-cluster/get-shell-running-container/). * Lihat [Container](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#container-v1-core). -{{% /capture %}} + diff --git a/content/id/docs/tasks/job/automated-tasks-with-cron-jobs.md b/content/id/docs/tasks/job/automated-tasks-with-cron-jobs.md index 2139f5162938b..c2c4b9399f52d 100644 --- a/content/id/docs/tasks/job/automated-tasks-with-cron-jobs.md +++ b/content/id/docs/tasks/job/automated-tasks-with-cron-jobs.md @@ -16,7 +16,7 @@ CronJob memiliki keterbatasan dan kekhasan. Misalnya, dalam keadaan tertentu, sebuah CronJob dapat membuat banyak Job. Karena itu, Job haruslah _idempotent._ -Untuk informasi lanjut mengenai keterbatasan, lihat [CronJob](/docs/concepts/workloads/controllers/cron-jobs). +Untuk informasi lanjut mengenai keterbatasan, lihat [CronJob](/id/docs/concepts/workloads/controllers/cron-jobs). @@ -127,7 +127,7 @@ kubectl delete cronjob hello ``` Menghapus CronJob akan menghapus semua Job dan Pod yang telah terbuat dan menghentikanya dari pembuatan Job tambahan. -Kamu dapat membaca lebih lanjut tentang menghapus Job di [_garbage collection_](/docs/concepts/workloads/controllers/garbage-collection/). 
+Kamu dapat membaca lebih lanjut tentang menghapus Job di [_garbage collection_](/id/docs/concepts/workloads/controllers/garbage-collection/). ## Menulis Speifikasi Sebuah Cron @@ -162,8 +162,8 @@ Sebuah tanda tanya (`?`) dalam penjadwalan memiliki makna yang sama dengan tanda ### Templat Job `.spec.JobTemplate` adalah templat untuk sebuah Job, dan itu wajib. -Templat Job memiliki skema yang sama dengan [Job](/docs/concepts/workloads/controllers/jobs-run-to-completion/), kecuali jika bersarang dan tidak memiliki sebuah `apiVersion` atau `kind`. -Untuk informasi lebih lanjut tentang menulis sebuah Job `.spec` lihat [Menulis spesifikasi Job](/docs/concepts/workloads/controllers/jobs-run-to-completion/#writing-a-job-spec). +Templat Job memiliki skema yang sama dengan [Job](/id/docs/concepts/workloads/controllers/jobs-run-to-completion/), kecuali jika bersarang dan tidak memiliki sebuah `apiVersion` atau `kind`. +Untuk informasi lebih lanjut tentang menulis sebuah Job `.spec` lihat [Menulis spesifikasi Job](/id/docs/concepts/workloads/controllers/jobs-run-to-completion/#writing-a-job-spec). ### _Starting Deadline_ diff --git a/content/id/docs/tasks/manage-kubernetes-objects/_index.md b/content/id/docs/tasks/manage-kubernetes-objects/_index.md new file mode 100644 index 0000000000000..26a982813ebfe --- /dev/null +++ b/content/id/docs/tasks/manage-kubernetes-objects/_index.md @@ -0,0 +1,5 @@ +--- +title: "Mengelola Objek Kubernetes" +description: Paradigma deklaratif dan imperatif untuk berinteraksi dengan API Kubernetes. +weight: 25 +--- \ No newline at end of file diff --git a/content/id/docs/tasks/manage-kubernetes-objects/kustomization.md b/content/id/docs/tasks/manage-kubernetes-objects/kustomization.md new file mode 100644 index 0000000000000..680b20d371f2e --- /dev/null +++ b/content/id/docs/tasks/manage-kubernetes-objects/kustomization.md @@ -0,0 +1,841 @@ +--- +title: Mengelola Objek Kubernetes secara Deklaratif menggunakan Kustomize +content_type: task +weight: 20 +--- + + + +[Kustomize](https://github.com/kubernetes-sigs/kustomize) merupakan sebuah alat +untuk melakukan kustomisasi objek Kubernetes melalui sebuah berkas [berkas kustomization](https://github.com/kubernetes-sigs/kustomize/blob/master/docs/glossary.md#kustomization). + +Sejak versi 1.14, kubectl mendukung pengelolaan objek Kubernetes melalui berkas kustomization. +Untuk melihat sumber daya yang ada di dalam direktori yang memiliki berkas kustomization, jalankan perintah berikut: + +```shell +kubectl kustomize +``` + +Untuk menerapkan sumber daya tersebut, jalankan perintah `kubectl apply` dengan _flag_ `--kustomize` atau `-k`: + +```shell +kubectl apply -k +``` + + + +## {{% heading "prerequisites" %}} + + +Instal [`kubectl`](/id/docs/tasks/tools/install-kubectl/) terlebih dahulu. + +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} + + + + + +## Gambaran Umum Kustomize + +Kustomize adalah sebuah alat untuk melakukan kustomisasi konfigurasi Kubernetes. Untuk mengelola berkas-berkas konfigurasi, kustomize memiliki fitur -fitur di bawah ini: + +* membangkitkan (_generate_) sumber daya dari sumber lain +* mengatur _field_ dari berbagai sumber daya yang bersinggungan +* mengkomposisikan dan melakukan kustomisasi sekelompok sumber daya + +### Membangkitkan Sumber Daya + +ConfigMap dan Secret menyimpan konfigurasi atau data sensitif yang digunakan oleh objek-objek Kubernetes lainnya, seperti Pod. 
+Biasanya, _source of truth_ dari ConfigMap atau Secret berasal dari luar klaster, seperti berkas `.properties` atau berkas kunci SSH. +Kustomize memiliki `secretGenerator` dan `configMapGenerator`, yang akan membangkitkan (_generate_) Secret dan ConfigMap dari berkas-berkas atau nilai-nilai literal. + +#### configMapGenerator + +Untuk membangkitkan sebuah ConfigMap dari berkas, tambahkan entri ke daftar `files` pada `configMapGenerator`. +Contoh di bawah ini membangkitkan sebuah ConfigMap dengan data dari berkas `.properties`: + +```shell +# Membuat berkas application.properties +cat <application.properties +FOO=Bar +EOF + +cat <./kustomization.yaml +configMapGenerator: +- name: example-configmap-1 + files: + - application.properties +EOF +``` + +ConfigMap yang telah dibangkitkan dapat dilihat menggunakan perintah berikut: + +```shell +kubectl kustomize ./ +``` + +Isinya seperti di bawah ini: + +```yaml +apiVersion: v1 +data: + application.properties: | + FOO=Bar +kind: ConfigMap +metadata: + name: example-configmap-1-8mbdf7882g +``` + +ConfigMap juga dapat dibangkitkan dari pasangan _key-value_ literal. Untuk membangkitkan secara literal, tambahkan entri pada daftar `literals` di `configMapGenerator`. +Contoh di bawah ini membangkitkan ConfigMap dengan data dari pasangan _key-value_: + +```shell +cat <./kustomization.yaml +configMapGenerator: +- name: example-configmap-2 + literals: + - FOO=Bar +EOF +``` + +ConfigMap yang dibangkitkan dapat dilihat menggunakan perintah berikut: + +```shell +kubectl kustomize ./ +``` + +Isinya seperti ini: + +```yaml +apiVersion: v1 +data: + FOO: Bar +kind: ConfigMap +metadata: + name: example-configmap-2-g2hdhfc6tk +``` + +#### secretGenerator + +Kamu dapat membangkitkan Secret dari berkas atau pasangan _key-value_ literal. Untuk membangkitkan dari berkas, tambahkan entri pada daftar `files` di `secretGenerator`. +Contoh di bawah ini membangkitkan Secret dengan data dari berkas: + +```shell +# Membuat berkas password.txt +cat <./password.txt +username=admin +password=secret +EOF + +cat <./kustomization.yaml +secretGenerator: +- name: example-secret-1 + files: + - password.txt +EOF +``` + +Isinya seperti ini: + +```yaml +apiVersion: v1 +data: + password.txt: dXNlcm5hbWU9YWRtaW4KcGFzc3dvcmQ9c2VjcmV0Cg== +kind: Secret +metadata: + name: example-secret-1-t2kt65hgtb +type: Opaque +``` + +Untuk membangkitkan secara literal dari pasangan _key-value_, tambahkan entri pada daftar `literals` di `secretGenerator`. +Contoh di bawah ini membangkitkan Secret dengan data dari pasangan _key-value_: + +```shell +cat <./kustomization.yaml +secretGenerator: +- name: example-secret-2 + literals: + - username=admin + - password=secret +EOF +``` + +Isinya seperti ini: + +```yaml +apiVersion: v1 +data: + password: c2VjcmV0 + username: YWRtaW4= +kind: Secret +metadata: + name: example-secret-2-t52t6g96d8 +type: Opaque +``` + +#### generatorOptions + +ConfigMap dan Secret yang dibangkitkan memiliki informasi sufiks _hash_. Hal ini memastikan bahwa ConfigMap atau Secret yang baru, dibangkitkan saat isinya berubah. +Untuk menonaktifkan penambahan sufiks ini, kamu bisa menggunakan `generatorOptions`. Selain itu, melalui _field_ ini kamu juga bisa mengatur opsi-opsi yang bersinggungan untuk ConfigMap dan Secret yang dibangkitkan. 
+ +```shell +cat <./kustomization.yaml +configMapGenerator: +- name: example-configmap-3 + literals: + - FOO=Bar +generatorOptions: + disableNameSuffixHash: true + labels: + type: generated + annotations: + note: generated +EOF +``` + +Jalankan perintah `kubectl kustomize ./` untuk melihat ConfigMap yang dibangkitkan: + +```yaml +apiVersion: v1 +data: + FOO: Bar +kind: ConfigMap +metadata: + annotations: + note: generated + labels: + type: generated + name: example-configmap-3 +``` + +### Mengatur _field_ yang bersinggungan + +Mengatur _field-field_ yang bersinggungan untuk semua sumber daya Kubernetes dalam sebuah proyek. +Beberapa contoh kasusnya seperti di bawah ini: + +* mengatur Namespace yang sama untuk semua sumber daya +* menambahkan prefiks atau sufiks yang sama +* menambahkan kumpulan label yang sama +* menambahkan kumpulan anotasi yang sama + +Lihat contoh di bawah ini: + +```shell +# Membuat sebuah deployment.yaml +cat <./deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + labels: + app: nginx +spec: + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx +EOF + +cat <./kustomization.yaml +namespace: my-namespace +namePrefix: dev- +nameSuffix: "-001" +commonLabels: + app: bingo +commonAnnotations: + oncallPager: 800-555-1212 +resources: +- deployment.yaml +EOF +``` + +Jalankan perintah `kubectl kustomize ./` untuk melihat _field-field_ tersebut telah terisi di dalam sumber daya Deployment: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + oncallPager: 800-555-1212 + labels: + app: bingo + name: dev-nginx-deployment-001 + namespace: my-namespace +spec: + selector: + matchLabels: + app: bingo + template: + metadata: + annotations: + oncallPager: 800-555-1212 + labels: + app: bingo + spec: + containers: + - image: nginx + name: nginx +``` + +### Mengkomposisi dan Melakukan Kustomisasi Sumber Daya + +Mengkomposisi kumpulan sumber daya dalam sebuah proyek dan mengelolanya di dalam berkas atau direktori yang sama merupakan hal yang cukup umum dilakukan. +Kustomize menyediakan cara untuk mengkomposisi sumber daya dari berkas-berkas yang berbeda, lalu menerapkan _patch_ atau kustomisasi lain di atasnya. + +#### Melakukan Komposisi + +Kustomize mendukung komposisi dari berbagai sumber daya yang berbeda. _Field_ `resources` pada berkas `kustomization.yaml`, mendefinisikan daftar sumber daya yang diinginkan dalam sebuah konfigurasi. Atur terlebih dahulu jalur (_path_) ke berkas konfigurasi sumber daya pada daftar `resources`. +Contoh di bawah ini merupakan sebuah aplikasi NGINX yang terdiri dari sebuah Deployment dan sebuah Service: + +```shell +# Membuat berkas deployment.yaml +cat < deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-nginx +spec: + selector: + matchLabels: + run: my-nginx + replicas: 2 + template: + metadata: + labels: + run: my-nginx + spec: + containers: + - name: my-nginx + image: nginx + ports: + - containerPort: 80 +EOF + +# Membuat berkas service.yaml +cat < service.yaml +apiVersion: v1 +kind: Service +metadata: + name: my-nginx + labels: + run: my-nginx +spec: + ports: + - port: 80 + protocol: TCP + selector: + run: my-nginx +EOF + +# Membuat berkas kustomization.yaml yang terdiri dari keduanya +cat <./kustomization.yaml +resources: +- deployment.yaml +- service.yaml +EOF +``` + +Sumber daya dari `kubectl kustomize ./` berisi kedua objek Deployment dan Service. 
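Kamu dapat memeriksa hasil komposisi tersebut sebelum menerapkannya ke klaster, misalnya:

```shell
# Menampilkan hasil render gabungan dari Deployment dan Service
kubectl kustomize ./
```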
+ +#### Melakukan Kustomisasi + +_Patch_ dapat digunakan untuk menerapkan berbagai macam kustomisasi pada sumber daya. Kustomize mendukung berbagai mekanisme _patching_ yang berbeda melalui `patchesStrategicMerge` dan `patchesJson6902`. `patchesStrategicMerge` adalah daftar dari yang berisi tentang _path_ berkas. Setiap berkas akan dioperasikan dengan cara [strategic merge patch](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md). Nama di dalam _patch_ harus sesuai dengan nama sumber daya yang telah dimuat. Kami menyarankan _patch-patch_ kecil yang hanya melakukan satu hal saja. +Contoh membuat sebuah _patch_ di bawah ini akan menambahkan jumlah replika Deployment dan _patch_ lainnya untuk mengatur limit memori. + +```shell +# Membuat berkas deployment.yaml +cat < deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-nginx +spec: + selector: + matchLabels: + run: my-nginx + replicas: 2 + template: + metadata: + labels: + run: my-nginx + spec: + containers: + - name: my-nginx + image: nginx + ports: + - containerPort: 80 +EOF + +# Membuat sebuah patch increase_replicas.yaml +cat < increase_replicas.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-nginx +spec: + replicas: 3 +EOF + +# Membuat patch lainnya set_memory.yaml +cat < set_memory.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-nginx +spec: + template: + spec: + containers: + - name: my-nginx + resources: + limits: + memory: 512Mi +EOF + +cat <./kustomization.yaml +resources: +- deployment.yaml +patchesStrategicMerge: +- increase_replicas.yaml +- set_memory.yaml +EOF +``` + +Jalankan perintah `kubectl kustomize ./` untuk melihat isi dari Deployment: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-nginx +spec: + replicas: 3 + selector: + matchLabels: + run: my-nginx + template: + metadata: + labels: + run: my-nginx + spec: + containers: + - image: nginx + limits: + memory: 512Mi + name: my-nginx + ports: + - containerPort: 80 +``` + +Tidak semua sumber daya atau _field_ mendukung _strategic merge patch_. Untuk mendukung _field_ sembarang pada sumber daya _field_, Kustomize +menyediakan penerapan [_patch_ JSON](https://tools.ietf.org/html/rfc6902) melalui `patchesJson6902`. +Untuk mencari sumber daya yang tepat dengan sebuah _patch_ Json, maka grup, versi, jenis dan nama dari sumber daya harus dispesifikasikan dalam `kustomization.yaml`. +Contoh di bawah ini menambahkan jumlah replika dari objek Deployment yang bisa juga dilakukan melalui `patchesJson6902`. 
+ +```shell +# Membuat berkas deployment.yaml +cat < deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-nginx +spec: + selector: + matchLabels: + run: my-nginx + replicas: 2 + template: + metadata: + labels: + run: my-nginx + spec: + containers: + - name: my-nginx + image: nginx + ports: + - containerPort: 80 +EOF + +# Membuat patch json +cat < patch.yaml +- op: replace + path: /spec/replicas + value: 3 +EOF + +# Membuat berkas kustomization.yaml +cat <./kustomization.yaml +resources: +- deployment.yaml + +patchesJson6902: +- target: + group: apps + version: v1 + kind: Deployment + name: my-nginx + path: patch.yaml +EOF +``` + +Jalankan perintah `kubectl kustomize ./` untuk melihat _field_ `replicas` yang telah diperbarui: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-nginx +spec: + replicas: 3 + selector: + matchLabels: + run: my-nginx + template: + metadata: + labels: + run: my-nginx + spec: + containers: + - image: nginx + name: my-nginx + ports: + - containerPort: 80 +``` + +Selain _patch_, Kustomize juga menyediakan cara untuk melakukan kustomisasi _image_ Container atau memasukkan nilai _field_ dari objek lainnya ke dalam Container tanpa membuat _patch_. Sebagai contoh, kamu dapat melakukan kustomisasi _image_ yang digunakan di dalam Container dengan menyebutkan spesifikasi _field_ `images` di dalam `kustomization.yaml`. + +```shell +cat < deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-nginx +spec: + selector: + matchLabels: + run: my-nginx + replicas: 2 + template: + metadata: + labels: + run: my-nginx + spec: + containers: + - name: my-nginx + image: nginx + ports: + - containerPort: 80 +EOF + +cat <./kustomization.yaml +resources: +- deployment.yaml +images: +- name: nginx + newName: my.image.registry/nginx + newTag: 1.4.0 +EOF +``` + +Jalankan perintah `kubectl kustomize ./` untuk melihat _image_ yang sedang digunakan telah diperbarui: +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-nginx +spec: + replicas: 2 + selector: + matchLabels: + run: my-nginx + template: + metadata: + labels: + run: my-nginx + spec: + containers: + - image: my.image.registry/nginx:1.4.0 + name: my-nginx + ports: + - containerPort: 80 +``` + +Terkadang, aplikasi yang berjalan di dalam Pod perlu untuk menggunakan nilai konfigurasi dari objek lainnya. +Contohnya, sebuah Pod dari objek Deployment perlu untuk membaca nama Service dari Env atau sebagai argumen perintah. +Ini karena nama Service bisa saja berubah akibat dari penambahan `namePrefix` atau `nameSuffix` pada berkas `kustomization.yaml`. +Kami tidak menyarankan kamu untuk meng-_hardcode_ nama Service di dalam argumen perintah. +Untuk penggunaan ini, Kustomize dapat memasukkan nama Service ke dalam Container melalui `vars`. 
+ +```shell +# Membuat berkas deployment.yaml +cat < deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-nginx +spec: + selector: + matchLabels: + run: my-nginx + replicas: 2 + template: + metadata: + labels: + run: my-nginx + spec: + containers: + - name: my-nginx + image: nginx + command: ["start", "--host", "\$(MY_SERVICE_NAME)"] +EOF + +# Membuat berkas service.yaml +cat < service.yaml +apiVersion: v1 +kind: Service +metadata: + name: my-nginx + labels: + run: my-nginx +spec: + ports: + - port: 80 + protocol: TCP + selector: + run: my-nginx +EOF + +cat <./kustomization.yaml +namePrefix: dev- +nameSuffix: "-001" + +resources: +- deployment.yaml +- service.yaml + +vars: +- name: MY_SERVICE_NAME + objref: + kind: Service + name: my-nginx + apiVersion: v1 +EOF +``` + +Jalankan perintah `kubectl kustomize ./` untuk melihat nama Service yang dimasukkan ke dalam Container menjadi `dev-my-nginx-001`: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: dev-my-nginx-001 +spec: + replicas: 2 + selector: + matchLabels: + run: my-nginx + template: + metadata: + labels: + run: my-nginx + spec: + containers: + - command: + - start + - --host + - dev-my-nginx-001 + image: nginx + name: my-nginx +``` + +## Base dan Overlay + +Kustomize memiliki konsep **base** dan **overlay**. **base** merupakan direktori dengan `kustomization.yaml`, yang berisi +sekumpulan sumber daya dan kustomisasi yang terkait. **base** dapat berupa direktori lokal maupun direktori dari repo _remote_, +asalkan berkas `kustomization.yaml` ada di dalamnya. **overlay** merupakan direktori dengan `kustomization.yaml` yang merujuk pada +direktori kustomization lainnya sebagai **base**-nya. **base** tidak memiliki informasi tentang **overlay**. dan dapat digunakan pada beberapa **overlay** sekaligus. +**overlay** bisa memiliki beberapa **base** dan terdiri dari semua sumber daya yang berasal dari **base** yang juga dapat memiliki kustomisasi lagi di atasnya. + +Contoh di bawah ini memperlihatkan kegunaan dari **base**: + +```shell +# Membuat direktori untuk menyimpan base +mkdir base +# Membuat base/deployment.yaml +cat < base/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-nginx +spec: + selector: + matchLabels: + run: my-nginx + replicas: 2 + template: + metadata: + labels: + run: my-nginx + spec: + containers: + - name: my-nginx + image: nginx +EOF + +# Membuat berkas base/service.yaml +cat < base/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: my-nginx + labels: + run: my-nginx +spec: + ports: + - port: 80 + protocol: TCP + selector: + run: my-nginx +EOF + +# Membuat berkas base/kustomization.yaml +cat < base/kustomization.yaml +resources: +- deployment.yaml +- service.yaml +EOF +``` + +**base** ini dapat digunakan di dalam beberapa **overlay** sekaligus. Kamu dapat menambahkan `namePrefix` yang berbeda ataupun +_field_ lainnya yang bersinggungan di dalam **overlay** berbeda. Di bawah ini merupakan dua buah **overlay** yang menggunakan **base** yang sama. + +```shell +mkdir dev +cat < dev/kustomization.yaml +bases: +- ../base +namePrefix: dev- +EOF + +mkdir prod +cat < prod/kustomization.yaml +bases: +- ../base +namePrefix: prod- +EOF +``` + +## Cara menerapkan/melihat/menghapus objek menggunakan Kustomize + +Gunakan `--kustomize` atau `-k` di dalam perintah `kubectl` untuk mengenali sumber daya yang dikelola oleh `kustomization.yaml`. 
+Perhatikan bahwa `-k` harus merujuk pada direktori kustomization, misalnya: + +```shell +kubectl apply -k / +``` + +Buatlah `kustomization.yaml` seperti di bawah ini: + +```shell +# Membuat berkas deployment.yaml +cat < deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-nginx +spec: + selector: + matchLabels: + run: my-nginx + replicas: 2 + template: + metadata: + labels: + run: my-nginx + spec: + containers: + - name: my-nginx + image: nginx + ports: + - containerPort: 80 +EOF + +# Membuat berkas kustomization.yaml +cat <./kustomization.yaml +namePrefix: dev- +commonLabels: + app: my-nginx +resources: +- deployment.yaml +EOF +``` + +Jalankan perintah di bawah ini untuk menerapkan objek Deployment `dev-my-nginx`: + +```shell +> kubectl apply -k ./ +deployment.apps/dev-my-nginx created +``` + +Jalankan perintah di bawah ini untuk melihat objek Deployment `dev-my-nginx`: + +```shell +kubectl get -k ./ +``` + +```shell +kubectl describe -k ./ +``` + +Jalankan perintah di bawah ini untuk membandingkan objek Deployment `dev-my-nginx` dengan kondisi yang diinginkan pada klaster jika manifes telah berhasil diterapkan: + +```shell +kubectl diff -k ./ +``` + +Jalankan perintah di bawah ini untuk menghapus objek Deployment `dev-my-nginx`: + +```shell +> kubectl delete -k ./ +deployment.apps "dev-my-nginx" deleted +``` + +## Daftar Fitur Kustomize + +| _Field_ | Tipe | Deskripsi | +|-----------------------|--------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------| +| namespace | string | menambahkan Namespace untuk semua sumber daya | +| namePrefix | string | nilai dari _field_ ini ditambahkan di awal pada nama dari semua sumber daya | +| nameSuffix | string | nilai dari _field_ ini ditambahkan di akhir pada nama dari semua sumber daya | +| commonLabels | map[string]string | label untuk ditambahkan pada semua sumber daya dan selektor | +| commonAnnotations | map[string]string | anotasi untuk ditambahkan pada semua sumber daya | +| resources | []string | setiap entri di dalam daftar ini harus diselesaikan pada berkas konfigurasi sumber daya yang sudah ada | +| configmapGenerator | [][ConfigMapArgs](https://github.com/kubernetes-sigs/kustomize/blob/release-kustomize-v4.0/api/types/kustomization.go#L99) | setiap entri di dalam daftar ini membangkitkan ConfigMap | +| secretGenerator | [][SecretArgs](https://github.com/kubernetes-sigs/kustomize/blob/release-kustomize-v4.0/api/types/kustomization.go#L106) | setiap entri di dalam daftar ini membangkitkan Secret | +| generatorOptions | [GeneratorOptions](https://github.com/kubernetes-sigs/kustomize/blob/release-kustomize-v4.0/api/types/kustomization.go#L109) | memodifikasi perilaku dari semua generator ConfigMap dan Secret | +| bases | []string | setiap entri di dalam daftar ini harus diselesaikan ke dalam sebuah direktori yang berisi berkas kustomization.yaml | +| patchesStrategicMerge | []string | setiap entri di dalam daftar ini harus diselesaikan dengan _strategic merge patch_ dari sebuah objek Kubernetes | +| patchesJson6902 | [][Json6902](https://github.com/kubernetes-sigs/kustomize/blob/release-kustomize-v4.0/api/types/patchjson6902.go#L8) | setiap entri di dalam daftar ini harus diselesaikan ke suatu objek Kubernetes atau _patch_ Json | +| vars | [][Var](https://github.com/kubernetes-sigs/kustomize/blob/master/api/types/var.go#L31) | setiap entri digunakan untuk menangkap teks yang 
berasal dari _field_ sebuah sumber daya | +| images | [][Image](https://github.com/kubernetes-sigs/kustomize/tree/master/api/types/image.go#L23) | setiap entri digunakan untuk memodifikasi nama, tag dan/atau _digest_ untuk sebuah _image_ tanpa membuat _patch_ | +| configurations | []string | setiap entri di dalam daftar ini harus diselesaikan ke sebuah berkas yang berisi [konfigurasi transformer Kustomize](https://github.com/kubernetes-sigs/kustomize/tree/master/examples/transformerconfigs) | +| crds | []string | setiap entri di dalam daftar ini harus diselesaikan ke sebuah berkas definisi OpenAPI untuk tipe Kubernetes | + + + +## {{% heading "whatsnext" %}} + + +* [Kustomize](https://github.com/kubernetes-sigs/kustomize) +* [Buku Kubectl](https://kubectl.docs.kubernetes.io) +* [Rujukan Perintah Kubectl](/id/docs/reference/generated/kubectl/kubectl/) +* [Rujukan API Kubernetes](/id/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) + + diff --git a/content/id/docs/tasks/run-application/_index.md b/content/id/docs/tasks/run-application/_index.md new file mode 100644 index 0000000000000..7c5e073f2b88b --- /dev/null +++ b/content/id/docs/tasks/run-application/_index.md @@ -0,0 +1,5 @@ +--- +title: "Menjalankan" +description: Menjalankan dan mengatur aplikasi stateless dan stateful. +weight: 40 +--- diff --git a/content/id/docs/tasks/run-application/horizontal-pod-autoscaler.md b/content/id/docs/tasks/run-application/horizontal-pod-autoscaler.md index badc7a058dad5..c4ed16413f3fd 100644 --- a/content/id/docs/tasks/run-application/horizontal-pod-autoscaler.md +++ b/content/id/docs/tasks/run-application/horizontal-pod-autoscaler.md @@ -4,11 +4,11 @@ feature: title: Horizontal scaling description: > Scale up dan scale down aplikasimu dengan sebuah perintah yang serderhana, dengan UI, atau otomatis bersadarkan penggunaan CPU. -content_template: templates/concept +content_type: concept weight: 90 --- -{{% capture overview %}} + HorizontalPodAutoscaler secara otomatis akan memperbanyak jumlah Pod di dalam ReplicationController, Deployment, ReplicaSet ataupun StatefulSet berdasarkan hasil observasi penggunaan CPU(atau, dengan @@ -20,10 +20,10 @@ HorizontalPodAutoscaler diimplementasikan sebagai Kubernetes API *resource* dan Kontroler akan mengubah jumlah replika pada ReplicationController atau pada Deployment untuk menyesuaikan dengan hasil observasi rata-rata penggunaan CPU sesuai dengan yang ditentukan oleh pengguna. -{{% /capture %}} -{{% capture body %}} + + ## Bagaimana cara kerja HorizontalPodAutoscaler? @@ -172,7 +172,7 @@ dapat ditemukan pada `autoscaling/v2beta2`. *Field* yang baru diperkenalkan pada `autoscaling/v2beta2` adalah *preserved* sebagai anotasi ketika menggunakan `autoscaling/v1`. Ketika kamu membuat sebuah HorizontalPodAutoscaler, pastikan nama yang ditentukan adalah valid -[nama subdomain DNS](/docs/concepts/overview/working-with-objects/names#nama). +[nama subdomain DNS](/id/docs/concepts/overview/working-with-objects/names#nama). Untuk lebih detail tentang objek API ini dapat ditemukan di [Objek HorizontalPodAutoscaler](https://git.k8s.io/community/contributors/design-proposals/autoscaling/horizontal-pod-autoscaler.md#horizontalpodautoscaler-object). @@ -441,12 +441,13 @@ behavior: selectPolicy: Disabled ``` -{{% /capture %}} -{{% capture whatsnext %}} + +## {{% heading "whatsnext" %}} + * Dokumentasi desain [Horizontal Pod Autoscaling](https://git.k8s.io/community/contributors/design-proposals/autoscaling/horizontal-pod-autoscaler.md). 
* Perintah kubectl autoscale [kubectl autoscale](/docs/reference/generated/kubectl/kubectl-commands/#autoscale). * Contoh penggunaan [HorizontalPodAutoscaler](/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/). -{{% /capture %}} + diff --git a/content/id/docs/tasks/run-application/run-stateless-application-deployment.md b/content/id/docs/tasks/run-application/run-stateless-application-deployment.md new file mode 100644 index 0000000000000..a069188de66b8 --- /dev/null +++ b/content/id/docs/tasks/run-application/run-stateless-application-deployment.md @@ -0,0 +1,158 @@ +--- +title: Menjalankan Aplikasi Stateless Menggunakan Deployment +min-kubernetes-server-version: v1.9 +content_type: tutorial +weight: 10 +--- + + + +Dokumen ini menunjukkan cara bagaimana cara menjalankan sebuah aplikasi menggunakan objek Deployment Kubernetes. + + + + +## {{% heading "objectives" %}} + + +* Membuat sebuah Deployment Nginx. +* Menggunakan kubectl untuk mendapatkan informasi mengenai Deployment. +* Mengubah Deployment. + + + + +## {{% heading "prerequisites" %}} + + +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} + + + + + + +## Membuat dan Menjelajahi Deployment Nginx + +Kamu dapat menjalankan aplikasi dengan membuat sebuah objek Deployment Kubernetes, dan kamu +dapat mendeskripsikan sebuah Deployment di dalam berkas YAML. Sebagai contohnya, berkas +YAML berikut mendeskripsikan sebuah Deployment yang menjalankan _image_ Docker nginx:1.14.2: + +{{< codenew file="application/deployment.yaml" >}} + + +1. Buatlah sebuah Deployment berdasarkan berkas YAML: + + kubectl apply -f https://k8s.io/examples/application/deployment.yaml + +2. Tampilkan informasi dari Deployment: + + kubectl describe deployment nginx-deployment + + Keluaran dari perintah tersebut akan menyerupai: + + user@computer:~/website$ kubectl describe deployment nginx-deployment + Name: nginx-deployment + Namespace: default + CreationTimestamp: Tue, 30 Aug 2016 18:11:37 -0700 + Labels: app=nginx + Annotations: deployment.kubernetes.io/revision=1 + Selector: app=nginx + Replicas: 2 desired | 2 updated | 2 total | 2 available | 0 unavailable + StrategyType: RollingUpdate + MinReadySeconds: 0 + RollingUpdateStrategy: 1 max unavailable, 1 max surge + Pod Template: + Labels: app=nginx + Containers: + nginx: + Image: nginx:1.14.2 + Port: 80/TCP + Environment: + Mounts: + Volumes: + Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable + OldReplicaSets: + NewReplicaSet: nginx-deployment-1771418926 (2/2 replicas created) + No events. + +3. Lihatlah daftar Pod-Pod yang dibuat oleh Deployment: + + kubectl get pods -l app=nginx + + Keluaran dari perintah tersebut akan menyerupai: + + NAME READY STATUS RESTARTS AGE + nginx-deployment-1771418926-7o5ns 1/1 Running 0 16h + nginx-deployment-1771418926-r18az 1/1 Running 0 16h + +4. Tampilkan informasi mengenai Pod: + + kubectl describe pod + + dimana `` merupakan nama dari Pod kamu. + +## Mengubah Deployment + +Kamu dapat mengubah Deployment dengan cara mengaplikasikan berkas YAML yang baru. +Berkas YAML ini memberikan spesifikasi Deployment untuk menggunakan Nginx versi 1.16.1. + +{{< codenew file="application/deployment-update.yaml" >}} + +1. Terapkan berkas YAML yang baru: + + kubectl apply -f https://k8s.io/examples/application/deployment-update.yaml + +2. 
Perhatikan bahwa Deployment membuat Pod-Pod dengan nama baru dan menghapus Pod-Pod lama: + + kubectl get pods -l app=nginx + +## Meningkatkan Jumlah Aplikasi dengan Meningkatkan Ukuran Replika + +Kamu dapat meningkatkan jumlah Pod di dalam Deployment dengan menerapkan +berkas YAML baru. Berkas YAML ini akan meningkatkan jumlah replika menjadi 4, +yang nantinya memberikan spesifikasi agar Deployment memiliki 4 buah Pod. + +{{< codenew file="application/deployment-scale.yaml" >}} + +1. Terapkan berkas YAML: + + kubectl apply -f https://k8s.io/examples/application/deployment-scale.yaml + +2. Verifikasi Deployment kamu saat ini yang memiliki empat Pod: + + kubectl get pods -l app=nginx + + Keluaran dari perintah tersebut akan menyerupai: + + NAME READY STATUS RESTARTS AGE + nginx-deployment-148880595-4zdqq 1/1 Running 0 25s + nginx-deployment-148880595-6zgi1 1/1 Running 0 25s + nginx-deployment-148880595-fxcez 1/1 Running 0 2m + nginx-deployment-148880595-rwovn 1/1 Running 0 2m + +## Menghapus Deployment + +Menghapus Deployment dengan nama: + + kubectl delete deployment nginx-deployment + +## Cara Lama Menggunakan: ReplicationController + +Cara yang dianjurkan untuk membuat aplikasi dengan replika adalah dengan menggunakan Deployment, +yang nantinya akan menggunakan ReplicaSet. Sebelum Deployment dan ReplicaSet ditambahkan +ke Kubernetes, aplikasi dengan replika dikonfigurasi menggunakan [ReplicationController](/id/docs/concepts/workloads/controllers/replicationcontroller/). + + + + +## {{% heading "whatsnext" %}} + + +* Pelajari lebih lanjut mengenai [objek Deployment](/id/docs/concepts/workloads/controllers/deployment/). + + diff --git a/content/id/docs/tasks/tls/_index.md b/content/id/docs/tasks/tls/_index.md new file mode 100755 index 0000000000000..8607aa28d29b6 --- /dev/null +++ b/content/id/docs/tasks/tls/_index.md @@ -0,0 +1,5 @@ +--- +title: "TLS" +weight: 100 +--- + diff --git a/content/id/docs/tasks/tls/managing-tls-in-a-cluster.md b/content/id/docs/tasks/tls/managing-tls-in-a-cluster.md new file mode 100644 index 0000000000000..e672a7b265098 --- /dev/null +++ b/content/id/docs/tasks/tls/managing-tls-in-a-cluster.md @@ -0,0 +1,214 @@ +--- +title: Kelola Sertifikat TLS Pada Klaster +content_type: task +--- + + + +Kubernetes menyediakan API `certificates.k8s.io` yang memungkinkan kamu membuat sertifikat +TLS yang ditandatangani oleh Otoritas Sertifikat (CA) yang kamu kendalikan. CA dan sertifikat ini +bisa digunakan oleh _workload_ untuk membangun kepercayaan. + +API `certificates.k8s.io` menggunakan protokol yang mirip dengan [konsep ACME](https://github.com/ietf-wg-acme/acme/). + +{{< note >}} +Sertifikat yang dibuat menggunakan API `certificates.k8s.io` ditandatangani oleh CA +khusus. Ini memungkinkan untuk mengkonfigurasi klaster kamu agar menggunakan CA _root_ klaster untuk tujuan ini, +namun jangan pernah mengandalkan ini. Jangan berasumsi bahwa sertifikat ini akan melakukan validasi +dengan CA _root_ klaster +{{< /note >}} + + + + +## {{% heading "prerequisites" %}} + + +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} + + + + + +## Mempercayai TLS dalam Klaster + +Mempercayai CA khusus dari aplikasi yang berjalan sebagai Pod biasanya memerlukan +beberapa tambahan konfigurasi aplikasi. Kamu harus menambahkan bundel sertifikat CA +ke daftar sertifikat CA yang dipercaya klien atau server TLS. 
+Misalnya, kamu akan melakukan ini dengan konfigurasi TLS golang dengan mengurai rantai sertifikat +dan menambahkan sertifikat yang diurai ke `RootCAs` di _struct_ +[`tls.Config`](https://godoc.org/crypto/tls#Config). + +Kamu bisa mendistribusikan sertifikat CA sebagai sebuah +[ConfigMap](/id/docs/tasks/configure-pod-container/configure-pod-configmap) yang bisa diakses oleh Pod kamu. + +## Meminta Sertifikat + +Bagian berikut mendemonstrasikan cara membuat sertifikat TLS untuk sebuah +Service kubernetes yang diakses melalui DNS. + +{{< note >}} +Tutorial ini menggunakan CFSSL: PKI dan peralatan TLS dari Cloudflare [klik disini](https://blog.cloudflare.com/introducing-cfssl/) untuk mengetahui lebih jauh. +{{< /note >}} + +## Unduh dan Pasang CFSSL + +Contoh ini menggunakan cfssl yang dapat diunduh pada +[https://pkg.cfssl.org/](https://pkg.cfssl.org/). + +## Membuat CertificateSigningRequest + +Buat kunci pribadi dan CertificateSigningRequest (CSR) dengan menggunakan perintah berikut: + +```shell +cat < +Annotations: +CreationTimestamp: Tue, 21 Mar 2017 07:03:51 -0700 +Requesting User: yourname@example.com +Status: Pending +Subject: + Common Name: my-svc.my-namespace.svc.cluster.local + Serial Number: +Subject Alternative Names: + DNS Names: my-svc.my-namespace.svc.cluster.local + IP Addresses: 192.0.2.24 + 10.0.34.2 +Events: +``` + +## Mendapatkan Persetujuan CertificateSigningRequest + +Penyetujuan CertificateSigningRequest dapat dilakukan dengan otomatis +atau dilakukan sekali oleh administrator klaster. Informasi lebih lanjut tentang +apa yang terjadi dibahas dibawah ini. + +## Unduh dan Gunakan Sertifikat + +Setelah CSR ditandatangani dan disetujui, kamu akan melihat: + +```shell +kubectl get csr +``` + +```none +NAME AGE REQUESTOR CONDITION +my-svc.my-namespace 10m yourname@example.com Approved,Issued +``` + +Kamu bisa mengundur sertifikat yang telah diterbitkan dan menyimpannya ke berkas +`server.crt` dengan menggunakan perintah berikut: + +```shell +kubectl get csr my-svc.my-namespace -o jsonpath='{.status.certificate}' \ + | base64 --decode > server.crt +``` + +Sekarang kamu bisa menggunakan `server.crt` dan `server-key.pem` sebagai pasangan +kunci untuk memulai server HTTPS kamu. + +## Penyetujuan CertificateSigningRequest + +Administrator Kubernetes (dengan izin yang cukup) dapat menyetujui secara manual +(atau menolak) Certificate Signing Requests dengan menggunakan perintah `kubectl certificate +approve` dan `kubectl certificate deny`. Namun jika kamu bermaksud +untuk menggunakan API ini secara sering, kamu dapat mempertimbangkan untuk menulis +Certificate _controller_ otomatis. + +Baik itu mesin atau manusia yang menggunakan kubectl seperti di atas, peran pemberi persetujuan adalah +untuk memverifikasi bahwa CSR memenuhi dua persyaratan: +1. Subjek CSR mengontrol kunci pribadi yang digunakan untuk menandatangani CSR. Ini + mengatasi ancaman pihak ketiga yang menyamar sebagai subjek resmi. + Pada contoh di atas, langkah ini adalah untuk memverifikasi bahwa Pod mengontrol + kunci pribadi yang digunakan untuk menghasilkan CSR. +2. Subjek CSR berwenang untuk bertindak dalam konteks yang diminta. Ini + mengatasi ancaman subjek yang tidak diinginkan bergabung dengan klaster. Dalam + contoh di atas, langkah ini untuk memverifikasi bahwa Pod diizinkan + berpartisipasi dalam Service yang diminta. + +Jika dan hanya jika kedua persyaratan ini dipenuhi, pemberi persetujuan harus menyetujui +CSR dan sebaliknya harus menolak CSR. 
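Sebagai gambaran singkat (sketsa di luar langkah-langkah di atas, memakai nama CSR `my-svc.my-namespace` dari contoh sebelumnya), persetujuan atau penolakan manual dapat dilakukan seperti berikut:

```shell
# Lihat CSR yang masih berstatus Pending
kubectl get csr

# Setujui CSR setelah kedua persyaratan di atas terverifikasi
kubectl certificate approve my-svc.my-namespace

# Atau, tolak CSR yang tidak memenuhi persyaratan
kubectl certificate deny my-svc.my-namespace
```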
+ +## Peringatan tentang Izin Persetujuan + +Kemampuan untuk menyetujui CSR menentukan siapa yang mempercayai siapa di dalam lingkungan kamu. +Kemampuan untuk menyetujui CSR tersebut seharusnya tidak diberikan secara luas. +Persyaratan tantangan yang disebutkan di bagian sebelumnya dan +dampak dari mengeluarkan sertifikat khusus, harus sepenuhnya dipahami +sebelum memberikan izin ini. + +## Catatan Untuk Administrator Klaster + +Tutorial ini mengasumsikan bahwa penanda tangan diatur untuk melayani API sertifikat. +Kubernetes _controller manager_ menyediakan implementasi bawaan dari penanda tangan. Untuk +mengaktifkan, berikan parameter `--cluster-signed-cert-file` dan +`--cluster-signed-key-file` ke _controller manager_ dengan _path_ ke +pasangan kunci CA kamu. + diff --git a/content/id/docs/tasks/tools/install-kubectl.md b/content/id/docs/tasks/tools/install-kubectl.md index e4d0019c3e865..bc112c3cddb33 100644 --- a/content/id/docs/tasks/tools/install-kubectl.md +++ b/content/id/docs/tasks/tools/install-kubectl.md @@ -284,7 +284,7 @@ Kamu dapat menginstal `kubectl` sebagai bagian dari Google Cloud SDK. ## Memeriksa konfigurasi kubectl -Agar `kubectl` dapat mengakses klaster Kubernetes, dibutuhkan sebuah [berkas kubeconfig](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/), yang akan otomatis dibuat ketika kamu membuat klaster baru menggunakan [kube-up.sh](https://github.com/kubernetes/kubernetes/blob/master/cluster/kube-up.sh) atau setelah berhasil men-_deploy_ klaster Minikube. Secara bawaan, konfigurasi `kubectl` disimpan di `~/.kube/config`. +Agar `kubectl` dapat mengakses klaster Kubernetes, dibutuhkan sebuah [berkas kubeconfig](/id/docs/tasks/access-application-cluster/configure-access-multiple-clusters/), yang akan otomatis dibuat ketika kamu membuat klaster baru menggunakan [kube-up.sh](https://github.com/kubernetes/kubernetes/blob/master/cluster/kube-up.sh) atau setelah berhasil men-_deploy_ klaster Minikube. Secara bawaan, konfigurasi `kubectl` disimpan di `~/.kube/config`. Kamu dapat memeriksa apakah konfigurasi `kubectl` sudah benar dengan mengambil keadaan klaster: @@ -490,9 +490,9 @@ compinit ## {{% heading "whatsnext" %}} -* [Menginstal Minikube.](/docs/tasks/tools/install-minikube/) +* [Menginstal Minikube.](/id/docs/tasks/tools/install-minikube/) * Lihat [panduan persiapan](/docs/setup/) untuk mencari tahu tentang pembuatan klaster. * [Pelajari cara untuk menjalankan dan mengekspos aplikasimu.](/docs/tasks/access-application-cluster/service-access-application-cluster/) -* Jika kamu membutuhkan akses ke klaster yang tidak kamu buat, lihat [dokumen Berbagi Akses Klaster](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/). +* Jika kamu membutuhkan akses ke klaster yang tidak kamu buat, lihat [dokumen Berbagi Akses Klaster](/id/docs/tasks/access-application-cluster/configure-access-multiple-clusters/). * Baca [dokumen referensi kubectl](/docs/reference/kubectl/kubectl/) diff --git a/content/id/docs/tasks/tools/install-minikube.md b/content/id/docs/tasks/tools/install-minikube.md index 342f05246ace1..d3e10f4fd6823 100644 --- a/content/id/docs/tasks/tools/install-minikube.md +++ b/content/id/docs/tasks/tools/install-minikube.md @@ -9,7 +9,7 @@ card: -Halaman ini menunjukkan cara instalasi [Minikube](/docs/tutorials/hello-minikube), sebuah alat untuk menjalankan sebuah klaster Kubernetes dengan satu Node pada mesin virtual yang ada di komputer kamu. 
+Halaman ini menunjukkan cara instalasi [Minikube](/id/docs/tutorials/hello-minikube), sebuah alat untuk menjalankan sebuah klaster Kubernetes dengan satu Node pada mesin virtual yang ada di komputer kamu. @@ -65,7 +65,7 @@ Hyper-V Requirements: A hypervisor has been detected. Features required for ### Menginstal kubectl -Pastikan kamu mempunyai kubectl yang terinstal. Kamu bisa menginstal kubectl dengan mengikuti instruksi pada halaman [Menginstal dan Menyiapkan kubectl](/docs/tasks/tools/install-kubectl/#install-kubectl-on-linux). +Pastikan kamu mempunyai kubectl yang terinstal. Kamu bisa menginstal kubectl dengan mengikuti instruksi pada halaman [Menginstal dan Menyiapkan kubectl](/id/docs/tasks/tools/install-kubectl/#install-kubectl-on-linux). ### Menginstal sebuah Hypervisor @@ -125,7 +125,7 @@ brew install minikube {{% tab name="macOS" %}} ### Instalasi kubectl -Pastikan kamu mempunyai kubectl yang terinstal. Kamu bisa menginstal kubectl berdasarkan instruksi pada laman [Menginstal dan Menyiapkan kubectl](/docs/tasks/tools/install-kubectl/#install-kubectl-on-macos). +Pastikan kamu mempunyai kubectl yang terinstal. Kamu bisa menginstal kubectl berdasarkan instruksi pada laman [Menginstal dan Menyiapkan kubectl](/id/docs/tasks/tools/install-kubectl/#install-kubectl-on-macos). ### Instalasi sebuah Hypervisor @@ -161,7 +161,7 @@ sudo mv minikube /usr/local/bin {{% tab name="Windows" %}} ### Instalasi kubectl -Pastikan kamu mempunyai kubectl yang terinstal. Kamu bisa menginstal kubectl berdasarkan instruksi pada halaman [Menginstal dan Menyiapkan kubectl](/docs/tasks/tools/install-kubectl/#install-kubectl-on-windows). +Pastikan kamu mempunyai kubectl yang terinstal. Kamu bisa menginstal kubectl berdasarkan instruksi pada halaman [Menginstal dan Menyiapkan kubectl](/id/docs/tasks/tools/install-kubectl/#install-kubectl-on-windows). 
### Menginstal sebuah Hypervisor diff --git a/content/id/docs/tasks/tools/kubeadm/_index.md b/content/id/docs/tasks/tools/kubeadm/_index.md new file mode 100644 index 0000000000000..e342c2da513aa --- /dev/null +++ b/content/id/docs/tasks/tools/kubeadm/_index.md @@ -0,0 +1,4 @@ +--- +title: "Membangun klaster menggunakan kubeadm" +weight: 10 +--- diff --git a/content/id/docs/tutorials/_index.md b/content/id/docs/tutorials/_index.md index 1093644e157d0..f56702b94eeef 100644 --- a/content/id/docs/tutorials/_index.md +++ b/content/id/docs/tutorials/_index.md @@ -24,7 +24,7 @@ Sebelum melangkah lebih lanjut ke tutorial, sebaiknya tandai dulu halaman [Kamus * [Pengenalan Kubernetes (edX)](https://www.edx.org/course/introduction-kubernetes-linuxfoundationx-lfs158x#) -* [Halo Minikube](/docs/tutorials/hello-minikube/) +* [Halo Minikube](/id/docs/tutorials/hello-minikube/) ## Konfigurasi @@ -32,7 +32,7 @@ Sebelum melangkah lebih lanjut ke tutorial, sebaiknya tandai dulu halaman [Kamus ## Aplikasi Stateless -* [Memberi Akses Aplikasi di dalam Klaster melalui IP Eksternal](/docs/tutorials/stateless-application/expose-external-ip-address/) +* [Memberi Akses Aplikasi di dalam Klaster melalui IP Eksternal](/id/docs/tutorials/stateless-application/expose-external-ip-address/) * [Contoh: Deploy aplikasi Guestbook PHP dengan Redis](/docs/tutorials/stateless-application/guestbook/) diff --git a/content/id/docs/tutorials/hello-minikube.md b/content/id/docs/tutorials/hello-minikube.md index f2588e776b797..faba283d89bdf 100644 --- a/content/id/docs/tutorials/hello-minikube.md +++ b/content/id/docs/tutorials/hello-minikube.md @@ -19,7 +19,7 @@ Tutorial ini menunjukkan bagaimana caranya menjalankan aplikasi sederhana Node.j Katacoda menyediakan environment Kubernetes secara gratis di dalam browser. {{< note >}} -Kamupun bisa mengikuti tutorial ini kalau sudah instalasi [Minikube di lokal](/docs/tasks/tools/install-minikube/) kamu. +Kamupun bisa mengikuti tutorial ini kalau sudah instalasi [Minikube di lokal](/id/docs/tasks/tools/install-minikube/) kamu. {{< /note >}} @@ -68,9 +68,9 @@ Untuk info lebih lanjut tentang perintah `docker build`, baca [dokumentasi Docke ## Membuat sebuah Deployment -Sebuah Kubernetes [*Pod*](/docs/concepts/workloads/pods/pod/) adalah kumpulan dari satu atau banyak Kontainer, +Sebuah Kubernetes [*Pod*](/id/docs/concepts/workloads/pods/pod/) adalah kumpulan dari satu atau banyak Kontainer, saling terhubung untuk kebutuhan administrasi dan jaringan. Pod dalam tutorial ini hanya punya satu Kontainer. Sebuah Kubernetes -[*Deployment*](/docs/concepts/workloads/controllers/deployment/) selalu memeriksa kesehatan +[*Deployment*](/id/docs/concepts/workloads/controllers/deployment/) selalu memeriksa kesehatan Pod kamu dan melakukan restart saat Kontainer di dalam Pod tersebut mati. Deployment adalah cara jitu untuk membuat dan mereplikasi Pod. 1. Gunakan perintah `kubectl create` untuk membuat Deployment yang dapat mengatur Pod. @@ -122,7 +122,7 @@ Pod menjalankan Kontainer sesuai dengan image Docker yang telah diberikan. ## Membuat sebuah Servis Secara default, Pod hanya bisa diakses melalui alamat IP internal di dalam klaster Kubernetes. -Supaya Kontainer `hello-node` bisa diakses dari luar jaringan virtual Kubernetes, kamu harus ekspos Pod sebagai [*Servis*](/docs/concepts/services-networking/service/) Kubernetes. 
+Supaya Kontainer `hello-node` bisa diakses dari luar jaringan virtual Kubernetes, kamu harus ekspos Pod sebagai [*Servis*](/id/docs/concepts/services-networking/service/) Kubernetes. 1. Ekspos Pod pada internet publik menggunakan perintah `kubectl expose`: @@ -266,8 +266,8 @@ minikube delete ## {{% heading "whatsnext" %}} -* Pelajari lebih lanjut tentang [Deployment](/docs/concepts/workloads/controllers/deployment/). +* Pelajari lebih lanjut tentang [Deployment](/id/docs/concepts/workloads/controllers/deployment/). * Pelajari lebih lanjut tentang [Deploy aplikasi](/docs/user-guide/deploying-applications/). -* Pelajari lebih lanjut tentang [Servis](/docs/concepts/services-networking/service/). +* Pelajari lebih lanjut tentang [Servis](/id/docs/concepts/services-networking/service/). diff --git a/content/id/docs/tutorials/kubernetes-basics/create-cluster/_index.md b/content/id/docs/tutorials/kubernetes-basics/create-cluster/_index.md new file mode 100644 index 0000000000000..6ae659eca39ad --- /dev/null +++ b/content/id/docs/tutorials/kubernetes-basics/create-cluster/_index.md @@ -0,0 +1,4 @@ +--- +title: Membuat Klaster +weight: 10 +--- diff --git a/content/id/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html b/content/id/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html new file mode 100644 index 0000000000000..aeae9f469da35 --- /dev/null +++ b/content/id/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html @@ -0,0 +1,37 @@ +--- +title: Tutorial Interaktif - Membuat Klaster +weight: 20 +--- + + + + + + + + + + + + + + + diff --git a/content/id/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html b/content/id/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html new file mode 100644 index 0000000000000..debeaf6d48144 --- /dev/null +++ b/content/id/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html @@ -0,0 +1,107 @@ +--- +title: Menggunakan Minikube Untuk Membuat Klaster +weight: 10 +--- + + + + + + + + + +
+ +
+ +
+ +
+

Objectives

+
    +
  • Belajar apa itu klaster Kubernetes.
  • +
  • Belajar apa itu Minikube.
  • +
  • Memulai klaster Kubernetes menggunakan terminal _online_.
  • +
+
+ +
+

Klaster Kubernetes

+

+ Kubernetes mengoordinasikan klaster komputer dengan ketersediaan tinggi (_highly available_) yang saling terhubung sebagai satu unit tunggal. Abstraksi pada Kubernetes memungkinkan kamu men-_deploy_ aplikasi terkemas (_containerized_) ke sebuah klaster tanpa perlu mengikatnya secara spesifik pada setiap mesin. Untuk menggunakan model _deployment_ baru ini, aplikasi perlu dikemas dengan cara yang memisahkannya dari hos individu: aplikasi perlu dikemas dalam kontainer. Aplikasi terkemas lebih fleksibel dan lebih tersedia dibanding model _deployment_ lama, di mana aplikasi dipasang langsung di dalam mesin tertentu sebagai paket yang terintegrasi erat dengan hos. Kubernetes mengotomatisasi distribusi dan penjadwalan kontainer aplikasi di seluruh klaster dengan cara yang lebih efisien. Kubernetes merupakan platform _open-source_ yang siap produksi.

+

Klaster Kubernetes terdiri dari 2 tipe sumber daya: +

    +
  • Master mengoordinasikan klaster
  • +
  • Node adalah pekerja (_worker_) yang menjalankan aplikasi
  • +
+

+
+ +
+
+

Summary:

+
    +
  • Klaster Kubernetes
  • +
  • Minikube
  • +
+
+
+

+ Kubernetes merupakan platform _open-source_ tingkat produksi yang mengatur penjadwalan dan eksekusi kontainer aplikasi di dalam dan di seluruh klaster komputer.

+
+
+
+
+ +
+
+

Diagram Klaster

+
+
+ +
+
+

+
+
+
+ +
+
+

Master mempunyai kewajiban untuk mengelola klaster. Master mengoordinasikan semua aktivitas di klaster kamu, seperti penjadwalan aplikasi, pemeliharaan keadaan (_state_) aplikasi yang diinginkan, _scaling_ aplikasi, dan _roll-out_ pembaruan.
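Sebagai gambaran kasar (di luar tutorial interaktif ini, dengan asumsi sebuah Deployment bernama `hello-node` sudah berjalan), aktivitas seperti _scaling_ dan _roll-out_ pembaruan diminta melalui `kubectl`, lalu dikoordinasikan oleh master:

```shell
# Meminta master menaikkan jumlah replika aplikasi menjadi 3
kubectl scale deployment hello-node --replicas=3

# Memantau roll-out pembaruan yang sedang dikoordinasikan oleh master
kubectl rollout status deployment/hello-node
```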

+

Node merupakan VM atau komputer fisik yang berfungsi sebagai mesin pekerja dalam klaster Kubernetes. Setiap Node mempunyai kubelet, sebuah agen untuk mengelola Node tersebut dan berkomunikasi dengan master Kubernetes. Node juga harus mempunyai perkakas untuk menangani operasi kontainer, seperti Docker atau rkt. Sebuah klaster Kubernetes yang menangani trafik produksi harus mempunyai minimal 3 Node.
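Sebagai ilustrasi singkat (bukan bagian dari tutorial interaktif ini), kamu dapat memeriksa Node beserta komponennya menggunakan `kubectl`; `<nama-node>` di bawah hanyalah contoh penampung (placeholder):

```shell
# Menampilkan semua Node di dalam klaster beserta statusnya
kubectl get nodes

# Menampilkan detail sebuah Node, termasuk versi kubelet dan runtime kontainer yang dipakai
kubectl describe node <nama-node>
```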

+
+
+
+

Master mengatur klaster dan Node yang digunakan sebagai hos dari aplikasi yang berjalan.

+
+
+
+ +
+
+

Ketika kamu men-_deploy_ aplikasi pada Kubernetes, kamu memberitahu master untuk memulai kontainer aplikasi. Master menjadwalkan kontainer tersebut untuk berjalan di atas Node-Node klaster. Node berkomunikasi dengan master menggunakan Kubernetes API yang disediakan oleh master. Pengguna akhir juga dapat menggunakan Kubernetes API secara langsung untuk berinteraksi dengan klaster.
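Sebagai sketsa sederhana (nama `hello-node` dan `<image>` di bawah hanyalah contoh, bukan bagian dari tutorial interaktif ini), memberitahu master untuk menjalankan kontainer aplikasi dilakukan lewat `kubectl`, yang memanggil Kubernetes API:

```shell
# Memberitahu master untuk menjalankan kontainer aplikasi
# (ganti <image> dengan image aplikasi kamu)
kubectl create deployment hello-node --image=<image>

# Master menjadwalkan kontainer ke Node pekerja; periksa penempatannya
kubectl get pods -o wide
```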

+ +

Klaster Kubernetes dapat di-_deploy_ ke mesin fisik maupun virtual. Untuk memulai pengembangan dengan Kubernetes, kamu dapat menggunakan Minikube. Minikube merupakan implementasi Kubernetes ringan yang membuat VM pada mesin lokal kamu dan men-_deploy_ klaster sederhana yang terdiri atas satu Node. Minikube tersedia untuk sistem Linux, macOS, dan Windows. Minikube CLI menyediakan operasi _bootstrapping_ dasar untuk bekerja dengan klaster kamu. Namun untuk tutorial ini, kamu akan menggunakan terminal _online_ yang sudah disediakan dengan Minikube yang sudah terpasang sebelumnya.
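Sebagai gambaran, jika kamu menjalankan Minikube secara lokal (bukan lewat terminal _online_ yang dipakai tutorial ini), klaster satu Node dapat dimulai dan diperiksa seperti berikut:

```shell
# Membuat VM lokal dan men-deploy klaster Kubernetes sederhana dengan satu Node
minikube start

# Memastikan klaster berjalan dan kubectl terhubung ke klaster tersebut
kubectl cluster-info
kubectl get nodes
```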

+ +

Sekarang kamu telah mengetahui apa itu Kubernetes, mari kita pergi ke tutorial online dan memulai klaster pertama kita!

+ +
+
+
+ + + +
+ +
+ + + diff --git a/content/id/docs/tutorials/stateless-application/_index.md b/content/id/docs/tutorials/stateless-application/_index.md new file mode 100644 index 0000000000000..6923ee8165fb8 --- /dev/null +++ b/content/id/docs/tutorials/stateless-application/_index.md @@ -0,0 +1,5 @@ +--- +title: "Aplikasi Stateless" +weight: 40 +--- + diff --git a/content/id/docs/tutorials/stateless-application/expose-external-ip-address.md b/content/id/docs/tutorials/stateless-application/expose-external-ip-address.md new file mode 100644 index 0000000000000..df297f4c634b7 --- /dev/null +++ b/content/id/docs/tutorials/stateless-application/expose-external-ip-address.md @@ -0,0 +1,173 @@ +--- +title: Mengekspos Alamat IP Eksternal untuk Mengakses Aplikasi di dalam Klaster +content_type: tutorial +weight: 10 +--- + + + +Dokumen ini menjelaskan bagaimana cara membuat objek Service Kubernetes +yang mengekspos alamat IP eksternal. + + + + +## {{% heading "prerequisites" %}} + + + * Instal [kubectl](/id/docs/tasks/tools/install-kubectl/). + + * Gunakan sebuah penyedia layanan cloud seperti Google Kubernetes Engine atau Amazon Web Services + untuk membuat sebuah klaster Kubernetes. Tutorial ini membuat sebuah + [_load balancer_ eksternal](/id/docs/tasks/access-application-cluster/create-external-load-balancer/), + yang membutuhkan sebuah penyedia layanan cloud. + + * Konfigurasi `kubectl` agar dapat berkomunikasi dengan Kubernetes API Server kamu. + Untuk informasi lebih lanjut, kamu dapat merujuk pada dokumentasi penyedia layanan cloud + yang kamu gunakan. + + + +## {{% heading "objectives" %}} + +* Jalankan lima buah instans dari aplikasi Hello World. +* Buatlah sebuah objek Service yang mengekspos sebuah alamat IP eksternal. +* Gunakan sebuah objek Service untuk mengakses aplikasi yang sedang dijalankan. + + + + + +## Membuat sebuah objek Service untuk sebuah aplikasi yang dijalankan pada lima buah Pod + +1. Jalankan sebuah aplikasi Hello World pada klaster kamu: + +{{< codenew file="service/load-balancer-example.yaml" >}} + +```shell +kubectl apply -f https://k8s.io/examples/service/load-balancer-example.yaml +``` + + +Perintah di atas akan membuat sebuah + objek [Deployment](/id/docs/concepts/workloads/controllers/deployment/) + dan sebuah objek + [ReplicaSet](/id/docs/concepts/workloads/controllers/replicaset/) + yang diasosiasikan dengan Deployment yang dibuat. ReplicaSet memiliki lima buah + [Pod](/id/docs/concepts/workloads/pods/pod/), + yang masing-masing dari Pod tersebut menjalankan aplikasi Hello World. + +1. Tampilkan informasi mengenai Deployment: + + kubectl get deployments hello-world + kubectl describe deployments hello-world + +1. Tampilkan informasi mengenai objek ReplicaSet: + + kubectl get replicasets + kubectl describe replicasets + +1. Buatlah sebuah objek Service yang mengekspos deployment: + + kubectl expose deployment hello-world --type=LoadBalancer --name=my-service + +1. Tampilkan informasi mengenai Service: + + kubectl get services my-service + + Keluaran dari perintah di atas akan menyerupai tampilan berikut: + + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + my-service LoadBalancer 10.3.245.137 104.198.205.71 8080/TCP 54s + + {{< note >}} + + Service dengan `type=LoadBalancer` didukung oleh penyedia layanan cloud eksternal, yang tidak tercakup dalam contoh ini, silahkan merujuk pada [laman berikut](/id/docs/concepts/services-networking/service/#loadbalancer) untuk informasi lebih lanjut. 
+ + {{< /note >}} + + {{< note >}} + + Jika sebuah alamat IP eksternal yang ditunjukkan dalam status \, tunggulah hingga satu menit kemudian masukkan perintah yang sama lagi. + + {{< /note >}} + +1. Tampilkan informasi detail mengenai Service: + + kubectl describe services my-service + + Perintah di atas akan menampilkan keluaran sebagai berikut: + + Name: my-service + Namespace: default + Labels: app.kubernetes.io/name=load-balancer-example + Annotations: + Selector: app.kubernetes.io/name=load-balancer-example + Type: LoadBalancer + IP: 10.3.245.137 + LoadBalancer Ingress: 104.198.205.71 + Port: 8080/TCP + NodePort: 32377/TCP + Endpoints: 10.0.0.6:8080,10.0.1.6:8080,10.0.1.7:8080 + 2 more... + Session Affinity: None + Events: + + Pastikan nilai dari alamat IP eksternal (`LoadBalancer Ingress`) diekspos + pada Service yang kamu buat. Pada contoh ini, alamat IP eksternal yang diberikan adalah 104.198.205.71. + Kemudian pastikan nilai dari `Port` dan `NodePort`. Pada contoh ini, `Port` + yang digunakan adalah 8080 dan `NodePort` adalah 32377. + +1. Pada keluaran perintah sebelumnya, kamu dapat melihat beberapa Service dengan beberapa endpoint: + 10.0.0.6:8080,10.0.1.6:8080,10.0.1.7:8080 + 2 lainnya. Berikut ini merupakan alamat IP dari Pod + dimana aplikasi tersebut dijalankan. Untuk melakukan verifikasi alamat-alamat IP yang digunakan oleh Pod, + masukkan perintah berikut: + + kubectl get pods --output=wide + + Keluaran yang diberikan akan menyerupai: + + NAME ... IP NODE + hello-world-2895499144-1jaz9 ... 10.0.1.6 gke-cluster-1-default-pool-e0b8d269-1afc + hello-world-2895499144-2e5uh ... 10.0.1.8 gke-cluster-1-default-pool-e0b8d269-1afc + hello-world-2895499144-9m4h1 ... 10.0.0.6 gke-cluster-1-default-pool-e0b8d269-5v7a + hello-world-2895499144-o4z13 ... 10.0.1.7 gke-cluster-1-default-pool-e0b8d269-1afc + hello-world-2895499144-segjf ... 10.0.2.5 gke-cluster-1-default-pool-e0b8d269-cpuc + +1. Gunakan alamat IP eksternal (`LoadBalancer Ingress`) untuk mengakses aplikasi Hello World: + + curl http://: + + dimana `` adalah alamat IP eksternal (`LoadBalancer Ingress`) + dari Service kamu, dan `` adalah nilai dari `Port` dari deskripsi Service kamu. + Jika kamu menggunakan minikube, menuliskan perintah `minikube service my-service` akan + secara otomatis membuka aplikasi Hello World pada _browser_. + + Respons yang diberikan apabila permintaan ini berhasil adalah sebuah pesan sapaan: + + Hello Kubernetes! + + + + +## {{% heading "cleanup" %}} + + +Untuk menghapus Service, kamu dapat menggunakan perintah ini: + + kubectl delete services my-service + +Untuk menghapus Deployment, ReplicaSet, dan Pod-Pod yang digunakan untuk +menjalankan aplikasi Hello World, kamu dapat memasukkan perintah berikut: + + kubectl delete deployment hello-world + + + + +## {{% heading "whatsnext" %}} + + +Pelajari lebih lanjut cara untuk +[menghubungkan aplikasi dengan berbagai Service](/id/docs/concepts/services-networking/connect-applications-service/). 
+ + diff --git a/content/id/examples/application/deployment-scale.yaml b/content/id/examples/application/deployment-scale.yaml new file mode 100644 index 0000000000000..84e326eee1084 --- /dev/null +++ b/content/id/examples/application/deployment-scale.yaml @@ -0,0 +1,19 @@ +apiVersion: apps/v1 # untuk versi sebelum 1.9.0 gunakan apps/v1beta2 +kind: Deployment +metadata: + name: nginx-deployment +spec: + selector: + matchLabels: + app: nginx + replicas: 4 # Memperbarui replica dari 2 menjadi 4 + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 diff --git a/content/id/examples/application/deployment-update.yaml b/content/id/examples/application/deployment-update.yaml new file mode 100644 index 0000000000000..63fbdb69cf5c9 --- /dev/null +++ b/content/id/examples/application/deployment-update.yaml @@ -0,0 +1,19 @@ +apiVersion: apps/v1 # untuk versi sebelum 1.9.0 gunakan apps/v1beta2 +kind: Deployment +metadata: + name: nginx-deployment +spec: + selector: + matchLabels: + app: nginx + replicas: 2 + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.16.1 # Memperbarui versi nginx dari 1.14.2 ke 1.16.1 + ports: + - containerPort: 80 diff --git a/content/id/examples/pods/pod-nginx-preferred-affinity.yaml b/content/id/examples/pods/pod-nginx-preferred-affinity.yaml new file mode 100644 index 0000000000000..f169576bc2b0c --- /dev/null +++ b/content/id/examples/pods/pod-nginx-preferred-affinity.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Pod +metadata: + name: nginx +spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + preference: + matchExpressions: + - key: disktype + operator: In + values: + - ssd + containers: + - name: nginx + image: nginx + imagePullPolicy: IfNotPresent + diff --git a/content/id/examples/pods/pod-nginx-required-affinity.yaml b/content/id/examples/pods/pod-nginx-required-affinity.yaml new file mode 100644 index 0000000000000..a1093da1886f0 --- /dev/null +++ b/content/id/examples/pods/pod-nginx-required-affinity.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Pod +metadata: + name: nginx +spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: disktype + operator: In + values: + - ssd + containers: + - name: nginx + image: nginx + imagePullPolicy: IfNotPresent + diff --git a/content/id/examples/pods/share-process-namespace.yaml b/content/id/examples/pods/share-process-namespace.yaml new file mode 100644 index 0000000000000..af812732a247a --- /dev/null +++ b/content/id/examples/pods/share-process-namespace.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Pod +metadata: + name: nginx +spec: + shareProcessNamespace: true + containers: + - name: nginx + image: nginx + - name: shell + image: busybox + securityContext: + capabilities: + add: + - SYS_PTRACE + stdin: true + tty: true diff --git a/content/id/examples/service/load-balancer-example.yaml b/content/id/examples/service/load-balancer-example.yaml new file mode 100644 index 0000000000000..ea88fd154868c --- /dev/null +++ b/content/id/examples/service/load-balancer-example.yaml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: load-balancer-example + name: hello-world +spec: + replicas: 5 + selector: + matchLabels: + app.kubernetes.io/name: load-balancer-example + template: + metadata: + labels: + app.kubernetes.io/name: 
load-balancer-example + spec: + containers: + - image: gcr.io/google-samples/node-hello:1.0 + name: hello-world + ports: + - containerPort: 8080 diff --git a/content/it/docs/concepts/_index.md b/content/it/docs/concepts/_index.md index 89b80f409dd0d..a4175f8f8a08a 100644 --- a/content/it/docs/concepts/_index.md +++ b/content/it/docs/concepts/_index.md @@ -59,7 +59,7 @@ Il master Kubernetes è responsabile della gestione dello stato desiderato per i ### Kubernetes Nodes -I nodi di un cluster sono le macchine (VM, server fisici, ecc.) Che eseguono i flussi di lavoro delle applicazioni e del cloud. Il master Kubernetes controlla ciascun nodo; raramente interagirai direttamente con i nodi. +I nodi di un cluster sono le macchine (VM, server fisici, ecc.) che eseguono i flussi di lavoro delle applicazioni e del cloud. Il master Kubernetes controlla ciascun nodo; raramente interagirai direttamente con i nodi. #### Object Metadata diff --git a/content/ja/docs/contribute/localization.md b/content/ja/docs/contribute/localization.md index c9b1221b532c9..ab1639c294d97 100644 --- a/content/ja/docs/contribute/localization.md +++ b/content/ja/docs/contribute/localization.md @@ -9,272 +9,148 @@ card: -このページでは、ドキュメントを異なる言語に[翻訳](https://blog.mozilla.org/l10n/2011/12/14/i18n-vs-l10n-whats-the-diff/)する方法について紹介します。 +このページでは、Kubernetesドキュメントにおける日本語翻訳の方針について説明します。 -## はじめる +## ドキュメントを日本語に翻訳するまでの流れ -コントリビューターが自分自身のプルリクエストを承認することはできないため、翻訳を始めるには、最低でも2人が必要です。 +翻訳を行うための基本的な流れについて説明します。不明点がある場合は[Kubernetes公式Slack](http://slack.kubernetes.io/)の`#kubernetes-docs-ja`チャンネルにてお気軽にご質問ください。 -すべての翻訳チームは、自分たちのリソースを継続的に自己管理しなければいけません。私たちはドキュメントを喜んでホストしますが、あなたの代わりに翻訳することはできないからです。 +### 前提知識 -### 2文字の言語コードを探す +翻訳作業は全て[GitHubのIssue](https://github.com/kubernetes/website/issues?q=is%3Aissue+is%3Aopen+label%3Alanguage%2Fja)によって管理されています。翻訳作業を行いたい場合は、Issueの一覧をまず最初にご確認ください。 -最初に、[ISO 639-1標準](https://www.loc.gov/standards/iso639-2/php/code_list.php)のドキュメントから、翻訳先の言語に対応する2文字の国コードを探してください。たとえば、韓国語の国コードは`ko`です。 +また、Kubernetes傘下のリポジトリでは`CLA`と呼ばれる同意書に署名しないと、Pull Requestをマージすることができません。詳しくは[英語のドキュメント](https://github.com/kubernetes/community/blob/master/CLA.md)や、[Qiitaに有志の方が書いてくださった日本語のまとめ](https://qiita.com/jlandowner/items/d14d9bc8797a62b65e67)をご覧ください。 -### リポジトリをフォーク・クローンする {#fork-and-clone-the-repo} +### 翻訳を始めるまで -初めに、[kubernetes/website](https://github.com/kubernetes/website)リポジトリの[自分用のフォークを作成](/docs/contribute/start/#improve-existing-content)します。 +#### 翻訳を希望するページのIssueが存在しない場合 -そして、フォークをクローンして、ディレクトリに`cd`します。 +1. [こちらのサンプル](https://github.com/kubernetes/website/issues/22340)に従う形でIssueを作成する +2. 自分自身を翻訳作業に割り当てたい場合は、Issueのメッセージまたはコメントに`/assign`と書く +3. [新規ページを翻訳する場合](#translate-new-page)のステップに進む -```shell -git clone https://github.com//website -cd website -``` +**不明点がある場合は[Kubernetes公式Slack](http://slack.kubernetes.io/)の`#kubernetes-docs-ja`チャンネルにてお気軽にご質問ください。** -### プルリクエストを開く +#### 翻訳を希望するページのIssueが存在する場合 -次に、`kubernetes/website`リポジトリに翻訳を追加するための[プルリクエスト(PR)を開きます](/docs/contribute/start/#submit-a-pull-request)。 +1. 自分自身を翻訳作業に割り当てるために、Issueのコメントに`/assign`と書く +2. 
[新規ページを翻訳する場合](#translate-new-page)のステップに進む -このPRが承認されるためには、[最低限必要なコンテンツ](#minimum-required-content)が含まれていなければなりません。 +### Pull Requestを送るまで -新しい翻訳を追加する例としては、[フランス語版ドキュメントを追加するPR](https://github.com/kubernetes/website/pull/12548)を参考にしてください。 +未翻訳ページの新規翻訳作業と既存ページの修正作業でそれぞれ手順が異なります。 -### Kubernetes GitHub organizationに参加する +既存ページへの追加修正については、後述の[マイルストーンについて](#milestones)に目を通すことをおすすめします。 -翻訳のPRを作ると、Kubernetes GitHub organizationのメンバーになることができます。チームの各個人は、それぞれ`kubernetes/org`リポジトリに[Organization Membership Request](https://github.com/kubernetes/org/issues/new/choose)を作成する必要があります。 +#### 新規ページを翻訳する場合の手順 {#translate-new-page} -### 翻訳チームをGitHubに追加する {#add-your-localization-team-in-github} +1. `kubernetes/website`リポジトリをフォークする +2. `master`から任意の名前でブランチを作成する +3. `content/en`のディレクトリから必要なファイルを`content/ja`にコピーし、翻訳する +4. `master`ブランチに向けてPull Requestを作成する -次に、Kubernetesの翻訳チームを[`sig-docs/teams.yaml`](https://github.com/kubernetes/org/blob/master/config/kubernetes/sig-docs/teams.yaml)に追加します。翻訳チームを追加する例として、[スペイン語の翻訳チーム](https://github.com/kubernetes/org/pull/685)を追加するPRを見てください。 +#### 既存のページの誤字脱字や古い記述を修正する場合の手順 -`@kubernetes/sig-docs-**-owners`のメンバーは、翻訳のディレクトリ`/content/**/`以下のコンテンツのみを変更するPRを承認できます。 +1. `kubernetes/website`リポジトリをフォークする +2. `dev-1.18-ja.1`(最新のマイルストーンブランチに適宜読み替えること)から任意の名前でブランチを作成し、該当箇所を編集する +3. `dev-1.18-ja.1`(最新のマイルストーンブランチに適宜読み替えること)ブランチに向けてPull Requestを作成する -各翻訳ごとに、新しいPRに対して`@kubernetes/sig-docs-**-reviews`チームがレビューに自動的にアサインされます。 +### マイルストーンについて {#milestones} -`@kubernetes/website-maintainers`のメンバーは、翻訳作業を調整するために新しい開発ブランチを作ることができます。 +翻訳作業を集中的に管理するために、日本語を含む複数の言語ではマイルストーンを採用しています。 -`@kubernetes/website-milestone-maintainers`のメンバーは、issueやPRにマイルストーンをアサインするために、`/milestone`[Prowコマンド](https://prow.k8s.io/command-help)が使用できます。 +各マイルストーンでは、 -### ワークフローを設定する {#configure-the-workflow} +- 最低要件のコンテンツの追加・更新(項目については[こちら](https://kubernetes.io/docs/contribute/localization/#translating-documents)を参照してください) +- バージョンに追従できていない翻訳済みコンテンツの更新 -次に、`kubernetes/test-infra`リポジトリに新しい翻訳用のGitHubラベルを追加します。ラベルを利用すると、issueやプルリクエストを特定の言語のものだけにフィルタできます。 +を行い、ドキュメントの全体的なメンテナンスを行っています。 -ラベルを追加する例としては、[イタリア語の言語ラベル](https://github.com/kubernetes/test-infra/pull/11316)を追加するPRを見てください。 +マイルストーンのバージョンはOwner権限を持つメンバーが管理するものとします。 -### コミュニティを見つける +## 翻訳スタイルガイド -Kubernetes SIG Docsに、新しく翻訳チームを作りたいという意思を知らせてください![SIG Docs Slackチャンネル](https://kubernetes.slack.com/messages/C1J0BPD2M/)に参加してください。他の言語のメンバーが、翻訳を始めるのを喜んで助けてくれ、どんな疑問にも答えてくれます。 +### 基本方針 -`kubernetes/community`リポジトリ内で、翻訳用のSlackチャンネルを作ることもできます。Slackチャンネルを追加する例としては、[インドネシア語とポルトガル語用のチャンネルを追加する](https://github.com/kubernetes/community/pull/3605)ためのPRを見てください。 +- 本文を、敬体(ですます調)で統一 + - 特に、「〜になります」「〜となります」という表現は「〜です」の方が適切な場合が多いため注意 +- 句読点は「、」と「。」を使用 +- 漢字、ひらがな、カタカナは全角で表記 +- 数字とアルファベットは半角で表記 +- スペースと括弧 `()` 、コロン `:` は半角、それ以外の記号類は全角で表記 +- 英単語と日本語の間に半角スペースは不要 -## 最低限必要なコンテンツ {#minimum-required-content} +### 頻出単語 -### サイトの設定を修正する +英語 | 日本語 +--------- | --------- +cluster|クラスター +orchestrate(動詞)|オーケストレーションする +Persistent Volume|KubernetesリソースとしてのPersistentVolumeはママ表記、一般的な用語としての場合は、永続ボリューム +Deployment/Deploy|KubernetesリソースとしてのDeploymentはママ表記、一般的な用語としてのdeployの場合は、デプロイ +Addon/Add-on|アドオン +Quota|クォータ +For more information|さらなる情報(一時的) +prefix | プレフィックス +container | コンテナ +directory | ディレクトリ +binary | バイナリ +controller | コントローラー +opeartor | オペレーター +Aggregation Layer | アグリゲーションレイヤー +Issue | Issue (ママ表記) +Pull Request | Pull Request (ママ表記) +GitHub | GitHub (ママ表記) +registry | レジストリ +architecture | アーキテクチャ +secure | セキュア +stacked | 積層(例: stacked etcd clusterは積層etcdクラスター) +a set of ~ | ~の集合 
-Kubernetesのウェブサイトでは、Hugoをウェブフレームワークとして使用しています。ウェブサイトのHugoの設定は、[`config.toml`](https://github.com/kubernetes/website/tree/master/config.toml)ファイルの中に書かれています。新しい翻訳をサポートするには、`config.toml`を修正する必要があります。 +### 備考 -`config.toml`の既存の`[languages]`ブロックの下に、新しい言語の設定ブロックを追加してください。たとえば、ドイツ語のブロックの場合、次のようになります。 +ServiceやDeploymentなどのKubernetesのAPIオブジェクトや技術仕様的な固有名詞は、無理に日本語訳せずそのまま書いてください。 -```toml -[languages.de] -title = "Kubernetes" -description = "Produktionsreife Container-Verwaltung" -languageName = "Deutsch" -contentDir = "content/de" -weight = 3 -``` +また、日本語では名詞を複数形にする意味はあまりないので、英語の名詞を利用する場合は原則として単数形で表現してください。 -ブロックの`weight`パラメーターの設定では、言語の一覧から最も数字の大きい番号を探し、その値に1を加えた値を指定してください。 +例: -Hugoの多言語サポートについての詳しい情報は、「[多言語モード](https://gohugo.io/content-management/multilingual/)」を参照してください。 +- Kubernetes Service +- Node +- Pod -### 新しい翻訳のディレクトリを追加する +外部サイトへの参照の記事タイトルは翻訳しましょう。(一時的) -[`content`](https://github.com/kubernetes/website/tree/master/content)フォルダーに、言語用のサブディレクトリを追加してください。2文字の言語コードが`de`であるドイツ語の場合、次のようにディレクトリを作ります。 +### 頻出表記(日本語) -```shell -mkdir content/de -``` +よくある表記 | あるべき形 +--------- | --------- +〜ので、〜から、〜だから| 〜のため 、〜ため +(あいうえお。)| (あいうえお)。 +〇,〇,〇|〇、〇、〇(※今回列挙はすべて読点で統一) -### Community Code of Conductを翻訳する +### 単語末尾に長音記号(「ー」)を付けるかどうか -あなたの言語のcode of conductを追加するために、PRを[`cncf/foundation`](https://github.com/cncf/foundation/tree/master/code-of-conduct-languages)リポジトリに対して開いてください。 +「サーバー」「ユーザー」など英単語をカタカナに訳すときに、末尾の「ー」を付けるかどうか。 -### 翻訳したREADMEを追加する +- 「r」「re」「y」などで終わる単語については、原則付ける +- 上の頻出語のように、別途まとめたものは例外とする -他の翻訳のコントリビューターをガイドするために、kubernetes/websiteのトップレベルに新しい[`README-**.md`](https://help.github.com/articles/about-readmes/)を追加してください。ここで、`**`は2文字の言語コードです。たとえば、ドイツ語のREADMEファイルは`README-de.md`です。 +参考: https://kubernetes.slack.com/archives/CAG2M83S8/p1554096635015200 辺りのやりとり -翻訳された`README-**.md`ファイルの中で、翻訳のコントリビューターにガイダンスを提供してください。`README.md`に含まれているのと同じ情報に加えて、以下の情報も追加してください。 +### cron jobの訳し方に関して -- 翻訳プロジェクトのための連絡先 -- 翻訳固有の情報 +混同を避けるため、cron jobはcronジョブと訳し、CronJobはリソース名としてのままにする。 +cron「の」ジョブは、「の」が続く事による解釈の難から基本的にはつけないものとする。 -翻訳されたREADMEを作成したら、メインの英語の`README.md`からそのファイルへのリンクを追加し、英語で連絡先情報も書いてください。GitHub ID、メールアドレス、[Slackチャンネル](https://slack.com)、その他の連絡手段を提供できます。翻訳されたCommunity Code of Conductへのリンクも必ず提供してください。 +### その他基本方針など -### OWNERSファイルを設定する - -翻訳にコントリビュートする各ユーザーのロールを設定するには、言語用のサブディレクトリの中に`OWNERS`ファイルを作成し、以下の項目を設定します。 - -- **レビュアー**: レビュアーのロールを持つkubernetesチームのリストです。この場合は、[GitHubで翻訳チームを追加](#add-your-localization-team-in-github)で作成した`sig-docs-**-reviews`チームです。 -- **承認者**: 承認者のロールを持つkubernetesチームのリストです。この場合は、[GitHubで翻訳チームを追加](#add-your-localization-team-in-github)で追加した`sig-docs-**-owners`チームです。 -- **ラベル**: PRに自動的に適用されるGitHub上のラベルのリストです。この場合は、[ワークフローを設定する](#configure-the-workflow)で作成した言語ラベルです。 - -`OWNERS`ファイルに関するより詳しい情報は、[go.k8s.io/owners](https://go.k8s.io/owners)を参照してください。 - -言語コードが`es`の[スペイン語のOWNERSファイル](https://git.k8s.io/website/content/es/OWNERS)は次のようになります。 - -```yaml -# See the OWNERS docs at https://go.k8s.io/owners - -# This is the localization project for Spanish. -# Teams and members are visible at https://github.com/orgs/kubernetes/teams. 
- -reviewers: -- sig-docs-es-reviews - -approvers: -- sig-docs-es-owners - -labels: -- language/es -``` - -特定の言語用の`OWNERS`ファイルを追加したら、[ルートの`OWNERS_ALIASES`](https://git.k8s.io/website/OWNERS_ALIASES)ファイルを、翻訳のための新しいKuerbetesチーム、`sig-docs-**-owners`および`sig-docs-**-reviews`で更新します。 - -各チームごとに、[翻訳チームをGitHubに追加する](#add-your-localization-team-in-github)でリクエストしたGitHubユーザーのリストをアルファベット順で追加してください。 - -```diff ---- a/OWNERS_ALIASES -+++ b/OWNERS_ALIASES -@@ -48,6 +48,14 @@ aliases: - - stewart-yu - - xiangpengzhao - - zhangxiaoyu-zidif -+ sig-docs-es-owners: # Admins for Spanish content -+ - alexbrand -+ - raelga -+ sig-docs-es-reviews: # PR reviews for Spanish content -+ - alexbrand -+ - electrocucaracha -+ - glo-pena -+ - raelga - sig-docs-fr-owners: # Admins for French content - - perriea - - remyleone -``` - -## コンテンツを翻訳する - -Kubernetesのドキュメントの *すべて* を翻訳するのは、非常に大きな作業です。小さく始めて、時間をかけて拡大していけば大丈夫です。 - -最低限、すべての翻訳には以下のコンテンツが必要です。 - -説明 | URL ------|----- -ホーム | [すべての見出しと小見出しのURL](/docs/home/) -セットアップ | [すべての見出しと小見出しのURL](/docs/setup/) -チュートリアル | [Kubernetes Basics](/docs/tutorials/kubernetes-basics/)、[Hello Minikube](/docs/tutorials/hello-minikube/) -サイト文字列 | [翻訳された新しいTOMLファイル内のすべてのサイト文字列](https://github.com/kubernetes/website/tree/master/i18n) - -翻訳されたドキュメントは、言語ごとに`content/**/`サブディレクトリに置き、英語のソースと同じURLパスに従うようにしなければいけません。たとえば、[Kubernetes Basics](/docs/tutorials/kubernetes-basics/)のチュートリアルをドイツ語に翻訳する準備をするには、次のように、`content/de/`フォルダ以下にサブディレクトリを作り、英語のソースをコピーします。 - -```shell -mkdir -p content/de/docs/tutorials -cp content/en/docs/tutorials/kubernetes-basics.md content/de/docs/tutorials/kubernetes-basics.md -``` - -翻訳ツールを使えば、翻訳のプロセスをスピードアップできます。たとえば、エディタによってはテキストを高速に翻訳してくれるプラグインがあります。 - -{{< caution >}} -機械生成された翻訳は、そのままでは最低限の品質基準を満たしません。基準を満たすためには、人間による十分なレビューが必要です。 -{{< /caution >}} - -文法と意味の正確さを保証するために、公開する前に翻訳チームのメンバーが機械生成されたすべての翻訳を注意深くレビューしなければいけません。 - -### ソースファイル - -翻訳は、最新のリリース{{< latest-version >}}の英語のファイルをベースにしなければなりません。 - -最新のリリースのソースファイルを見つけるには、次のように探してください。 - -1. Kubernetesのウェブサイトのリポジトリ https://github.com/kubernetes/website に移動する。 -2. 最新バージョンの`release-1.X`ブランチを選択する。 - -最新バージョンは{{< latest-version >}}であるため、最新のリリースブランチは[`{{< release-branch >}}`](https://github.com/kubernetes/website/tree/{{< release-branch >}})です。 - -### i18n/内のサイト文字列 - -翻訳には、[`i18n/en.toml`](https://github.com/kubernetes/website/blob/master/i18n/en.toml)の内容を新しい言語用のファイル内に含める必要があります。ドイツ語を例に取ると、ファイル名は`i18n/de.toml`です。 - -新しい翻訳ファイルを`i18n/`に追加します。たとえば、ドイツ語(`de`)であれば次のようになります。 - -```shell -cp i18n/en.toml i18n/de.toml -``` - -そして、各文字列の値を翻訳します。 - -```TOML -[docs_label_i_am] -other = "ICH BIN..." -``` - -サイト文字列を翻訳することで、サイト全体で使われるテキストや機能をカスタマイズできます。たとえば、各ページのフッターにある著作権のテキストなどです。 - -### 言語固有のスタイルガイドと用語集 - -一部の言語チームには、言語固有のスタイルガイドや用語集があります。たとえば、[韓国語の翻訳ガイド](/ko/docs/contribute/localization_ko/)を見てください。 - -## ブランチの戦略 - -翻訳プロジェクトは協力が非常に重要な活動のため、チームごとに共有の開発ブランチで作業することを推奨します。 - -開発ブランチ上で共同作業するためには、以下の手順を行います。 - -1. [@kubernetes/website-maintainers](https://github.com/orgs/kubernetes/teams/website-maintainers)のチームメンバーが https://github.com/kubernetes/website のソースブランチから開発ブランチを作る。 - - [`kubernetes/org`](https://github.com/kubernetes/org)リポジトリに[翻訳チームを追加](#add-your-localization-team-in-github)したとき、チームの承認者は`@kubernetes/website-maintainers`チームに参加します。 - - 次のようなブランチの命名規則に従うことを推奨します。 - - `dev-<ソースのバージョン>-<言語コード>.<チームのマイルストーン>` - - たとえば、ドイツ語の翻訳チームの承認者は、Kubernetes v1.12のソースブランチをベースに、k/websiteリポジトリから直接開発ブランチ`dev-1.12-de.1`を作ります。 - -2. 
各コントリビューターが、開発ブランチをベースにフィーチャーブランチを作る。 - - たとえば、ドイツ語のコントリビューターは、`username:local-branch-name`から`kubernetes:dev-1.12-de.1`に対して、変更を含むプルリクエストを開きます。 - -3. 承認者がフィーチャーブランチをレビューして、開発ブランチにマージする。 - -4. 定期的に新しいプルリクエストを開いて承認することで、承認者が開発ブランチをソースブランチにマージする。プルリクエストを承認する前にコミットをsquashするようにしてください。 - -翻訳が完了するまで、1-4のステップを必要なだけ繰り返します。たとえば、ドイツ語のブランチは、`dev-1.12-de.2`、`dev-1.12-de.3`と続きます。 - -チームは、翻訳したコンテンツを元となったリリースブランチにマージする必要があります。たとえば、{{< release-branch >}}から作られた開発ブランチは、必ず{{< release-branch >}}にマージしなければなりません。 - -承認者は、ソースブランチを最新の状態に保ち、マージのコンフリクトを解決することで、開発ブランチをメンテナンスしなければなりません。開発ブランチが長く開いたままであるほど、一般により多くのメンテナンスが必要になります。そのため、非常に長い期間に渡って開発ブランチを維持するよりは、定期的に開発ブランチをマージして、新しいブランチを作ることを考えてください。 - -各チームマイルストーンの最初には、1つ前の開発ブランチと現在の開発ブランチの間のアップストリームの変更を比較するissueを開くと役に立ちます。 - -新しい開発ブランチを開いたりプルリクエストをマージできるのは承認者だけですが、新しい開発ブランチには誰でもプルリクエストを開くことができます。特別な許可は必要ありません。 - -フォークやリポジトリから直接行う作業についての詳しい情報は、「[リポジトリをフォーク・クローンする](#fork-and-clone-the-repo)」を読んでください。 +- 意訳と直訳で迷った場合は「直訳」で訳す +- 訳で難しい・わからないと感じたらSlackの#kubernetes-docs-jaでみんなに聞く +- できることを挙手制で、できないときは早めに報告 ## アップストリームのコントリビューター SIG Docsでは、英語のソースに対する[アップストリームへのコントリビュートや誤りの訂正](/docs/contribute/intermediate#localize-content)を歓迎しています。 - -## 既存の翻訳を助ける - -コンテンツの追加や改善により既存の翻訳を助けることもできます。翻訳のための[Slackチャンネル](https://kubernetes.slack.com/messages/C1J0BPD2M/)に参加して、助けとなるPRを開くことを始めましょう。 - -## {{% heading "whatsnext" %}} - -翻訳がワークフローと最小限のコンテンツの要件を満たしたら、SIG docsは次の作業を行います。 - -- ウェブサイト上で言語の選択を有効にする。 -- [Kubernetesブログ](https://kubernetes.io/blog/)を含む[Cloud Native Computing Foundation](https://www.cncf.io/about/)(CNCF)のチャンネルで、翻訳が利用できるようになったことを公表する。 diff --git a/content/ja/docs/setup/learning-environment/minikube.md b/content/ja/docs/setup/learning-environment/minikube.md index 543256b4e3bb9..1a28e492612db 100644 --- a/content/ja/docs/setup/learning-environment/minikube.md +++ b/content/ja/docs/setup/learning-environment/minikube.md @@ -33,10 +33,10 @@ Minikubeはローカル環境でKubernetesを簡単に実行するためのツ * virtualbox * vmwarefusion -* kvm2 ([driver installation](https://git.k8s.io/minikube/docs/drivers.md#kvm2-driver)) -* kvm ([driver installation](https://git.k8s.io/minikube/docs/drivers.md#kvm-driver)) -* hyperkit ([driver installation](https://git.k8s.io/minikube/docs/drivers.md#hyperkit-driver)) -* xhyve ([driver installation](https://git.k8s.io/minikube/docs/drivers.md#xhyve-driver)) (非推奨) +* kvm2 ([driver installation](https://minikube.sigs.k8s.io/docs/drivers/#kvm2-driver)) +* kvm ([driver installation](https://minikube.sigs.k8s.io/docs/drivers/#kvm-driver)) +* hyperkit ([driver installation](https://minikube.sigs.k8s.io/docs/drivers/#hyperkit-driver)) +* xhyve ([driver installation](https://minikube.sigs.k8s.io/docs/drivers/#xhyve-driver)) (非推奨) * hyperv ([driver installation](https://github.com/kubernetes/minikube/blob/master/docs/drivers.md#hyperv-driver)) 注意: 以下のIPは動的であり、変更される可能性があります。IPは `minikube ip` で取得することができます。 * none (VMではなくホスト上でKubernetesコンポーネントを起動する。このドライバを使用するにはDocker ([docker install](https://docs.docker.com/install/linux/docker-ce/ubuntu/)) とLinux環境を必要とします) @@ -200,7 +200,7 @@ minikube start \ ### ドライバープラグイン -サポートされているドライバとプラグインのインストールの詳細については [DRIVERS](https://git.k8s.io/minikube/docs/drivers.md) を参照してください。 +サポートされているドライバとプラグインのインストールの詳細については [DRIVERS](https://minikube.sigs.k8s.io/docs/drivers/) を参照してください。 ### Dockerデーモンの再利用によるローカルイメージの使用 diff --git a/content/ko/docs/concepts/_index.md b/content/ko/docs/concepts/_index.md index 34bc413e7496d..f23bec15298fd 100644 --- a/content/ko/docs/concepts/_index.md +++ b/content/ko/docs/concepts/_index.md @@ -12,60 +12,3 @@ weight: 40 - -## 개요 - 
-쿠버네티스를 사용하려면, *쿠버네티스 API 오브젝트* 로 클러스터에 대해 사용자가 *바라는 상태* 를 기술해야 한다. 어떤 애플리케이션이나 워크로드를 구동시키려고 하는지, 어떤 컨테이너 이미지를 쓰는지, 복제의 수는 몇 개인지, 어떤 네트워크와 디스크 자원을 쓸 수 있도록 할 것인지 등을 의미한다. 바라는 상태를 설정하는 방법은 쿠버네티스 API를 사용해서 오브젝트를 만드는 것인데, 대개 `kubectl`이라는 커맨드라인 인터페이스를 사용한다. 클러스터와 상호 작용하고 바라는 상태를 설정하거나 수정하기 위해서 쿠버네티스 API를 직접 사용할 수도 있다. - -바라는 상태를 설정하면, *쿠버네티스 컨트롤 플레인* 은 Pod Lifecycle Event Generator ([PLEG](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/pod-lifecycle-event-generator.md))를 통해 클러스터의 현재 상태를 바라는 상태와 일치시킨다. 그렇게 함으로써, 쿠버네티스가 컨테이너를 시작 또는 재시작하거나, 주어진 애플리케이션의 복제 수를 스케일링하는 등의 다양한 작업을 자동으로 수행한다. 쿠버네티스 컨트롤 플레인은 클러스터에서 실행 중인 프로세스의 묶음(collection)으로 구성된다. - -* **쿠버네티스 마스터**는 클러스터 내 마스터 노드로 지정된 노드 내에서 구동되는 세 개의 프로세스 묶음이다. 해당 프로세스는 [kube-apiserver](/docs/admin/kube-apiserver/), [kube-controller-manager](/docs/admin/kube-controller-manager/) 및 [kube-scheduler](/docs/admin/kube-scheduler/)이다. -* 클러스터 내 마스터 노드가 아닌 각각의 노드는 다음 두 개의 프로세스를 구동시킨다. - * 쿠버네티스 마스터와 통신하는 **[kubelet](/docs/admin/kubelet/)**. - * 각 노드의 쿠버네티스 네트워킹 서비스를 반영하는 네트워크 프록시인 **[kube-proxy](/docs/admin/kube-proxy/)**. - -## 쿠버네티스 오브젝트 - -쿠버네티스는 시스템의 상태를 나타내는 추상 개념을 다수 포함하고 있다. 컨테이너화되어 배포된 애플리케이션과 워크로드, 이에 연관된 네트워크와 디스크 자원, 그 밖에 클러스터가 무엇을 하고 있는지에 대한 정보가 이에 해당한다. 이런 추상 개념은 쿠버네티스 API 내 오브젝트로 표현된다. 보다 자세한 내용은 [쿠버네티스 오브젝트 이해하기](/ko/docs/concepts/overview/working-with-objects/kubernetes-objects/#kubernetes-objects) 문서를 참조한다. - -기초적인 쿠버네티스 오브젝트에는 다음과 같은 것들이 있다. - -* [파드](/ko/docs/concepts/workloads/pods/pod-overview/) -* [서비스](/ko/docs/concepts/services-networking/service/) -* [볼륨](/ko/docs/concepts/storage/volumes/) -* [네임스페이스(Namespace)](/ko/docs/concepts/overview/working-with-objects/namespaces/) - -또한, 쿠버네티스에는 기초 오브젝트를 기반으로, 부가 기능 및 편의 기능을 제공하는 [컨트롤러](/ko/docs/concepts/architecture/controller/)에 의존하는 보다 높은 수준의 추상 개념도 포함되어 있다. 다음이 포함된다. - -* [디플로이먼트(Deployment)](/ko/docs/concepts/workloads/controllers/deployment/) -* [데몬셋(DaemonSet)](/ko/docs/concepts/workloads/controllers/daemonset/) -* [스테이트풀셋(StatefulSet)](/ko/docs/concepts/workloads/controllers/statefulset/) -* [레플리카셋(ReplicaSet)](/ko/docs/concepts/workloads/controllers/replicaset/) -* [잡(Job)](/ko/docs/concepts/workloads/controllers/jobs-run-to-completion/) - -## 쿠버네티스 컨트롤 플레인 - -쿠버네티스 마스터와 kubelet 프로세스와 같은 쿠버네티스 컨트롤 플레인의 다양한 구성 요소는 쿠버네티스가 클러스터와 통신하는 방식을 관장한다. 컨트롤 플레인은 시스템 내 모든 쿠버네티스 오브젝트의 레코드를 유지하면서, 오브젝트의 상태를 관리하는 제어 루프를 지속적으로 구동시킨다. 컨트롤 플레인의 제어 루프는 클러스터 내 변경이 발생하면 언제라도 응답하고 시스템 내 모든 오브젝트의 실제 상태가 사용자가 바라는 상태와 일치시키기 위한 일을 한다. - -예를 들어, 쿠버네티스 API를 사용해서 디플로이먼트를 만들 때에는, 바라는 상태를 시스템에 신규로 입력해야한다. 쿠버네티스 컨트롤 플레인이 오브젝트 생성을 기록하고, 사용자 지시대로 필요한 애플리케이션을 시작시키고 클러스터 노드에 스케줄링한다. 그래서 결국 클러스터의 실제 상태가 바라는 상태와 일치하게 된다. - -### 쿠버네티스 마스터 - -클러스터에 대해 바라는 상태를 유지할 책임은 쿠버네티스 마스터에 있다. `kubectl` 커맨드라인 인터페이스와 같은 것을 사용해서 쿠버네티스로 상호 작용할 때에는 쿠버네티스 마스터와 통신하고 있는 셈이다. - -> "마스터"는 클러스터 상태를 관리하는 프로세스의 묶음이다. 주로 모든 프로세스는 클러스터 내 단일 노드에서 구동되며, 이 노드가 바로 마스터이다. 마스터는 가용성과 중복을 위해 복제될 수도 있다. - -### 쿠버네티스 노드 - -클러스터 내 노드는 애플리케이션과 클라우드 워크플로우를 구동시키는 머신(VM, 물리 서버 등)이다. 쿠버네티스 마스터는 각 노드를 관리한다. 직접 노드와 직접 상호 작용할 일은 거의 없을 것이다. - - - - -## {{% heading "whatsnext" %}} - - -개념 페이지를 작성하기를 원하면, -개념 페이지 유형에 대한 정보가 있는 -[페이지 컨텐츠 유형](/docs/contribute/style/page-content-types/#concept)을 참조한다. 
- diff --git a/content/ko/docs/concepts/architecture/_index.md b/content/ko/docs/concepts/architecture/_index.md index cbcb8e810ddeb..4a83cc3c08c74 100644 --- a/content/ko/docs/concepts/architecture/_index.md +++ b/content/ko/docs/concepts/architecture/_index.md @@ -1,4 +1,6 @@ --- title: "클러스터 아키텍처" weight: 30 +description: > + 쿠버네티스 뒤편의 구조와 설계 개념들 --- diff --git a/content/ko/docs/concepts/architecture/nodes.md b/content/ko/docs/concepts/architecture/nodes.md index e4f5c9564e2c9..54782a20e35cc 100644 --- a/content/ko/docs/concepts/architecture/nodes.md +++ b/content/ko/docs/concepts/architecture/nodes.md @@ -63,11 +63,11 @@ kubelet이 노드의 `metadata.name` 필드와 일치하는 API 서버에 등록 {{< /note >}} 노드 오브젝트의 이름은 유효한 -[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름들)이어야 한다. +[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름)이어야 한다. ### 노드에 대한 자체-등록 -kubelet 플래그 `--register-node`는 참(기본값)일 경우, kubelet 은 API 서버에 +kubelet 플래그 `--register-node`는 참(기본값)일 경우, kubelet 은 API 서버에 스스로 등록을 시도할 것이다. 이는 대부분의 배포판에 의해 이용되는, 선호하는 패턴이다. 자체-등록에 대해, kubelet은 다음 옵션과 함께 시작된다. @@ -75,7 +75,7 @@ kubelet 플래그 `--register-node`는 참(기본값)일 경우, kubelet 은 API - `--kubeconfig` - apiserver에 스스로 인증하기 위한 자격증명에 대한 경로. - `--cloud-provider` - 자신에 대한 메터데이터를 읽기 위해 어떻게 {{< glossary_tooltip text="클라우드 제공자" term_id="cloud-provider" >}}와 소통할지에 대한 방법. - `--register-node` - 자동으로 API 서버에 등록. - - `--register-with-taints` - 주어진 {{< glossary_tooltip text="테인트(taint)" term_id="taint" >}} 리스트(콤마로 분리된 `=:`)를 가진 노드 등록. + - `--register-with-taints` - 주어진 {{< glossary_tooltip text="테인트(taint)" term_id="taint" >}} 리스트(콤마로 분리된 `=:`)를 가진 노드 등록. `register-node`가 거짓이면 동작 안 함. - `--node-ip` - 노드의 IP 주소. @@ -180,7 +180,7 @@ kubectl describe node ready 컨디션의 상태가 `pod-eviction-timeout` ({{< glossary_tooltip text="kube-controller-manager" term_id="kube-controller-manager" >}}에 전달된 인수) 보다 더 길게 `Unknown` 또는 `False`로 유지되는 경우, 노드 상에 모든 파드는 노드 컨트롤러에 의해 삭제되도록 스케줄 된다. 기본 축출 타임아웃 기간은 **5분** 이다. 노드에 접근이 불가할 때와 같은 경우, apiserver는 노드 상의 kubelet과 통신이 불가하다. apiserver와의 통신이 재개될 때까지 파드 삭제에 대한 결정은 kubelet에 전해질 수 없다. 그 사이, 삭제되도록 스케줄 되어진 파드는 분할된 노드 상에서 계속 동작할 수도 있다. 노드 컨트롤러가 클러스터 내 동작 중지된 것을 확신할 때까지는 파드를 -강제로 삭제하지 않는다. 파드가 `Terminating` 또는 `Unknown` 상태로 있을 때 접근 불가한 노드 상에서 +강제로 삭제하지 않는다. 파드가 `Terminating` 또는 `Unknown` 상태로 있을 때 접근 불가한 노드 상에서 동작되고 있는 것을 보게 될 수도 있다. 노드가 영구적으로 클러스터에서 삭제되었는지에 대한 여부를 쿠버네티스가 기반 인프라로부터 유추할 수 없는 경우, 노드가 클러스터를 영구적으로 탈퇴하게 되면, 클러스터 관리자는 손수 노드 오브젝트를 삭제해야 할 수도 있다. @@ -188,7 +188,7 @@ ready 컨디션의 상태가 `pod-eviction-timeout` ({{< glossary_tooltip text=" apiserver로부터 삭제되어 그 이름을 사용할 수 있는 결과를 낳는다. 노드 수명주기 컨트롤러는 자동으로 컨디션을 나타내는 -[테인트(taints)](/docs/concepts/scheduling-eviction/taint-and-toleration/)를 생성한다. +[테인트(taints)](/ko/docs/concepts/scheduling-eviction/taint-and-toleration/)를 생성한다. 스케줄러는 파드를 노드에 할당 할 때 노드의 테인트를 고려한다. 또한 파드는 노드의 테인트를 극복(tolerate)할 수 있는 톨러레이션(toleration)을 가질 수 있다. @@ -197,7 +197,7 @@ apiserver로부터 삭제되어 그 이름을 사용할 수 있는 결과를 낳 ### 용량과 할당가능 {#capacity} -노드 상에 사용 가능한 리소스를 나타낸다. 리소스에는 CPU, 메모리 그리고 +노드 상에 사용 가능한 리소스를 나타낸다. 리소스에는 CPU, 메모리 그리고 노드 상으로 스케줄 되어질 수 있는 최대 파드 수가 있다. 용량 블록의 필드는 노드에 있는 리소스의 총량을 나타낸다. @@ -221,18 +221,18 @@ apiserver로부터 삭제되어 그 이름을 사용할 수 있는 결과를 낳 노드 컨트롤러는 노드가 생성되어 유지되는 동안 다양한 역할을 한다. 첫째는 등록 시점에 (CIDR 할당이 사용토록 설정된 경우) 노드에 CIDR 블럭을 할당하는 것이다. -두 번째는 노드 컨트롤러의 내부 노드 리스트를 클라우드 제공사업자의 -사용 가능한 머신 리스트 정보를 근거로 최신상태로 유지하는 것이다. 클라우드 환경에서 -동작 중일 경우, 노드상태가 불량할 때마다, 노드 컨트롤러는 -해당 노드용 VM이 여전히 사용 가능한지에 대해 클라우드 제공사업자에게 묻는다. 
사용 가능하지 않을 경우, +두 번째는 노드 컨트롤러의 내부 노드 리스트를 클라우드 제공사업자의 +사용 가능한 머신 리스트 정보를 근거로 최신상태로 유지하는 것이다. 클라우드 환경에서 +동작 중일 경우, 노드상태가 불량할 때마다, 노드 컨트롤러는 +해당 노드용 VM이 여전히 사용 가능한지에 대해 클라우드 제공사업자에게 묻는다. 사용 가능하지 않을 경우, 노드 컨트롤러는 노드 리스트로부터 그 노드를 삭제한다. -세 번째는 노드의 동작 상태를 모니터링 하는 것이다. 노드 컨트롤러는 -노드가 접근 불가할 경우 (즉 노드 컨트롤러가 어떠한 사유로 하트비트 +세 번째는 노드의 동작 상태를 모니터링 하는 것이다. 노드 컨트롤러는 +노드가 접근 불가할 경우 (즉 노드 컨트롤러가 어떠한 사유로 하트비트 수신을 중지하는 경우, 예를 들어 노드 다운과 같은 경우이다.) -NodeStatus의 NodeReady 컨디션을 ConditionUnknown으로 업데이트 하는 책임을 지고, -노드가 계속 접근 불가할 경우 나중에 노드로부터 (정상적인 종료를 이용하여) 모든 파드를 축출시킨다. -(ConditionUnknown을 알리기 시작하는 기본 타임아웃 값은 40초 이고, +NodeStatus의 NodeReady 컨디션을 ConditionUnknown으로 업데이트 하는 책임을 지고, +노드가 계속 접근 불가할 경우 나중에 노드로부터 (정상적인 종료를 이용하여) 모든 파드를 축출시킨다. +(ConditionUnknown을 알리기 시작하는 기본 타임아웃 값은 40초 이고, 파드를 축출하기 시작하는 값은 5분이다.) 노드 컨트롤러는 매 `--node-monitor-period` 초 마다 각 노드의 상태를 체크한다. @@ -260,30 +260,30 @@ kubelet은 `NodeStatus` 와 리스 오브젝트를 생성하고 업데이트 할 #### 안정성 - 대부분의 경우, 노드 컨트롤러는 초당 `--node-eviction-rate`(기본값 0.1)로 -축출 비율을 제한한다. 이 말은 10초당 1개의 노드를 초과하여 + 대부분의 경우, 노드 컨트롤러는 초당 `--node-eviction-rate`(기본값 0.1)로 +축출 비율을 제한한다. 이 말은 10초당 1개의 노드를 초과하여 파드 축출을 하지 않는다는 의미가 된다. 노드 축출 행위는 주어진 가용성 영역 내 하나의 노드가 상태가 불량할 -경우 변화한다. 노드 컨트롤러는 영역 내 동시에 상태가 불량한 노드의 퍼센티지가 얼마나 되는지 -체크한다(NodeReady 컨디션은 ConditionUnknown 또는 ConditionFalse 다.). -상태가 불량한 노드의 일부가 최소 +경우 변화한다. 노드 컨트롤러는 영역 내 동시에 상태가 불량한 노드의 퍼센티지가 얼마나 되는지 +체크한다(NodeReady 컨디션은 ConditionUnknown 또는 ConditionFalse 다.). +상태가 불량한 노드의 일부가 최소 `--unhealthy-zone-threshold` 기본값 0.55) 가 -되면 축출 비율은 감소한다. 클러스터가 작으면 (즉 -`--large-cluster-size-threshold` 노드 이하면 - 기본값 50) 축출은 중지되고, -그렇지 않으면 축출 비율은 초당 -`--secondary-node-eviction-rate`(기본값 0.01)로 감소된다. -이 정책들이 가용성 영역 단위로 실행되어지는 이유는 나머지가 연결되어 있는 동안 -하나의 가용성 영역이 마스터로부터 분할되어 질 수도 있기 때문이다. -만약 클러스터가 여러 클라우드 제공사업자의 가용성 영역에 걸쳐 있지 않으면, +되면 축출 비율은 감소한다. 클러스터가 작으면 (즉 +`--large-cluster-size-threshold` 노드 이하면 - 기본값 50) 축출은 중지되고, +그렇지 않으면 축출 비율은 초당 +`--secondary-node-eviction-rate`(기본값 0.01)로 감소된다. +이 정책들이 가용성 영역 단위로 실행되어지는 이유는 나머지가 연결되어 있는 동안 +하나의 가용성 영역이 마스터로부터 분할되어 질 수도 있기 때문이다. +만약 클러스터가 여러 클라우드 제공사업자의 가용성 영역에 걸쳐 있지 않으면, 오직 하나의 가용성 영역만 (전체 클러스터) 존재하게 된다. -노드가 가용성 영역들에 걸쳐 퍼져 있는 주된 이유는 하나의 전체 영역이 -장애가 발생할 경우 워크로드가 상태 양호한 영역으로 이전되어질 수 있도록 하기 위해서이다. -그러므로, 하나의 영역 내 모든 노드들이 상태가 불량하면 노드 컨트롤러는 -`--node-eviction-rate` 의 정상 비율로 축출한다. 코너 케이스란 모든 영역이 -완전히 상태불량 (즉 클러스터 내 양호한 노드가 없는 경우) 한 경우이다. -이러한 경우, 노드 컨트롤러는 마스터 연결에 문제가 있어 일부 연결이 +노드가 가용성 영역들에 걸쳐 퍼져 있는 주된 이유는 하나의 전체 영역이 +장애가 발생할 경우 워크로드가 상태 양호한 영역으로 이전되어질 수 있도록 하기 위해서이다. +그러므로, 하나의 영역 내 모든 노드들이 상태가 불량하면 노드 컨트롤러는 +`--node-eviction-rate` 의 정상 비율로 축출한다. 코너 케이스란 모든 영역이 +완전히 상태불량 (즉 클러스터 내 양호한 노드가 없는 경우) 한 경우이다. +이러한 경우, 노드 컨트롤러는 마스터 연결에 문제가 있어 일부 연결이 복원될 때까지 모든 축출을 중지하는 것으로 여긴다. 또한, 노드 컨트롤러는 파드가 테인트를 허용하지 않을 때 `NoExecute` 테인트 상태의 @@ -323,7 +323,7 @@ kubelet은 `NodeStatus` 와 리스 오브젝트를 생성하고 업데이트 할 {{< feature-state state="alpha" for_k8s_version="v1.16" >}} -`TopologyManager` +`TopologyManager` [기능 게이트(feature gate)](/docs/reference/command-line-tools-reference/feature-gates/)를 활성화 시켜두면, kubelet이 리소스 할당 결정을 할 때 토폴로지 힌트를 사용할 수 있다. 자세한 내용은 diff --git a/content/ko/docs/concepts/cluster-administration/_index.md b/content/ko/docs/concepts/cluster-administration/_index.md index e13a5fdb48f2c..c21e17e3ec8c7 100755 --- a/content/ko/docs/concepts/cluster-administration/_index.md +++ b/content/ko/docs/concepts/cluster-administration/_index.md @@ -1,5 +1,71 @@ --- -title: "클러스터 관리" +title: 클러스터 관리 weight: 100 +content_type: concept +description: > + 쿠버네티스 클러스터 생성 또는 관리에 관련된 로우-레벨(lower-level)의 세부 정보를 설명한다. 
+no_list: true --- + +클러스터 관리 개요는 쿠버네티스 클러스터를 생성하거나 관리하는 모든 사람들을 위한 것이다. +핵심 쿠버네티스 [개념](/ko/docs/concepts/)에 어느 정도 익숙하다고 가정한다. + + +## 클러스터 계획 + +쿠버네티스 클러스터를 계획, 설정 및 구성하는 방법에 대한 예는 [시작하기](/ko/docs/setup/)에 있는 가이드를 참고한다. 이 문서에 나열된 솔루션을 *배포판* 이라고 한다. + + {{< note >}} + 모든 배포판이 활발하게 유지되는 것은 아니다. 최신 버전의 쿠버네티스에서 테스트된 배포판을 선택한다. + {{< /note >}} + +가이드를 선택하기 전에 고려해야 할 사항은 다음과 같다. + + - 컴퓨터에서 쿠버네티스를 그냥 한번 사용해보고 싶은가? 아니면, 고가용 멀티 노드 클러스터를 만들고 싶은가? 사용자의 필요에 따라 가장 적합한 배포판을 선택한다. + - [구글 쿠버네티스 엔진(Google Kubernetes Engine)](https://cloud.google.com/kubernetes-engine/)과 같은 클라우드 제공자의 **쿠버네티스 클러스터 호스팅** 을 사용할 것인가? 아니면, **자체 클러스터를 호스팅** 할 것인가? + - 클러스터가 **온-프레미스 환경** 에 있나? 아니면, **클라우드(IaaS)** 에 있나? 쿠버네티스는 하이브리드 클러스터를 직접 지원하지는 않는다. 대신 여러 클러스터를 설정할 수 있다. + - **온-프레미스 환경에 쿠버네티스** 를 구성하는 경우, 어떤 [네트워킹 모델](/ko/docs/concepts/cluster-administration/networking/)이 가장 적합한 지 고려한다. + - 쿠버네티스를 **"베어 메탈" 하드웨어** 에서 실행할 것인가? 아니면, **가상 머신(VM)** 에서 실행할 것인가? + - **단지 클러스터만 실행할 것인가?** 아니면, **쿠버네티스 프로젝트 코드를 적극적으로 개발** 하는 것을 기대하는가? 만약 + 후자라면, 활발하게 개발이 진행되고 있는 배포판을 선택한다. 일부 배포판은 바이너리 릴리스만 사용하지만, + 더 다양한 선택을 제공한다. + - 클러스터를 실행하는 데 필요한 [컴포넌트](/ko/docs/concepts/overview/components/)에 익숙해지자. + + +## 클러스터 관리 + +* [클러스터 관리](/ko/docs/tasks/administer-cluster/cluster-management/)는 클러스터 라이프사이클과 관련된 몇 가지 주제를 설명한다. 새로운 클러스터 생성, 클러스터의 마스터 및 워커 노드 업그레이드, 노드 유지 관리 수행(예: 커널 업그레이드) 및 실행 중인 클러스터의 쿠버네티스 API 버전 업그레이드 + +* [노드 관리](/ko/docs/concepts/architecture/nodes/) 방법을 배운다. + +* 공유 클러스터에 대한 [리소스 쿼터](/ko/docs/concepts/policy/resource-quotas/)를 설정하고 관리하는 방법을 배운다. + +## 클러스터 보안 + +* [인증서](/ko/docs/concepts/cluster-administration/certificates/)는 다른 툴 체인을 사용하여 인증서를 생성하는 단계를 설명한다. + +* [쿠버네티스 컨테이너 환경](/ko/docs/concepts/containers/container-environment/)은 쿠버네티스 노드에서 Kubelet으로 관리하는 컨테이너에 대한 환경을 설명한다. + +* [쿠버네티스 API에 대한 접근 제어](/docs/reference/access-authn-authz/controlling-access/)는 사용자와 서비스 어카운트에 대한 권한을 설정하는 방법을 설명한다. + +* [인증](/docs/reference/access-authn-authz/authentication/)은 다양한 인증 옵션을 포함한 쿠버네티스에서의 인증에 대해 설명한다. + +* [인가](/docs/reference/access-authn-authz/authorization/)는 인증과는 별개로, HTTP 호출 처리 방법을 제어한다. + +* [어드미션 컨트롤러 사용하기](/docs/reference/access-authn-authz/admission-controllers/)는 인증과 권한 부여 후 쿠버네티스 API 서버에 대한 요청을 가로채는 플러그인에 대해 설명한다. + +* [쿠버네티스 클러스터에서 Sysctls 사용하기](/docs/concepts/cluster-administration/sysctl-cluster/)는 관리자가 `sysctl` 커맨드라인 도구를 사용하여 커널 파라미터를 설정하는 방법에 대해 설명한다. + +* [감사(audit)](/docs/tasks/debug-application-cluster/audit/)는 쿠버네티스의 감사 로그를 다루는 방법에 대해 설명한다. + +### kubelet 보안 + * [마스터-노드 통신](/ko/docs/concepts/architecture/control-plane-node-communication/) + * [TLS 부트스트래핑(bootstrapping)](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/) + * [Kubelet 인증/인가](/docs/admin/kubelet-authentication-authorization/) + +## 선택적 클러스터 서비스 + +* [DNS 통합](/ko/docs/concepts/services-networking/dns-pod-service/)은 DNS 이름을 쿠버네티스 서비스로 직접 확인하는 방법을 설명한다. + +* [클러스터 액티비티 로깅과 모니터링](/ko/docs/concepts/cluster-administration/logging/)은 쿠버네티스에서의 로깅이 어떻게 작동하는지와 구현 방법에 대해 설명한다. 
diff --git a/content/ko/docs/concepts/cluster-administration/addons.md b/content/ko/docs/concepts/cluster-administration/addons.md index 9e6f5ab7ec2f9..1838688d382a9 100644 --- a/content/ko/docs/concepts/cluster-administration/addons.md +++ b/content/ko/docs/concepts/cluster-administration/addons.md @@ -33,7 +33,7 @@ content_type: concept * [OVN4NFV-K8S-Plugin](https://github.com/opnfv/ovn4nfv-k8s-plugin)은 OVN 기반의 CNI 컨트롤러 플러그인으로 클라우드 네이티브 기반 서비스 기능 체인(Service function chaining(SFC)), 다중 OVN 오버레이 네트워킹, 동적 서브넷 생성, 동적 가상 네트워크 생성, VLAN 공급자 네트워크, 직접 공급자 네트워크와 멀티 클러스터 네트워킹의 엣지 기반 클라우드 등 네이티브 워크로드에 이상적인 멀티 네티워크 플러그인이다. * [NSX-T](https://docs.vmware.com/en/VMware-NSX-T/2.0/nsxt_20_ncp_kubernetes.pdf) 컨테이너 플러그인(NCP)은 VMware NSX-T와 쿠버네티스와 같은 컨테이너 오케스트레이터 간의 통합은 물론 NSX-T와 PKS(Pivotal 컨테이너 서비스) 및 OpenShift와 같은 컨테이너 기반 CaaS/PaaS 플랫폼 간의 통합을 제공한다. * [Nuage](https://github.com/nuagenetworks/nuage-kubernetes/blob/v5.1.1-1/docs/kubernetes-1-installation.rst)는 가시성과 보안 모니터링 기능을 통해 쿠버네티스 파드와 비-쿠버네티스 환경 간에 폴리시 기반 네트워킹을 제공하는 SDN 플랫폼이다. -* [Romana](http://romana.io)는 [네트워크폴리시 API](/docs/concepts/services-networking/network-policies/)도 지원하는 파드 네트워크용 Layer 3 네트워킹 솔루션이다. Kubeadm 애드온 설치에 대한 세부 정보는 [여기](https://github.com/romana/romana/tree/master/containerize)에 있다. +* [Romana](http://romana.io)는 [네트워크폴리시 API](/ko/docs/concepts/services-networking/network-policies/)도 지원하는 파드 네트워크용 Layer 3 네트워킹 솔루션이다. Kubeadm 애드온 설치에 대한 세부 정보는 [여기](https://github.com/romana/romana/tree/master/containerize)에 있다. * [Weave Net](https://www.weave.works/docs/net/latest/kube-addon/)은 네트워킹 및 네트워크 폴리시를 제공하고, 네트워크 파티션의 양면에서 작업을 수행하며, 외부 데이터베이스는 필요하지 않다. ## 서비스 검색 @@ -54,5 +54,3 @@ content_type: concept 더 이상 사용되지 않는 [cluster/addons](https://git.k8s.io/kubernetes/cluster/addons) 디렉터리에 다른 여러 애드온이 문서화되어 있다. 잘 관리된 것들이 여기에 연결되어 있어야 한다. PR을 환영한다! - - diff --git a/content/ko/docs/concepts/cluster-administration/cloud-providers.md b/content/ko/docs/concepts/cluster-administration/cloud-providers.md index 91e1a4e7ac3f4..3d5ba7e9d08d9 100644 --- a/content/ko/docs/concepts/cluster-administration/cloud-providers.md +++ b/content/ko/docs/concepts/cluster-administration/cloud-providers.md @@ -99,7 +99,7 @@ _어노테이션_ 을 사용하여 AWS의 로드 밸런서 서비스에 다른 * `service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout`: 서비스에서 연결 드레이닝 타임아웃 값을 지정하는 데 사용된다. * `service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout`: 서비스에서 유휴 연결 타임아웃 값을 지정하는 데 사용된다. * `service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled`: 서비스에서 교차 영역의 로드 밸런싱을 활성화하거나 비활성화하는 데 사용된다. -* `service.beta.kubernetes.io/aws-load-balancer-security-groups`: 생성된 ELB에 추가할 보안 그룹을 지정하는 데 사용된다. 이는 이전에 ELB에 할당된 다른 모든 보안 그룹을 대체한다. +* `service.beta.kubernetes.io/aws-load-balancer-security-groups`: 생성된 ELB에 추가할 보안 그룹을 지정하는 데 사용된다. 이는 이전에 ELB에 할당된 다른 모든 보안 그룹을 대체한다. 여기에 정의된 보안 그룹은 서비스 간에 공유해서는 안된다. * `service.beta.kubernetes.io/aws-load-balancer-extra-security-groups`: 서비스에서 생성된 ELB에 추가할 추가적인 보안 그룹을 지정하는 데 사용된다. * `service.beta.kubernetes.io/aws-load-balancer-internal`: 서비스에서 내부 ELB 사용 희망을 표시하기 위해 사용된다. * `service.beta.kubernetes.io/aws-load-balancer-proxy-protocol`: 서비스에서 ELB에서 프록시 프로토콜을 활성화하는 데 사용된다. 현재는 모든 ELB 백엔드에서 프록시 프로토콜을 사용하도록 설정하는 `*` 값만 허용한다. 향후에는 특정 백엔드에서만 프록시 프로토콜을 설정할 수 있도록 이를 조정할 수 있게 된다. 
diff --git a/content/ko/docs/concepts/cluster-administration/cluster-administration-overview.md b/content/ko/docs/concepts/cluster-administration/cluster-administration-overview.md deleted file mode 100644 index d454b85ca0514..0000000000000 --- a/content/ko/docs/concepts/cluster-administration/cluster-administration-overview.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: 클러스터 관리 개요 -content_type: concept -weight: 10 ---- - - -클러스터 관리 개요는 쿠버네티스 클러스터를 만들거나 관리하는 모든 사람들을 위한 것이다. -여기서는 쿠버네티스의 핵심 [개념](/ko/docs/concepts/)에 대해 잘 알고 있다고 가정한다. - - - -## 클러스터 계획 - -[올바른 솔루션 고르기](/ko/docs/setup/pick-right-solution/)에서 쿠버네티스 클러스터를 어떻게 계획하고, 셋업하고, 구성하는 지에 대한 예시를 참조하자. 이 글에 쓰여진 솔루션들은 *배포판* 이라고 부른다. - -가이드를 고르기 전에, 몇 가지 고려사항이 있다. - - - 단지 자신의 컴퓨터에 쿠버네티스를 테스트를 하는지, 또는 고가용성의 멀티 노드 클러스터를 만들려고 하는지에 따라 니즈에 가장 적절한 배포판을 고르자. - - [구글 쿠버네티스 엔진](https://cloud.google.com/kubernetes-engine/)과 같은 **호스팅된 쿠버네티스 클러스터** 를 사용할 것인지, **자신의 클러스터에 호스팅할 것인지**? - - 클러스터가 **온프레미스** 인지, 또는 **클라우드(IaaS)** 인지? 쿠버네티스는 하이브리드 클러스터를 직접적으로 지원하지는 않는다. 대신에, 사용자는 여러 클러스터를 구성할 수 있다. - - **만약 온프레미스에서 쿠버네티스를 구성한다면**, 어떤 [네트워킹 모델](/docs/concepts/cluster-administration/networking/)이 가장 적합한지 고려한다. - - 쿠버네티스 실행을 **"베어메탈" 하드웨어** 또는, **가상 머신 (VMs)** 중 어디에서 할 것 인지? - - **단지 클러스터 동작** 만 할 것인지, 아니면 **쿠버네티스 프로젝트 코드의 적극적인 개발** 을 원하는지? 만약 후자의 경우라면, - 적극적으로 개발된 배포판을 선택한다. 몇몇 배포판은 바이너리 릴리스 밖에 없지만, - 매우 다양한 선택권을 제공한다. - - 스스로 클러스터 구동에 필요한 [구성요소](/docs/admin/cluster-components/)에 익숙해지자. - -참고: 모든 배포판이 적극적으로 유지되는 것은 아니다. 최근 버전의 쿠버네티스로 테스트 된 배포판을 선택하자. - -## 클러스터 관리 - -* [클러스터 관리](/ko/docs/tasks/administer-cluster/cluster-management/)는 클러스터의 라이프사이클과 관련된 몇 가지 주제를 설명한다. 이는 새 클러스터 생성, 마스터와 워커노드 업그레이드, 노드 유지보수 실행 (예: 커널 업그레이드), 그리고 동작 중인 클러스터의 쿠버네티스 API 버전 업그레이드 등을 포함한다. - -* 어떻게 [노드 관리](/ko/docs/concepts/architecture/nodes/)를 하는지 배워보자. - -* 공유된 클러스터의 [리소스 쿼터](/ko/docs/concepts/policy/resource-quotas/)를 어떻게 셋업하고 관리할 것인지 배워보자. - -## 클러스터 보안 - -* [인증서](/docs/concepts/cluster-administration/certificates/)는 다른 툴 체인을 이용하여 인증서를 생성하는 방법을 설명한다. - -* [쿠버네티스 컨테이너 환경](/ko/docs/concepts/containers/container-environment/)은 쿠버네티스 노드에서 Kubelet에 의해 관리되는 컨테이너 환경에 대해 설명한다. - -* [쿠버네티스 API에 대한 접근 제어](/docs/reference/access-authn-authz/controlling-access/)는 사용자와 서비스 계정에 어떻게 권한 설정을 하는지 설명한다. - -* [인증](/docs/reference/access-authn-authz/authentication/)은 다양한 인증 옵션을 포함한 쿠버네티스에서의 인증을 설명한다. - -* [인가](/docs/reference/access-authn-authz/authorization/)은 인증과 다르며, HTTP 호출이 처리되는 방법을 제어한다. - -* [어드미션 컨트롤러 사용](/docs/reference/access-authn-authz/admission-controllers/)은 쿠버네티스 API 서버에서 인증과 인가 후 요청을 가로채는 플러그인을 설명한다. - -* [쿠버네티스 클러스터에서 Sysctls 사용](/docs/concepts/cluster-administration/sysctl-cluster/)는 관리자가 `sysctl` 커맨드라인 툴을 사용하여 커널 파라미터를 설정하는 방법을 설명한다. - -* [감시](/docs/tasks/debug-application-cluster/audit/)는 쿠버네티스 감시 로그가 상호작용 하는 방법을 설명한다. - -### kubelet 보안 - * [마스터노드 커뮤니케이션](/ko/docs/concepts/architecture/master-node-communication/) - * [TLS 부트스트래핑](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/) - * [Kubelet 인증/인가](/docs/admin/kubelet-authentication-authorization/) - -## 선택적 클러스터 서비스 - -* [DNS 통합](/ko/docs/concepts/services-networking/dns-pod-service/)은 DNS 이름이 쿠버네티스 서비스에 바로 연결되도록 변환하는 방법을 설명한다. - -* [클러스터 활동 로깅과 모니터링](/docs/concepts/cluster-administration/logging/)은 쿠버네티스 로깅이 로깅의 작동 방법과 로깅을 어떻게 구현하는지 설명한다. 
- - diff --git a/content/ko/docs/concepts/configuration/_index.md b/content/ko/docs/concepts/configuration/_index.md index 11dbabc7b26c3..5485f559a1132 100755 --- a/content/ko/docs/concepts/configuration/_index.md +++ b/content/ko/docs/concepts/configuration/_index.md @@ -1,5 +1,6 @@ --- title: "구성" weight: 80 +description: > + 쿠버네티스가 파드 구성을 위해 제공하는 리소스 --- - diff --git a/content/ko/docs/concepts/configuration/configmap.md b/content/ko/docs/concepts/configuration/configmap.md index f61add3919107..5031b06cf5177 100644 --- a/content/ko/docs/concepts/configuration/configmap.md +++ b/content/ko/docs/concepts/configuration/configmap.md @@ -60,7 +60,7 @@ metadata: name: game-demo data: # 속성과 비슷한 키; 각 키는 간단한 값으로 매핑됨 - player_initial_lives: 3 + player_initial_lives: "3" ui_properties_file_name: "user-interface.properties" # # 파일과 비슷한 키 @@ -85,9 +85,9 @@ data: 방식에 따라 다르게 쓰인다. 처음 세 가지 방법의 경우, {{< glossary_tooltip text="kubelet" term_id="kubelet" >}}은 파드의 컨테이너를 시작할 때 -시크릿의 데이터를 사용한다. +컨피그맵의 데이터를 사용한다. -네 번째 방법은 시크릿과 데이터를 읽기 위해 코드를 작성해야 한다는 것을 의미한다. +네 번째 방법은 컨피그맵과 데이터를 읽기 위해 코드를 작성해야 한다는 것을 의미한다. 그러나, 쿠버네티스 API를 직접 사용하기 때문에, 애플리케이션은 컨피그맵이 변경될 때마다 업데이트를 받기 위해 구독할 수 있고, 업데이트가 있으면 반응한다. 쿠버네티스 API에 직접 접근하면, 이 @@ -126,25 +126,32 @@ spec: configMap: # 마운트하려는 컨피그맵의 이름을 제공한다. name: game-demo + # 컨피그맵에서 파일로 생성할 키 배열 + items: + - key: "game.properties" + path: "game.properties" + - key: "user-interface.properties" + path: "user-interface.properties" ``` 컨피그맵은 단일 라인 속성(single line property) 값과 멀티 라인의 파일과 비슷한(multi-line file-like) 값을 구분하지 않는다. 더 중요한 것은 파드와 다른 오브젝트가 이러한 값을 소비하는 방식이다. + 이 예제에서, 볼륨을 정의하고 `demo` 컨테이너에 -`/config` 로 마운트하면 4개의 파일이 생성된다. +`/config` 로 마운트하면 컨피그맵에 4개의 키가 있더라도 +`/config/game.properties` 와 `/config/user-interface.properties` +2개의 파일이 생성된다. 이것은 파드 정의가 +`volume` 섹션에서 `items` 배열을 지정하기 때문이다. +`items` 배열을 완전히 생략하면, 컨피그맵의 모든 키가 +키와 이름이 같은 파일이 되고, 4개의 파일을 얻게 된다. -- `/config/player_initial_lives` -- `/config/ui_properties_file_name` -- `/config/game.properties` -- `/config/user-interface.properties` +## 컨피그맵 사용하기 -`/config` 에 `.properties` 확장자를 가진 파일만 -포함시키려면, 두 개의 다른 컨피그맵을 사용하고, 파드에 -대해서는 `spec` 의 두 컨피그맵을 참조한다. 첫 번째 컨피그맵은 -`player_initial_lives` 와 `ui_properties_file_name` 을 정의한다. 두 번째 -컨피그맵은 kubelet이 `/config` 에 넣는 파일을 정의한다. +컨피그맵은 데이터 볼륨으로 마운트할 수 있다. 컨피그맵은 파드에 직접적으로 +노출되지 않고, 시스템의 다른 부분에서도 사용할 수 있다. 예를 들어, +컨피그맵은 시스템의 다른 부분이 구성을 위해 사용해야 하는 데이터를 보유할 수 있다. {{< note >}} 컨피그맵을 사용하는 가장 일반적인 방법은 동일한 네임스페이스의 @@ -157,18 +164,12 @@ spec: 사용할 수도 있다. {{< /note >}} -## 컨피그맵 사용하기 - -컨피그맵은 데이터 볼륨으로 마운트할 수 있다. 컨피그맵은 파드에 직접적으로 -노출되지 않고, 시스템의 다른 부분에서도 사용할 수 있다. 예를 들어, -컨피그맵은 시스템의 다른 부분이 구성을 위해 사용해야 하는 데이터를 보유할 수 있다. - ### 파드에서 컨피그맵을 파일로 사용하기 파드의 볼륨에서 컨피그맵을 사용하려면 다음을 수행한다. 1. 컨피그맵을 생성하거나 기존 컨피그맵을 사용한다. 여러 파드가 동일한 컨피그맵을 참조할 수 있다. -1. 파드 정의를 수정해서 `.spec.volumes[]` 아래에 볼륨을 추가한다. 볼륨 이름은 원하는 대로 정하고, 컨피그맵 오브젝트를 참조하도록 `.spec.volumes[].configmap.localObjectReference` 필드를 설정한다. +1. 파드 정의를 수정해서 `.spec.volumes[]` 아래에 볼륨을 추가한다. 볼륨 이름은 원하는 대로 정하고, 컨피그맵 오브젝트를 참조하도록 `.spec.volumes[].configMap.name` 필드를 설정한다. 1. 컨피그맵이 필요한 각 컨테이너에 `.spec.containers[].volumeMounts[]` 를 추가한다. `.spec.containers[].volumeMounts[].readOnly = true` 를 설정하고 컨피그맵이 연결되기를 원하는 곳에 사용하지 않은 디렉터리 이름으로 `.spec.containers[].volumeMounts[].mountPath` 를 지정한다. 1. 프로그램이 해당 디렉터리에서 파일을 찾도록 이미지 또는 커맨드 라인을 수정한다. 컨피그맵의 `data` 맵 각 키는 `mountPath` 아래의 파일 이름이 된다. @@ -250,4 +251,3 @@ immutable: true * [컨피그맵을 사용하도록 파드 구성하기](/docs/tasks/configure-pod-container/configure-pod-configmap/)를 읽어본다. * 코드를 구성에서 분리하려는 동기를 이해하려면 [Twelve-Factor 앱](https://12factor.net/ko/)을 읽어본다. 
- diff --git a/content/ko/docs/concepts/configuration/manage-resources-containers.md b/content/ko/docs/concepts/configuration/manage-resources-containers.md index 90991bc49e9bc..3c1414fcd6cd3 100644 --- a/content/ko/docs/concepts/configuration/manage-resources-containers.md +++ b/content/ko/docs/concepts/configuration/manage-resources-containers.md @@ -227,7 +227,7 @@ kubelet은 파드의 컨테이너를 시작할 때, CPU와 메모리 제한을 파드는 스크래치 공간, 캐싱 및 로그에 대해 임시 로컬 스토리지를 사용한다. kubelet은 로컬 임시 스토리지를 사용하여 컨테이너에 -[`emptyDir`](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) +[`emptyDir`](/ko/docs/concepts/storage/volumes/#emptydir) {{< glossary_tooltip term_id="volume" text="볼륨" >}}을 마운트하기 위해 파드에 스크래치 공간을 제공할 수 있다. kubelet은 이러한 종류의 스토리지를 사용하여 @@ -657,7 +657,7 @@ Allocated resources: (Total limits may be over 100 percent, i.e., overcommitted.) CPU Requests CPU Limits Memory Requests Memory Limits ------------ ---------- --------------- ------------- - 680m (34%) 400m (20%) 920Mi (12%) 1070Mi (14%) + 680m (34%) 400m (20%) 920Mi (11%) 1070Mi (13%) ``` 위의 출력에서, ​파드가 1120m 이상의 CPU 또는 6.23Gi의 메모리를 @@ -758,5 +758,3 @@ LastState: map[terminated:map[exitCode:137 reason:OOM Killed startedAt:2015-07-0 * [ResourceRequirements](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#resourcerequirements-v1-core) API 레퍼런스 읽어보기 * XFS의 [프로젝트 쿼터](http://xfs.org/docs/xfsdocs-xml-dev/XFS_User_Guide/tmp/en-US/html/xfs-quotas.html)에 대해 읽어보기 - - diff --git a/content/ko/docs/concepts/configuration/organize-cluster-access-kubeconfig.md b/content/ko/docs/concepts/configuration/organize-cluster-access-kubeconfig.md index 483e664e2405d..a002414b67e30 100644 --- a/content/ko/docs/concepts/configuration/organize-cluster-access-kubeconfig.md +++ b/content/ko/docs/concepts/configuration/organize-cluster-access-kubeconfig.md @@ -58,8 +58,8 @@ kubectl config use-context ## KUBECONFIG 환경 변수 `KUBECONFIG` 환경 변수는 kubeconfig 파일 목록을 보유한다. -Linux 및 Mac의 경우 이는 콜론(:)으로 구분된 목록이다. -Windows는 세미콜론(;)으로 구분한다. `KUBECONFIG` 환경 변수가 필수는 아니다. +리눅스 및 Mac의 경우 이는 콜론(:)으로 구분된 목록이다. +윈도우는 세미콜론(;)으로 구분한다. `KUBECONFIG` 환경 변수가 필수는 아니다. `KUBECONFIG` 환경 변수가 없으면, `kubectl`은 기본 kubeconfig 파일인 `$HOME/.kube/config`를 사용한다. diff --git a/content/ko/docs/concepts/configuration/overview.md b/content/ko/docs/concepts/configuration/overview.md index db45ca2d2cbc7..387a794a6b4bb 100644 --- a/content/ko/docs/concepts/configuration/overview.md +++ b/content/ko/docs/concepts/configuration/overview.md @@ -28,16 +28,16 @@ weight: 10 - 더 나은 인트로스펙션(introspection)을 위해서, 어노테이션에 오브젝트의 설명을 넣는다. -## "단독(Naked)" 파드 vs 레플리카 셋, 디플로이먼트, 그리고 잡 {#naked-pods-vs-replicasets-deployments-and-jobs} +## "단독(Naked)" 파드 vs 레플리카셋(ReplicaSet), 디플로이먼트(Deployment), 그리고 잡(Job) {#naked-pods-vs-replicasets-deployments-and-jobs} -- 가능하다면 단독 파드(즉, [레플리카 셋](/ko/docs/concepts/workloads/controllers/replicaset/)이나 [디플로이먼트](/ko/docs/concepts/workloads/controllers/deployment/)에 연결되지 않은 파드)를 사용하지 않는다. 단독 파드는 노드 장애 이벤트가 발생해도 다시 스케줄링되지 않는다. +- 가능하다면 단독 파드(즉, [레플리카셋](/ko/docs/concepts/workloads/controllers/replicaset/)이나 [디플로이먼트](/ko/docs/concepts/workloads/controllers/deployment/)에 연결되지 않은 파드)를 사용하지 않는다. 단독 파드는 노드 장애 이벤트가 발생해도 다시 스케줄링되지 않는다. - 명백하게 [`restartPolicy: Never`](/ko/docs/concepts/workloads/pods/pod-lifecycle/#재시작-정책)를 사용하는 상황을 제외한다면, 의도한 파드의 수가 항상 사용 가능한 상태를 유지하는 레플리카 셋을 생성하고, 파드를 교체하는 전략([롤링 업데이트](/ko/docs/concepts/workloads/controllers/deployment/#디플로이먼트-롤링-업데이트)와 같은)을 명시하는 디플로이먼트는 파드를 직접 생성하기 위해 항상 선호되는 방법이다. [잡](/ko/docs/concepts/workloads/controllers/jobs-run-to-completion/) 또한 적절할 수 있다. 
+ 명백하게 [`restartPolicy: Never`](/ko/docs/concepts/workloads/pods/pod-lifecycle/#재시작-정책)를 사용하는 상황을 제외한다면, 의도한 파드의 수가 항상 사용 가능한 상태를 유지하는 레플리카셋을 생성하고, 파드를 교체하는 전략([롤링 업데이트](/ko/docs/concepts/workloads/controllers/deployment/#디플로이먼트-롤링-업데이트)와 같은)을 명시하는 디플로이먼트는 파드를 직접 생성하기 위해 항상 선호되는 방법이다. [잡](/ko/docs/concepts/workloads/controllers/job/) 또한 적절할 수 있다. ## 서비스 -- 서비스에 대응하는 백엔드 워크로드(디플로이먼트 또는 레플리카 셋) 또는 서비스 접근이 필요한 어떠한 워크로드를 생성하기 전에 [서비스](/ko/docs/concepts/services-networking/service/)를 미리 생성한다. 쿠버네티스가 컨테이너를 시작할 때, 쿠버네티스는 컨테이너 시작 당시에 생성되어 있는 모든 서비스를 가리키는 환경 변수를 컨테이너에 제공한다. 예를 들어, `foo` 라는 이름의 서비스가 존재한다면, 모든 컨테이너들은 초기 환경에서 다음의 변수들을 얻을 것이다. +- 서비스에 대응하는 백엔드 워크로드(디플로이먼트 또는 레플리카셋) 또는 서비스 접근이 필요한 어떠한 워크로드를 생성하기 전에 [서비스](/ko/docs/concepts/services-networking/service/)를 미리 생성한다. 쿠버네티스가 컨테이너를 시작할 때, 쿠버네티스는 컨테이너 시작 당시에 생성되어 있는 모든 서비스를 가리키는 환경 변수를 컨테이너에 제공한다. 예를 들어, `foo` 라는 이름의 서비스가 존재한다면, 모든 컨테이너들은 초기 환경에서 다음의 변수들을 얻을 것이다. ```shell FOO_SERVICE_HOST=<서비스가 동작 중인 호스트> @@ -46,7 +46,7 @@ weight: 10 *이는 순서를 정하는 일이 요구됨을 암시한다* - `파드`가 접근하기를 원하는 어떠한 `서비스`는 `파드` 스스로가 생성되기 전에 미리 생성되어 있어야 하며, 그렇지 않으면 환경 변수가 설정되지 않을 것이다. DNS는 이러한 제한을 가지고 있지 않다. -- 선택적인(그렇지만 매우 권장되는) [클러스터 애드온](/docs/concepts/cluster-administration/addons/)은 DNS 서버이다. +- 선택적인(그렇지만 매우 권장되는) [클러스터 애드온](/ko/docs/concepts/cluster-administration/addons/)은 DNS 서버이다. DNS 서버는 새로운 `서비스`를 위한 쿠버네티스 API를 Watch하며, 각 서비스를 위한 DNS 레코드 셋을 생성한다. 만약 DNS가 클러스터에 걸쳐 활성화되어 있다면, 모든 `파드`는 `서비스`의 이름을 자동으로 해석할 수 있어야 한다. - 반드시 필요한 것이 아니라면 파드에 `hostPort` 를 명시하지 않는다. <`hostIP`, `hostPort`, `protocol`> 조합은 유일해야 하기 때문에, `hostPort`로 바인드하는 것은 파드가 스케줄링될 수 있는 위치의 개수를 제한한다. 만약 `hostIP`와 `protocol`을 뚜렷히 명시하지 않으면, 쿠버네티스는 `hostIP`의 기본 값으로 `0.0.0.0`를, `protocol`의 기본 값으로 `TCP`를 사용한다. @@ -61,13 +61,13 @@ DNS 서버는 새로운 `서비스`를 위한 쿠버네티스 API를 Watch하며 ## 레이블 사용하기 -- `{ app: myapp, tier: frontend, phase: test, deployment: v3 }`처럼 애플리케이션이나 디플로이먼트의 __속성에 대한 의미__를 식별하는 [레이블](/ko/docs/concepts/overview/working-with-objects/labels/)을 정의해 사용한다. 다른 리소스를 위해 적절한 파드를 선택하는 용도로 이러한 레이블을 이용할 수 있다. 예를 들어, 모든 `tier: frontend` 파드를 선택하거나, `app: myapp`의 모든 `phase: test` 컴포넌트를 선택하는 서비스를 생각해 볼 수 있다. 이 접근 방법의 예시는 [방명록](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/guestbook/) 앱을 참고한다. +- `{ app: myapp, tier: frontend, phase: test, deployment: v3 }`처럼 애플리케이션이나 디플로이먼트의 __속성에 대한 의미__ 를 식별하는 [레이블](/ko/docs/concepts/overview/working-with-objects/labels/)을 정의해 사용한다. 다른 리소스를 위해 적절한 파드를 선택하는 용도로 이러한 레이블을 이용할 수 있다. 예를 들어, 모든 `tier: frontend` 파드를 선택하거나, `app: myapp`의 모든 `phase: test` 컴포넌트를 선택하는 서비스를 생각해 볼 수 있다. 이 접근 방법의 예시는 [방명록](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/guestbook/) 앱을 참고한다. 릴리스에 특정되는 레이블을 서비스의 셀렉터에서 생략함으로써 여러 개의 디플로이먼트에 걸치는 서비스를 생성할 수 있다. [디플로이먼트](/ko/docs/concepts/workloads/controllers/deployment/)는 생성되어 있는 서비스를 다운타임 없이 수정하기 쉽도록 만든다. 오브젝트의 의도한 상태는 디플로이먼트에 의해 기술되며, 만약 그 스펙에 대한 변화가 _적용될_ 경우, 디플로이먼트 컨트롤러는 일정한 비율로 실제 상태를 의도한 상태로 변화시킨다. -- 디버깅을 위해 레이블을 조작할 수 있다. (레플리카 셋과 같은) 쿠버네티스 컨트롤러와 서비스는 셀렉터 레이블을 사용해 파드를 선택하기 때문에, 관련된 레이블을 파드에서 삭제하는 것은 컨트롤러로부터 관리되거나 서비스로부터 트래픽을 전달받는 것을 중단시킨다. 만약 이미 존재하는 파드의 레이블을 삭제한다면, 파드의 컨트롤러는 그 자리를 대신할 새로운 파드를 생성한다. 이것은 이전에 "살아 있는" 파드를 "격리된" 환경에서 디버그할 수 있는 유용한 방법이다. 레이블을 상호적으로 추가하고 삭제하기 위해서, [`kubectl label`](/docs/reference/generated/kubectl/kubectl-commands#label)를 사용할 수 있다. +- 디버깅을 위해 레이블을 조작할 수 있다. (레플리카셋과 같은) 쿠버네티스 컨트롤러와 서비스는 셀렉터 레이블을 사용해 파드를 선택하기 때문에, 관련된 레이블을 파드에서 삭제하는 것은 컨트롤러로부터 관리되거나 서비스로부터 트래픽을 전달받는 것을 중단시킨다. 만약 이미 존재하는 파드의 레이블을 삭제한다면, 파드의 컨트롤러는 그 자리를 대신할 새로운 파드를 생성한다. 이것은 이전에 "살아 있는" 파드를 "격리된" 환경에서 디버그할 수 있는 유용한 방법이다. 
레이블을 상호적으로 추가하고 삭제하기 위해서, [`kubectl label`](/docs/reference/generated/kubectl/kubectl-commands#label)를 사용할 수 있다. ## 컨테이너 이미지 @@ -99,8 +99,6 @@ DNS 서버는 새로운 `서비스`를 위한 쿠버네티스 API를 Watch하며 - `kubectl apply -f <디렉터리>`를 사용한다. 이 명령어는 `<디렉터리>` 내부의 모든 `.yaml`, `.yml`, 그리고 `.json` 쿠버네티스 구성 파일을 찾아 `apply`에 전달한다. -- `get`과 `delete` 동작을 위해 특정 오브젝트의 이름 대신 레이블 셀렉터를 사용한다. [레이블 셀렉터](/ko/docs/concepts/overview/working-with-objects/labels/#레이블-셀렉터)와 [효율적으로 레이블 사용하기](/docs/concepts/cluster-administration/manage-deployment/#using-labels-effectively)를 참고할 수 있다. - -- 단일 컨테이너로 구성된 디플로이먼트와 서비스를 빠르게 생성하기 위해 `kubectl run`와 `kubectl expose`를 사용한다. [클러스터 내부의 애플리케이션에 접근하기 위한 서비스 사용](/docs/tasks/access-application-cluster/service-access-application-cluster/)에서 예시를 확인할 수 있다. - +- `get`과 `delete` 동작을 위해 특정 오브젝트의 이름 대신 레이블 셀렉터를 사용한다. [레이블 셀렉터](/ko/docs/concepts/overview/working-with-objects/labels/#레이블-셀렉터)와 [효율적으로 레이블 사용하기](/ko/docs/concepts/cluster-administration/manage-deployment/#효과적인-레이블-사용)를 참고할 수 있다. +- 단일 컨테이너로 구성된 디플로이먼트와 서비스를 빠르게 생성하기 위해 `kubectl create deployment` 와 `kubectl expose` 를 사용한다. [클러스터 내부의 애플리케이션에 접근하기 위한 서비스 사용](/docs/tasks/access-application-cluster/service-access-application-cluster/)에서 예시를 확인할 수 있다. diff --git a/content/ko/docs/concepts/configuration/pod-overhead.md b/content/ko/docs/concepts/configuration/pod-overhead.md index 3dd850067c35b..d4888ecbfbc73 100644 --- a/content/ko/docs/concepts/configuration/pod-overhead.md +++ b/content/ko/docs/concepts/configuration/pod-overhead.md @@ -11,7 +11,7 @@ weight: 20 노드 위에서 파드를 구동할 때, 파드는 그 자체적으로 많은 시스템 리소스를 사용한다. 이러한 리소스는 파드 내의 컨테이너들을 구동하기 위한 리소스 이외에 추가적으로 필요한 것이다. -_파드 오버헤드_ 는 컨테이너 리소스 요청과 상한 위에서 파드의 인프라에 의해 +_파드 오버헤드_ 는 컨테이너 리소스 요청과 상한 위에서 파드의 인프라에 의해 소비되는 리소스를 계산하는 기능이다. @@ -20,25 +20,25 @@ _파드 오버헤드_ 는 컨테이너 리소스 요청과 상한 위에서 파 -쿠버네티스에서 파드의 오버헤드는 파드의 -[런타임클래스](/ko/docs/concepts/containers/runtime-class/) 와 관련된 오버헤드에 따라 -[어드미션](/docs/reference/access-authn-authz/extensible-admission-controllers/#what-are-admission-webhooks) +쿠버네티스에서 파드의 오버헤드는 파드의 +[런타임클래스](/ko/docs/concepts/containers/runtime-class/) 와 관련된 오버헤드에 따라 +[어드미션](/docs/reference/access-authn-authz/extensible-admission-controllers/#what-are-admission-webhooks) 이 수행될 때 지정된다. -파드 오버헤드가 활성화 되면, 파드를 노드에 스케줄링 할 때 컨테이너 리소스 요청의 합에 -파드의 오버헤드를 추가해서 스케줄링을 고려한다. 마찬가지로, Kubelet은 파드의 cgroups 크기를 변경하거나 +파드 오버헤드가 활성화 되면, 파드를 노드에 스케줄링 할 때 컨테이너 리소스 요청의 합에 +파드의 오버헤드를 추가해서 스케줄링을 고려한다. 마찬가지로, Kubelet은 파드의 cgroups 크기를 변경하거나 파드의 축출 등급을 부여할 때에도 파드의 오버헤드를 포함하여 고려한다. ## 파드 오버헤드 활성화하기 {#set-up} -기능 활성화를 위해 클러스터에서 -`PodOverhead` [기능 게이트](/docs/reference/command-line-tools-reference/feature-gates/) 가 활성화 되어 있고 (1.18 버전에서는 기본적으로 활성화), +기능 활성화를 위해 클러스터에서 +`PodOverhead` [기능 게이트](/docs/reference/command-line-tools-reference/feature-gates/) 가 활성화 되어 있고 (1.18 버전에서는 기본적으로 활성화), `overhead` 필드를 정의하는 `RuntimeClass` 가 사용되고 있는지 확인해야 한다. ## 사용 예제 파드 오버헤드 기능을 사용하기 위하여, `overhead` 필드를 정의하는 런타임클래스가 필요하다. -예를 들어, 가상 머신 및 게스트 OS에 대하여 파드 당 120 MiB를 사용하는 +예를 들어, 가상 머신 및 게스트 OS에 대하여 파드 당 120 MiB를 사용하는 가상화 컨테이너 런타임의 런타임클래스의 경우 다음과 같이 정의 할 수 있다. ```yaml @@ -54,7 +54,7 @@ overhead: cpu: "250m" ``` -`kata-fc` 런타임클래스 핸들러를 지정하는 워크로드는 리소스 쿼터 계산, +`kata-fc` 런타임클래스 핸들러를 지정하는 워크로드는 리소스 쿼터 계산, 노드 스케줄링 및 파드 cgroup 크기 조정을 위하여 메모리와 CPU 오버헤드를 고려한다. 주어진 예제 워크로드 test-pod의 구동을 고려해보자. @@ -83,9 +83,9 @@ spec: memory: 100Mi ``` -어드미션 수행 시에, [어드미션 컨트롤러](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/)는 -런타임클래스에 기술된 `overhead` 를 포함하기 위하여 워크로드의 PodSpec 항목을 갱신한다. 만약 PodSpec이 이미 해당 필드에 정의되어 있으면, -파드는 거부된다. 
주어진 예제에서, 오직 런타임클래스의 이름만이 정의되어 있기 때문에, 어드미션 컨트롤러는 파드가 +어드미션 수행 시에, [어드미션 컨트롤러](/docs/reference/access-authn-authz/admission-controllers/)는 +런타임클래스에 기술된 `overhead` 를 포함하기 위하여 워크로드의 PodSpec 항목을 갱신한다. 만약 PodSpec이 이미 해당 필드에 정의되어 있으면, +파드는 거부된다. 주어진 예제에서, 오직 런타임클래스의 이름만이 정의되어 있기 때문에, 어드미션 컨트롤러는 파드가 `overhead` 를 포함하도록 변경한다. 런타임클래스의 어드미션 수행 후에, 파드의 스펙이 갱신된 것을 확인할 수 있다. @@ -99,11 +99,11 @@ kubectl get pod test-pod -o jsonpath='{.spec.overhead}' map[cpu:250m memory:120Mi] ``` -만약 리소스쿼터 항목이 정의되어 있다면, 컨테이너의 리소스 요청의 합에는 +만약 리소스쿼터 항목이 정의되어 있다면, 컨테이너의 리소스 요청의 합에는 `overhead` 필드도 추가된다. -kube-scheduler 는 어떤 노드에 파드가 기동 되어야 할지를 정할 때, 파드의 `overhead` 와 -해당 파드에 대한 컨테이너의 리소스 요청의 합을 고려한다. 이 예제에서, 스케줄러는 +kube-scheduler 는 어떤 노드에 파드가 기동 되어야 할지를 정할 때, 파드의 `overhead` 와 +해당 파드에 대한 컨테이너의 리소스 요청의 합을 고려한다. 이 예제에서, 스케줄러는 리소스 요청과 파드의 오버헤드를 더하고, 2.25 CPU와 320 MiB 메모리가 사용 가능한 노드를 찾는다. 일단 파드가 특정 노드에 스케줄링 되면, 해당 노드에 있는 kubelet 은 파드에 대한 새로운 {{< glossary_tooltip text="cgroup" term_id="cgroup" >}}을 생성한다. @@ -142,7 +142,7 @@ CPU 2250m와 메모리 320MiB 가 리소스로 요청되었으며, 이 결과는 ## 파드 cgroup 상한 확인하기 -워크로드가 실행 중인 노드에서 파드의 메모리 cgroup들을 확인 해보자. 다음의 예제에서, [`crictl`](https://github.com/kubernetes-sigs/cri-tools/blob/master/docs/crictl.md)은 노드에서 사용되며, +워크로드가 실행 중인 노드에서 파드의 메모리 cgroup들을 확인 해보자. 다음의 예제에서, [`crictl`](https://github.com/kubernetes-sigs/cri-tools/blob/master/docs/crictl.md)은 노드에서 사용되며, CRI-호환 컨테이너 런타임을 위해서 노드에서 사용할 수 있는 CLI 를 제공한다. 파드의 오버헤드 동작을 보여주는 좋은 예이며, 사용자가 노드에서 직접 cgroup들을 확인하지 않아도 된다. @@ -178,8 +178,8 @@ sudo crictl inspectp -o=json $POD_ID | grep cgroupsPath ``` ### 관찰성 -`kube_pod_overhead` 항목은 [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics) -에서 사용할 수 있어, 파드 오버헤드가 사용되는 시기를 식별하고, +`kube_pod_overhead` 항목은 [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics) +에서 사용할 수 있어, 파드 오버헤드가 사용되는 시기를 식별하고, 정의된 오버헤드로 실행되는 워크로드의 안정성을 관찰할 수 있다. 이 기능은 kube-state-metrics 의 1.9 릴리스에서는 사용할 수 없지만, 다음 릴리스에서는 가능할 예정이다. 그 전까지는 소스로부터 kube-state-metric 을 빌드해야 한다. 
@@ -191,5 +191,3 @@ sudo crictl inspectp -o=json $POD_ID | grep cgroupsPath * [런타임클래스](/ko/docs/concepts/containers/runtime-class/) * [파드오버헤드 디자인](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/20190226-pod-overhead.md) - - diff --git a/content/ko/docs/concepts/configuration/resource-bin-packing.md b/content/ko/docs/concepts/configuration/resource-bin-packing.md index 4a8a6b7f2febf..998456ca4968e 100644 --- a/content/ko/docs/concepts/configuration/resource-bin-packing.md +++ b/content/ko/docs/concepts/configuration/resource-bin-packing.md @@ -128,23 +128,23 @@ CPU: 1 Node Score: intel.com/foo = resourceScoringFunction((2+1),4) - = (100 - ((4-3)*100/4) - = (100 - 25) - = 75 - = rawScoringFunction(75) - = 7 + = (100 - ((4-3)*100/4) + = (100 - 25) + = 75 # requested + used = 75% * available + = rawScoringFunction(75) + = 7 # floor(75/10) Memory = resourceScoringFunction((256+256),1024) = (100 -((1024-512)*100/1024)) - = 50 + = 50 # requested + used = 50% * available = rawScoringFunction(50) - = 5 + = 5 # floor(50/10) CPU = resourceScoringFunction((2+1),8) = (100 -((8-3)*100/8)) - = 37.5 + = 37.5 # requested + used = 37.5% * available = rawScoringFunction(37.5) - = 3 + = 3 # floor(37.5/10) NodeScore = (7 * 5) + (5 * 1) + (3 * 3) / (5 + 1 + 3) = 5 @@ -189,5 +189,3 @@ NodeScore = (5 * 5) + (7 * 1) + (10 * 3) / (5 + 1 + 3) = 7 ``` - - diff --git a/content/ko/docs/concepts/containers/_index.md b/content/ko/docs/concepts/containers/_index.md index bdcb03bde5ffd..76b1756a19248 100755 --- a/content/ko/docs/concepts/containers/_index.md +++ b/content/ko/docs/concepts/containers/_index.md @@ -1,5 +1,41 @@ --- -title: "컨테이너" +title: 컨테이너 weight: 40 +description: 런타임 의존성과 함께 애플리케이션을 패키징하는 기술 +content_type: concept +no_list: true --- + + +실행하는 각 컨테이너는 반복 가능하다. 의존성이 포함된 표준화는 +어디에서 실행하던지 동일한 동작을 얻는다는 것을 +의미한다. + +컨테이너는 기본 호스트 인프라에서 애플리케이션을 분리한다. +따라서 다양한 클라우드 또는 OS 환경에서 보다 쉽게 ​​배포할 수 있다. + + + + + + +## 컨테이너 이미지 +[컨테이너 이미지](/ko/docs/concepts/containers/images/)는 애플리케이션을 +실행하는 데 필요한 모든 것이 포함된 실행할 준비가 되어있는(ready-to-run) 소프트웨어 패키지이다. +여기에는 실행하는 데 필요한 코드와 모든 런타임, 애플리케이션 및 시스템 라이브러리, +그리고 모든 필수 설정에 대한 기본값이 포함된다. + +설계 상, 컨테이너는 변경할 수 없다. 이미 실행 중인 컨테이너의 코드를 +변경할 수 없다. 컨테이너화된 애플리케이션이 있고 +변경하려는 경우, 변경 사항이 포함된 새 컨테이너를 빌드한 +다음, 업데이트된 이미지에서 시작하도록 컨테이너를 다시 생성해야 한다. + +## 컨테이너 런타임 + +{{< glossary_definition term_id="container-runtime" length="all" >}} + +## {{% heading "whatsnext" %}} + +* [컨테이너 이미지](/ko/docs/concepts/containers/images/)에 대해 읽어보기 +* [파드](/ko/docs/concepts/workloads/pods/)에 대해 읽어보기 diff --git a/content/ko/docs/concepts/containers/images.md b/content/ko/docs/concepts/containers/images.md index f03847c38c08e..0f7bb0cb1317c 100644 --- a/content/ko/docs/concepts/containers/images.md +++ b/content/ko/docs/concepts/containers/images.md @@ -6,18 +6,51 @@ weight: 10 -사용자 Docker 이미지를 생성하고 레지스트리에 푸시(push)하여 쿠버네티스 파드에서 참조되기 이전에 대비한다. +컨테이너 이미지는 애플리케이션과 모든 소프트웨어 의존성을 캡슐화하는 바이너리 데이터를 +나타낸다. 컨테이너 이미지는 독립적으로 실행할 수 있고 런타임 환경에 대해 +잘 정의된 가정을 만드는 실행 가능한 소프트웨어 번들이다. -컨테이너의 `image` 속성은 `docker` 커맨드에서 지원하는 문법과 같은 문법을 지원한다. 이는 프라이빗 레지스트리와 태그를 포함한다. +일반적으로 {{< glossary_tooltip text="파드" term_id="pod" >}}에서 +참조하기 전에 애플리케이션의 컨테이너 이미지를 +생성해서 레지스트리로 푸시한다. + +이 페이지는 컨테이너 이미지 개념의 개요를 제공한다. +## 이미지 이름 + +컨테이너 이미지는 일반적으로 `pause`, `example/mycontainer` 또는 `kube-apiserver` 와 같은 이름을 부여한다. +이미지는 또한 레지스트리 호스트 이름을 포함할 수 있다. 예를 들면, `fictional.registry.example/imagename` +과 같다. 그리고 포트 번호도 포함할 수 있다. 예를 들면, `fictional.registry.example:10443/imagename` 과 같다. + +레지스트리 호스트 이름을 지정하지 않으면, 쿠버네티스는 도커 퍼블릭 레지스트리를 의미한다고 가정한다. 
+ +이미지 이름 부분 다음에 _tag_ 를 추가할 수 있다(`docker` 와 `podman` +등의 명령과 함께 사용). +태그를 사용하면 동일한 시리즈 이미지의 다른 버전을 식별할 수 있다. + +이미지 태그는 소문자와 대문자, 숫자, 밑줄(`_`), +마침표(`.`) 및 대시(`-`)로 구성된다. +이미지 태그 안에서 구분 문자(`_`, `-` 그리고 `.`)를 +배치할 수 있는 위치에 대한 추가 규칙이 있다. +태그를 지정하지 않으면, 쿠버네티스는 태그 `latest` 를 의미한다고 가정한다. + +{{< caution >}} +프로덕션에서 컨테이너를 배포할 때는 `latest` 태그를 사용하지 않아야 한다. +실행 중인 이미지 버전을 추적하기가 어렵고 +이전에 잘 동작하던 버전으로 롤백하기가 더 어렵다. + +대신, `v1.42.0` 과 같은 의미있는 태그를 지정한다. +{{< /caution >}} + ## 이미지 업데이트 -기본 풀(pull) 정책은 `IfNotPresent`이며, 이것은 Kubelet이 이미 +기본 풀(pull) 정책은 `IfNotPresent`이며, 이것은 +{{< glossary_tooltip text="kubelet" term_id="kubelet" >}}이 이미 존재하는 이미지에 대한 풀을 생략하게 한다. 만약 항상 풀을 강제하고 싶다면, 다음 중 하나를 수행하면 된다. @@ -26,46 +59,18 @@ weight: 10 - `imagePullPolicy`와 사용할 이미지의 태그를 생략. - [AlwaysPullImages](/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages) 어드미션 컨트롤러를 활성화. -`:latest` 태그 사용은 피해야 한다는 것을 참고하고, 자세한 정보는 [구성을 위한 모범 사례](/ko/docs/concepts/configuration/overview/#컨테이너-이미지)를 참고한다. +`imagePullPolicy` 가 특정값 없이 정의되면, `Always` 로 설정된다. -## 매니페스트로 멀티-아키텍처 이미지 빌드 - -Docker CLI는 현재 `docker manifest` 커맨드와 `create`, `annotate`, `push`와 같은 서브 커맨드를 함께 지원한다. 이 커맨드는 매니페스트를 빌드하고 푸시하는데 사용할 수 있다. 매니페스트를 보기 위해서는 `docker manifest inspect`를 사용하면 된다. - -다음에서 docker 문서를 확인하기 바란다. -https://docs.docker.com/edge/engine/reference/commandline/manifest/ - -이것을 사용하는 방법에 대한 예제는 빌드 하니스(harness)에서 참조한다. -https://cs.k8s.io/?q=docker%20manifest%20(create%7Cpush%7Cannotate)&i=nope&files=&repos= - -이 커맨드는 Docker CLI에 의존하며 그에 전적으로 구현된다. `$HOME/.docker/config.json` 편집 및 `experimental` 키를 `enabled`로 설정하거나, CLI 커맨드 호출 시 간단히 `DOCKER_CLI_EXPERIMENTAL` 환경 변수를 `enabled`로만 설정해도 된다. - -{{< note >}} -Docker *18.06 또는 그 이상* 을 사용하길 바란다. 더 낮은 버전은 버그가 있거나 실험적인 명령줄 옵션을 지원하지 않는다. 예를 들어 https://github.com/docker/cli/issues/1135 는 containerd에서 문제를 일으킨다. -{{< /note >}} +## 매니페스트가 있는 다중 아키텍처 이미지 -오래된 매니페스트 업로드를 실행하는 데 어려움을 겪는다면, `$HOME/.docker/manifests`에서 오래된 매니페스트를 정리하여 새롭게 시작하면 된다. +바이너리 이미지를 제공할 뿐만 아니라, 컨테이너 레지스트리는 컨테이너 [이미지 매니페스트](https://github.com/opencontainers/image-spec/blob/master/manifest.md)를 제공할 수도 있다. 매니페스트는 아키텍처별 버전의 컨테이너에 대한 이미지 매니페스트를 참조할 수 있다. 아이디어는 이미지의 이름(예를 들어, `pause`, `example/mycontainer`, `kube-apiserver`)을 가질 수 있다는 것이다. 그래서 다른 시스템들이 사용하고 있는 컴퓨터 아키텍처에 적합한 바이너리 이미지를 가져올 수 있다. -쿠버네티스의 경우, 일반적으로 접미사 `-$(ARCH)`가 있는 이미지를 사용해 왔다. 하위 호환성을 위해, 접미사가 있는 구형 이미지를 생성하길 바란다. 접미사에 대한 아이디어는 모든 아키텍처를 위한 매니페스트를 가졌다는 의미가 내포된 `pause` 이미지를 생성하고, 접미사가 붙은 이미지가 하드 코드되어 있을 오래된 구성 또는 YAML 파일에 대해 하위 호환된다는 의미가 내포되어 있는 `pause-amd64`를 생성하기 위한 것이다. +쿠버네티스 자체는 일반적으로 `-$(ARCH)` 접미사로 컨테이너 이미지의 이름을 지정한다. 이전 버전과의 호환성을 위해, 접미사가 있는 오래된 이미지를 생성한다. 아이디어는 모든 아키텍처에 대한 매니페스트가 있는 `pause` 이미지와 이전 구성 또는 이전에 접미사로 이미지를 하드 코딩한 YAML 파일과 호환되는 `pause-amd64` 라고 하는 이미지를 생성한다. ## 프라이빗 레지스트리 사용 프라이빗 레지스트리는 해당 레지스트리에서 이미지를 읽을 수 있는 키를 요구할 것이다. 자격 증명(credential)은 여러 가지 방법으로 제공될 수 있다. - - - Google 컨테이너 레지스트리 사용 - - 각 클러스터에 대하여 - - Google 컴퓨트 엔진 또는 Google 쿠버네티스 엔진에서 자동적으로 구성됨 - - 모든 파드는 해당 프로젝트의 프라이빗 레지스트리를 읽을 수 있음 - - AWS Elastic Container Registry(ECR) 사용 - - IAM 역할 및 정책을 사용하여 ECR 저장소에 접근을 제어함 - - ECR 로그인 자격 증명은 자동으로 갱신됨 - - Oracle 클라우드 인프라스트럭처 레지스트리(OCIR) 사용 - - IAM 역할과 정책을 사용하여 OCIR 저장소에 접근을 제어함 - - Azure 컨테이너 레지스트리(ACR) 사용 - - IAM 역할과 정책을 사용하여 ACR 저장소에 접근을 제어함 - - IBM 클라우드 컨테이너 레지스트리 사용 - - IAM 역할 및 정책을 사용하여 IBM 클라우드 컨테이너 레지스트리에 대한 접근 권한 부여 - 프라이빗 레지스트리에 대한 인증을 위한 노드 구성 - 모든 파드는 구성된 프라이빗 레지스트리를 읽을 수 있음 - 클러스터 관리자에 의한 노드 구성 필요 @@ -74,139 +79,57 @@ Docker *18.06 또는 그 이상* 을 사용하길 바란다. 
더 낮은 버전 - 셋업을 위해서는 모든 노드에 대해서 root 접근이 필요 - 파드에 ImagePullSecrets을 명시 - 자신의 키를 제공하는 파드만 프라이빗 레지스트리에 접근 가능 + - 공급 업체별 또는 로컬 확장 + - 사용자 정의 노드 구성을 사용하는 경우, 사용자(또는 클라우드 + 제공자)가 컨테이너 레지스트리에 대한 노드 인증 메커니즘을 + 구현할 수 있다. -각 옵션은 아래에서 더 자세히 설명한다. - - -### Google 컨테이너 레지스트리 사용 - -쿠버네티스는 Google 컴퓨트 엔진(GCE)에서 동작할 때, [Google 컨테이너 -레지스트리(GCR)](https://cloud.google.com/tools/container-registry/)를 자연스럽게 -지원한다. 사용자의 클러스터가 GCE 또는 Google 쿠버네티스 엔진에서 동작 중이라면, 간단히 -이미지의 전체 이름(예: gcr.io/my_project/image:tag)을 사용하면 된다. - -클러스터 내에서 모든 파드는 해당 레지스트리에 있는 이미지에 읽기 접근 권한을 가질 것이다. - -Kubelet은 해당 인스턴스의 Google 서비스 계정을 이용하여 -GCR을 인증할 것이다. 인스턴스의 서비스 계정은 -`https://www.googleapis.com/auth/devstorage.read_only`라서, -프로젝트의 GCR로부터 풀은 할 수 있지만 푸시는 할 수 없다. - -### Amazon Elastic Container Registry 사용 - -쿠버네티스는 노드가 AWS EC2 인스턴스일 때, [Amazon Elastic Container Registry](https://aws.amazon.com/ecr/)를 자연스럽게 지원한다. - -간단히 이미지의 전체 이름(예: `ACCOUNT.dkr.ecr.REGION.amazonaws.com/imagename:tag`)을 -파드 정의에 사용하면 된다. - -파드를 생성할 수 있는 클러스터의 모든 사용자는 ECR 레지스트리에 있는 어떠한 -이미지든지 파드를 실행하는데 사용할 수 있다. - -kubelet은 ECR 자격 증명을 가져오고 주기적으로 갱신할 것이다. 이것을 위해서는 다음에 대한 권한이 필요하다. - -- `ecr:GetAuthorizationToken` -- `ecr:BatchCheckLayerAvailability` -- `ecr:GetDownloadUrlForLayer` -- `ecr:GetRepositoryPolicy` -- `ecr:DescribeRepositories` -- `ecr:ListImages` -- `ecr:BatchGetImage` - -요구 사항: - -- Kubelet 버전 `v1.2.0` 이상을 사용해야 한다. (예: `/usr/bin/kubelet --version=true`를 실행). -- 노드가 지역 A에 있고 레지스트리가 다른 지역 B에 있다면, 버전 `v1.3.0` 이상이 필요하다. -- 사용자의 지역에서 ECR이 지원되어야 한다. +이들 옵션은 아래에서 더 자세히 설명한다. -문제 해결: +### 프라이빗 레지스트리에 인증하도록 노드 구성 -- 위의 모든 요구 사항을 확인한다. -- 워크스테이션에서 $REGION (예: `us-west-2`)의 자격 증명을 얻는다. 그 자격 증명을 사용하여 해당 호스트로 SSH를 하고 Docker를 수동으로 실행한다. 작동하는가? -- kubelet이 `--cloud-provider=aws`로 실행 중인지 확인한다. -- kubelet 로그 수준을 최소 3 이상으로 늘리고 kubelet 로그에서 (예: `journalctl -u kubelet`) 다음과 같은 로그 라인을 확인한다. - - `aws_credentials.go:109] unable to get ECR credentials from cache, checking ECR API` - - `aws_credentials.go:116] Got ECR credentials from ECR API for .dkr.ecr..amazonaws.com` +노드에서 도커를 실행하는 경우, 프라이빗 컨테이너 레지스트리를 인증하도록 +도커 컨테이너 런타임을 구성할 수 있다. -### Azure 컨테이너 레지스트리(ACR) 사용 -쿠버네티스는 Azure 쿠버네티스 서비스(AKS)를 사용할 때 -[Azure 컨테이너 레지스트리(ACR)](https://azure.microsoft.com/ko-kr/services/container-registry/)를 -기본적으로 지원한다. - -AKS 클러스터 서비스 주체(principal)는 ACR 인스턴스에서 `ArcPull` 권한이 있어야 한다. 구성에 대한 -지침은 [Azure 쿠버네티스 서비스에서 Azure 컨테이너 레지스트리로 인증](https://docs.microsoft.com/ko-kr/azure/aks/cluster-container-registry-integration)을 참조한다. 그런 다음, 전체 ACR 이미지 이름(예: `my_registry.azurecr.io/image:tag`)을 사용한다. - -ACR 관리자 또는 서비스 주체를 사용해서 인증할 수도 있다. -어느 경우라도, 인증은 표준 Docker 인증을 통해서 수행된다. 이러한 지침은 -[azure-cli](https://github.com/azure/azure-cli) 명령줄 도구 사용을 가정한다. - -우선 레지스트리를 생성하고 자격 증명을 만들어야한다. 이에 대한 전체 문서는 -[Azure 컨테이너 레지스트리 문서](https://docs.microsoft.com/ko-kr/azure/container-registry/container-registry-get-started-azure-cli)에서 찾을 수 있다. - -컨테이너 레지스트리를 생성하고 나면, 다음의 자격 증명을 사용하여 로그인한다. - - * `DOCKER_USER` : 서비스 주체 또는 관리자 역할의 사용자명 - * `DOCKER_PASSWORD`: 서비스 주체 패스워드 또는 관리자 역할의 사용자 패스워드 - * `DOCKER_REGISTRY_SERVER`: `${some-registry-name}.azurecr.io` - * `DOCKER_EMAIL`: `${some-email-address}` - -해당 변수에 대한 값을 채우고 나면 -[쿠버네티스 시크릿을 구성하고 그것을 파드 디플로이를 위해서 사용](/ko/docs/concepts/containers/images/#파드에-imagepullsecrets-명시)할 수 있다. - -### IBM 클라우드 컨테이너 레지스트리 사용 -IBM 클라우드 컨테이너 레지스트리는 멀티-테넌트 프라이빗 이미지 레지스트리를 제공하여 사용자가 이미지를 안전하게 저장하고 공유할 수 있도록 한다. 기본적으로, 프라이빗 레지스트리의 이미지는 통합된 취약점 조언기(Vulnerability Advisor)를 통해 조사되어 보안 이슈와 잠재적 취약성을 검출한다. IBM 클라우드 계정의 모든 사용자가 이미지에 접근할 수 있도록 하거나, IAM 역할과 정책으로 IBM 클라우드 컨테이너 레지스트리 네임스페이스의 접근 권한을 부여해서 사용할 수 있다. 
- -IBM 클라우드 컨테이너 레지스트리 CLI 플러그인을 설치하고 사용자 이미지를 위한 네임스페이스를 생성하기 위해서는, [IBM 클라우드 컨테이너 레지스트리 시작하기](https://cloud.ibm.com/docs/Registry?topic=Registry-getting-started)를 참고한다. - -다른 추가적인 구성이 없는 IBM 클라우드 쿠버네티스 서비스 클러스터의 IBM 클라우드 컨테이너 레지스트리 내 기본 네임스페이스에 저장되어 있는 배포된 이미지를 동일 계정과 동일 지역에서 사용하려면 [이미지로부터 컨테이너 빌드하기](https://cloud.ibm.com/docs/containers?topic=containers-images)를 본다. 다른 구성 옵션에 대한 것은 [레지스트리부터 클러스터에 이미지를 가져오도록 권한을 부여하는 방법 이해하기](https://cloud.ibm.com/docs/containers?topic=containers-registry#cluster_registry_auth)를 본다. - -### 프라이빗 레지스트리에 대한 인증을 위한 노드 구성 - -{{< note >}} -Google 쿠버네티스 엔진에서 동작 중이라면, 이미 각 노드에 Google 컨테이너 레지스트리에 대한 자격 증명과 함께 `.dockercfg`가 있을 것이다. 그렇다면 이 방법은 쓸 수 없다. -{{< /note >}} +이 방법은 노드 구성을 제어할 수 있는 경우에 적합하다. {{< note >}} -AWS EC2에서 동작 중이고 EC2 컨테이너 레지스트리(ECR)을 사용 중이라면, 각 노드의 kubelet은 -ECR 로그인 자격 증명을 관리하고 업데이트할 것이다. 그렇다면 이 방법은 쓸 수 없다. -{{< /note >}} - -{{< note >}} -이 방법은 노드의 구성을 제어할 수 있는 경우에만 적합하다. 이 방법은 -GCE 및 자동 노드 교체를 수행하는 다른 클라우드 제공자에 대해서는 신뢰성 있게 작동하지 -않을 것이다. -{{< /note >}} - -{{< note >}} -현재 쿠버네티스는 docker 설정의 `auths`와 `HttpHeaders` 섹션만 지원한다. 이는 자격증명 도우미(`credHelpers` 또는 `credStore`)가 지원되지 않는다는 뜻이다. +쿠버네티스는 도커 구성에서 `auths` 와 `HttpHeaders` 섹션만 지원한다. +도커 자격 증명 도우미(`credHelpers` 또는 `credsStore`)는 지원되지 않는다. {{< /note >}} -Docker는 프라이빗 레지스트리를 위한 키를 `$HOME/.dockercfg` 또는 `$HOME/.docker/config.json` 파일에 저장한다. 만약 동일한 파일을 +도커는 프라이빗 레지스트리를 위한 키를 `$HOME/.dockercfg` 또는 `$HOME/.docker/config.json` 파일에 저장한다. 만약 동일한 파일을 아래의 검색 경로 리스트에 넣으면, kubelete은 이미지를 풀 할 때 해당 파일을 자격 증명 공급자로 사용한다. -* `{--root-dir:-/var/lib/kubelet}/config.json` -* `{cwd of kubelet}/config.json` -* `${HOME}/.docker/config.json` -* `/.docker/config.json` -* `{--root-dir:-/var/lib/kubelet}/.dockercfg` -* `{cwd of kubelet}/.dockercfg` -* `${HOME}/.dockercfg` -* `/.dockercfg` +* `{--root-dir:-/var/lib/kubelet}/config.json` +* `{cwd of kubelet}/config.json` +* `${HOME}/.docker/config.json` +* `/.docker/config.json` +* `{--root-dir:-/var/lib/kubelet}/.dockercfg` +* `{cwd of kubelet}/.dockercfg` +* `${HOME}/.dockercfg` +* `/.dockercfg` {{< note >}} -아마도 kubelet을 위한 사용자의 환경 파일에 `HOME=/root`을 명시적으로 설정해야 할 것이다. +kubelet 프로세스의 환경 변수에서 `HOME=/root` 를 명시적으로 설정해야 할 수 있다. {{< /note >}} 프라이빗 레지스트리를 사용도록 사용자의 노드를 구성하기 위해서 권장되는 단계는 다음과 같다. 이 예제의 경우, 사용자의 데스크탑/랩탑에서 아래 내용을 실행한다. - 1. 사용하고 싶은 각 자격 증명 세트에 대해서 `docker login [서버]`를 실행한다. 이것은 `$HOME/.docker/config.json`를 업데이트한다. + 1. 사용하고 싶은 각 자격 증명 세트에 대해서 `docker login [서버]`를 실행한다. 이것은 여러분 PC의 `$HOME/.docker/config.json`를 업데이트한다. 1. 편집기에서 `$HOME/.docker/config.json`를 보고 사용하고 싶은 자격 증명만 포함하고 있는지 확인한다. 1. 노드의 리스트를 구한다. 예를 들면 다음과 같다. - - 이름을 원하는 경우: `nodes=$(kubectl get nodes -o jsonpath='{range.items[*].metadata}{.name} {end}')` - - IP를 원하는 경우: `nodes=$(kubectl get nodes -o jsonpath='{range .items[*].status.addresses[?(@.type=="ExternalIP")]}{.address} {end}')` + - 이름을 원하는 경우: `nodes=$( kubectl get nodes -o jsonpath='{range.items[*].metadata}{.name} {end}' )` + - IP를 원하는 경우: `nodes=$( kubectl get nodes -o jsonpath='{range .items[*].status.addresses[?(@.type=="ExternalIP")]}{.address} {end}' )` 1. 로컬의 `.docker/config.json`를 위의 검색 경로 리스트 중 하나에 복사한다. - - 예: `for n in $nodes; do scp ~/.docker/config.json root@$n:/var/lib/kubelet/config.json; done` + - 이를 테스트하기 위한 예: `for n in $nodes; do scp ~/.docker/config.json root@"$n":/var/lib/kubelet/config.json; done` + +{{< note >}} +프로덕션 클러스터의 경우, 이 설정을 필요한 모든 노드에 적용할 수 있도록 +구성 관리 도구를 사용한다. +{{< /note >}} 프라이빗 이미지를 사용하는 파드를 생성하여 검증한다. 예를 들면 다음과 같다. @@ -263,11 +186,11 @@ Google 쿠버네티스 엔진에서 동작 중이라면, 이미 각 노드에 Go {{< note >}} 이 방법은 노드의 구성을 제어할 수 있는 경우에만 적합하다. 
이 방법은 -GCE 및 자동 노드 교체를 수행하는 다른 클라우드 제공자에 대해서는 신뢰성 있게 작동하지 -않을 것이다. +클라우드 제공자가 노드를 관리하고 자동으로 교체한다면 안정적으로 +작동하지 않을 것이다. {{< /note >}} -기본적으로, kubelet은 지정된 레지스트리에서 각 이미지를 풀 하려고 할 것이다. +기본적으로, kubelet은 지정된 레지스트리에서 각 이미지를 풀 하려고 한다. 그러나, 컨테이너의 `imagePullPolicy` 속성이 `IfNotPresent` 또는 `Never`으로 설정되어 있다면, 로컬 이미지가 사용된다(우선적으로 또는 배타적으로). @@ -281,13 +204,13 @@ GCE 및 자동 노드 교체를 수행하는 다른 클라우드 제공자에 ### 파드에 ImagePullSecrets 명시 {{< note >}} -이 방법은 현재 Google 쿠버네티스 엔진, GCE 및 노드 생성이 자동화된 모든 클라우드 제공자에게 +이 방법은 프라이빗 레지스트리의 이미지를 기반으로 컨테이너를 실행하는 데 권장된다. {{< /note >}} -쿠버네티스는 파드에 레지스트리 키를 명시하는 것을 지원한다. +쿠버네티스는 파드에 컨테이너 이미지 레지스트리 키를 명시하는 것을 지원한다. -#### Docker 구성으로 시크릿 생성 +#### 도커 구성으로 시크릿 생성 대문자 값을 적절히 대체하여, 다음 커맨드를 실행한다. @@ -295,12 +218,14 @@ GCE 및 자동 노드 교체를 수행하는 다른 클라우드 제공자에 kubectl create secret docker-registry --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL ``` -만약 Docker 자격 증명 파일이 이미 존재한다면, 위의 명령을 사용하지 않고, -자격 증명 파일을 쿠버네티스 시크릿으로 가져올 수 있다. -[기존 Docker 자격 증명으로 시크릿 생성](/docs/tasks/configure-pod-container/pull-image-private-registry/#registry-secret-existing-credentials)에서 관련 방법을 설명하고 있다. +만약 도커 자격 증명 파일이 이미 존재한다면, 위의 명령을 사용하지 않고, +자격 증명 파일을 쿠버네티스 {{< glossary_tooltip text="시크릿" term_id="secret" >}}으로 +가져올 수 있다. +[기존 도커 자격 증명으로 시크릿 생성](/ko/docs/tasks/configure-pod-container/pull-image-private-registry/#registry-secret-existing-credentials)에서 관련 방법을 설명하고 있다. + `kubectl create secret docker-registry`는 -하나의 개인 레지스트리에서만 작동하는 시크릿을 생성하기 때문에, -여러 개인 컨테이너 레지스트리를 사용하는 경우 특히 유용하다. +하나의 프라이빗 레지스트리에서만 작동하는 시크릿을 생성하기 때문에, +여러 프라이빗 컨테이너 레지스트리를 사용하는 경우 특히 유용하다. {{< note >}} 파드는 이미지 풀 시크릿을 자신의 네임스페이스에서만 참조할 수 있다. @@ -312,6 +237,8 @@ kubectl create secret docker-registry --docker-server=DOCKER_REGISTRY_SER 이제, `imagePullSecrets` 섹션을 파드의 정의에 추가함으로써 해당 시크릿을 참조하는 파드를 생성할 수 있다. +예를 들면 다음과 같다. + ```shell cat < pod.yaml apiVersion: v1 @@ -337,28 +264,29 @@ EOF 그러나, 이 필드의 셋팅은 [서비스 어카운트](/docs/user-guide/service-accounts) 리소스에 imagePullSecrets을 셋팅하여 자동화할 수 있다. + 자세한 지침을 위해서는 [서비스 어카운트에 ImagePullSecrets 추가](/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account)를 확인한다. 이것은 노드 당 `.docker/config.json`와 함께 사용할 수 있다. 자격 증명은 -병합될 것이다. 이 방법은 Google 쿠버네티스 엔진에서 작동될 것이다. +병합될 것이다. -### 유스케이스 +## 유스케이스 프라이빗 레지스트리를 구성하기 위한 많은 솔루션이 있다. 다음은 여러 가지 일반적인 유스케이스와 제안된 솔루션이다. 1. 비소유 이미지(예를 들어, 오픈소스)만 실행하는 클러스터의 경우. 이미지를 숨길 필요가 없다. - - Docker hub의 퍼블릭 이미지를 사용한다. + - 도커 허브의 퍼블릭 이미지를 사용한다. - 설정이 필요 없다. - - GCE 및 Google 쿠버네티스 엔진에서는, 속도와 가용성 향상을 위해서 로컬 미러가 자동적으로 사용된다. + - 일부 클라우드 제공자는 퍼블릭 이미지를 자동으로 캐시하거나 미러링하므로, 가용성이 향상되고 이미지를 가져오는 시간이 줄어든다. 1. 모든 클러스터 사용자에게는 보이지만, 회사 외부에는 숨겨야하는 일부 독점 이미지를 실행하는 클러스터의 경우. - - 호스트 된 프라이빗 [Docker 레지스트리](https://docs.docker.com/registry/)를 사용한다. - - 그것은 [Docker Hub](https://hub.docker.com/signup)에 호스트 되어 있거나, 다른 곳에 되어 있을 것이다. + - 호스트 된 프라이빗 [도커 레지스트리](https://docs.docker.com/registry/)를 사용한다. + - 그것은 [도커 허브](https://hub.docker.com/signup)에 호스트 되어 있거나, 다른 곳에 되어 있을 것이다. - 위에 설명된 바와 같이 수동으로 .docker/config.json을 구성한다. - 또는, 방화벽 뒤에서 읽기 접근 권한을 가진 내부 프라이빗 레지스트리를 실행한다. - 쿠버네티스 구성은 필요 없다. - - 또는, GCE 및 Google 쿠버네티스 엔진에서는, 프로젝트의 Google 컨테이너 레지스트리를 사용한다. + - 이미지 접근을 제어하는 ​​호스팅된 컨테이너 이미지 레지스트리 서비스를 사용한다. - 그것은 수동 노드 구성에 비해서 클러스터 오토스케일링과 더 잘 동작할 것이다. - 또는, 노드의 구성 변경이 불편한 클러스터에서는, `imagePullSecrets`를 사용한다. 1. 독점 이미지를 가진 클러스터로, 그 중 일부가 더 엄격한 접근 제어를 필요로 하는 경우. @@ -372,5 +300,8 @@ imagePullSecrets을 셋팅하여 자동화할 수 있다. 다중 레지스트리에 접근해야 하는 경우, 각 레지스트리에 대해 하나의 시크릿을 생성할 수 있다. 
-Kubelet은 모든`imagePullSecrets` 파일을 하나의 가상`.docker / config.json` 파일로 병합한다. +Kubelet은 모든 `imagePullSecrets` 파일을 하나의 가상 `.docker/config.json` 파일로 병합한다. + +## {{% heading "whatsnext" %}} +* [OCI 이미지 매니페스트 명세](https://github.com/opencontainers/image-spec/blob/master/manifest.md) 읽어보기 diff --git a/content/ko/docs/concepts/containers/overview.md b/content/ko/docs/concepts/containers/overview.md deleted file mode 100644 index 9fc833a4ca429..0000000000000 --- a/content/ko/docs/concepts/containers/overview.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: 컨테이너 개요 -content_type: concept -weight: 10 ---- - - - -컨테이너는 런타임에 필요한 종속성과 애플리케이션의 -컴파일 된 코드를 패키징 하는 기술이다. 실행되는 각각의 -컨테이너는 반복해서 사용 가능하다. 종속성이 포함된 표준화를 -통해 컨테이너가 실행되는 환경과 무관하게 항상 동일하게 -동작한다. - -컨테이너는 기본 호스트 인프라 환경에서 애플리케이션의 실행환경을 분리한다. -따라서 다양한 클라우드 환경이나 운영체제에서 쉽게 배포 할 수 있다. - - - - - - -## 컨테이너 이미지 -[컨테이너 이미지](/ko/docs/concepts/containers/images/) 는 즉시 실행할 수 있는 -소프트웨어 패키지이며, 애플리케이션을 실행하는데 필요한 모든 것 -(필요한 코드와 런타임, 애플리케이션 및 시스템 라이브러리 등의 모든 필수 설정에 대한 기본값) -을 포함한다. - -원칙적으로, 컨테이너는 변경되지 않는다. 이미 구동 중인 컨테이너의 -코드를 변경할 수 없다. 컨테이너화 된 애플리케이션이 있고 그 -애플리케이션을 변경하려는 경우, 변경사항을 포함하여 만든 -새로운 이미지를 통해 컨테이너를 다시 생성해야 한다. - -## 컨테이너 런타임 - -{{< glossary_definition term_id="container-runtime" length="all" >}} - - -## {{% heading "whatsnext" %}} - -* [컨테이너 이미지](/ko/docs/concepts/containers/images/)에 대해 읽어보기 -* [파드](/ko/docs/concepts/workloads/pods/)에 대해 읽어보기 - diff --git a/content/ko/docs/concepts/containers/runtime-class.md b/content/ko/docs/concepts/containers/runtime-class.md index 54500e7ad863e..ea661af0c6556 100644 --- a/content/ko/docs/concepts/containers/runtime-class.md +++ b/content/ko/docs/concepts/containers/runtime-class.md @@ -72,7 +72,7 @@ handler: myconfiguration # 상응하는 CRI 설정의 이름임 ``` 런타임클래스 오브젝트의 이름은 유효한 -[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름들)어이야 한다. +[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름)어이야 한다. {{< note >}} 런타임클래스 쓰기 작업(create/update/patch/delete)은 @@ -97,7 +97,7 @@ spec: 이것은 Kubelet이 지명된 런타임클래스를 사용하여 해당 파드를 실행하도록 지시할 것이다. 만약 지명된 런타임클래스가 없거나, CRI가 상응하는 핸들러를 실행할 수 없는 경우, 파드는 -`Failed` 터미널 [단계](/ko/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase)로 들어간다. +`Failed` 터미널 [단계](/ko/docs/concepts/workloads/pods/pod-lifecycle/#파드의-단계-phase)로 들어간다. 에러 메시지에 상응하는 [이벤트](/docs/tasks/debug-application-cluster/debug-application-introspection/)를 확인한다. @@ -106,7 +106,7 @@ spec: ### CRI 구성 {#cri-configuration} -CRI 런타임 설치에 대한 자세한 내용은 [CRI 설치](/docs/setup/production-environment/container-runtimes/)를 확인한다. +CRI 런타임 설치에 대한 자세한 내용은 [CRI 설치](/ko/docs/setup/production-environment/container-runtimes/)를 확인한다. #### dockershim @@ -155,7 +155,7 @@ https://github.com/containerd/cri/blob/master/docs/config.md 파드는 거부된다. 지원되는 노드가 테인트(taint)되어서 다른 런타임클래스 파드가 노드에서 구동되는 것을 막고 있다면, -`tolerations`를 런타임클래스에 추가할 수 있다. `nodeSelector`를 사용하면, 어드미션 시 +`tolerations`를 런타임클래스에 추가할 수 있다. `nodeSelector`를 사용하면, 어드미션 시 해당 톨러레이션(toleration)이 파드의 톨러레이션과 병합되어, 실질적으로 각각에 의해 선택된 노드의 합집합을 취한다. 
@@ -183,7 +183,5 @@ PodOverhead를 사용하려면, PodOverhead [기능 게이트](/docs/reference/c - [런타임클래스 설계](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/runtime-class.md) - [런타임클래스 스케줄링 설계](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/runtime-class-scheduling.md) -- [파드 오버헤드](/docs/concepts/configuration/pod-overhead/) 개념에 대해 읽기 +- [파드 오버헤드](/ko/docs/concepts/configuration/pod-overhead/) 개념에 대해 읽기 - [파드 오버헤드 기능 설계](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/20190226-pod-overhead.md) - - diff --git a/content/ko/docs/concepts/extend-kubernetes/_index.md b/content/ko/docs/concepts/extend-kubernetes/_index.md index ff8525f17125f..29d8672fca688 100644 --- a/content/ko/docs/concepts/extend-kubernetes/_index.md +++ b/content/ko/docs/concepts/extend-kubernetes/_index.md @@ -1,4 +1,206 @@ --- -title: 쿠버네티스 확장하기 +title: 쿠버네티스 확장 weight: 110 +description: 쿠버네티스 클러스터의 동작을 변경하는 다양한 방법 +content_type: concept +no_list: true --- + + + +쿠버네티스는 매우 유연하게 구성할 수 있고 확장 가능하다. 결과적으로 +쿠버네티스 프로젝트를 포크하거나 코드에 패치를 제출할 필요가 +거의 없다. + +이 가이드는 쿠버네티스 클러스터를 사용자 정의하기 위한 옵션을 설명한다. +쿠버네티스 클러스터를 업무 환경의 요구에 맞게 +조정하는 방법을 이해하려는 {{< glossary_tooltip text="클러스터 운영자" term_id="cluster-operator" >}}를 대상으로 한다. +잠재적인 {{< glossary_tooltip text="플랫폼 개발자" term_id="platform-developer" >}} 또는 쿠버네티스 프로젝트 {{< glossary_tooltip text="컨트리뷰터" term_id="contributor" >}}인 개발자에게도 +어떤 익스텐션(extension) 포인트와 패턴이 있는지, +그리고 그것들의 트레이드오프와 제약에 대한 소개 자료로 유용할 것이다. + + + + + + +## 개요 + +사용자 정의 방식은 크게 플래그, 로컬 구성 파일 또는 API 리소스 변경만 포함하는 *구성* 과 추가 프로그램이나 서비스 실행과 관련된 *익스텐션* 으로 나눌 수 있다. 이 문서는 주로 익스텐션에 관한 것이다. + +## 구성 + +*구성 파일* 및 *플래그* 는 온라인 문서의 레퍼런스 섹션에 각 바이너리 별로 문서화되어 있다. + +* [kubelet](/docs/admin/kubelet/) +* [kube-apiserver](/docs/admin/kube-apiserver/) +* [kube-controller-manager](/docs/admin/kube-controller-manager/) +* [kube-scheduler](/docs/admin/kube-scheduler/). + +호스팅된 쿠버네티스 서비스 또는 매니지드 설치 환경의 배포판에서 플래그 및 구성 파일을 항상 변경할 수 있는 것은 아니다. 변경 가능한 경우 일반적으로 클러스터 관리자만 변경할 수 있다. 또한 향후 쿠버네티스 버전에서 변경될 수 있으며, 이를 설정하려면 프로세스를 다시 시작해야 할 수도 있다. 이러한 이유로 다른 옵션이 없는 경우에만 사용해야 한다. + +[리소스쿼터](/ko/docs/concepts/policy/resource-quotas/), [파드시큐리티폴리시(PodSecurityPolicy)](/ko/docs/concepts/policy/pod-security-policy/), [네트워크폴리시](/ko/docs/concepts/services-networking/network-policies/) 및 역할 기반 접근 제어([RBAC](/docs/reference/access-authn-authz/rbac/))와 같은 *빌트인 정책 API(built-in Policy API)* 는 기본적으로 제공되는 쿠버네티스 API이다. API는 일반적으로 호스팅된 쿠버네티스 서비스 및 매니지드 쿠버네티스 설치 환경과 함께 사용된다. 그것들은 선언적이며 파드와 같은 다른 쿠버네티스 리소스와 동일한 규칙을 사용하므로, 새로운 클러스터 구성을 반복할 수 있고 애플리케이션과 동일한 방식으로 관리할 수 ​​있다. 또한, 이들 API가 안정적인 경우, 다른 쿠버네티스 API와 같이 [정의된 지원 정책](/docs/reference/deprecation-policy/)을 사용할 수 있다. 이러한 이유로 인해 구성 파일과 플래그보다 선호된다. + +## 익스텐션 + +익스텐션은 쿠버네티스를 확장하고 쿠버네티스와 긴밀하게 통합되는 소프트웨어 컴포넌트이다. +이들 컴포넌트는 쿠버네티스가 새로운 유형과 새로운 종류의 하드웨어를 지원할 수 있게 해준다. + +대부분의 클러스터 관리자는 쿠버네티스의 호스팅 또는 배포판 인스턴스를 사용한다. +결과적으로 대부분의 쿠버네티스 사용자는 익스텐션 기능을 설치할 필요가 없고 +새로운 익스텐션 기능을 작성할 필요가 있는 사람은 더 적다. + +## 익스텐션 패턴 + +쿠버네티스는 클라이언트 프로그램을 작성하여 자동화 되도록 설계되었다. +쿠버네티스 API를 읽고 쓰는 프로그램은 유용한 자동화를 제공할 수 있다. +*자동화* 는 클러스터 상에서 또는 클러스터 밖에서 실행할 수 있다. 이 문서의 지침에 따라 +고가용성과 강력한 자동화를 작성할 수 있다. +자동화는 일반적으로 호스트 클러스터 및 매니지드 설치 환경을 포함한 모든 +쿠버네티스 클러스터에서 작동한다. + +쿠버네티스와 잘 작동하는 클라이언트 프로그램을 작성하기 위한 특정 패턴은 *컨트롤러* 패턴이라고 한다. +컨트롤러는 일반적으로 오브젝트의 `.spec`을 읽고, 가능한 경우 수행한 다음 +오브젝트의 `.status`를 업데이트 한다. + +컨트롤러는 쿠버네티스의 클라이언트이다. 쿠버네티스가 클라이언트이고 +원격 서비스를 호출할 때 이를 *웹훅(Webhook)* 이라고 한다. 원격 서비스를 +*웹훅 백엔드* 라고 한다. 컨트롤러와 마찬가지로 웹훅은 장애 지점을 +추가한다. + +웹훅 모델에서 쿠버네티스는 원격 서비스에 네트워크 요청을 한다. +*바이너리 플러그인* 모델에서 쿠버네티스는 바이너리(프로그램)를 실행한다. 
+바이너리 플러그인은 kubelet(예: +[Flex Volume 플러그인](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-storage/flexvolume.md)과 +[네트워크 플러그인](/ko/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/))과 +kubectl에서 +사용한다. + +아래는 익스텐션 포인트가 쿠버네티스 컨트롤 플레인과 상호 작용하는 방법을 +보여주는 다이어그램이다. + + + + + + +## 익스텐션 포인트 + +이 다이어그램은 쿠버네티스 시스템의 익스텐션 포인트를 보여준다. + + + + + +1. 사용자는 종종 `kubectl`을 사용하여 쿠버네티스 API와 상호 작용한다. [Kubectl 플러그인](/ko/docs/tasks/extend-kubectl/kubectl-plugins/)은 kubectl 바이너리를 확장한다. 개별 사용자의 로컬 환경에만 영향을 미치므로 사이트 전체 정책을 적용할 수는 없다. +2. apiserver는 모든 요청을 처리한다. apiserver의 여러 유형의 익스텐션 포인트는 요청을 인증하거나, 콘텐츠를 기반으로 요청을 차단하거나, 콘텐츠를 편집하고, 삭제 처리를 허용한다. 이 내용은 [API 접근 익스텐션](/ko/docs/concepts/extend-kubernetes/extend-cluster/#api-접근-익스텐션) 섹션에 설명되어 있다. +3. apiserver는 다양한 종류의 *리소스* 를 제공한다. `pods`와 같은 *빌트인 리소스 종류* 는 쿠버네티스 프로젝트에 의해 정의되며 변경할 수 없다. 직접 정의한 리소스를 추가할 수도 있고, [커스텀 리소스](/ko/docs/concepts/extend-kubernetes/extend-cluster/#사용자-정의-유형) 섹션에 설명된대로 *커스텀 리소스* 라고 부르는 다른 프로젝트에서 정의한 리소스를 추가할 수도 있다. 커스텀 리소스는 종종 API 접근 익스텐션과 함께 사용된다. +4. 쿠버네티스 스케줄러는 파드를 배치할 노드를 결정한다. 스케줄링을 확장하는 몇 가지 방법이 있다. 이들은 [스케줄러 익스텐션](/ko/docs/concepts/extend-kubernetes/extend-cluster/#스케줄러-익스텐션) 섹션에 설명되어 있다. +5. 쿠버네티스의 많은 동작은 API-Server의 클라이언트인 컨트롤러(Controller)라는 프로그램으로 구현된다. 컨트롤러는 종종 커스텀 리소스와 함께 사용된다. +6. kubelet은 서버에서 실행되며 파드가 클러스터 네트워크에서 자체 IP를 가진 가상 서버처럼 보이도록 한다. [네트워크 플러그인](/ko/docs/concepts/extend-kubernetes/extend-cluster/#네트워크-플러그인)을 사용하면 다양한 파드 네트워킹 구현이 가능하다. +7. kubelet은 컨테이너의 볼륨을 마운트 및 마운트 해제한다. 새로운 유형의 스토리지는 [스토리지 플러그인](/ko/docs/concepts/extend-kubernetes/extend-cluster/#스토리지-플러그인)을 통해 지원될 수 있다. + +어디서부터 시작해야 할지 모르겠다면, 이 플로우 차트가 도움이 될 수 있다. 일부 솔루션에는 여러 유형의 익스텐션이 포함될 수 있다. + + + + + + +## API 익스텐션 +### 사용자 정의 유형 + +새 컨트롤러, 애플리케이션 구성 오브젝트 또는 기타 선언적 API를 정의하고 `kubectl` 과 같은 쿠버네티스 도구를 사용하여 관리하려면 쿠버네티스에 커스텀 리소스를 추가하자. + +애플리케이션, 사용자 또는 모니터링 데이터의 데이터 저장소로 커스텀 리소스를 사용하지 않는다. + +커스텀 리소스에 대한 자세한 내용은 [커스텀 리소스 개념 가이드](/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources/)를 참고하길 바란다. + + +### 새로운 API와 자동화의 결합 + +사용자 정의 리소스 API와 컨트롤 루프의 조합을 [오퍼레이터(operator) 패턴](/ko/docs/concepts/extend-kubernetes/operator/)이라고 한다. 오퍼레이터 패턴은 특정 애플리케이션, 일반적으로 스테이트풀(stateful) 애플리케이션을 관리하는 데 사용된다. 이러한 사용자 정의 API 및 컨트롤 루프를 사용하여 스토리지나 정책과 같은 다른 리소스를 제어할 수도 있다. + +### 빌트인 리소스 변경 + +사용자 정의 리소스를 추가하여 쿠버네티스 API를 확장하면 추가된 리소스는 항상 새로운 API 그룹에 속한다. 기존 API 그룹을 바꾸거나 변경할 수 없다. +API를 추가해도 기존 API(예: 파드)의 동작에 직접 영향을 미치지는 않지만 API 접근 익스텐션은 영향을 준다. + + +### API 접근 익스텐션 + +요청이 쿠버네티스 API 서버에 도달하면 먼저 인증이 되고, 그런 다음 승인된 후 다양한 유형의 어드미션 컨트롤이 적용된다. 이 흐름에 대한 자세한 내용은 [쿠버네티스 API에 대한 접근 제어](/docs/reference/access-authn-authz/controlling-access/)를 참고하길 바란다. + +이러한 각 단계는 익스텐션 포인트를 제공한다. + +쿠버네티스에는 이를 지원하는 몇 가지 빌트인 인증 방법이 있다. 또한 인증 프록시 뒤에 있을 수 있으며 인증 헤더에서 원격 서비스로 토큰을 전송하여 확인할 수 있다(웹훅). 이러한 방법은 모두 [인증 설명서](/docs/reference/access-authn-authz/authentication/)에 설명되어 있다. + +### 인증 + +[인증](/docs/reference/access-authn-authz/authentication/)은 모든 요청의 헤더 또는 인증서를 요청하는 클라이언트의 사용자 이름에 매핑한다. + +쿠버네티스는 몇 가지 빌트인 인증 방법과 필요에 맞지 않는 경우 [인증 웹훅](/docs/reference/access-authn-authz/authentication/#webhook-token-authentication) 방법을 제공한다. + + +### 인가 + +[인가](/docs/reference/access-authn-authz/webhook/)은 특정 사용자가 API 리소스에서 읽고, 쓰고, 다른 작업을 수행할 수 있는지를 결정한다. 전체 리소스 레벨에서 작동하며 임의의 오브젝트 필드를 기준으로 구별하지 않는다. 빌트인 인증 옵션이 사용자의 요구를 충족시키지 못하면 [인가 웹훅](/docs/reference/access-authn-authz/webhook/)을 통해 사용자가 제공한 코드를 호출하여 인증 결정을 내릴 수 있다. + + +### 동적 어드미션 컨트롤 + +요청이 승인된 후, 쓰기 작업인 경우 [어드미션 컨트롤](/docs/reference/access-authn-authz/admission-controllers/) 단계도 수행된다. 빌트인 단계 외에도 몇 가지 익스텐션이 있다. 
+ +* [이미지 정책 웹훅](/docs/reference/access-authn-authz/admission-controllers/#imagepolicywebhook)은 컨테이너에서 실행할 수 있는 이미지를 제한한다. +* 임의의 어드미션 컨트롤 결정을 내리기 위해 일반적인 [어드미션 웹훅](/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks)을 사용할 수 있다. 어드미션 웹훅은 생성 또는 업데이트를 거부할 수 있다. + +## 인프라스트럭처 익스텐션 + + +### 스토리지 플러그인 + +[Flex Volumes](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/storage/flexvolume-deployment.md)을 사용하면 +Kubelet이 바이너리 플러그인을 호출하여 볼륨을 마운트하도록 함으로써 +빌트인 지원 없이 볼륨 유형을 마운트 할 수 있다. + + +### 장치 플러그인 + +장치 플러그인은 노드가 [장치 플러그인](/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/)을 +통해 새로운 노드 리소스(CPU 및 메모리와 같은 빌트인 자원 외에)를 +발견할 수 있게 해준다. + + +### 네트워크 플러그인 + +노드-레벨의 [네트워크 플러그인](/docs/admin/network-plugins/)을 통해 다양한 네트워킹 패브릭을 지원할 수 있다. + +### 스케줄러 익스텐션 + +스케줄러는 파드를 감시하고 파드를 노드에 할당하는 특수한 유형의 +컨트롤러이다. 다른 쿠버네티스 컴포넌트를 계속 사용하면서 +기본 스케줄러를 완전히 교체하거나, +[여러 스케줄러](/docs/tasks/administer-cluster/configure-multiple-schedulers/)를 +동시에 실행할 수 있다. + +이것은 중요한 부분이며, 거의 모든 쿠버네티스 사용자는 스케줄러를 수정할 +필요가 없다는 것을 알게 된다. + +스케줄러는 또한 웹훅 백엔드(스케줄러 익스텐션)가 +파드에 대해 선택된 노드를 필터링하고 우선 순위를 지정할 수 있도록 하는 +[웹훅](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/scheduler_extender.md)을 +지원한다. + + + + +## {{% heading "whatsnext" %}} + + +* [커스텀 리소스](/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources/)에 대해 더 알아보기 +* [동적 어드미션 컨트롤](/docs/reference/access-authn-authz/extensible-admission-controllers/)에 대해 알아보기 +* 인프라스트럭처 익스텐션에 대해 더 알아보기 + * [네트워크 플러그인](/ko/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) + * [장치 플러그인](/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) +* [kubectl 플러그인](/ko/docs/tasks/extend-kubectl/kubectl-plugins/)에 대해 알아보기 +* [오퍼레이터 패턴](/ko/docs/concepts/extend-kubernetes/operator/)에 대해 알아보기 diff --git a/content/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources.md b/content/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources.md index 0d7c08a9a4624..159c0fc846aef 100644 --- a/content/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources.md +++ b/content/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources.md @@ -25,7 +25,7 @@ weight: 10 동적 등록을 통해 실행 중인 클러스터에서 커스텀 리소스가 나타나거나 사라질 수 있으며 클러스터 관리자는 클러스터 자체와 독립적으로 커스텀 리소스를 업데이트 할 수 있다. 커스텀 리소스가 설치되면 사용자는 *파드* 와 같은 빌트인 리소스와 마찬가지로 -[kubectl](/docs/user-guide/kubectl-overview/)을 사용하여 해당 오브젝트를 생성하고 +[kubectl](/ko/docs/reference/kubectl/overview/)을 사용하여 해당 오브젝트를 생성하고 접근할 수 있다. ## 커스텀 컨트롤러 @@ -234,7 +234,7 @@ CRD는 항상 API 서버의 빌트인 리소스와 동일한 인증, 권한 부 ## 커스텀 리소스에 접근 -쿠버네티스 [클라이언트 라이브러리](/docs/reference/using-api/client-libraries/)를 사용하여 커스텀 리소스에 접근할 수 있다. 모든 클라이언트 라이브러리가 커스텀 리소스를 지원하는 것은 아니다. _Go_ 와 _python_ 클라이언트 라이브러리가 지원한다. +쿠버네티스 [클라이언트 라이브러리](/ko/docs/reference/using-api/client-libraries/)를 사용하여 커스텀 리소스에 접근할 수 있다. 모든 클라이언트 라이브러리가 커스텀 리소스를 지원하는 것은 아니다. _Go_ 와 _python_ 클라이언트 라이브러리가 지원한다. 커스텀 리소스를 추가하면 다음을 사용하여 접근할 수 있다. @@ -251,4 +251,3 @@ CRD는 항상 API 서버의 빌트인 리소스와 동일한 인증, 권한 부 * [애그리게이션 레이어(aggregation layer)로 쿠버네티스 API 확장](/ko/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/)하는 방법에 대해 배우기. * [커스텀리소스데피니션으로 쿠버네티스 API 확장](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/)하는 방법에 대해 배우기. 
- diff --git a/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md b/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md index 553ab99cf0977..bf4eb55898419 100644 --- a/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md +++ b/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md @@ -38,7 +38,7 @@ service Registration { * 유닉스 소켓의 이름. * 빌드된 장치 플러그인 API 버전. * 알리려는 `ResourceName`. 여기서 `ResourceName` 은 - [확장된 리소스 네이밍 체계](/docs/concepts/configuration/manage-compute-resources-container/#extended-resources)를 + [확장된 리소스 네이밍 체계](/ko/docs/concepts/configuration/manage-resources-containers/#확장된-리소스)를 `vendor-domain/resourcetype` 의 형식으로 따라야 한다. (예를 들어, NVIDIA GPU는 `nvidia.com/gpu` 로 알려진다.) @@ -228,9 +228,7 @@ pluginapi.Device{ID: "25102017", Health: pluginapi.Healthy, Topology:&pluginapi. ## {{% heading "whatsnext" %}} -* 장치 플러그인을 사용한 [GPU 리소스 스케줄링](/docs/tasks/manage-gpus/scheduling-gpus/)에 대해 알아보기 +* 장치 플러그인을 사용한 [GPU 리소스 스케줄링](/ko/docs/tasks/manage-gpus/scheduling-gpus/)에 대해 알아보기 * 노드에서의 [확장 리소스 알리기](/docs/tasks/administer-cluster/extended-resource-node/)에 대해 배우기 * 쿠버네티스에서 [TLS 수신에 하드웨어 가속](https://kubernetes.io/blog/2019/04/24/hardware-accelerated-ssl/tls-termination-in-ingress-controllers-using-kubernetes-device-plugins-and-runtimeclass/) 사용에 대해 읽기 * [토폴로지 관리자](/docs/tasks/adminster-cluster/topology-manager/)에 대해 알아보기 - - diff --git a/content/ko/docs/concepts/extend-kubernetes/extend-cluster.md b/content/ko/docs/concepts/extend-kubernetes/extend-cluster.md index ecf57f49fcf68..543b5cfa48f2b 100644 --- a/content/ko/docs/concepts/extend-kubernetes/extend-cluster.md +++ b/content/ko/docs/concepts/extend-kubernetes/extend-cluster.md @@ -45,7 +45,7 @@ weight: 10 이들 컴포넌트는 쿠버네티스가 새로운 유형과 새로운 종류의 하드웨어를 지원할 수 있게 해준다. 대부분의 클러스터 관리자는 쿠버네티스의 호스팅 또는 배포판 인스턴스를 사용한다. -결과적으로 대부분의 쿠버네티스 사용자는 익스텐션 기능을 설치할 필요가 있고 +결과적으로 대부분의 쿠버네티스 사용자는 익스텐션 기능을 설치할 필요가 없고 새로운 익스텐션 기능을 작성할 필요가 있는 사람은 더 적다. ## 익스텐션 패턴 @@ -70,7 +70,7 @@ weight: 10 *바이너리 플러그인* 모델에서 쿠버네티스는 바이너리(프로그램)를 실행한다. 바이너리 플러그인은 kubelet(예: [Flex Volume 플러그인](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-storage/flexvolume.md)과 -[네트워크 플러그인](/docs/concepts/cluster-administration/network-plugins/))과 +[네트워크 플러그인](/ko/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/))과 kubectl에서 사용한다. @@ -90,7 +90,7 @@ kubectl에서 -1. 사용자는 종종 `kubectl`을 사용하여 쿠버네티스 API와 상호 작용한다. [Kubectl 플러그인](/docs/tasks/extend-kubectl/kubectl-plugins/)은 kubectl 바이너리를 확장한다. 개별 사용자의 로컬 환경에만 영향을 미치므로 사이트 전체 정책을 적용할 수는 없다. +1. 사용자는 종종 `kubectl`을 사용하여 쿠버네티스 API와 상호 작용한다. [Kubectl 플러그인](/ko/docs/tasks/extend-kubectl/kubectl-plugins/)은 kubectl 바이너리를 확장한다. 개별 사용자의 로컬 환경에만 영향을 미치므로 사이트 전체 정책을 적용할 수는 없다. 2. apiserver는 모든 요청을 처리한다. apiserver의 여러 유형의 익스텐션 포인트는 요청을 인증하거나, 콘텐츠를 기반으로 요청을 차단하거나, 콘텐츠를 편집하고, 삭제 처리를 허용한다. 이 내용은 [API 접근 익스텐션](/ko/docs/concepts/extend-kubernetes/extend-cluster/#api-접근-익스텐션) 섹션에 설명되어 있다. 3. apiserver는 다양한 종류의 *리소스* 를 제공한다. `pods`와 같은 *빌트인 리소스 종류* 는 쿠버네티스 프로젝트에 의해 정의되며 변경할 수 없다. 직접 정의한 리소스를 추가할 수도 있고, [커스텀 리소스](/ko/docs/concepts/extend-kubernetes/extend-cluster/#사용자-정의-유형) 섹션에 설명된대로 *커스텀 리소스* 라고 부르는 다른 프로젝트에서 정의한 리소스를 추가할 수도 있다. 커스텀 리소스는 종종 API 접근 익스텐션과 함께 사용된다. 4. 쿠버네티스 스케줄러는 파드를 배치할 노드를 결정한다. 스케줄링을 확장하는 몇 가지 방법이 있다. 이들은 [스케줄러 익스텐션](/ko/docs/concepts/extend-kubernetes/extend-cluster/#스케줄러-익스텐션) 섹션에 설명되어 있다. 
@@ -164,7 +164,7 @@ Kubelet이 바이너리 플러그인을 호출하여 볼륨을 마운트하도 ### 장치 플러그인 -장치 플러그인은 노드가 [장치 플러그인](/docs/concepts/cluster-administration/device-plugins/)을 +장치 플러그인은 노드가 [장치 플러그인](/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/)을 통해 새로운 노드 리소스(CPU 및 메모리와 같은 빌트인 자원 외에)를 발견할 수 있게 해준다. @@ -198,9 +198,7 @@ Kubelet이 바이너리 플러그인을 호출하여 볼륨을 마운트하도 * [커스텀 리소스](/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources/)에 대해 더 알아보기 * [동적 어드미션 컨트롤](/docs/reference/access-authn-authz/extensible-admission-controllers/)에 대해 알아보기 * 인프라스트럭처 익스텐션에 대해 더 알아보기 - * [네트워크 플러그인](/docs/concepts/cluster-administration/network-plugins/) - * [장치 플러그인](/docs/concepts/cluster-administration/device-plugins/) -* [kubectl 플러그인](/docs/tasks/extend-kubectl/kubectl-plugins/)에 대해 알아보기 -* [오퍼레이터 패턴](/docs/concepts/extend-kubernetes/operator/)에 대해 알아보기 - - + * [네트워크 플러그인](/ko/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) + * [장치 플러그인](/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) +* [kubectl 플러그인](/ko/docs/tasks/extend-kubectl/kubectl-plugins/)에 대해 알아보기 +* [오퍼레이터 패턴](/ko/docs/concepts/extend-kubernetes/operator/)에 대해 알아보기 diff --git a/content/ko/docs/concepts/overview/_index.md b/content/ko/docs/concepts/overview/_index.md index ae91f70ffd39b..0b3df10062a03 100755 --- a/content/ko/docs/concepts/overview/_index.md +++ b/content/ko/docs/concepts/overview/_index.md @@ -1,4 +1,5 @@ --- title: "개요" weight: 20 ---- \ No newline at end of file +description: 쿠버네티스와 그 컴포넌트에 대한 하이-레벨(high-level) 개요를 제공한다. +--- diff --git a/content/ko/docs/concepts/overview/components.md b/content/ko/docs/concepts/overview/components.md index 8d0efc2212914..ace13900c6ef6 100644 --- a/content/ko/docs/concepts/overview/components.md +++ b/content/ko/docs/concepts/overview/components.md @@ -1,8 +1,11 @@ --- title: 쿠버네티스 컴포넌트 content_type: concept +description: > + 쿠버네티스 클러스터는 컴퓨터 집합인 노드 컴포넌트와 컨트롤 플레인 + 컴포넌트로 구성된다. weight: 20 -card: +card: name: concepts weight: 20 --- @@ -96,7 +99,7 @@ kube-controller-manager와 마찬가지로 cloud-controller-manager는 논리적 애드온에 대한 네임스페이스 리소스는 `kube-system` 네임스페이스에 속한다. 선택된 일부 애드온은 아래에 설명하였고, 사용 가능한 전체 확장 애드온 리스트는 -[애드온](/docs/concepts/cluster-administration/addons/)을 참조한다. +[애드온](/ko/docs/concepts/cluster-administration/addons/)을 참조한다. ### DNS @@ -117,7 +120,7 @@ kube-controller-manager와 마찬가지로 cloud-controller-manager는 논리적 ### 클러스터-레벨 로깅 -[클러스터-레벨 로깅](/docs/concepts/cluster-administration/logging/) 메커니즘은 +[클러스터-레벨 로깅](/ko/docs/concepts/cluster-administration/logging/) 메커니즘은 검색/열람 인터페이스와 함께 중앙 로그 저장소에 컨테이너 로그를 저장하는 책임을 진다. @@ -127,4 +130,3 @@ kube-controller-manager와 마찬가지로 cloud-controller-manager는 논리적 * [컨트롤러](/ko/docs/concepts/architecture/controller/)에 대해 더 배우기 * [kube-scheduler](/ko/docs/concepts/scheduling-eviction/kube-scheduler/)에 대해 더 배우기 * etcd의 공식 [문서](https://etcd.io/docs/) 읽기 - diff --git a/content/ko/docs/concepts/overview/kubernetes-api.md b/content/ko/docs/concepts/overview/kubernetes-api.md index d1d1e3d49948f..26047a881469b 100644 --- a/content/ko/docs/concepts/overview/kubernetes-api.md +++ b/content/ko/docs/concepts/overview/kubernetes-api.md @@ -2,6 +2,9 @@ title: 쿠버네티스 API content_type: concept weight: 30 +description: > + 쿠버네티스 API를 사용하면 쿠버네티스 오브젝트들의 상태를 쿼리하고 조작할 수 있다. + 쿠버네티스 컨트롤 플레인의 핵심은 API 서버와 그것이 노출하는 HTTP API이다. 사용자와 클러스터의 다른 부분 및 모든 외부 컴포넌트는 API 서버를 통해 서로 통신한다. 
card: name: concepts weight: 30 diff --git a/content/ko/docs/concepts/overview/what-is-kubernetes.md b/content/ko/docs/concepts/overview/what-is-kubernetes.md index f94ab988a33dc..ad083dd49df08 100644 --- a/content/ko/docs/concepts/overview/what-is-kubernetes.md +++ b/content/ko/docs/concepts/overview/what-is-kubernetes.md @@ -71,7 +71,7 @@ card: ## 쿠버네티스가 아닌 것 -쿠버네티스는 전통적인, 모든 것이 포함된 Platform as a Service(PaaS)가 아니다. 쿠버네티스는 하드웨어 수준보다는 컨테이너 수준에서 운영되기 때문에, PaaS가 일반적으로 제공하는 배포, 스케일링, 로드 밸런싱, 로깅 및 모니터링과 같은 기능에서 공통점이 있기도 하다. 하지만, 쿠버네티스는 모놀리식(monolithic)이 아니어서, 이런 기본 솔루션이 선택적이며 추가나 제거가 용이하다. 쿠버네티스는 개발자 플랫폼을 만드는 구성 요소를 제공하지만, 필요한 경우 사용자의 선택권과 유연성을 지켜준다. +쿠버네티스는 전통적인, 모든 것이 포함된 Platform as a Service(PaaS)가 아니다. 쿠버네티스는 하드웨어 수준보다는 컨테이너 수준에서 운영되기 때문에, PaaS가 일반적으로 제공하는 배포, 스케일링, 로드 밸런싱과 같은 기능을 제공하며, 사용자가 로깅, 모니터링 및 알림 솔루션을 통합할 수 있다. 하지만, 쿠버네티스는 모놀리식(monolithic)이 아니어서, 이런 기본 솔루션이 선택적이며 추가나 제거가 용이하다. 쿠버네티스는 개발자 플랫폼을 만드는 구성 요소를 제공하지만, 필요한 경우 사용자의 선택권과 유연성을 지켜준다. 쿠버네티스는: @@ -89,4 +89,3 @@ card: * [쿠버네티스 구성요소](/ko/docs/concepts/overview/components/) 살펴보기 * [시작하기](/ko/docs/setup/) 준비가 되었는가? - diff --git a/content/ko/docs/concepts/overview/working-with-objects/_index.md b/content/ko/docs/concepts/overview/working-with-objects/_index.md index a27acb856cd9a..26aa4dc83be72 100644 --- a/content/ko/docs/concepts/overview/working-with-objects/_index.md +++ b/content/ko/docs/concepts/overview/working-with-objects/_index.md @@ -1,4 +1,7 @@ --- title: "쿠버네티스 오브젝트로 작업하기" weight: 40 +description: > + 쿠버네티스 오브젝트는 쿠버네티스 시스템의 영구 엔티티이다. 쿠버네티스는 이러한 엔티티들을 사용하여 클러스터의 상태를 나타낸다. + 쿠버네티스 오브젝트 모델과 쿠버네티스 오브젝트를 사용하는 방법에 대해 학습한다. --- diff --git a/content/ko/docs/concepts/overview/working-with-objects/annotations.md b/content/ko/docs/concepts/overview/working-with-objects/annotations.md index aa9c29cb640a1..96db884a4f0f9 100644 --- a/content/ko/docs/concepts/overview/working-with-objects/annotations.md +++ b/content/ko/docs/concepts/overview/working-with-objects/annotations.md @@ -51,13 +51,13 @@ weight: 50 * 경량 롤아웃 도구 메타데이터. 예: 구성 또는 체크포인트 * 책임자의 전화번호 또는 호출기 번호, 또는 팀 웹 사이트 같은 - 해당 정보를 찾을 수 있는 디렉토리 진입점. + 해당 정보를 찾을 수 있는 디렉터리 진입점. * 행동을 수정하거나 비표준 기능을 수행하기 위한 최종 사용자의 지시 사항. 어노테이션을 사용하는 대신, 이 유형의 정보를 -외부 데이터베이스 또는 디렉토리에 저장할 수 있지만, 이는 배포, 관리, 인트로스펙션(introspection) 등을 위한 +외부 데이터베이스 또는 디렉터리에 저장할 수 있지만, 이는 배포, 관리, 인트로스펙션(introspection) 등을 위한 공유 클라이언트 라이브러리와 도구 생성을 훨씬 더 어렵게 만들 수 있다. diff --git a/content/ko/docs/concepts/overview/working-with-objects/field-selectors.md b/content/ko/docs/concepts/overview/working-with-objects/field-selectors.md index 9a343ca5c52e2..cb16ce9d58ca0 100644 --- a/content/ko/docs/concepts/overview/working-with-objects/field-selectors.md +++ b/content/ko/docs/concepts/overview/working-with-objects/field-selectors.md @@ -9,7 +9,7 @@ _필드 셀렉터_ 는 한 개 이상의 리소스 필드 값에 따라 [쿠버 * `metadata.namespace!=default` * `status.phase=Pending` -다음의 `kubectl` 커맨드는 [`status.phase`](/ko/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) 필드의 값이 `Running` 인 모든 파드를 선택한다. +다음의 `kubectl` 커맨드는 [`status.phase`](/ko/docs/concepts/workloads/pods/pod-lifecycle/#파드의-단계-phase) 필드의 값이 `Running` 인 모든 파드를 선택한다. 
```shell kubectl get pods --field-selector status.phase=Running diff --git a/content/ko/docs/concepts/overview/working-with-objects/labels.md b/content/ko/docs/concepts/overview/working-with-objects/labels.md index e06daff9295e3..ed896ce0050d3 100644 --- a/content/ko/docs/concepts/overview/working-with-objects/labels.md +++ b/content/ko/docs/concepts/overview/working-with-objects/labels.md @@ -87,7 +87,7 @@ API는 현재 _일치성 기준_ 과 _집합성 기준_ 이라는 두 종류의 문서화해야 한다. {{< note >}} -레플리카 셋과 같은 일부 API 유형에서 두 인스턴스의 레이블 셀렉터는 네임스페이스 내에서 겹치지 않아야 한다. 그렇지 않으면 컨트롤러는 상충하는 명령으로 보고, 얼마나 많은 복제본이 필요한지 알 수 없다. +레플리카셋(ReplicaSet)과 같은 일부 API 유형에서 두 인스턴스의 레이블 셀렉터는 네임스페이스 내에서 겹치지 않아야 한다. 그렇지 않으면 컨트롤러는 상충하는 명령으로 보고, 얼마나 많은 복제본이 필요한지 알 수 없다. {{< /note >}} {{< caution >}} diff --git a/content/ko/docs/concepts/overview/working-with-objects/namespaces.md b/content/ko/docs/concepts/overview/working-with-objects/namespaces.md index d5eb45f21c4e9..ec4df0668d323 100644 --- a/content/ko/docs/concepts/overview/working-with-objects/namespaces.md +++ b/content/ko/docs/concepts/overview/working-with-objects/namespaces.md @@ -27,7 +27,7 @@ weight: 30 네임스페이스는 클러스터 자원을 ([리소스 쿼터](/ko/docs/concepts/policy/resource-quotas/)를 통해) 여러 사용자 사이에서 나누는 방법이다. -이후 버전의 쿠버네티스에서는 같은 네임스페이스의 오브젝트는 기본적으로 +이후 버전의 쿠버네티스에서는 같은 네임스페이스의 오브젝트는 기본적으로 동일한 접근 제어 정책을 갖게 된다. 동일한 소프트웨어의 다른 버전과 같이 약간 다른 리소스를 분리하기 위해 @@ -39,6 +39,10 @@ weight: 30 네임스페이스의 생성과 삭제는 [네임스페이스 관리자 가이드 문서](/docs/tasks/administer-cluster/namespaces/)에 기술되어 있다. +{{< note >}} + 쿠버네티스 시스템 네임스페이스용으로 예약되어 있으므로, `kube-` 접두사로 네임스페이스를 생성하지 않는다. +{{< /note >}} + ### 네임스페이스 조회 사용 중인 클러스터의 현재 네임스페이스를 나열할 수 있다. @@ -54,11 +58,12 @@ kube-public Active 1d kube-system Active 1d ``` -쿠버네티스는 처음에 세 개의 초기 네임스페이스를 갖는다. +쿠버네티스는 처음에 네 개의 초기 네임스페이스를 갖는다. * `default` 다른 네임스페이스가 없는 오브젝트를 위한 기본 네임스페이스 * `kube-system` 쿠버네티스 시스템에서 생성한 오브젝트를 위한 네임스페이스 * `kube-public` 이 네임스페이스는 자동으로 생성되며 모든 사용자(인증되지 않은 사용자 포함)가 읽기 권한으로 접근할 수 있다. 이 네임스페이스는 주로 전체 클러스터 중에 공개적으로 드러나서 읽을 수 있는 리소스를 위해 예약되어 있다. 이 네임스페이스의 공개적인 성격은 단지 관례이지 요구 사항은 아니다. + * `kube-node-lease` 클러스터가 스케일링될 때 노드 하트비트의 성능을 향상시키는 각 노드와 관련된 리스(lease) 오브젝트에 대한 네임스페이스 ### 요청에 네임스페이스 설정하기 @@ -114,6 +119,3 @@ kubectl api-resources --namespaced=false * [신규 네임스페이스 생성](/docs/tasks/administer-cluster/namespaces/#creating-a-new-namespace)에 대해 더 배우기. * [네임스페이스 삭제](/docs/tasks/administer-cluster/namespaces/#deleting-a-namespace)에 대해 더 배우기. - - - diff --git a/content/ko/docs/concepts/overview/working-with-objects/object-management.md b/content/ko/docs/concepts/overview/working-with-objects/object-management.md index 9b59aae529bfe..590116af6f9f1 100644 --- a/content/ko/docs/concepts/overview/working-with-objects/object-management.md +++ b/content/ko/docs/concepts/overview/working-with-objects/object-management.md @@ -7,7 +7,7 @@ weight: 15 `kubectl` 커맨드라인 툴은 쿠버네티스 오브젝트를 생성하고 관리하기 위한 몇 가지 상이한 방법을 지원한다. 이 문서는 여러가지 접근법에 대한 개요을 -제공한다. Kubectl로 오브젝트 관리하기에 대한 자세한 설명은 +제공한다. Kubectl로 오브젝트 관리하기에 대한 자세한 설명은 [Kubectl 서적](https://kubectl.docs.kubernetes.io)에서 확인한다. @@ -40,12 +40,6 @@ weight: 15 디플로이먼트 오브젝트를 생성하기 위해 nginx 컨테이너의 인스턴스를 구동시킨다. -```sh -kubectl run nginx --image nginx -``` - -다른 문법을 이용하여 동일한 작업을 수행한다. - ```sh kubectl create deployment nginx --image nginx ``` @@ -75,11 +69,11 @@ kubectl create deployment nginx --image nginx 참고한다. {{< warning >}} -명령형 `replace` 커맨드는 기존 spec을 새로 제공된 spec으로 바꾸고 -구성 파일에서 누락된 오브젝트의 모든 변경 사항을 삭제한다. -이 방법은 spec이 구성 파일과는 별개로 업데이트되는 리소스 유형에는 -사용하지 말아야한다. 
-예를 들어 `LoadBalancer` 유형의 서비스는 클러스터의 구성과 별도로 +명령형 `replace` 커맨드는 기존 spec을 새로 제공된 spec으로 바꾸고 +구성 파일에서 누락된 오브젝트의 모든 변경 사항을 삭제한다. +이 방법은 spec이 구성 파일과는 별개로 업데이트되는 리소스 유형에는 +사용하지 말아야한다. +예를 들어 `LoadBalancer` 유형의 서비스는 클러스터의 구성과 별도로 `externalIPs` 필드가 업데이트된다. {{< /warning >}} @@ -124,7 +118,7 @@ kubectl replace -f nginx.yaml 선언형 오브젝트 구성에 비해 단점은 다음과 같다. -- 명령형 오브젝트 구성은 디렉토리가 아닌, 파일에 대해 가장 효과가 있다. +- 명령형 오브젝트 구성은 디렉터리가 아닌, 파일에 대해 가장 효과가 있다. - 활성 오브젝트에 대한 업데이트는 구성 파일에 반영되어야 한다. 그렇지 않으면 다음 교체 중에 손실된다. ## 선언형 오브젝트 구성 @@ -133,19 +127,19 @@ kubectl replace -f nginx.yaml 구성 파일을 대상으로 작동시키지만, 사용자는 파일에서 수행 할 작업을 정의하지 않는다. 생성, 업데이트, 그리고 삭제 작업은 `kubectl`에 의해 오브젝트 마다 자동으로 감지된다. 이를 통해 다른 오브젝트에 대해 -다른 조작이 필요할 수 있는 디렉토리에서 작업할 수 있다. +다른 조작이 필요할 수 있는 디렉터리에서 작업할 수 있다. {{< note >}} -선언형 오브젝트 구성은 변경 사항이 오브젝트 구성 파일에 -다시 병합되지 않더라도 다른 작성자가 작성한 변경 사항을 유지한다. +선언형 오브젝트 구성은 변경 사항이 오브젝트 구성 파일에 +다시 병합되지 않더라도 다른 작성자가 작성한 변경 사항을 유지한다. 이것은 전체 오브젝트 구성 변경을 위한 `replace` API를 -사용하는 대신, `patch` API를 사용하여 인지되는 차이만 +사용하는 대신, `patch` API를 사용하여 인지되는 차이만 작성하기 때문에 가능하다. {{< /note >}} ### 예시 -`configs` 디렉토리 내 모든 오브젝트 구성 파일을 처리하고 활성 오브젝트를 +`configs` 디렉터리 내 모든 오브젝트 구성 파일을 처리하고 활성 오브젝트를 생성 또는 패치한다. 먼저 어떠한 변경이 이루어지게 될지 알아보기 위해 `diff` 하고 나서 적용할 수 있다. @@ -154,7 +148,7 @@ kubectl diff -f configs/ kubectl apply -f configs/ ``` -재귀적으로 디렉토리를 처리한다. +재귀적으로 디렉터리를 처리한다. ```sh kubectl diff -R -f configs/ @@ -166,7 +160,7 @@ kubectl apply -R -f configs/ 명령형 오브젝트 구성에 비해 장점은 다음과 같다. - 활성 오브젝트에 직접 작성된 변경 사항은 구성 파일로 다시 병합되지 않더라도 유지된다. -- 선언형 오브젝트 구성은 디렉토리에서의 작업 및 오브젝트 별 작업 유형(생성, 패치, 삭제)의 자동 감지에 더 나은 지원을 제공한다. +- 선언형 오브젝트 구성은 디렉터리에서의 작업 및 오브젝트 별 작업 유형(생성, 패치, 삭제)의 자동 감지에 더 나은 지원을 제공한다. 명령형 오브젝트 구성에 비해 단점은 다음과 같다. @@ -185,5 +179,3 @@ kubectl apply -R -f configs/ - [Kubectl 커맨드 참조](/docs/reference/generated/kubectl/kubectl-commands/) - [Kubectl 서적](https://kubectl.docs.kubernetes.io) - [쿠버네티스 API 참조](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) - - diff --git a/content/ko/docs/concepts/policy/_index.md b/content/ko/docs/concepts/policy/_index.md index ae03c565c1a3f..425e7250373ea 100644 --- a/content/ko/docs/concepts/policy/_index.md +++ b/content/ko/docs/concepts/policy/_index.md @@ -1,4 +1,6 @@ --- title: "정책" weight: 90 +description: > + 리소스의 그룹에 적용되도록 구성할 수 있는 정책 --- diff --git a/content/ko/docs/concepts/policy/limit-range.md b/content/ko/docs/concepts/policy/limit-range.md index ee30ec64374df..84656375c0311 100644 --- a/content/ko/docs/concepts/policy/limit-range.md +++ b/content/ko/docs/concepts/policy/limit-range.md @@ -61,11 +61,11 @@ _리밋레인지_ 는 다음과 같은 제약 조건을 제공한다. 제한의 사용에 대한 예시는 다음을 참조한다. -- [네임스페이스당 최소 및 최대 CPU 제약 조건을 설정하는 방법](/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace/). -- [네임스페이스당 최소 및 최대 메모리 제약 조건을 설정하는 방법](/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace/). -- [네임스페이스당 기본 CPU 요청과 제한을 설정하는 방법](/docs/tasks/administer-cluster/manage-resources/cpu-default-namespace/). -- [네임스페이스당 기본 메모리 요청과 제한을 설정하는 방법](/docs/tasks/administer-cluster/manage-resources/memory-default-namespace/). +- [네임스페이스당 최소 및 최대 CPU 제약 조건을 설정하는 방법](/ko/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace/). +- [네임스페이스당 최소 및 최대 메모리 제약 조건을 설정하는 방법](/ko/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace/). +- [네임스페이스당 기본 CPU 요청과 제한을 설정하는 방법](/ko/docs/tasks/administer-cluster/manage-resources/cpu-default-namespace/). +- [네임스페이스당 기본 메모리 요청과 제한을 설정하는 방법](/ko/docs/tasks/administer-cluster/manage-resources/memory-default-namespace/). 
- [네임스페이스당 최소 및 최대 스토리지 사용량을 설정하는 방법](/docs/tasks/administer-cluster/limit-storage-consumption/#limitrange-to-limit-requests-for-storage). -- [네임스페이스당 할당량을 설정하는 자세한 예시](/docs/tasks/administer-cluster/quota-memory-cpu-namespace/). +- [네임스페이스당 할당량을 설정하는 자세한 예시](/ko/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace/). diff --git a/content/ko/docs/concepts/policy/pod-security-policy.md b/content/ko/docs/concepts/policy/pod-security-policy.md index de6ab7a25deb9..aa3e786af752e 100644 --- a/content/ko/docs/concepts/policy/pod-security-policy.md +++ b/content/ko/docs/concepts/policy/pod-security-policy.md @@ -455,7 +455,7 @@ allowedHostPaths: (다른 컨테이너들에 있는 데이터를 읽고, 시스템 서비스의 자격 증명을 어뷰징(abusing)하는 등)할 수 있도록 만드는 다양한 방법이 있다. 예를 들면, Kubelet과 같다. -쓰기 가능한 hostPath 디렉토리 볼륨을 사용하면, 컨테이너가 `pathPrefix` 외부의 +쓰기 가능한 hostPath 디렉터리 볼륨을 사용하면, 컨테이너가 `pathPrefix` 외부의 호스트 파일시스템에 대한 통행을 허용하는 방식으로 컨테이너의 파일시스템 쓰기(write)를 허용한다. 쿠버네티스 1.11 이상 버전에서 사용 가능한 `readOnly: true`는 지정된 `pathPrefix`에 대한 접근을 효과적으로 제한하기 위해 **모든** `allowedHostPaths`에서 사용해야 한다. @@ -592,7 +592,7 @@ spec: ### AppArmor 파드시큐리티폴리시의 어노테이션을 통해 제어된다. [AppArmor -문서](/docs/tutorials/clusters/apparmor/#podsecuritypolicy-annotations)를 참고하길 바란다. +문서](/ko/docs/tutorials/clusters/apparmor/#podsecuritypolicy-annotations)를 참고하길 바란다. ### Seccomp @@ -636,4 +636,3 @@ spec: 폴리시 권장 사항에 대해서는 [파드 보안 표준](/docs/concepts/security/pod-security-standards/)을 참조한다. API 세부 정보는 [파드 시큐리티 폴리시 레퍼런스](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podsecuritypolicy-v1beta1-policy) 참조한다. - diff --git a/content/ko/docs/concepts/policy/resource-quotas.md b/content/ko/docs/concepts/policy/resource-quotas.md index 7c7a6f64a87a9..ca1af3adc2555 100644 --- a/content/ko/docs/concepts/policy/resource-quotas.md +++ b/content/ko/docs/concepts/policy/resource-quotas.md @@ -33,7 +33,7 @@ weight: 10 - `cpu`, `memory`와 같은 컴퓨트 리소스에 대해 네임스페이스에서 쿼터가 활성화된 경우 사용자는 해당값에 대한 요청 또는 제한을 지정해야 한다. 그렇지 않으면 쿼터 시스템이 파드 생성을 거부할 수 있다. 힌트: 컴퓨트 리소스 요구 사항이 없는 파드를 기본값으로 설정하려면 `LimitRanger` 어드미션 컨트롤러를 사용하자. - 이 문제를 회피하는 방법에 대한 예제는 [연습](/docs/tasks/administer-cluster/quota-memory-cpu-namespace/)을 참고하길 바란다. + 이 문제를 회피하는 방법에 대한 예제는 [연습](/ko/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace/)을 참고하길 바란다. `ResourceQuota` 오브젝트의 이름은 유효한 [DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names#dns-서브도메인-이름)이어야 한다. @@ -75,7 +75,7 @@ API 서버 `--enable-admission-plugins=` 플래그의 인수 중 하나로 ### 확장된 리소스에 대한 리소스 쿼터 위에서 언급한 리소스 외에도 릴리스 1.10에서는 -[확장된 리소스](/docs/concepts/configuration/manage-compute-resources-container/#extended-resources)에 대한 쿼터 지원이 추가되었다. +[확장된 리소스](/ko/docs/concepts/configuration/manage-resources-containers/#확장된-리소스)에 대한 쿼터 지원이 추가되었다. 확장된 리소스에는 오버커밋(overcommit)이 허용되지 않으므로 하나의 쿼터에서 동일한 확장된 리소스에 대한 `requests`와 `limits`을 모두 지정하는 것은 의미가 없다. 따라서 확장된 @@ -197,7 +197,7 @@ GPU 리소스를 다음과 같이 쿼터를 정의할 수 있다. {{< feature-state for_k8s_version="v1.12" state="beta" >}} -특정 [우선 순위](/docs/concepts/configuration/pod-priority-preemption/#pod-priority)로 파드를 생성할 수 있다. +특정 [우선 순위](/ko/docs/concepts/configuration/pod-priority-preemption/#파드-우선순위)로 파드를 생성할 수 있다. 쿼터 스펙의 `scopeSelector` 필드를 사용하여 파드의 우선 순위에 따라 파드의 시스템 리소스 사용을 제어할 수 있다. @@ -600,4 +600,3 @@ plugins: 자세한 내용은 [리소스쿼터 디자인 문서](https://git.k8s.io/community/contributors/design-proposals/resource-management/admission_control_resource_quota.md)를 참고하길 바란다. 
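To make the quota discussion above concrete, here is a minimal ResourceQuota sketch; the name, namespace, and hard limits are illustrative values only. As noted above, once compute-resource quota is enabled in a namespace, pods must specify requests and limits (or have them defaulted by the `LimitRanger` admission controller) to be admitted.

```yaml
# ResourceQuota sketch; the name, namespace and hard limits are
# illustrative values, not a recommended configuration.
apiVersion: v1
kind: ResourceQuota
metadata:
  name: compute-quota      # hypothetical quota name
  namespace: team-a        # hypothetical namespace
spec:
  hard:
    requests.cpu: "4"      # total CPU requests allowed in the namespace
    requests.memory: 8Gi   # total memory requests allowed
    limits.cpu: "8"        # total CPU limits allowed
    limits.memory: 16Gi    # total memory limits allowed
    pods: "10"             # maximum number of pods in the namespace
```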
- diff --git a/content/ko/docs/concepts/scheduling-eviction/_index.md b/content/ko/docs/concepts/scheduling-eviction/_index.md index d368e230d7156..5cd57c3a295c0 100644 --- a/content/ko/docs/concepts/scheduling-eviction/_index.md +++ b/content/ko/docs/concepts/scheduling-eviction/_index.md @@ -1,4 +1,7 @@ --- title: "스케줄링과 축출(eviction)" weight: 90 +description: > + 쿠버네티스에서, 스케줄링은 kubelet이 파드를 실행할 수 있도록 파드가 노드와 일치하는지 확인하는 것을 말한다. + 축출은 리소스가 부족한 노드에서 하나 이상의 파드를 사전에 장애로 처리하는 프로세스이다. --- diff --git a/content/ko/docs/concepts/scheduling-eviction/kube-scheduler.md b/content/ko/docs/concepts/scheduling-eviction/kube-scheduler.md index 576d966a9b5c6..3c0a4c511034f 100644 --- a/content/ko/docs/concepts/scheduling-eviction/kube-scheduler.md +++ b/content/ko/docs/concepts/scheduling-eviction/kube-scheduler.md @@ -28,7 +28,7 @@ weight: 10 ## kube-scheduler -[kube-scheduler](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/)는 +[kube-scheduler](/docs/reference/command-line-tools-reference/kube-scheduler/)는 쿠버네티스의 기본 스케줄러이며 {{< glossary_tooltip text="컨트롤 플레인" term_id="control-plane" >}}의 일부로 실행된다. kube-scheduler는 원하거나 필요에 따라 자체 스케줄링 컴포넌트를 @@ -95,4 +95,3 @@ _스코어링_ 단계에서 스케줄러는 목록에 남아있는 노드의 순 * [멀티 스케줄러 구성하기](/docs/tasks/administer-cluster/configure-multiple-schedulers/)에 대해 배우기 * [토폴로지 관리 정책](/docs/tasks/administer-cluster/topology-manager/)에 대해 배우기 * [파드 오버헤드](/ko/docs/concepts/configuration/pod-overhead/)에 대해 배우기 - diff --git a/content/ko/docs/concepts/scheduling-eviction/taint-and-toleration.md b/content/ko/docs/concepts/scheduling-eviction/taint-and-toleration.md index 2100a6de105c1..a72b857d07f87 100644 --- a/content/ko/docs/concepts/scheduling-eviction/taint-and-toleration.md +++ b/content/ko/docs/concepts/scheduling-eviction/taint-and-toleration.md @@ -171,7 +171,7 @@ tolerations: 사용자 정의 [어드미션 컨트롤러](/docs/reference/access-authn-authz/admission-controllers/)를 사용하여 톨러레이션를 적용하는 것이 가장 쉬운 방법이다. 예를 들어, [확장된 -리소스](/docs/concepts/configuration/manage-compute-resources-container/#extended-resources)를 +리소스](/ko/docs/concepts/configuration/manage-resources-containers/#확장된-리소스)를 사용하여 특별한 하드웨어를 나타내고, 확장된 리소스 이름으로 특별한 하드웨어 노드를 테인트시키고 [ExtendedResourceToleration](/docs/reference/access-authn-authz/admission-controllers/#extendedresourcetoleration) diff --git a/content/ko/docs/concepts/security/_index.md b/content/ko/docs/concepts/security/_index.md index 079e3dd8f88ef..d71d63c77a1b0 100644 --- a/content/ko/docs/concepts/security/_index.md +++ b/content/ko/docs/concepts/security/_index.md @@ -1,4 +1,6 @@ --- title: "보안" weight: 81 +description: > + 클라우드 네이티브 워크로드를 안전하게 유지하기 위한 개념 --- diff --git a/content/ko/docs/concepts/security/overview.md b/content/ko/docs/concepts/security/overview.md index b64f946acc871..bcbc22d915066 100644 --- a/content/ko/docs/concepts/security/overview.md +++ b/content/ko/docs/concepts/security/overview.md @@ -1,59 +1,53 @@ --- title: 클라우드 네이티브 보안 개요 content_type: concept -weight: 1 +weight: 10 --- -{{< toc >}} - -쿠버네티스 보안(일반적인 보안)은 관련된 많은 부분이 상호작용하는 -방대한 주제다. 오늘날에는 웹 애플리케이션의 실행을 돕는 -수많은 시스템에 오픈소스 소프트웨어가 통합되어 있으며, -전체적인 보안에 대하여 생각할 수 있는 방법에 대한 통찰력을 도울 수 있는 -몇 가지 중요한 개념이 있다. 이 가이드는 클라우드 네이티브 보안과 관련된 -몇 가지 일반적인 개념에 대한 멘탈 모델(mental model)을 정의한다. 멘탈 모델은 완전히 임의적이며 -소프트웨어 스택을 보호할 위치를 생각하는데 도움이되는 경우에만 사용해야 -한다. +이 개요는 클라우드 네이티브 보안의 맥락에서 쿠버네티스 보안에 대한 생각의 모델을 정의한다. + +{{< warning >}} +이 컨테이너 보안 모델은 입증된 정보 보안 정책이 아닌 제안 사항을 제공한다. +{{< /warning >}} ## 클라우드 네이티브 보안의 4C -계층적인 보안에 대해서 어떻게 생각할 수 있는지 이해하는 데 도움이 될 수 있는 다이어그램부터 살펴보자. + +보안은 계층으로 생각할 수 있다. 
클라우드 네이티브 보안의 4C는 클라우드(Cloud), +클러스터(Cluster), 컨테이너(Container)와 코드(Code)이다. + {{< note >}} 이 계층화된 접근 방식은 보안에 대한 [심층 방어](https://en.wikipedia.org/wiki/Defense_in_depth_(computing)) -접근 방식을 강화하며, 소프트웨어 시스템의 보안을 위한 모범 사례로 -널리 알려져 있다. 4C는 클라우드(Cloud), 클러스터(Clusters), 컨테이너(Containers) 및 코드(Code)이다. +컴퓨팅 접근 방식을 강화하며, 소프트웨어 시스템의 보안을 위한 모범 사례로 +널리 알려져 있다. {{< /note >}} {{< figure src="/images/docs/4c.png" title="클라우드 네이티브 보안의 4C" >}} - -위 그림에서 볼 수 있듯이, -4C는 각각의 사각형의 보안에 따라 다르다. 코드 -수준의 보안만 처리하여 클라우드, 컨테이너 및 코드의 열악한 보안 표준으로부터 -보호하는 것은 거의 불가능하다. 그러나 이런 영역들의 보안이 적절하게 -처리되고, 코드에 보안을 추가한다면 이미 강력한 기반이 더욱 -강화될 것이다. 이러한 관심 분야는 아래에서 더 자세히 설명한다. +클라우드 네이티브 보안 모델의 각 계층은 다음의 가장 바깥쪽 계층을 기반으로 한다. +코드 계층은 강력한 기본(클라우드, 클러스터, 컨테이너) 보안 계층의 이점을 제공한다. +코드 수준에서 보안을 처리하여 기본 계층의 열악한 보안 표준을 +보호할 수 없다. ## 클라우드 여러 면에서 클라우드(또는 공동 위치 서버, 또는 기업의 데이터 센터)는 쿠버네티스 클러스터 구성을 위한 [신뢰 컴퓨팅 기반(trusted computing base)](https://en.wikipedia.org/wiki/Trusted_computing_base) -이다. 이러한 구성 요소 자체가 취약하거나(또는 취약한 방법으로 구성된) -경우 이 기반 위에서 구축된 모든 구성 요소의 보안을 -실제로 보장할 방법이 없다. 각 클라우드 공급자는 그들의 환경에서 워크로드를 -안전하게 실행하는 방법에 대해 고객에게 광범위한 보안 권장 사항을 -제공한다. 모든 클라우드 공급자와 워크로드는 다르기 때문에 -클라우드 보안에 대한 권장 사항을 제공하는 것은 이 가이드의 범위를 벗어난다. 다음은 -알려진 클라우드 공급자의 보안 문서의 일부와 -쿠버네티스 클러스터를 구성하기 위한 인프라 -보안에 대한 일반적인 지침을 제공한다. +이다. 클라우드 계층이 취약하거나 취약한 방식으로 +구성된 경우 이 기반 위에서 구축된 구성 요소가 안전하다는 +보장은 없다. 각 클라우드 공급자는 해당 환경에서 워크로드를 안전하게 실행하기 +위한 보안 권장 사항을 제시한다. -### 클라우드 공급자 보안 표 +### 클라우드 공급자 보안 +자신의 하드웨어 또는 다른 클라우드 공급자에서 쿠버네티스 클러스터를 실행 중인 경우, +보안 모범 사례는 설명서를 참고한다. +다음은 인기있는 클라우드 공급자의 보안 문서 중 일부에 대한 링크이다. +{{< table caption="클라우드 공급자 보안" >}} IaaS 공급자 | 링크 | -------------------- | ------------ | @@ -64,43 +58,46 @@ IBM Cloud | https://www.ibm.com/cloud/security | Microsoft Azure | https://docs.microsoft.com/en-us/azure/security/azure-security | VMWare VSphere | https://www.vmware.com/security/hardening-guides.html | +{{< /table >}} + +### 인프라스트럭처 보안 {#infrastructure-security} -자체 하드웨어나 다른 클라우드 공급자를 사용하는 경우 보안에 대한 -모범 사례는 해당 문서를 참조한다. +쿠버네티스 클러스터에서 인프라 보안을 위한 제안은 다음과 같다. -### 일반적인 인프라 지침 표 +{{< table caption="인프라스트럭처 보안" >}} 쿠버네티스 인프라에서 고려할 영역 | 추천 | --------------------------------------------- | ------------ | -API 서버에 대한 네트워크 접근(마스터) | 이상적으로는 인터넷에서 쿠버네티스 마스터에 대한 모든 접근을 공개적으로 허용하지 않으며 클러스터를 관리하는데 필요한 IP 주소 집합으로 제한된 네트워크 접근 제어 목록(ACL)에 의해 제어되어야 한다. | -노드에 대한 네트워크 접근(워커 서버) | 노드는 마스터의 지정된 포트 연결_만_ 허용하고(네트워크 접근 제어 목록의 사용), NodePort와 LoadBalancer 유형의 쿠버네티스 서비스에 대한 연결을 허용하도록 구성해야 한다. 가능한 노드가 공용 인터넷에 완전히 노출되어서는 안된다. -클라우드 공급자 API에 대한 쿠버네티스 접근 | 각 클라우드 공급자는 쿠버네티스 마스터 및 노드에 서로 다른 권한을 부여해야 함으로써, 이런 권장 사항이 더 일반적이다. 관리해야 하는 리소스에 대한 [최소 권한의 원칙](https://en.wikipedia.org/wiki/Principle_of_least_privilege)을 따르는 클라우드 공급자의 접근 권한을 클러스터에 구성하는 것이 가장 좋다. AWS의 Kops에 대한 예제: https://github.com/kubernetes/kops/blob/master/docs/iam_roles.md#iam-roles -etcd에 대한 접근 | etcd (쿠버네티스의 데이터저장소)에 대한 접근은 마스터로만 제한되어야 한다. 구성에 따라 TLS를 통해 etcd를 사용해야 한다. 자세한 정보: https://github.com/etcd-io/etcd/tree/master/Documentation#security -etcd 암호화 | 가능한 모든 드라이브를 유휴 상태에서 암호화 하는 것이 좋은 방법이지만, etcd는 전체 클러스터(시크릿 포함)의 상태를 유지하고 있기에 디스크의 암호화는 유휴 상태에서 암호화 되어야 한다. +API 서버에 대한 네트워크 접근(컨트롤 플레인) | 쿠버네티스 컨트롤 플레인에 대한 모든 접근은 인터넷에서 공개적으로 허용되지 않으며 클러스터 관리에 필요한 IP 주소 집합으로 제한된 네트워크 접근 제어 목록에 의해 제어된다. | +노드에 대한 네트워크 접근(노드) | 지정된 포트의 컨트롤 플레인에서 _만_ (네트워크 접근 제어 목록을 통한) 연결을 허용하고 NodePort와 LoadBalancer 유형의 쿠버네티스 서비스에 대한 연결을 허용하도록 노드를 구성해야 한다. 가능하면 이러한 노드가 공용 인터넷에 완전히 노출되어서는 안된다. +클라우드 공급자 API에 대한 쿠버네티스 접근 | 각 클라우드 공급자는 쿠버네티스 컨트롤 플레인 및 노드에 서로 다른 권한 집합을 부여해야 한다. 
관리해야하는 리소스에 대해 [최소 권한의 원칙](https://en.wikipedia.org/wiki/Principle_of_least_privilege)을 따르는 클라우드 공급자의 접근 권한을 클러스터에 구성하는 것이 가장 좋다. [Kops 설명서](https://github.com/kubernetes/kops/blob/master/docs/iam_roles.md#iam-roles)는 IAM 정책 및 역할에 대한 정보를 제공한다. +etcd에 대한 접근 | etcd(쿠버네티스의 데이터 저장소)에 대한 접근은 컨트롤 플레인으로만 제한되어야 한다. 구성에 따라 TLS를 통해 etcd를 사용해야 한다. 자세한 내용은 [etcd 문서](https://github.com/etcd-io/etcd/tree/master/Documentation)에서 확인할 수 있다. +etcd 암호화 | 가능한 한 모든 드라이브를 암호화하는 것이 좋은 방법이지만, etcd는 전체 클러스터(시크릿 포함)의 상태를 유지하고 있기에 특히 디스크는 암호화되어 있어야 한다. + +{{< /table >}} ## 클러스터 -이 섹션에서는 쿠버네티스의 워크로드 -보안을 위한 링크를 제공한다. 쿠버네티스 -보안에 영향을 미치는 다음 두 가지 영역이 있다. +쿠버네티스 보안에는 다음의 두 가지 영역이 있다. -* 클러스터를 구성하는 설정 가능한 컴포넌트의 보안 -* 클러스터에서 실행되는 컴포넌트의 보안 +* 설정 가능한 클러스터 컴포넌트의 보안 +* 클러스터에서 실행되는 애플리케이션의 보안 -### 클러스터_의_ 컴포넌트 +### 클러스터의 컴포넌트 {#cluster-components} 우발적이거나 악의적인 접근으로부터 클러스터를 보호하고, 모범 사례에 대한 정보를 채택하기 위해서는 [클러스터 보안](/docs/tasks/administer-cluster/securing-a-cluster/)에 대한 조언을 읽고 따른다. -### 클러스터 _내_ 컴포넌트(애플리케이션) +### 클러스터 내 컴포넌트(애플리케이션) {#cluster-applications} + 애플리케이션의 공격 영역에 따라, 보안의 특정 측면에 중점을 둘 수 있다. 예를 들어, 다른 리소스 체인에 중요한 서비스(서비스 A)와 리소스 소진 공격에 취약한 별도의 작업 부하(서비스 B)를 실행하는 경우, -리소스 제한을 설정하지 않은 서비스 B에 의해 -서비스 A 또한 손상시킬 위험이 있다. 다음은 쿠버네티스에서 -실행 중인 워크로드를 보호할 때 고려해야 할 사항에 대한 링크 표이다. +서비스 B의 리소스를 제한하지 않으면 +서비스 A가 손상될 위험이 높다. 다음은 쿠버네티스에서 +실행되는 워크로드를 보호하기 위한 보안 문제 및 권장 사항이 나와 있는 표이다. 워크로드 보안에서 고려할 영역 | 추천 | ------------------------------ | ------------ | @@ -112,51 +109,45 @@ RBAC 인증(쿠버네티스 API에 대한 접근) | https://kubernetes.io/docs/r 네트워크 정책 | https://kubernetes.io/ko/docs/concepts/services-networking/network-policies/ 쿠버네티스 인그레스를 위한 TLS | https://kubernetes.io/ko/docs/concepts/services-networking/ingress/#tls - - ## 컨테이너 -쿠버네티스에서 소프트웨어를 실행하려면, 소프트웨어는 컨테이너에 있어야 한다. 이로 인해, -쿠버네티스의 원시적인 워크로드 보안으로부터 이점을 얻기 위해서 -반드시 고려해야 할 보안 사항이 있다. 컨테이너 보안 -또한 이 가이드의 범위를 벗어나지만, 해당 주제에 대한 추가적인 설명을 위하여 -일반 권장사항 및 링크 표를 아래에 제공한다. +컨테이너 보안은 이 가이드의 범위를 벗어난다. 다음은 일반적인 권장사항과 +이 주제에 대한 링크이다. 컨테이너에서 고려할 영역 | 추천 | ------------------------------ | ------------ | -컨테이너 취약점 스캔 및 OS에 종속적인 보안 | 이미지 빌드 단계의 일부 또는 정기적으로 [CoreOS의 Clair](https://github.com/coreos/clair/)와 같은 도구를 사용해서 컨테이너에 알려진 취약점이 있는지 검사한다. -이미지 서명 및 시행 | 두 개의 다른 CNCF 프로젝트(TUF 와 Notary)는 컨테이너 이미지에 서명하고 컨테이너 내용에 대한 신뢰 시스템을 유지하는데 유용한 도구이다. 도커를 사용하는 경우 도커 엔진에 [도커 컨텐츠 신뢰](https://docs.docker.com/engine/security/trust/content_trust/)가 내장되어 있다. 시행 부분에서의 [IBM의 Portieris](https://github.com/IBM/portieris) 프로젝트는 쿠버네티스 다이나믹 어드미션 컨트롤러로 실행되는 도구로, 클러스터에서 허가하기 전에 Notary를 통해 이미지가 적절하게 서명되었는지 확인한다. +컨테이너 취약점 스캔 및 OS에 종속적인 보안 | 이미지 빌드 단계의 일부로 컨테이너에 알려진 취약점이 있는지 검사해야 한다. +이미지 서명 및 시행 | 컨테이너 이미지에 서명하여 컨테이너의 내용에 대한 신뢰 시스템을 유지한다. 권한있는 사용자의 비허용 | 컨테이너를 구성할 때 컨테이너의 목적을 수행하는데 필요한 최소 권한을 가진 사용자를 컨테이너 내에 만드는 방법에 대해서는 설명서를 참조한다. ## 코드 -마지막으로 애플리케이션의 코드 수준으로 내려가면, 가장 많은 제어를 할 수 있는 -주요 공격 영역 중 하나이다. 이런 코드 수준은 쿠버네티스의 범위 -밖이지만 몇 가지 권장사항이 있다. +애플리케이션 코드는 가장 많은 제어를 할 수 있는 주요 공격 영역 중 하나이다. +애플리케이션 코드 보안은 쿠버네티스 보안 주제를 벗어나지만, +애플리케이션 코드를 보호하기 위한 권장 사항은 다음과 같다. -### 일반적인 코드 보안 지침표 +### 코드 보안 + +{{< table caption="코드 보안" >}} 코드에서 고려할 영역 | 추천 | ---------------------------------------------- | ------------ | -TLS를 통한 접근 | 코드가 TCP를 통해 통신해야 한다면, 클라이언트와 먼저 TLS 핸드 셰이크를 수행하는 것이 이상적이다. 몇 가지 경우를 제외하고, 기본 동작은 전송 중인 모든 것을 암호화하는 것이다. 한걸음 더 나아가, VPC의 "방화벽 뒤"에서도 서비스 간 네트워크 트래픽을 암호화하는 것이 좋다. 이것은 인증서를 가지고 있는 두 서비스의 양방향 검증을 [mTLS](https://en.wikipedia.org/wiki/Mutual_authentication)를 통해 수행할 수 있다. 이것을 수행하기 위해 쿠버네티스에는 [Linkerd](https://linkerd.io/) 및 [Istio](https://istio.io/)와 같은 수많은 도구가 있다. 
| +-------------------------| -------------- | +TLS를 통한 접근 | 코드가 TCP를 통해 통신해야 한다면, 미리 클라이언트와 TLS 핸드 셰이크를 수행한다. 몇 가지 경우를 제외하고, 전송 중인 모든 것을 암호화한다. 한 걸음 더 나아가, 서비스 간 네트워크 트래픽을 암호화하는 것이 좋다. 이것은 인증서를 가지고 있는 두 서비스의 양방향 검증을 [mTLS](https://en.wikipedia.org/wiki/Mutual_authentication)를 통해 수행할 수 있다. | 통신 포트 범위 제한 | 이 권장사항은 당연할 수도 있지만, 가능하면 통신이나 메트릭 수집에 꼭 필요한 서비스의 포트만 노출시켜야 한다. | -타사 종속성 보안 | 애플리케이션은 자체 코드베이스의 외부에 종속적인 경향이 있기 때문에, 코드의 종속성을 정기적으로 스캔하여 현재 알려진 취약점이 없는지 확인하는 것이 좋다. 각 언어에는 이런 검사를 자동으로 수행하는 도구를 가지고 있다. | +타사 종속성 보안 | 애플리케이션의 타사 라이브러리를 정기적으로 스캔하여 현재 알려진 취약점이 없는지 확인하는 것이 좋다. 각 언어에는 이런 검사를 자동으로 수행하는 도구를 가지고 있다. | 정적 코드 분석 | 대부분 언어에는 잠재적으로 안전하지 않은 코딩 방법에 대해 코드 스니펫을 분석할 수 있는 방법을 제공한다. 가능한 언제든지 일반적인 보안 오류에 대해 코드베이스를 스캔할 수 있는 자동화된 도구를 사용하여 검사를 한다. 도구는 다음에서 찾을 수 있다. https://owasp.org/www-community/Source_Code_Analysis_Tools | -동적 탐지 공격 | 일반적으로 서비스에서 발생할 수 있는 잘 알려진 공격 중 일부를 서비스에 테스트할 수 있는 자동화된 몇 가지 도구가 있다. 이런 잘 알려진 공격에는 SQL 인젝션, CSRF 및 XSS가 포함된다. 가장 널리 사용되는 동적 분석 도구는 OWASP Zed Attack 프록시다. https://owasp.org/www-project-zap/ | - - -## 강력한(robust) 자동화 - -위에서 언급한 대부분의 제안사항은 실제로 일련의 보안 검사의 일부로 코드를 -전달하는 파이프라인에 의해 자동화 될 수 있다. 소프트웨어 전달을 위한 -"지속적인 해킹(Continuous Hacking)"에 대한 접근 방식에 대해 알아 보려면, 자세한 설명을 제공하는 [이 기사](https://thenewstack.io/beyond-ci-cd-how-continuous-hacking-of-docker-containers-and-pipeline-driven-security-keeps-ygrene-secure/)를 참고한다. +동적 탐지 공격 | 잘 알려진 공격 중 일부를 서비스에 테스트할 수 있는 자동화된 몇 가지 도구가 있다. 여기에는 SQL 인젝션, CSRF 및 XSS가 포함된다. 가장 널리 사용되는 동적 분석 도구는 [OWASP Zed Attack 프록시](https://owasp.org/www-project-zap/)이다. | +{{< /table >}} ## {{% heading "whatsnext" %}} -* [파드에 대한 네트워크 정책](/ko/docs/concepts/services-networking/network-policies/) 알아보기 -* [클러스터 보안](/docs/tasks/administer-cluster/securing-a-cluster/)에 대해 알아보기 -* [API 접근 통제](/docs/reference/access-authn-authz/controlling-access/)에 대해 알아보기 -* 컨트롤 플레인에 대한 [전송 데이터 암호화](/docs/tasks/tls/managing-tls-in-a-cluster/) 알아보기 -* [Rest에서 데이터 암호화](/docs/tasks/administer-cluster/encrypt-data/) 알아보기 -* [쿠버네티스 시크릿](/docs/concepts/configuration/secret/)에 대해 알아보기 +쿠버네티스 보안 주제에 관련한 내용들을 배워보자. + +* [파드 보안 표준](/docs/concepts/security/pod-security-standards/) +* [파드에 대한 네트워크 정책](/ko/docs/concepts/services-networking/network-policies/) +* [클러스터 보안](/docs/tasks/administer-cluster/securing-a-cluster/) +* [API 접근 통제](/docs/reference/access-authn-authz/controlling-access/) +* 컨트롤 플레인을 위한 [전송 데이터 암호화](/docs/tasks/tls/managing-tls-in-a-cluster/) +* [Rest에서 데이터 암호화](/docs/tasks/administer-cluster/encrypt-data/) +* [쿠버네티스 시크릿](/docs/concepts/configuration/secret/) diff --git a/content/ko/docs/concepts/services-networking/_index.md b/content/ko/docs/concepts/services-networking/_index.md index 101f141102572..9cfcfd540bcc9 100644 --- a/content/ko/docs/concepts/services-networking/_index.md +++ b/content/ko/docs/concepts/services-networking/_index.md @@ -1,4 +1,12 @@ --- title: "서비스, 로드밸런싱, 네트워킹" weight: 60 +description: > + 쿠버네티스의 네트워킹에 대한 개념과 리소스에 대해 설명한다. --- + +쿠버네티스 네트워킹은 다음의 네 가지 문제를 해결한다. +- 파드 내의 컨테이너는 루프백(loopback)을 통한 네트워킹을 사용하여 통신한다. +- 클러스터 네트워킹은 서로 다른 파드 간의 통신을 제공한다. +- 서비스 리소스를 사용하면 파드에서 실행 중인 애플리케이션을 클러스터 외부에서 접근할 수 있다. +- 또한 서비스를 사용하여 클러스터 내부에서 사용할 수 있는 서비스만 게시할 수 있다. 
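The workload-security recommendations above point to network policies for restricting pod-to-pod traffic. One possible minimal sketch is shown below; the labels, namespace, and port are placeholders, and a network plugin that actually enforces NetworkPolicy is assumed.

```yaml
# NetworkPolicy sketch; labels, namespace and port are illustrative, and
# enforcement depends on the cluster's network plugin.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: backend-allow-frontend   # hypothetical policy name
  namespace: team-a              # hypothetical namespace
spec:
  podSelector:
    matchLabels:
      app: backend               # the policy applies to these pods
  policyTypes:
    - Ingress
  ingress:
    - from:
        - podSelector:
            matchLabels:
              app: frontend      # only pods with this label may connect
      ports:
        - protocol: TCP
          port: 8080             # and only on this port
```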
diff --git a/content/ko/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md b/content/ko/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md index ad5b1f9bbbf8f..be39f13f210c0 100644 --- a/content/ko/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md +++ b/content/ko/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md @@ -2,27 +2,28 @@ title: HostAliases로 파드의 /etc/hosts 항목 추가하기 content_type: concept weight: 60 +min-kubernetes-server-version: 1.7 --- -{{< toc >}} -파드의 /etc/hosts 파일에 항목을 추가하는 것은 DNS나 다른 방법들이 적용되지 않을 때 파드 수준의 호스트네임 해석을 제공한다. 1.7 버전에서는, 사용자들이 PodSpec의 HostAliases 항목을 사용하여 이러한 사용자 정의 항목들을 추가할 수 있다. -HostAliases를 사용하지 않은 수정은 권장하지 않는데, 이는 호스트 파일이 Kubelet에 의해 관리되고, 파드 생성/재시작 중에 덮어쓰여질 수 있기 때문이다. +파드의 `/etc/hosts` 파일에 항목을 추가하는 것은 DNS나 다른 방법들이 적용되지 않을 때 파드 수준의 호스트네임 해석을 제공한다. PodSpec의 HostAliases 항목을 사용하여 이러한 사용자 정의 항목들을 추가할 수 있다. + +HostAliases를 사용하지 않은 수정은 권장하지 않는데, 이는 호스트 파일이 kubelet에 의해 관리되고, 파드 생성/재시작 중에 덮어쓰여질 수 있기 때문이다. ## 기본 호스트 파일 내용 -파드 IP가 할당된 Nginx 파드를 시작해보자. +파드 IP가 할당된 Nginx 파드를 시작한다. ```shell -kubectl run nginx --image nginx --generator=run-pod/v1 +kubectl run nginx --image nginx ``` -```shell +``` pod/nginx created ``` @@ -32,7 +33,7 @@ pod/nginx created kubectl get pods --output=wide ``` -```shell +``` NAME READY STATUS RESTARTS AGE IP NODE nginx 1/1 Running 0 13s 10.200.0.4 worker0 ``` @@ -43,7 +44,7 @@ nginx 1/1 Running 0 13s 10.200.0.4 worker0 kubectl exec nginx -- cat /etc/hosts ``` -```none +``` # Kubernetes-managed hosts file. 127.0.0.1 localhost ::1 localhost ip6-localhost ip6-loopback @@ -57,43 +58,44 @@ fe00::2 ip6-allrouters 기본적으로, `hosts` 파일은 `localhost`와 자기 자신의 호스트네임과 같은 IPv4와 IPv6 상용구들만 포함하고 있다. -## HostAliases를 사용하여 추가 항목들 추가하기 +## hostAliases를 사용하여 추가 항목들 추가하기 -기본 상용구 이외에, `foo.local`, `bar.local`이 `127.0.0.1`로, -`foo.remote`, `bar.remote`가 `10.1.2.3`로 해석될 수 있도록 -추가 항목들을 `hosts` 파일에 추가할 수 있으며, -이는 `.spec.hostAliases` 항목에서 정의하여 파드에 HostAliases를 추가하면 가능하다. +기본 상용구 이외에, 추가 항목들을 `hosts` 파일에 +추가할 수 있다. +예를 들어, `foo.local`, `bar.local`이 `127.0.0.1`로, +`foo.remote`, `bar.remote`가 `10.1.2.3`로 해석될 수 있도록, `.spec.hostAliases` 항목에서 정의하여 파드에 +HostAliases를 추가하면 가능하다. {{< codenew file="service/networking/hostaliases-pod.yaml" >}} -이 파드는 다음의 명령어를 통해 시작될 수 있다. +다음을 실행하여 해당 구성으로 파드를 실행할 수 있다. ```shell -kubectl apply -f hostaliases-pod.yaml +kubectl apply -f https://k8s.io/examples/service/networking/hostaliases-pod.yaml ``` -```shell +``` pod/hostaliases-pod created ``` -파드의 IP와 상태를 확인해보자. +파드의 세부 정보를 검토하여 IPv4 주소와 상태를 확인해보자. ```shell kubectl get pod --output=wide ``` -```shell +``` NAME READY STATUS RESTARTS AGE IP NODE hostaliases-pod 0/1 Completed 0 6s 10.200.0.5 worker0 ``` -`hosts` 파일 내용은 아래와 같을 것이다. +`hosts` 파일 내용은 아래와 같다. ```shell -kubectl exec hostaliases-pod -- cat /etc/hosts +kubectl logs hostaliases-pod ``` -```none +``` # Kubernetes-managed hosts file. 127.0.0.1 localhost ::1 localhost ip6-localhost ip6-loopback @@ -110,14 +112,16 @@ fe00::2 ip6-allrouters 가장 마지막에 추가 항목들이 정의되어 있는 것을 확인할 수 있다. -## 왜 Kubelet이 호스트 파일을 관리하는가? +## 왜 Kubelet이 호스트 파일을 관리하는가? {#why-does-kubelet-manage-the-hosts-file} 컨테이너가 이미 시작되고 난 후 도커가 파일을 [수정](https://github.com/moby/moby/issues/17190)하는 것을 방지하기 위해 Kubelet은 파드의 각 컨테이너의 `hosts` 파일을 [관리](https://github.com/kubernetes/kubernetes/issues/14633)한다. -호스트 파일이 관리된다는 특성으로 인해, 컨테이너 재시작이나 파드 리스케줄 이벤트로 -`hosts` 파일이 Kubelet에 의해 다시 마운트될 때마다 사용자가 작성한 모든 내용이 -덮어 쓰인다. 따라서, 호스트 파일의 내용을 -직접 바꾸는 것은 권장하지 않는다. 
+{{< caution >}} +컨테이너 내부의 호스트 파일을 수동으로 변경하면 안된다. + +호스트 파일을 수동으로 변경하면, +컨테이너가 종료되면 변경 사항이 손실된다. +{{< /caution >}} diff --git a/content/ko/docs/concepts/services-networking/connect-applications-service.md b/content/ko/docs/concepts/services-networking/connect-applications-service.md index eda8ab3d6823a..1993fc860dd80 100644 --- a/content/ko/docs/concepts/services-networking/connect-applications-service.md +++ b/content/ko/docs/concepts/services-networking/connect-applications-service.md @@ -50,7 +50,7 @@ kubectl get pods -l run=my-nginx -o yaml | grep podIP 클러스터의 모든 노드로 ssh 접속하고 두 IP로 curl을 할수 있어야 한다. 컨테이너는 노드의 포트 80을 사용하지 *않으며* , 트래픽을 파드로 라우팅하는 특별한 NAT 규칙도 없다는 것을 참고한다. 이것은 동일한 containerPort를 사용해서 동일한 노드에서 여러 nginx 파드를 실행하고 IP를 사용해서 클러스터의 다른 파드나 노드에서 접근할 수 있다는 의미이다. 도커와 마찬가지로 포트는 여전히 호스트 노드의 인터페이스에 게시될 수 있지만, 네트워킹 모델로 인해 포트의 필요성이 크게 줄어든다. -만약 궁금하다면 [우리가 이것을 달성하는 방법](/docs/concepts/cluster-administration/networking/#how-to-achieve-this)을 자세히 읽어본다. +만약 궁금하다면 [우리가 이것을 달성하는 방법](/ko/docs/concepts/cluster-administration/networking/#쿠버네티스-네트워크-모델의-구현-방법)을 자세히 읽어본다. ## 서비스 생성하기 @@ -198,7 +198,7 @@ kube-dns ClusterIP 10.0.0.10 53/UDP,53/TCP 8m ``` 이 섹션의 나머지 부분에서는 수명이 긴 IP의 서비스(my-nginx)와 이 IP -에 이름을 할당한 DNS 서버가 있다고 가정한다. 여기서는 CoreDNS 클러스터 애드온(애플리케이션 이름 `kube-dns`)을 사용하므로, 표준 방법(예: `gethostbyname()`)을 사용해서 클러스터의 모든 파드에서 서비스와 통신할 수 있다. 만약 CoreDNS가 실행 중이 아니라면 [CoreDNS README](https://github.com/coredns/deployment/tree/master/kubernetes) 또는 [CoreDNS 설치](/docs/tasks/administer-cluster/coredns/#installing-coredns)를 참조해서 활성화 할 수 있다. 이것을 테스트하기 위해 다른 curl 애플리케이션을 실행한다. +에 이름을 할당한 DNS 서버가 있다고 가정한다. 여기서는 CoreDNS 클러스터 애드온(애플리케이션 이름 `kube-dns`)을 사용하므로, 표준 방법(예: `gethostbyname()`)을 사용해서 클러스터의 모든 파드에서 서비스와 통신할 수 있다. 만약 CoreDNS가 실행 중이 아니라면 [CoreDNS README](https://github.com/coredns/deployment/tree/master/kubernetes) 또는 [CoreDNS 설치](/ko/docs/tasks/administer-cluster/coredns/#coredns-설치)를 참조해서 활성화 할 수 있다. 이것을 테스트하기 위해 다른 curl 애플리케이션을 실행한다. ```shell kubectl run curl --image=radial/busyboxplus:curl -i --tty @@ -422,5 +422,3 @@ LoadBalancer Ingress: a320587ffd19711e5a37606cf4a74574-1142138393.us-east-1.el * [서비스를 사용해서 클러스터 내 애플리케이션에 접근하기](/docs/tasks/access-application-cluster/service-access-application-cluster/)를 더 자세히 알아본다. * [서비스를 사용해서 프론트 엔드부터 백 엔드까지 연결하기](/docs/tasks/access-application-cluster/connecting-frontend-backend/)를 더 자세히 알아본다. * [외부 로드 밸런서를 생성하기](/docs/tasks/access-application-cluster/create-external-load-balancer/)를 더 자세히 알아본다. - - diff --git a/content/ko/docs/concepts/services-networking/endpoint-slices.md b/content/ko/docs/concepts/services-networking/endpoint-slices.md index d07b03ea5d22e..d5f7a2abf3e55 100644 --- a/content/ko/docs/concepts/services-networking/endpoint-slices.md +++ b/content/ko/docs/concepts/services-networking/endpoint-slices.md @@ -41,7 +41,7 @@ term_id="selector" >}} 가 지정되면 EndpointSlice 서비스 셀렉터와 매치되는 모든 파드들을 포함하고 참조한다. 엔드포인트슬라이스는 고유한 서비스와 포트 조합을 통해 네트워크 엔드포인트를 그룹화 한다. EndpointSlice 오브젝트의 이름은 유효한 -[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름들)이어야 한다. +[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름)이어야 한다. 예를 들어, 여기에 `example` 쿠버네티스 서비스를 위한 EndpointSlice 리소스 샘플이 있다. 
@@ -180,4 +180,3 @@ text="kube-controller-manager" term_id="kube-controller-manager" >}} 플래그 * [엔드포인트슬라이스 활성화하기](/docs/tasks/administer-cluster/enabling-endpointslices) * [애플리케이션을 서비스와 함께 연결하기](/ko/docs/concepts/services-networking/connect-applications-service/)를 읽어보기 - diff --git a/content/ko/docs/concepts/services-networking/ingress-controllers.md b/content/ko/docs/concepts/services-networking/ingress-controllers.md index 85aebe74afdfe..47d2687dee58a 100644 --- a/content/ko/docs/concepts/services-networking/ingress-controllers.md +++ b/content/ko/docs/concepts/services-networking/ingress-controllers.md @@ -31,7 +31,7 @@ kube-controller-manager 바이너리의 일부로 실행되는 컨트롤러의 * [Contour](https://projectcontour.io/)는 [Envoy](https://www.envoyproxy.io/) 기반 인그레스 컨트롤러로 VMware에서 제공하고 지원한다. * Citrix는 [베어메탈](https://github.com/citrix/citrix-k8s-ingress-controller/tree/master/deployment/baremetal)과 [클라우드](https://github.com/citrix/citrix-k8s-ingress-controller/tree/master/deployment) 배포를 위해 하드웨어 (MPX), 가상화 (VPX) 및 [무료 컨테이너화 (CPX) ADC](https://www.citrix.com/products/citrix-adc/cpx-express.html)를 위한 [인그레스 컨트롤러](https://github.com/citrix/citrix-k8s-ingress-controller)를 제공한다. -* F5 Networks는 [쿠버네티스를 위한 F5 BIG-IP 컨트롤러](http://clouddocs.f5.com/products/connectors/k8s-bigip-ctlr/latest)에 대한 +* F5 Networks는 [쿠버네티스를 위한 F5 BIG-IP 컨테이너 인그레스 서비스](http://clouddocs.f5.com/products/connectors/k8s-bigip-ctlr/latest)에 대한 [지원과 유지 보수](https://support.f5.com/csp/article/K86859508)를 제공한다. * [Gloo](https://gloo.solo.io)는 [solo.io](https://www.solo.io)의 엔터프라이즈 지원과 함께 API 게이트웨이 기능을 제공하는 [Envoy](https://www.envoyproxy.io) 기반의 오픈 소스 인그레스 컨트롤러다. * [HAProxy 인그레스](https://haproxy-ingress.github.io)는 HAProxy를 위한 고도로 커스터마이징 가능한 커뮤니티 주도형 인그레스 컨트롤러다. diff --git a/content/ko/docs/concepts/services-networking/ingress.md b/content/ko/docs/concepts/services-networking/ingress.md index 19982fd447dea..23ab2d1adefe9 100644 --- a/content/ko/docs/concepts/services-networking/ingress.md +++ b/content/ko/docs/concepts/services-networking/ingress.md @@ -18,7 +18,7 @@ weight: 40 * 노드(Node): 클러스터의 일부이며, 쿠버네티스에 속한 워커 머신. * 클러스터(Cluster): 쿠버네티스에서 관리되는 컨테이너화 된 애플리케이션을 실행하는 노드 집합. 이 예시와 대부분의 일반적인 쿠버네티스 배포에서 클러스터에 속한 노드는 퍼블릭 인터넷의 일부가 아니다. * 에지 라우터(Edge router): 클러스터에 방화벽 정책을 적용하는 라우터. 이것은 클라우드 공급자 또는 물리적 하드웨어의 일부에서 관리하는 게이트웨이일 수 있다. -* 클러스터 네트워크(Cluster network): 쿠버네티스 [네트워킹 모델](/docs/concepts/cluster-administration/networking/)에 따라 클러스터 내부에서 통신을 용이하게 하는 논리적 또는 물리적 링크 집합. +* 클러스터 네트워크(Cluster network): 쿠버네티스 [네트워킹 모델](/ko/docs/concepts/cluster-administration/networking/)에 따라 클러스터 내부에서 통신을 용이하게 하는 논리적 또는 물리적 링크 집합. * 서비스: {{< glossary_tooltip text="레이블" term_id="label" >}} 셀렉터를 사용해서 파드 집합을 식별하는 쿠버네티스 {{< glossary_tooltip text="서비스" term_id="service" >}}. 달리 언급하지 않으면 서비스는 클러스터 네트워크 내에서만 라우팅 가능한 가상 IP를 가지고 있다고 가정한다. ## 인그레스란? @@ -79,8 +79,8 @@ spec: 다른 모든 쿠버네티스 리소스와 마찬가지로 인그레스에는 `apiVersion`, `kind`, 그리고 `metadata` 필드가 필요하다. 인그레스 오브젝트의 이름은 유효한 -[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름들)이어야 한다. -설정 파일의 작성에 대한 일반적인 내용은 [애플리케이션 배포하기](/docs/tasks/run-application/run-stateless-application-deployment/), [컨테이너 구성하기](/docs/tasks/configure-pod-container/configure-pod-configmap/), [리소스 관리하기](/docs/concepts/cluster-administration/manage-deployment/)를 참조한다. +[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름)이어야 한다. 
+설정 파일의 작성에 대한 일반적인 내용은 [애플리케이션 배포하기](/docs/tasks/run-application/run-stateless-application-deployment/), [컨테이너 구성하기](/docs/tasks/configure-pod-container/configure-pod-configmap/), [리소스 관리하기](/ko/docs/concepts/cluster-administration/manage-deployment/)를 참조한다. 인그레스는 종종 어노테이션을 이용해서 인그레스 컨트롤러에 따라 몇 가지 옵션을 구성하는데, 그 예시는 [재작성-타겟 어노테이션](https://github.com/kubernetes/ingress-nginx/blob/master/docs/examples/rewrite/README.md)이다. 다른 [인그레스 컨트롤러](/ko/docs/concepts/services-networking/ingress-controllers)는 다른 어노테이션을 지원한다. @@ -88,7 +88,7 @@ spec: 인그레스 [사양](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status) 에는 로드 밸런서 또는 프록시 서버를 구성하는데 필요한 모든 정보가 있다. 가장 중요한 것은, -들어오는 요청과 일치하는 규칙 목록을 포함하는 것이다. 인그레스 리소스는 HTTP 트래픽을 +들어오는 요청과 일치하는 규칙 목록을 포함하는 것이다. 인그레스 리소스는 HTTP(S) 트래픽을 지시하는 규칙만 지원한다. ### 인그레스 규칙 @@ -547,4 +547,3 @@ Events: * [인그레스] API](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#ingress-v1beta1-networking-k8s-io)에 대해 배우기 * [인그레스 컨트롤러](/ko/docs/concepts/services-networking/ingress-controllers/)에 대해 배우기 * [NGINX 컨트롤러로 Minikube에서 인그레스 구성하기](/docs/tasks/access-application-cluster/ingress-minikube) - diff --git a/content/ko/docs/concepts/services-networking/service.md b/content/ko/docs/concepts/services-networking/service.md index 51845e9496524..4779d0504c2e2 100644 --- a/content/ko/docs/concepts/services-networking/service.md +++ b/content/ko/docs/concepts/services-networking/service.md @@ -72,7 +72,7 @@ _서비스_ 로 들어가보자. 마찬가지로, 서비스 정의를 API 서버에 `POST`하여 새 인스턴스를 생성할 수 있다. 서비스 오브젝트의 이름은 유효한 -[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름들)이어야 한다. +[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름)이어야 한다. 예를 들어, 각각 TCP 포트 9376에서 수신하고 `app=MyApp` 레이블을 가지고 있는 파드 세트가 있다고 가정해 보자. @@ -168,7 +168,7 @@ subsets: ``` 엔드포인트 오브젝트의 이름은 유효한 -[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름들)이어야 한다. +[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름)이어야 한다. {{< note >}} 엔드포인트 IP는 루프백(loopback) (IPv4의 경우 127.0.0.0/8, IPv6의 경우 ::1/128), 또는 @@ -272,7 +272,7 @@ kube-proxy가 iptables 모드에서 실행 중이고 선택된 첫 번째 파드 다르다. 해당 시나리오에서는, kube-proxy는 첫 번째 파드에 대한 연결이 실패했음을 감지하고 다른 백엔드 파드로 자동으로 재시도한다. -파드 [준비성 프로브(readiness probe)](/ko/docs/concepts/workloads/pods/pod-lifecycle/#container-probes)를 사용하여 +파드 [준비성 프로브(readiness probe)](/ko/docs/concepts/workloads/pods/pod-lifecycle/#컨테이너-프로브-probe)를 사용하여 백엔드 파드가 제대로 작동하는지 확인할 수 있으므로, iptables 모드의 kube-proxy는 정상으로 테스트된 백엔드만 볼 수 있다. 이렇게 하면 트래픽이 kube-proxy를 통해 실패한 것으로 알려진 파드로 전송되는 것을 막을 수 있다. @@ -418,7 +418,7 @@ DNS 만 사용하여 서비스의 클러스터 IP를 검색하는 경우, 이 ### DNS -[애드-온](/docs/concepts/cluster-administration/addons/)을 사용하여 쿠버네티스 +[애드-온](/ko/docs/concepts/cluster-administration/addons/)을 사용하여 쿠버네티스 클러스터의 DNS 서비스를 설정할 수(대개는 필수적임) 있다. CoreDNS와 같은, 클러스터-인식 DNS 서버는 새로운 서비스를 위해 쿠버네티스 API를 감시하고 @@ -1094,7 +1094,7 @@ IP 주소를 정리한다. 실제로 고정된 목적지로 라우팅되는 파드 IP 주소와 달리, 서비스 IP는 실제로 단일 호스트에서 응답하지 않는다. 대신에, kube-proxy는 -iptables (Linux의 패킷 처리 로직)를 필요에 따라 +iptables (리눅스의 패킷 처리 로직)를 필요에 따라 명백하게 리다이렉션되는 _가상_ IP 주소를 정의하기 위해 사용한다. 클라이언트가 VIP에 연결하면, 트래픽이 자동으로 적절한 엔드포인트로 전송된다. 환경 변수와 서비스 용 DNS는 실제로 서비스의 @@ -1176,7 +1176,7 @@ HTTP / HTTPS 서비스를 노출할 수도 있다. 
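The service described above — a set of pods labelled `app=MyApp` listening on TCP port 9376 — corresponds to a manifest along these lines; the service name is a placeholder.

```yaml
# Service sketch matching the example above: it selects pods labelled
# app=MyApp and forwards port 80 to the pods' port 9376.
apiVersion: v1
kind: Service
metadata:
  name: my-service       # hypothetical service name
spec:
  selector:
    app: MyApp
  ports:
    - protocol: TCP
      port: 80           # port exposed on the service's cluster IP
      targetPort: 9376   # port the selected pods listen on
```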
### PROXY 프로토콜 -클라우드 공급자가 지원하는 경우에 (예: [AWS](/docs/concepts/cluster-administration/cloud-providers/#aws)), +클라우드 공급자가 지원하는 경우에 (예: [AWS](/ko/docs/concepts/cluster-administration/cloud-providers/#aws)), LoadBalancer 모드의 서비스를 사용하여 쿠버네티스 자체 외부에 로드 밸런서를 구성할 수 있으며, 이때 접두사가 [PROXY 프로토콜](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) 인 연결을 전달하게 된다. @@ -1213,10 +1213,10 @@ PROXY TCP4 192.0.2.202 10.0.42.7 12345 7\r\n 클라우드 공급자의 로드 밸런서 구현이 프로토콜로서 SCTP를 지원하는 경우에만 LoadBalancer `유형`과 SCTP `프로토콜`을 사용하여 서비스를 생성할 수 있다. 그렇지 않으면, 서비스 생성 요청이 거부된다. 현재 클라우드 로드 밸런서 공급자 세트 (Azure, AWS, CloudStack, GCE, OpenStack)는 모두 SCTP에 대한 지원이 없다. {{< /warning >}} -##### Windows {#caveat-sctp-windows-os} +##### 윈도우 {#caveat-sctp-windows-os} {{< warning >}} -SCTP는 Windows 기반 노드를 지원하지 않는다. +SCTP는 윈도우 기반 노드를 지원하지 않는다. {{< /warning >}} ##### 유저스페이스 kube-proxy {#caveat-sctp-kube-proxy-userspace} diff --git a/content/ko/docs/concepts/storage/_index.md b/content/ko/docs/concepts/storage/_index.md index 1e0fb99a5ded2..dc9ae5cd82608 100644 --- a/content/ko/docs/concepts/storage/_index.md +++ b/content/ko/docs/concepts/storage/_index.md @@ -1,5 +1,6 @@ --- title: "스토리지" weight: 70 +description: > + 클러스터의 파드에 장기(long-term) 및 임시 스토리지를 모두 제공하는 방법 --- - diff --git a/content/ko/docs/concepts/storage/persistent-volumes.md b/content/ko/docs/concepts/storage/persistent-volumes.md index c345a472fe068..0e418f414bb33 100644 --- a/content/ko/docs/concepts/storage/persistent-volumes.md +++ b/content/ko/docs/concepts/storage/persistent-volumes.md @@ -24,7 +24,7 @@ weight: 20 _퍼시스턴트볼륨_ (PV)은 관리자가 프로비저닝하거나 [스토리지 클래스](/ko/docs/concepts/storage/storage-classes/)를 사용하여 동적으로 프로비저닝한 클러스터의 스토리지이다. 노드가 클러스터 리소스인 것처럼 PV는 클러스터 리소스이다. PV는 Volumes와 같은 볼륨 플러그인이지만, PV를 사용하는 개별 파드와는 별개의 라이프사이클을 가진다. 이 API 오브젝트는 NFS, iSCSI 또는 클라우드 공급자별 스토리지 시스템 등 스토리지 구현에 대한 세부 정보를 담아낸다. -_퍼시스턴트볼륨클레임_ (PVC)은 사용자의 스토리지에 대한 요청이다. 파드와 비슷하다. 파드는 노드 리소스를 사용하고 PVC는 PV 리소스를 사용한다. 파드는 특정 수준의 리소스(CPU 및 메모리)를 요청할 수 있다. 클레임은 특정 크기 및 접근 모드를 요청할 수 있다(예: 한 번 읽기/쓰기 또는 여러 번 읽기 전용으로 마운트 할 수 있음). +_퍼시스턴트볼륨클레임_ (PVC)은 사용자의 스토리지에 대한 요청이다. 파드와 비슷하다. 파드는 노드 리소스를 사용하고 PVC는 PV 리소스를 사용한다. 파드는 특정 수준의 리소스(CPU 및 메모리)를 요청할 수 있다. 클레임은 특정 크기 및 접근 모드를 요청할 수 있다(예: ReadWriteOnce, ReadOnlyMany 또는 ReadWriteMany로 마운트 할 수 있음. [AccessModes](#접근-모드) 참고). 퍼시스턴트볼륨클레임을 사용하면 사용자가 추상화된 스토리지 리소스를 사용할 수 있지만, 다른 문제들 때문에 성능과 같은 다양한 속성을 가진 퍼시스턴트볼륨이 필요한 경우가 일반적이다. 클러스터 관리자는 사용자에게 해당 볼륨의 구현 방법에 대한 세부 정보를 제공하지 않고 단순히 크기와 접근 모드와는 다른 방식으로 다양한 퍼시스턴트볼륨을 제공할 수 있어야 한다. 이러한 요구에는 _스토리지클래스_ 리소스가 있다. @@ -277,7 +277,7 @@ EBS 볼륨 확장은 시간이 많이 걸리는 작업이다. 또한 6시간마 각 PV에는 스펙과 상태(볼륨의 명세와 상태)가 포함된다. 퍼시스턴트볼륨 오브젝트의 이름은 유효한 -[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름들)이어야 한다. +[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름)이어야 한다. ```yaml apiVersion: v1 @@ -667,7 +667,7 @@ spec: {{< feature-state for_k8s_version="v1.17" state="beta" >}} -CSI 볼륨 플러그인만 지원하도록 볼륨 스냅샷 기능이 추가되었다. 자세한 내용은 [볼륨 스냅샷](/docs/concepts/storage/volume-snapshots/)을 참고한다. +CSI 볼륨 플러그인만 지원하도록 볼륨 스냅샷 기능이 추가되었다. 자세한 내용은 [볼륨 스냅샷](/ko/docs/concepts/storage/volume-snapshots/)을 참고한다. 볼륨 스냅샷 데이터 소스에서 볼륨 복원을 지원하려면 apiserver와 controller-manager에서 `VolumeSnapshotDataSource` 기능 게이트를 활성화한다. 
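A claim that requests a particular size and access mode, as described in the persistent volume discussion above, might look like the following sketch; the claim name, requested size, and StorageClass name are placeholders.

```yaml
# PersistentVolumeClaim sketch; the name, size and storageClassName are
# illustrative placeholders.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data-claim             # hypothetical claim name
spec:
  accessModes:
    - ReadWriteOnce            # one of the access modes discussed above
  resources:
    requests:
      storage: 8Gi             # requested capacity
  storageClassName: standard   # hypothetical StorageClass name
```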
diff --git a/content/ko/docs/concepts/storage/storage-classes.md b/content/ko/docs/concepts/storage/storage-classes.md index 13b4f4917517b..7df975db14d91 100644 --- a/content/ko/docs/concepts/storage/storage-classes.md +++ b/content/ko/docs/concepts/storage/storage-classes.md @@ -34,9 +34,9 @@ weight: 30 처음 생성할 때 클래스의 이름과 기타 파라미터를 설정하며, 일단 생성된 오브젝트는 업데이트할 수 없다. -관리자는 특정 클래스에 바인딩을 요청하지 않는 PVC에 대해서만 기본 +관리자는 특정 클래스에 바인딩을 요청하지 않는 PVC에 대해서만 기본 스토리지클래스를 지정할 수 있다. 자세한 내용은 -[퍼시스턴트볼륨클레임 섹션](/ko/docs/concepts/storage/persistent-volumes/#클래스-1)을 +[퍼시스턴트볼륨클레임 섹션](/ko/docs/concepts/storage/persistent-volumes/#퍼시스턴트볼륨클레임)을 본다. ```yaml @@ -167,7 +167,7 @@ CSI | 1.14 (alpha), 1.16 (beta) [노드 셀렉터](/ko/docs/concepts/scheduling-eviction/assign-pod-node/#노드-셀렉터-nodeselector), [파드 어피니티(affinity)와 안티-어피니티(anti-affinity)](/ko/docs/concepts/scheduling-eviction/assign-pod-node/#어피니티-affinity-와-안티-어피니티-anti-affinity) -그리고 [테인트(taint)와 톨러레이션(toleration)](/docs/concepts/configuration/taint-and-toleration/)이 포함된다. +그리고 [테인트(taint)와 톨러레이션(toleration)](/ko/docs/concepts/scheduling-eviction/taint-and-toleration/)이 포함된다. 다음 플러그인은 동적 프로비저닝과 `WaitForFirstConsumer` 를 지원한다. @@ -251,11 +251,11 @@ parameters: * `iopsPerGB`: `io1` 볼륨 전용이다. 1초당 GiB에 대한 I/O 작업 수이다. AWS 볼륨 플러그인은 요청된 볼륨 크기에 곱셈하여 볼륨의 IOPS를 계산하고 이를 20,000 IOPS로 제한한다(AWS에서 지원하는 최대값으로, - [AWS 문서](https://docs.aws.amazon.com/ko_kr/AWSEC2/latest/UserGuide/ebs-volume-types.html)를 본다). + [AWS 문서](https://docs.aws.amazon.com/ko_kr/AWSEC2/latest/UserGuide/ebs-volume-types.html)를 본다). 여기에는 문자열, 즉 `10` 이 아닌, `"10"` 이 필요하다. * `fsType`: fsType은 쿠버네티스에서 지원된다. 기본값: `"ext4"`. * `encrypted`: EBS 볼륨의 암호화 여부를 나타낸다. - 유효한 값은 `"ture"` 또는 `"false"` 이다. 여기에는 문자열, + 유효한 값은 `"ture"` 또는 `"false"` 이다. 여기에는 문자열, 즉 `true` 가 아닌, `"true"` 가 필요하다. * `kmsKeyId`: 선택 사항. 볼륨을 암호화할 때 사용할 키의 전체 Amazon 리소스 이름이다. 아무것도 제공되지 않지만, `encrypted` 가 true라면 @@ -348,7 +348,7 @@ parameters: * `secretNamespace`, `secretName` : Gluster REST 서비스와 통신할 때 사용할 사용자 암호가 포함된 시크릿 인스턴스를 식별한다. 이 파라미터는 선택 사항으로 `secretNamespace` 와 `secretName` 을 모두 생략하면 - 빈 암호가 사용된다. 제공된 시크릿은 `"kubernetes.io/glusterfs"` 유형이어야 + 빈 암호가 사용된다. 제공된 시크릿은 `"kubernetes.io/glusterfs"` 유형이어야 하며, 예를 들어 다음과 같이 생성한다. ``` @@ -664,7 +664,7 @@ parameters: [RBAC](/docs/reference/access-authn-authz/rbac/)과 [컨트롤러의 롤(role)들](/docs/reference/access-authn-authz/rbac/#controller-roles)을 모두 활성화한 경우, clusterrole `system:controller:persistent-volume-binder` -에 대한 `secret` 리소스에 `create` 권한을 추가한다. +에 대한 `secret` 리소스에 `create` 권한을 추가한다. 다중 테넌시 컨텍스트에서 `secretNamespace` 의 값을 명시적으로 설정하는 것을 권장하며, 그렇지 않으면 다른 사용자가 스토리지 계정 자격증명을 @@ -681,23 +681,23 @@ provisioner: kubernetes.io/portworx-volume parameters: repl: "1" snap_interval: "70" - io_priority: "high" + priority_io: "high" ``` * `fs`: 배치할 파일 시스템: `none/xfs/ext4` (기본값: `ext4`) * `block_size`: Kbytes 단위의 블록 크기(기본값: `32`). * `repl`: 레플리케이션 팩터 `1..3` (기본값: `1`)의 형태로 제공될 - 동기 레플리카의 수. 여기에는 문자열, + 동기 레플리카의 수. 여기에는 문자열, 즉 `0` 이 아닌, `"0"` 이 필요하다. -* `io_priority`: 볼륨이 고성능 또는 우선 순위가 낮은 스토리지에서 +* `priority_io`: 볼륨이 고성능 또는 우선 순위가 낮은 스토리지에서 생성될 것인지를 결정한다 `high/medium/low` (기본값: `low`). * `snap_interval`: 스냅샷을 트리거할 때의 시각/시간 간격(분). 스냅샷은 이전 스냅샷과의 차이에 따라 증분되며, 0은 스냅을 - 비활성화 한다(기본값: `0`). 여기에는 문자열, + 비활성화 한다(기본값: `0`). 여기에는 문자열, 즉 `70` 이 아닌, `"70"` 이 필요하다. * `aggregation_level`: 볼륨이 분배될 청크 수를 지정하며, 0은 집계되지 않은 - 볼륨을 나타낸다(기본값: `0`). 여기에는 문자열, + 볼륨을 나타낸다(기본값: `0`). 여기에는 문자열, 즉 `0` 이 아닌, `"0"` 이 필요하다. * `ephemeral`: 마운트 해제 후 볼륨을 정리해야 하는지 혹은 지속적이어야 하는지를 지정한다. 
`emptyDir` 에 대한 유스케이스는 이 값을 true로 @@ -815,4 +815,3 @@ volumeBindingMode: WaitForFirstConsumer 볼륨 바인딩을 지연시키면 스케줄러가 퍼시스턴트볼륨클레임에 적절한 퍼시스턴트볼륨을 선택할 때 파드의 모든 스케줄링 제약 조건을 고려할 수 있다. - diff --git a/content/ko/docs/concepts/storage/volume-snapshot-classes.md b/content/ko/docs/concepts/storage/volume-snapshot-classes.md index cc31fae666ba3..bdf1920567d8b 100644 --- a/content/ko/docs/concepts/storage/volume-snapshot-classes.md +++ b/content/ko/docs/concepts/storage/volume-snapshot-classes.md @@ -6,7 +6,7 @@ weight: 30 -이 문서는 쿠버네티스의 `VolumeSnapshotClass` 개요를 설명한다. +이 문서는 쿠버네티스의 볼륨스냅샷클래스(VolumeSnapshotClass) 개요를 설명한다. [볼륨 스냅샷](/ko/docs/concepts/storage/volume-snapshots/)과 [스토리지 클래스](/ko/docs/concepts/storage/storage-classes)의 숙지를 추천한다. @@ -17,29 +17,42 @@ weight: 30 ## 소개 -`StorageClass` 는 관리자가 볼륨을 프로비저닝할 때 제공하는 스토리지의 "클래스"를 -설명하는 방법을 제공하는 것처럼, `VolumeSnapshotClass` 는 볼륨 스냅샷을 +스토리지클래스(StorageClass)는 관리자가 볼륨을 프로비저닝할 때 제공하는 스토리지의 "클래스"를 +설명하는 방법을 제공하는 것처럼, 볼륨스냅샷클래스는 볼륨 스냅샷을 프로비저닝할 때 스토리지의 "클래스"를 설명하는 방법을 제공한다. ## VolumeSnapshotClass 리소스 -각 `VolumeSnapshotClass` 에는 클래스에 속하는 `VolumeSnapshot` 을 +각 볼륨스냅샷클래스에는 클래스에 속하는 볼륨스냅샷을 동적으로 프로비전 할 때 사용되는 `driver`, `deletionPolicy` 그리고 `parameters` 필드를 포함한다. -`VolumeSnapshotClass` 오브젝트의 이름은 중요하며, 사용자가 특정 -클래스를 요청할 수 있는 방법이다. 관리자는 `VolumeSnapshotClass` 오브젝트를 +볼륨스냅샷클래스 오브젝트의 이름은 중요하며, 사용자가 특정 +클래스를 요청할 수 있는 방법이다. 관리자는 볼륨스냅샷클래스 오브젝트를 처음 생성할 때 클래스의 이름과 기타 파라미터를 설정하고, 오브젝트가 생성된 이후에는 업데이트할 수 없다. -관리자는 특정 클래스의 바인딩을 요청하지 않는 볼륨스냅샷에만 -기본 `VolumeSnapshotClass` 를 지정할 수 있다. +```yaml +apiVersion: snapshot.storage.k8s.io/v1beta1 +kind: VolumeSnapshotClass +metadata: + name: csi-hostpath-snapclass +driver: hostpath.csi.k8s.io +deletionPolicy: Delete +parameters: +``` + +관리자는`snapshot.storage.kubernetes.io/is-default-class: "true"` 어노테이션을 추가하여 +바인딩할 특정 클래스를 요청하지 않는 볼륨스냅샷에 대한 +기본 볼륨스냅샷클래스를 지정할 수 있다. ```yaml apiVersion: snapshot.storage.k8s.io/v1beta1 kind: VolumeSnapshotClass metadata: name: csi-hostpath-snapclass + annotations: + snapshot.storage.kubernetes.io/is-default-class: "true" driver: hostpath.csi.k8s.io deletionPolicy: Delete parameters: @@ -52,9 +65,9 @@ parameters: ### 삭제정책(DeletionPolicy) -볼륨 스냅샷 클래스는 삭제정책을 가지고 있다. 바인딩 된 `VolumeSnapshot` 오브젝트를 삭제할 때 `VolumeSnapshotContent` 의 상황을 구성할 수 있다. 볼륨 스냅삿의 삭제정책은 `Retain` 또는 `Delete` 일 수 있다. 이 필드는 반드시 지정해야 한다. +볼륨 스냅샷 클래스는 삭제정책을 가지고 있다. 바인딩된 볼륨스냅샷 오브젝트를 삭제할 때 VolumeSnapshotContent의 상황을 구성할 수 있다. 볼륨 스냅삿의 삭제정책은 `Retain` 또는 `Delete` 일 수 있다. 이 필드는 반드시 지정해야 한다. -삭제정책이 `Delete` 인 경우 기본 스토리지 스냅샷이 `VolumeSnapshotContent` 오브젝트와 함께 삭제된다. 삭제정책이 `Retain` 인 경우 기본 스냅샷과 `VolumeSnapshotContent` 모두 유지된다. +삭제정책이 `Delete` 인 경우 기본 스토리지 스냅샷이 VolumeSnapshotContent 오브젝트와 함께 삭제된다. 삭제정책이 `Retain` 인 경우 기본 스냅샷과 VolumeSnapshotContent 모두 유지된다. ## 파라미터 diff --git a/content/ko/docs/concepts/storage/volume-snapshots.md b/content/ko/docs/concepts/storage/volume-snapshots.md index 997f3d6fee48c..9aadbe3726306 100644 --- a/content/ko/docs/concepts/storage/volume-snapshots.md +++ b/content/ko/docs/concepts/storage/volume-snapshots.md @@ -7,7 +7,7 @@ weight: 20 {{< feature-state for_k8s_version="v1.17" state="beta" >}} -쿠버네티스에서 스토리지 시스템 볼륨 스냅샷은 _VolumeSnapshot_ 을 나타낸다. 이 문서는 이미 쿠버네티스 [퍼시스턴트 볼륨](/docs/concepts/storage/persistent-volumes/)에 대해 잘 알고 있다고 가정한다. +쿠버네티스에서 스토리지 시스템 볼륨 스냅샷은 _VolumeSnapshot_ 을 나타낸다. 이 문서는 이미 쿠버네티스 [퍼시스턴트 볼륨](/ko/docs/concepts/storage/persistent-volumes/)에 대해 잘 알고 있다고 가정한다. @@ -44,7 +44,7 @@ API 리소스 `PersistentVolume` 및 `PersistentVolumeClaim` 가 사용자 및 클러스터 관리자는 많은 `VolumeSnapshotContents` 을 생성한다. 그들은 클러스터 사용자들이 사용 가능한 스토리지 시스템의 실제 볼륨 스냅샷 세부 정보를 제공한다. 
이것은 쿠버네티스 API에 있고 사용 가능하다. #### 동적 -사전 프로비저닝을 사용하는 대신 퍼시스턴트볼륨클레임에서 스냅샷을 동적으로 가져오도록 요청할 수 있다. [볼륨스냅샷클래스](/docs/concepts/storage/volume-snapshot-classes/)는 스냅샷 사용 시 스토리지 제공자의 특정 파라미터를 명세한다. +사전 프로비저닝을 사용하는 대신 퍼시스턴트볼륨클레임에서 스냅샷을 동적으로 가져오도록 요청할 수 있다. [볼륨스냅샷클래스](/ko/docs/concepts/storage/volume-snapshot-classes/)는 스냅샷 사용 시 스토리지 제공자의 특정 파라미터를 명세한다. ### 바인딩 @@ -82,7 +82,7 @@ spec: `persistentVolumeClaimName` 은 스냅샷을 위한 퍼시스턴트볼륨클레임 데이터 소스의 이름이다. 이 필드는 동적 프로비저닝 스냅샷이 필요하다. 볼륨 스냅샷은 `volumeSnapshotClassName` 속성을 사용하여 -[볼륨스냅샷클래스](/docs/concepts/storage/volume-snapshot-classes/)의 이름을 지정하여 +[볼륨스냅샷클래스](/ko/docs/concepts/storage/volume-snapshot-classes/)의 이름을 지정하여 특정 클래스를 요청할 수 있다. 아무것도 설정하지 않으면, 사용 가능한 경우 기본 클래스가 사용될 것이다. 사전 프로비저닝된 스냅샷의 경우, 다음 예와 같이 `volumeSnapshotContentName`을 스냅샷 소스로 지정해야 한다. 사전 프로비저닝된 스냅샷에는 `volumeSnapshotContentName` 소스 필드가 필요하다. @@ -145,6 +145,4 @@ spec: 스냅샷 데이터로 미리 채워진 새 볼륨을 프로비저닝할 수 있다. 보다 자세한 사항은 -[볼륨 스냅샷 및 스냅샷에서 볼륨 복원](/docs/concepts/storage/persistent-volumes/#volume-snapshot-and-restore-volume-from-snapshot-support)에서 확인할 수 있다. - - +[볼륨 스냅샷 및 스냅샷에서 볼륨 복원](/ko/docs/concepts/storage/persistent-volumes/#볼륨-스냅샷-및-스냅샷-지원에서-볼륨-복원)에서 확인할 수 있다. diff --git a/content/ko/docs/concepts/storage/volumes.md b/content/ko/docs/concepts/storage/volumes.md index b8ff826d673b3..ce31b183a41c1 100644 --- a/content/ko/docs/concepts/storage/volumes.md +++ b/content/ko/docs/concepts/storage/volumes.md @@ -23,7 +23,7 @@ kubelet은 컨테이너를 재시작시키지만, 컨테이너는 깨끗한 상 ## 배경 도커는 다소 느슨하고, 덜 관리되지만 -[볼륨](https://docs.docker.com/engine/admin/volumes/)이라는 +[볼륨](https://docs.docker.com/storage/)이라는 개념을 가지고 있다. 도커에서 볼륨은 단순한 디스크 내 디렉터리 또는 다른 컨테이너에 있는 디렉터리다. 수명은 관리되지 않으며 최근까지는 로컬 디스크 백업 볼륨만 있었다. 도커는 이제 볼륨 드라이버를 @@ -214,7 +214,7 @@ CephFS를 사용하기 위해선 먼저 Ceph 서버를 실행하고 공유를 {{< note >}} 전제 조건: 오픈스택 클라우드 공급자로 구성된 쿠버네티스. 클라우드 공급자 -구성에 대해서는 [오픈스택 클라우드 공급자](/docs/concepts/cluster-administration/cloud-providers/#openstack)를 참조한다. +구성에 대해서는 [오픈스택 클라우드 공급자](/ko/docs/concepts/cluster-administration/cloud-providers/#openstack)를 참조한다. {{< /note >}} `cinder` 는 오픈스택 Cinder 볼륨을 파드에 마운트하는 데 사용한다. diff --git a/content/ko/docs/concepts/workloads/_index.md b/content/ko/docs/concepts/workloads/_index.md index c704540b85ed6..c898502b39503 100644 --- a/content/ko/docs/concepts/workloads/_index.md +++ b/content/ko/docs/concepts/workloads/_index.md @@ -1,4 +1,6 @@ --- title: "워크로드" weight: 50 +description: > + 쿠버네티스에서 배포할 수 있는 가장 작은 컴퓨트 오브젝트인 파드와, 이를 실행하는 데 도움이 되는 하이-레벨(higher-level) 추상화 --- diff --git a/content/ko/docs/concepts/workloads/controllers/daemonset.md b/content/ko/docs/concepts/workloads/controllers/daemonset.md index 5917eb288d681..c06a43edf6783 100644 --- a/content/ko/docs/concepts/workloads/controllers/daemonset.md +++ b/content/ko/docs/concepts/workloads/controllers/daemonset.md @@ -54,7 +54,7 @@ kubectl apply -f https://k8s.io/examples/controllers/daemonset.yaml `.spec.template` 는 `.spec` 의 필수 필드 중 하나이다. -`.spec.template` 는 [파드 템플릿](/ko/docs/concepts/workloads/pods/pod-overview/#pod-templates)이다. 이것은 중첩되어 있다는 점과 `apiVersion` 또는 `kind` 를 가지지 않는 것을 제외하면 [파드](/ko/docs/concepts/workloads/pods/pod/)와 정확히 같은 스키마를 가진다. +`.spec.template` 는 [파드 템플릿](/ko/docs/concepts/workloads/pods/pod-overview/#파드-템플릿)이다. 이것은 중첩되어 있다는 점과 `apiVersion` 또는 `kind` 를 가지지 않는 것을 제외하면 [파드](/ko/docs/concepts/workloads/pods/pod/)와 정확히 같은 스키마를 가진다. 데몬셋의 파드 템플릿에는 파드의 필수 필드 외에도 적절한 레이블이 명시되어야 한다([파드 셀렉터](#파드-셀렉터)를 본다). @@ -206,7 +206,7 @@ nodeAffinity: ### 스태틱(static) 파드 -Kubelet이 감시하는 특정 디렉토리에 파일을 작성하는 파드를 생성할 수 있다. 이것을 +Kubelet이 감시하는 특정 디렉터리에 파일을 작성하는 파드를 생성할 수 있다. 
이것을 [스태틱 파드](/ko/docs/tasks/configure-pod-container/static-pod/)라고 부른다. 데몬셋과는 다르게 스태틱 파드는 kubectl 또는 다른 쿠버네티스 API 클라이언트로 관리할 수 없다. 스태틱 파드는 API 서버에 의존하지 diff --git a/content/ko/docs/concepts/workloads/controllers/deployment.md b/content/ko/docs/concepts/workloads/controllers/deployment.md index 269d72f51061e..91b20304c738c 100644 --- a/content/ko/docs/concepts/workloads/controllers/deployment.md +++ b/content/ko/docs/concepts/workloads/controllers/deployment.md @@ -316,7 +316,7 @@ kubectl apply -f https://k8s.io/examples/controllers/nginx-deployment.yaml 디플로이먼트 컨트롤러는 각 시간마다 새로운 디플로이먼트에서 레플리카셋이 의도한 파드를 생성하고 띄우는 것을 주시한다. 만약 디플로이먼트가 업데이트되면, 기존 레플리카셋에서 `.spec.selector` 레이블과 일치하는 파드를 컨트롤 하지만, 템플릿과 `.spec.template` 이 불일치하면 스케일 다운이 된다. -결국 새로운 레플리카셋은 `.spec.replicas` 로 스케일되고, 모든 기존 레플리카 셋은 0개로 스케일된다. +결국 새로운 레플리카셋은 `.spec.replicas` 로 스케일되고, 모든 기존 레플리카셋은 0개로 스케일된다. 만약 기존 롤아웃이 진행되는 중에 디플로이먼트를 업데이트하는 경우 디플로이먼트가 업데이트에 따라 새 레플리카셋을 생성하고, 스케일 업하기 시작한다. 그리고 이전에 스케일 업 하던 레플리카셋에 롤오버 한다. @@ -671,7 +671,7 @@ deployment.apps/nginx-deployment scaled 디플로이먼트 컨트롤러는 새로운 5개의 레플리카의 추가를 위한 위치를 결정해야 한다. 만약 비례적 스케일링을 사용하지 않으면 5개 모두 새 레플리카셋에 추가된다. 비례적 스케일링으로 추가 레플리카를 모든 레플리카셋에 걸쳐 분산할 수 있다. -비율이 높을수록 가장 많은 레플리카가 있는 레플리카셋으로 이동하고, 비율이 낮을 수록 적은 레플리카가 있는 레플리카 셋으로 이동한다. +비율이 높을수록 가장 많은 레플리카가 있는 레플리카셋으로 이동하고, 비율이 낮을 수록 적은 레플리카가 있는 레플리카셋으로 이동한다. 남은 것들은 대부분의 레플리카가 있는 레플리카셋에 추가된다. 0개의 레플리카가 있는 레플리카셋은 스케일 업 되지 않는다. 위의 예시에서 기존 레플리카셋에 3개의 레플리카가 추가되고, 2개의 레플리카는 새 레플리카에 추가된다. @@ -861,7 +861,12 @@ kubectl rollout status deployment.v1.apps/nginx-deployment ``` Waiting for rollout to finish: 2 of 3 updated replicas are available... deployment.apps/nginx-deployment successfully rolled out -$ echo $? +``` +그리고 `kubectl rollout` 의 종료 상태는 0(success)이다. +```shell +echo $? +``` +``` 0 ``` @@ -1003,7 +1008,12 @@ kubectl rollout status deployment.v1.apps/nginx-deployment ``` Waiting for rollout to finish: 2 out of 3 new replicas have been updated... error: deployment "nginx" exceeded its progress deadline -$ echo $? +``` +그리고 `kubectl rollout` 의 종료 상태는 1(error를 의미함)이다. +```shell +echo $? +``` +``` 1 ``` @@ -1026,7 +1036,7 @@ $ echo $? ## 카나리 디플로이먼트 만약 디플로이먼트를 이용해서 일부 사용자 또는 서버에 릴리즈를 롤아웃 하기 위해서는 -[리소스 관리](/docs/concepts/cluster-administration/manage-deployment/#canary-deployments)에 +[리소스 관리](/ko/docs/concepts/cluster-administration/manage-deployment/#카나리-canary-디플로이먼트)에 설명된 카나리 패던에 따라 각 릴리스 마다 하나씩 여러 디플로이먼트를 생성할 수 있다. ## 디플로이먼트 사양 작성 @@ -1035,7 +1045,7 @@ $ echo $? 설정 파일 작업에 대한 일반적인 내용은 [애플리케이션 배포하기](/docs/tutorials/stateless-application/run-stateless-application-deployment/), 컨테이너 구성하기 그리고 [kubectl을 사용해서 리소스 관리하기](/ko/docs/concepts/overview/working-with-objects/object-management/) 문서를 참조한다. 디플로이먼트 오브젝트의 이름은 유효한 -[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름들)이어야 한다. +[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름)이어야 한다. 디플로이먼트에는 [`.spec` 섹션](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status)도 필요하다. diff --git a/content/ko/docs/concepts/workloads/controllers/garbage-collection.md b/content/ko/docs/concepts/workloads/controllers/garbage-collection.md index 4f4762fe9d0ed..03083b86edc96 100644 --- a/content/ko/docs/concepts/workloads/controllers/garbage-collection.md +++ b/content/ko/docs/concepts/workloads/controllers/garbage-collection.md @@ -1,7 +1,7 @@ --- title: 가비지(Garbage) 수집 content_type: concept -weight: 60 +weight: 70 --- @@ -10,8 +10,6 @@ weight: 60 소유자가 없는 오브젝트들을 삭제하는 역할을 한다. 
- - ## 소유자(owner)와 종속(dependent) @@ -170,15 +168,9 @@ kubectl delete replicaset my-repset --cascade=false - ## {{% heading "whatsnext" %}} [디자인 문서 1](https://git.k8s.io/community/contributors/design-proposals/api-machinery/garbage-collection.md) [디자인 문서 2](https://git.k8s.io/community/contributors/design-proposals/api-machinery/synchronous-garbage-collection.md) - - - - - diff --git a/content/ko/docs/concepts/workloads/controllers/jobs-run-to-completion.md b/content/ko/docs/concepts/workloads/controllers/job.md similarity index 100% rename from content/ko/docs/concepts/workloads/controllers/jobs-run-to-completion.md rename to content/ko/docs/concepts/workloads/controllers/job.md diff --git a/content/ko/docs/concepts/workloads/controllers/replicaset.md b/content/ko/docs/concepts/workloads/controllers/replicaset.md index e99bb4f7c584a..18c618a6b17e3 100644 --- a/content/ko/docs/concepts/workloads/controllers/replicaset.md +++ b/content/ko/docs/concepts/workloads/controllers/replicaset.md @@ -223,7 +223,7 @@ pod2 1/1 Running 0 36s API 버전에 대해서는 `frontend.yaml` 예제의 첫 번째 줄을 참고한다. 레플리카셋 오브젝트의 이름은 유효한 -[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름들)이어야 한다. +[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름)이어야 한다. 레플리카셋도 [`.spec` 섹션](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status)이 필요하다. @@ -233,7 +233,7 @@ API 버전에 대해서는 `frontend.yaml` 예제의 첫 번째 줄을 참고한 우리는 `frontend.yaml` 예제에서 `tier: frontend`이라는 레이블을 하나 가지고 있다. 이 파드를 다른 컨트롤러가 취하지 않도록 다른 컨트롤러의 셀렉터와 겹치지 않도록 주의해야 한다. -템플릿의 [재시작 정책](/ko/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy) 필드인 +템플릿의 [재시작 정책](/ko/docs/concepts/workloads/pods/pod-lifecycle/#재시작-정책) 필드인 `.spec.template.spec.restartPolicy`는 기본값인 `Always`만 허용된다. ### 파드 셀렉터 @@ -250,7 +250,7 @@ matchLabels: 그렇지 않으면 API에 의해 거부된다. {{< note >}} -2개의 레플리카셋이 동일한 `.spec.selector`필드를 지정한 반면, 다른 `.spec.template.metadata.labels`와 `.spec.template.spec` 필드를 명시한 경우, 각 레플리카 셋은 다른 레플리카 셋이 생성한 파드를 무시한다. +2개의 레플리카셋이 동일한 `.spec.selector`필드를 지정한 반면, 다른 `.spec.template.metadata.labels`와 `.spec.template.spec` 필드를 명시한 경우, 각 레플리카셋은 다른 레플리카셋이 생성한 파드를 무시한다. {{< /note >}} ### 레플리카 @@ -307,7 +307,7 @@ curl -X DELETE 'localhost:8080/apis/apps/v1/namespaces/default/replicasets/fron ### 레플리카셋을 Horizontal Pod Autoscaler 대상으로 설정 -레플리카 셋은 +레플리카셋은 [Horizontal Pod Autoscalers (HPA)](/ko/docs/tasks/run-application/horizontal-pod-autoscale/)의 대상이 될 수 있다. 즉, 레플리카셋은 HPA에 의해 오토스케일될 수 있다. 다음은 이전에 만든 예시에서 만든 레플리카셋을 대상으로 하는 HPA 예시이다. @@ -316,7 +316,7 @@ curl -X DELETE 'localhost:8080/apis/apps/v1/namespaces/default/replicasets/fron 이 매니페스트를 `hpa-rs.yaml`로 저장한 다음 쿠버네티스 클러스터에 적용하면 CPU 사용량에 따라 파드가 복제되는 -오토스케일 레플리카 셋 HPA가 생성된다. +오토스케일 레플리카셋 HPA가 생성된다. ```shell kubectl apply -f https://k8s.io/examples/controllers/hpa-rs.yaml @@ -361,5 +361,3 @@ kubectl autoscale rs frontend --max=10 --min=3 --cpu-percent=50 이 두 개의 용도는 동일하고, 유사하게 동작하며, 레플리케이션 컨트롤러가 [레이블 사용자 가이드](/ko/docs/concepts/overview/working-with-objects/labels/#레이블-셀렉터)에 설명된 설정-기반의 셀렉터의 요건을 지원하지 않는다는 점을 제외하면 유사하다. 따라서 레플리카셋이 레플리케이션 컨트롤러보다 선호된다. 
- - diff --git a/content/ko/docs/concepts/workloads/controllers/replicationcontroller.md b/content/ko/docs/concepts/workloads/controllers/replicationcontroller.md index 7f78c8d6f0aec..53ff69b288365 100644 --- a/content/ko/docs/concepts/workloads/controllers/replicationcontroller.md +++ b/content/ko/docs/concepts/workloads/controllers/replicationcontroller.md @@ -114,7 +114,7 @@ nginx-3ntk0 nginx-4ok8v nginx-qrm3m 다른 모든 쿠버네티스 컨피그와 마찬가지로 레플리케이션 컨트롤러는 `apiVersion`, `kind`, `metadata` 와 같은 필드가 필요하다. 레플리케이션 컨트롤러 오브젝트의 이름은 유효한 -[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름들)이어야 한다. +[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름)이어야 한다. 컨피그 파일의 동작에 관련된 일반적인 정보는 다음을 참조하라 [쿠버네티스 오브젝트 관리 ](/ko/docs/concepts/overview/working-with-objects/object-management/). 레플리케이션 컨트롤러는 또한 [`.spec` section](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status) 도 필요하다. @@ -123,7 +123,7 @@ nginx-3ntk0 nginx-4ok8v nginx-qrm3m `.spec.template` 는 오직 `.spec` 필드에서 요구되는 것이다. -`.spec.template` 는 [파드 개요](/ko/docs/concepts/workloads/pods/pod-overview/#pod-templates) 이다. 정확하게 [파드](/ko/docs/concepts/workloads/pods/pod/) 스키마와 동일하나, 중첩되어 있고 `apiVersion` 혹은 `kind`를 갖지 않는다. +`.spec.template` 는 [파드 개요](/ko/docs/concepts/workloads/pods/pod-overview/#파드-템플릿) 이다. 정확하게 [파드](/ko/docs/concepts/workloads/pods/pod/) 스키마와 동일하나, 중첩되어 있고 `apiVersion` 혹은 `kind`를 갖지 않는다. 파드에 필요한 필드 외에도 레플리케이션 컨트롤러의 파드 템플릿은 적절한 레이블과 적절한 재시작 정책을 지정해야 한다. 레이블의 경우 다른 컨트롤러와 중첩되지 않도록 하라. [파드 셀렉터](#파드-셀렉터)를 참조하라. @@ -255,7 +255,7 @@ API 오브젝트에 대한 더 자세한 것은 [`레플리카셋`](/ko/docs/concepts/workloads/controllers/replicaset/)은 새로운 [집합성 기준 레이블 셀렉터](/ko/docs/concepts/overview/working-with-objects/labels/#집합성-기준-요건) 이다. 이것은 주로 [`디플로이먼트`](/ko/docs/concepts/workloads/controllers/deployment/) 에 의해 파드의 생성, 삭제 및 업데이트를 오케스트레이션 하는 메커니즘으로 사용된다. -사용자 지정 업데이트 조정이 필요하거나 업데이트가 필요하지 않은 경우가 아니면 레플리카 셋을 직접 사용하는 대신 디플로이먼트를 사용하는 것이 좋다. +사용자 지정 업데이트 조정이 필요하거나 업데이트가 필요하지 않은 경우가 아니면 레플리카셋을 직접 사용하는 대신 디플로이먼트를 사용하는 것이 좋다. ### 디플로이먼트 (권장되는) diff --git a/content/ko/docs/concepts/workloads/controllers/statefulset.md b/content/ko/docs/concepts/workloads/controllers/statefulset.md index 39836fd7a152b..06cdbdc5e3c0b 100644 --- a/content/ko/docs/concepts/workloads/controllers/statefulset.md +++ b/content/ko/docs/concepts/workloads/controllers/statefulset.md @@ -25,7 +25,7 @@ weight: 40 위의 안정은 파드의 (재)스케줄링 전반에 걸친 지속성과 같은 의미이다. 만약 애플리케이션이 안정적인 식별자 또는 순차적인 배포, -삭제 또는 스케일링이 필요하지 않으면, 스테이트리스 레플리카 셋을 +삭제 또는 스케일링이 필요하지 않으면, 스테이트리스 레플리카셋(ReplicaSet)을 제공하는 워크로드 오브젝트를 사용해서 애플리케이션을 배포해야 한다. [디플로이먼트](/ko/docs/concepts/workloads/controllers/deployment/) 또는 [레플리카셋](/ko/docs/concepts/workloads/controllers/replicaset/)과 같은 컨트롤러가 스테이트리스 요구에 더 적합할 수 있다. diff --git a/content/ko/docs/concepts/workloads/controllers/ttlafterfinished.md b/content/ko/docs/concepts/workloads/controllers/ttlafterfinished.md index de92b4f3ea26b..99d274a00d935 100644 --- a/content/ko/docs/concepts/workloads/controllers/ttlafterfinished.md +++ b/content/ko/docs/concepts/workloads/controllers/ttlafterfinished.md @@ -1,7 +1,7 @@ --- title: 완료된 리소스를 위한 TTL 컨트롤러 content_type: concept -weight: 65 +weight: 70 --- @@ -10,7 +10,7 @@ weight: 65 TTL 컨트롤러는 실행이 완료된 리소스 오브젝트의 수명을 제한하는 TTL (time to live) 메커니즘을 제공한다. TTL 컨트롤러는 현재 -[잡(Job)](/ko/docs/concepts/workloads/controllers/jobs-run-to-completion/)만 +{{< glossary_tooltip text="잡(Job)" term_id="job" >}}만 처리하며, 파드와 커스텀 리소스와 같이 실행을 완료할 다른 리소스를 처리하도록 확장될 수 있다. 
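예를 들어, 완료 후 일정 시간이 지나면 자동으로 정리되는 잡은 다음과 유사하게 작성할 수 있다. 아래 매니페스트는 `TTLAfterFinished` 기능 게이트가 활성화되어 있다고 가정한 참고용 예시이며, 잡 이름과 컨테이너 이미지는 설명을 위해 가정한 값이다.

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: pi-with-ttl                  # 설명을 위해 가정한 잡 이름
spec:
  ttlSecondsAfterFinished: 100       # 잡이 완료(성공 또는 실패)되고 100초가 지나면 정리 대상이 된다
  template:
    spec:
      containers:
      - name: pi
        image: perl
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: Never
```

TTL이 만료되면 잡은 해당 잡이 생성한 파드와 같은 종속 오브젝트와 함께 연쇄적으로 삭제된다.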
@@ -29,7 +29,7 @@ kube-apiserver와 kube-controller-manager와 함께 ## TTL 컨트롤러 현재의 TTL 컨트롤러는 잡만 지원한다. 클러스터 운영자는 -[예시](/ko/docs/concepts/workloads/controllers/jobs-run-to-completion/#완료된-잡을-자동으로-정리) +[예시](/ko/docs/concepts/workloads/controllers/job/#완료된-잡을-자동으로-정리) 와 같이 `.spec.ttlSecondsAfterFinished` 필드를 명시하여 완료된 잡(`완료` 또는 `실패`)을 자동으로 정리하기 위해 이 기능을 사용할 수 있다. 리소스의 작업이 완료된 TTL 초(sec) 후 (다른 말로는, TTL이 만료되었을 때), diff --git a/content/ko/docs/concepts/workloads/pods/init-containers.md b/content/ko/docs/concepts/workloads/pods/init-containers.md index d29cc514e35c9..10baf43a7c03c 100644 --- a/content/ko/docs/concepts/workloads/pods/init-containers.md +++ b/content/ko/docs/concepts/workloads/pods/init-containers.md @@ -62,7 +62,7 @@ weight: 40 다른 이미지로부터(`FROM`) 새로운 이미지를 만들 필요가 없다. * 애플리케이션 이미지 빌더와 디플로이어 역할은 독립적으로 동작될 수 있어서 공동의 단일 앱 이미지 형태로 빌드될 필요가 없다. -* 초기화 컨테이너는 앱 컨테이너와 다른 파일 시스템 뷰를 가지도록 Linux 네임스페이스를 사용한다. +* 초기화 컨테이너는 앱 컨테이너와 다른 파일 시스템 뷰를 가지도록 리눅스 네임스페이스를 사용한다. 결과적으로, 초기화 컨테이너에는 앱 컨테이너가 가질 수 없는 {{< glossary_tooltip text="시크릿" term_id="secret" >}}에 접근 권한이 주어질 수 있다. * 앱 컨테이너들은 병렬로 실행되는 반면, 초기화 컨테이너들은 어떠한 앱 diff --git a/content/ko/docs/concepts/workloads/pods/pod-lifecycle.md b/content/ko/docs/concepts/workloads/pods/pod-lifecycle.md index 18f4b3e813fab..470970f284e2b 100644 --- a/content/ko/docs/concepts/workloads/pods/pod-lifecycle.md +++ b/content/ko/docs/concepts/workloads/pods/pod-lifecycle.md @@ -77,7 +77,7 @@ PodCondition 배열의 각 요소는 다음 여섯 가지 필드를 가질 수 컨테이너에서 [kubelet](/docs/admin/kubelet/)에 의해 주기적으로 수행되는 진단(diagnostic)이다. 진단을 수행하기 위해서, kubelet은 컨테이너에 의해서 구현된 -[핸들러](https://godoc.org/k8s.io/kubernetes/pkg/api/v1#Handler)를 호출한다. +[핸들러](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#handler-v1-core)를 호출한다. 핸들러에는 다음과 같이 세 가지 타입이 있다. * [ExecAction](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#execaction-v1-core) @@ -406,4 +406,3 @@ spec: - diff --git a/content/ko/docs/concepts/workloads/pods/pod-topology-spread-constraints.md b/content/ko/docs/concepts/workloads/pods/pod-topology-spread-constraints.md index f1f586f5d8405..8a81e708dd1f1 100644 --- a/content/ko/docs/concepts/workloads/pods/pod-topology-spread-constraints.md +++ b/content/ko/docs/concepts/workloads/pods/pod-topology-spread-constraints.md @@ -198,7 +198,7 @@ spec: `PodTopologySpread` 플러그인의 일부로 설정할 수 있다. 제약 조건은 `labelSelector` 가 비어 있어야 한다는 점을 제외하고, [위와 동일한 API](#api)로 제약 조건을 지정한다. 셀렉터는 파드가 속한 서비스, 레플리케이션 컨트롤러, -레플리카 셋 또는 스테이트풀셋에서 계산한다. +레플리카셋 또는 스테이트풀셋에서 계산한다. 예시 구성은 다음과 같다. diff --git a/content/ko/docs/concepts/workloads/pods/pod.md b/content/ko/docs/concepts/workloads/pods/pod.md index 05de1ccb61c34..e3464ff5cf9ce 100644 --- a/content/ko/docs/concepts/workloads/pods/pod.md +++ b/content/ko/docs/concepts/workloads/pods/pod.md @@ -29,7 +29,7 @@ _파드_ 는 (고래 떼(pod of whales)나 콩꼬투리(pea pod)와 마찬가지 쿠버네티스는 도커 이외에도 많은 컨테이너 런타임을 지원하지만, 도커는 가장 일반적으로 알려진 런타임이므로 도커 용어로 파드를 설명하는 것이 도움이 된다. -파드의 공유 컨텍스트는 Linux 네임 스페이스, 컨트롤 그룹(cgroup) 및 +파드의 공유 컨텍스트는 리눅스 네임스페이스, 컨트롤 그룹(cgroup) 및 도커 컨테이너를 격리하는 것과 같이 잠재적으로 다른 격리 요소들이다. 파드의 컨텍스트 내에서 개별 응용 프로그램은 추가적으로 하위 격리가 적용된다. @@ -180,7 +180,7 @@ _컨테이너의 어피니티(affinity) 기반 공동 스케줄링을 지원하 1. 유예 기간이 만료되면, 파드에서 실행중이던 모든 프로세스가 SIGKILL로 종료된다. 1. Kubelet은 유예기간 0(즉시 삭제)을 세팅하여 API 서버에서 파드 삭제를 끝낼 것이다. API 서버에서 사라진 파드는 클라이언트에게서 더 이상 보이지 않는다. -기본적으로 모든 삭제는 30초 이내에 끝이 난다. `kubectl delete` 명령은 사용자가 기본 설정을 오버라이드하고 자신이 원하는 값을 설정할 수 있게 해주는 `--grace-period=` 옵션을 지원한다. `0` 값은 파드를 [강제로 삭제한다](/ko/docs/concepts/workloads/pods/pod/#파드-강제-삭제). +기본적으로 모든 삭제는 30초 이내에 끝이 난다. 
`kubectl delete` 명령은 사용자가 기본 설정을 오버라이드하고 자신이 원하는 값을 설정할 수 있게 해주는 `--grace-period=` 옵션을 지원한다. `0` 값은 파드를 [강제로 삭제한다](/ko/docs/concepts/workloads/pods/pod/#파드-강제-삭제). kubectl 1.5 버전 이상에서는, 강제 삭제 수행을 위해서 반드시 `--grace-period=0` 와 함께 추가 플래그인 `--force` 를 지정해야 한다. ### 파드 강제 삭제 diff --git a/content/ko/docs/contribute/_index.md b/content/ko/docs/contribute/_index.md index 8f9ef72ba6c89..9034f3d98b9aa 100644 --- a/content/ko/docs/contribute/_index.md +++ b/content/ko/docs/contribute/_index.md @@ -3,6 +3,7 @@ content_type: concept title: 쿠버네티스 문서에 기여하기 linktitle: 기여 main_menu: true +no_list: true weight: 80 card: name: contribute @@ -12,7 +13,7 @@ card: -이 웹사이트는 [쿠버네티스 SIG Docs](/docs/contribute/#get-involved-with-sig-docs)에 의해서 관리됩니다. +이 웹사이트는 [쿠버네티스 SIG Docs](/ko/docs/contribute/#sig-docs에-참여)에 의해서 관리됩니다. 쿠버네티스 문서 기여자들은 @@ -23,8 +24,6 @@ card: 쿠버네티스 문서는 새롭고 경험이 풍부한 모든 기여자의 개선을 환영합니다! - - ## 시작하기 @@ -35,7 +34,7 @@ card: 1. CNCF [Contributor License Agreement](https://github.com/kubernetes/community/blob/master/CLA.md)에 서명합니다. 2. [문서 리포지터리](https://github.com/kubernetes/website) 와 웹사이트의 [정적 사이트 생성기](https://gohugo.io)를 숙지합니다. -3. [풀 리퀘스트 열기](/docs/contribute/new-content/new-content/)와 [변경 검토](/docs/contribute/review/reviewing-prs/)의 기본 프로세스를 이해하도록 합니다. +3. [풀 리퀘스트 열기](/ko/docs/contribute/new-content/new-content/)와 [변경 검토](/ko/docs/contribute/review/reviewing-prs/)의 기본 프로세스를 이해하도록 합니다. 일부 작업에는 쿠버네티스 조직에서 더 많은 신뢰와 더 많은 접근이 필요할 수 있습니다. 역할과 권한에 대한 자세한 내용은 @@ -43,16 +42,16 @@ card: ## 첫 번째 기여 -- [기여 개요](/docs/contribute/new-content/overview/)를 읽고 기여할 수 있는 다양한 방법에 대해 알아봅니다. +- [기여 개요](/ko/docs/contribute/new-content/overview/)를 읽고 기여할 수 있는 다양한 방법에 대해 알아봅니다. - [kubernetes/website에 기여하기](https://github.com/kubernetes/website/contribute)를 참조하여 좋은 진입점이 되는 이슈를 찾을 수 있습니다. -- 기존 문서에 대해 [GitHub을 사용해서 풀 리퀘스트 열거나](/docs/contribute/new-content/new-content/#changes-using-github) GitHub에서의 이슈 제기에 대해 자세히 알아봅니다. -- 정확성과 언어에 대해 다른 쿠버네티스 커뮤니티 맴버의 [풀 리퀘스트 검토](/docs/contribute/review/reviewing-prs/)를 합니다. +- 기존 문서에 대해 [GitHub을 사용해서 풀 리퀘스트 열거나](/ko/docs/contribute/new-content/new-content/#github을-사용하여-변경하기) GitHub에서의 이슈 제기에 대해 자세히 알아봅니다. +- 정확성과 언어에 대해 다른 쿠버네티스 커뮤니티 맴버의 [풀 리퀘스트 검토](/ko/docs/contribute/review/reviewing-prs/)를 합니다. - 쿠버네티스 [콘텐츠](/docs/contribute/style/content-guide/)와 [스타일 가이드](/docs/contribute/style/style-guide/)를 읽고 정보에 대한 코멘트를 남길 수 있습니다. - [페이지 템플릿 사용](/docs/contribute/style/page-content-types/)과 [휴고(Hugo) 단축코드(shortcodes)](/docs/contribute/style/hugo-shortcodes/)를 사용해서 큰 변경을 하는 방법에 대해 배워봅니다. ## 다음 단계 -- 리포지터리의 [로컬 복제본에서 작업](/docs/contribute/new-content/new-content/#fork-the-repo)하는 방법을 배워봅니다. +- 리포지터리의 [로컬 복제본에서 작업](/ko/docs/contribute/new-content/new-content/#fork-the-repo)하는 방법을 배워봅니다. - [릴리스된 기능](/docs/contribute/new-content/new-features/)을 문서화 합니다. - [SIG Docs](/ko/docs/contribute/participating/)에 참여하고, [멤버 또는 검토자](/ko/docs/contribute/participating/#역할과-책임)가 되어봅니다. - [현지화](/ko/docs/contribute/localization_ko/)를 시작하거나 도와줍니다. diff --git a/content/ko/docs/contribute/advanced.md b/content/ko/docs/contribute/advanced.md index 3f30f6eff930c..55661a673428b 100644 --- a/content/ko/docs/contribute/advanced.md +++ b/content/ko/docs/contribute/advanced.md @@ -17,66 +17,6 @@ weight: 98 -## 일주일 동안 PR 랭글러(Wrangler) 되기 - -SIG Docs [승인자](/ko/docs/contribute/participating/#승인자)는 리포지터리에 대해 1주일 정도씩 [PR을 조정(wrangling)](https://github.com/kubernetes/website/wiki/PR-Wranglers)하는 역할을 맡는다. - -PR 랭글러의 임무는 다음과 같다. 
- -- [스타일](/docs/contribute/style/style-guide/)과 [콘텐츠](/docs/contribute/style/content-guide/) 가이드를 준수하는지에 대해 [열린(open) 풀 리퀘스트](https://github.com/kubernetes/website/pulls)를 매일 리뷰한다. - - 가장 작은 PR(`size/XS`)을 먼저 리뷰한 다음, 가장 큰(`size/XXL`) PR까지 옮겨가며 리뷰를 반복한다. - - 가능한 한 많은 PR을 리뷰한다. -- 각 기여자가 CLA에 서명했는지 확인한다. - - 새로운 기여자가 [CLA](https://github.com/kubernetes/community/blob/master/CLA.md)에 서명하도록 도와준다. - - CLA에 서명하지 않은 기여자에게 CLA에 서명하도록 자동으로 알리려면 [이](https://github.com/zparnold/k8s-docs-pr-botherer) 스크립트를 사용한다. -- 제안된 변경 사항에 대한 피드백을 제공하고 다른 SIG의 멤버로부터의 기술 리뷰가 잘 진행되게 조율한다. - - 제안된 콘텐츠 변경에 대해 PR에 인라인 제안(inline suggestion)을 제공한다. - - 내용을 확인해야 하는 경우, PR에 코멘트를 달고 자세한 내용을 요청한다. - - 관련 `sig/` 레이블을 할당한다. - - 필요한 경우, 파일의 머리말(front matter)에 있는 `reviewers:` 블록의 리뷰어를 할당한다. - - PR의 리뷰 상태를 표시하기 위해 `Docs Review` 와 `Tech Review` 레이블을 할당한다. - - 아직 리뷰되지 않은 PR에 `Needs Doc Review` 나 `Needs Tech Review` 를 할당한다. - - 리뷰가 진행되었고, 병합하기 전에 추가 입력이나 조치가 필요한 PR에 `Doc Review: Open Issues` 나 `Tech Review: Open Issues` 를 할당한다. - - 병합할 수 있는 PR에 `/lgtm` 과 `/approve` 를 할당한다. -- PR이 준비가 되면 병합하거나, 수락해서는 안되는 PR을 닫는다. -- 새로운 이슈를 매일 심사하고 태그를 지정한다. SIG Docs가 메타데이터를 사용하는 방법에 대한 지침은 [이슈 심사 및 분류](/ko/docs/contribute/review/for-approvers/#이슈-심사와-분류)를 참고한다. - -## 랭글러에게 유용한 GitHub 쿼리 - -다음의 쿼리는 랭글러에게 도움이 된다. 이 쿼리들을 수행하여 작업한 후에는, 리뷰할 나머지 PR 목록은 -일반적으로 작다. 이 쿼리들은 특히 현지화 PR을 제외하고, `master` 브랜치만 포함한다(마지막 쿼리는 제외). - -- [CLA 서명 없음, 병합할 수 없음](https://github.com/kubernetes/website/pulls?q=is%3Aopen+is%3Apr+label%3A%22cncf-cla%3A+no%22+-label%3Ado-not-merge+label%3Alanguage%2Fen): - CLA에 서명하도록 기여자에게 상기시킨다. 봇과 사람이 이미 알렸다면, PR을 닫고 - CLA에 서명한 후 PR을 열 수 있음을 알린다. - **작성자가 CLA에 서명하지 않은 PR은 리뷰하지 않는다!** -- [LGTM 필요](https://github.com/kubernetes/website/pulls?utf8=%E2%9C%93&q=is%3Aopen+is%3Apr+-label%3Ado-not-merge+label%3Alanguage%2Fen+-label%3Algtm+): - 기술 리뷰가 필요한 경우, 봇이 제안한 리뷰어 중 한 명을 지정한다. 문서 리뷰나 - 교정이 필요한 경우, 변경 사항을 제안하거나 교정하는 커밋을 PR에 추가하여 진행한다. -- [LGTM 보유, 문서 승인 필요](https://github.com/kubernetes/website/pulls?q=is%3Aopen+is%3Apr+-label%3Ado-not-merge+label%3Alanguage%2Fen+label%3Algtm): - PR을 병합하기 위해 추가 변경이나 업데이트가 필요한지 여부를 결정한다. PR을 병합할 준비가 되었다고 생각되면, `/approve` 코멘트를 남긴다. -- [퀵윈(Quick Wins)](https://github.com/kubernetes/website/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aopen+base%3Amaster+-label%3A%22do-not-merge%2Fwork-in-progress%22+-label%3A%22do-not-merge%2Fhold%22+label%3A%22cncf-cla%3A+yes%22+label%3A%22size%2FXS%22+label%3A%22language%2Fen%22+): 명확한 결격 사유가 없는 master에 대한 작은 PR인 경우. ([XS, S, M, L, XL, XXL] 크기의 PR을 작업할 때 크기 레이블에서 "XS"를 변경한다) -- [master 이외의 브랜치에 대한 PR](https://github.com/kubernetes/website/pulls?utf8=%E2%9C%93&q=is%3Aopen+is%3Apr+-label%3Ado-not-merge+label%3Alanguage%2Fen+-base%3Amaster): `dev-` 브랜치에 대한 것일 경우, 곧 출시될 예정인 릴리스이다. `/assign @` 을 코멘트로 추가하여 [릴리스 마이스터](https://github.com/kubernetes/sig-release/tree/master/release-team)가 그것에 대해 알고 있는지 확인한다. 오래된 브랜치에 대한 PR인 경우, PR 작성자가 가장 적합한 브랜치를 대상으로 하고 있는지 여부를 파악할 수 있도록 도와준다. - -### 풀 리퀘스트를 종료하는 시기 - -리뷰와 승인은 PR 대기열을 최신 상태로 유지하는 도구 중 하나이다. 또 다른 도구는 종료(closure)이다. - -- CLA가 2주 동안 서명되지 않은 모든 PR을 닫는다. -PR 작성자는 CLA에 서명한 후 PR을 다시 열 수 있으므로, 이는 어떤 것도 CLA 서명없이 병합되지 않게 하는 위험이 적은 방법이다. - -- 작성자가 2주 이상 동안 코멘트나 피드백에 응답하지 않은 모든 PR을 닫는다. - -풀 리퀘스트를 닫는 것을 두려워하지 말자. 기여자는 진행 중인 작업을 쉽게 다시 열고 다시 시작할 수 있다. 종종 종료 통지는 작성자가 기여를 재개하고 끝내도록 자극하는 것이다. - -풀 리퀘스트를 닫으려면, PR에 `/close` 코멘트를 남긴다. - -{{< note >}} - -[`fejta-bot`](https://github.com/fejta-bot)이라는 자동화 서비스는 90일 동안 활동이 없으면 자동으로 이슈를 오래된 것으로 표시한 다음, 그 상태에서 추가로 30일 동안 활동이 없으면 종료한다. PR 랭글러는 14-30일 동안 활동이 없으면 이슈를 닫아야 한다. 
- -{{< /note >}} - ## 개선 제안 SIG Docs [멤버](/ko/docs/contribute/participating/#멤버)는 개선을 제안할 수 있다. @@ -244,5 +184,3 @@ SIG Docs [승인자](/ko/docs/contribute/participating/#승인자)는 SIG Docs 녹화를 중지하려면, Stop을 클릭한다. 비디오가 자동으로 유튜브에 업로드된다. - - diff --git a/content/ko/docs/contribute/new-content/open-a-pr.md b/content/ko/docs/contribute/new-content/open-a-pr.md index 46439d3565ecc..82c72e2ce82b8 100644 --- a/content/ko/docs/contribute/new-content/open-a-pr.md +++ b/content/ko/docs/contribute/new-content/open-a-pr.md @@ -97,10 +97,12 @@ git에 익숙하거나, 변경 사항이 몇 줄보다 클 경우, ### 로컬 클론 생성 및 업스트림 설정 -3. 터미널 창에서, 포크를 클론한다. +3. 터미널 창에서, 포크를 클론하고 [Docsy Hugo 테마](https://github.com/google/docsy#readme)를 업데이트한다. ```bash git clone git@github.com//website + cd website + git submodule update --init --recursive --depth 1 ``` 4. 새 `website` 디렉터리로 이동한다. `kubernetes/website` 리포지터리를 `upstream` 원격으로 설정한다. @@ -217,21 +219,39 @@ git에 익숙하거나, 변경 사항이 몇 줄보다 클 경우, 변경 사항을 푸시하거나 풀 리퀘스트를 열기 전에 변경 사항을 로컬에서 미리 보는 것이 좋다. 미리보기를 사용하면 빌드 오류나 마크다운 형식 문제를 알아낼 수 있다. -website의 도커 이미지를 만들거나 Hugo를 로컬에서 실행할 수 있다. 도커 이미지 빌드는 느리지만 [Hugo 단축코드](/docs/contribute/style/hugo-shortcodes/)를 표시하므로, 디버깅에 유용할 수 있다. +website의 컨테이너 이미지를 만들거나 Hugo를 로컬에서 실행할 수 있다. 도커 이미지 빌드는 느리지만 [Hugo 단축코드](/docs/contribute/style/hugo-shortcodes/)를 표시하므로, 디버깅에 유용할 수 있다. {{< tabs name="tab_with_hugo" >}} {{% tab name="Hugo 컨테이너" %}} +{{< note >}} +아래 명령은 도커를 기본 컨테이너 엔진으로 사용한다. 이 동작을 무시하려면 `CONTAINER_ENGINE` 환경변수를 설정한다. +{{< /note >}} + 1. 로컬에서 이미지를 빌드한다. ```bash make docker-image + # docker 사용(기본값) + make container-image + + ### 또는 ### + + # podman 사용 + CONTAINER_ENGINE=podman make container-image ``` 2. 로컬에서 `kubernetes-hugo` 이미지를 빌드한 후, 사이트를 빌드하고 서비스한다. ```bash make docker-serve + # docker 사용(기본값) + make container-serve + + ### 또는 ### + + # podman 사용 + CONTAINER_ENGINE=podman make container-serve ``` 3. 웹 브라우저에서 `https://localhost:1313` 로 이동한다. Hugo는 @@ -245,18 +265,26 @@ website의 도커 이미지를 만들거나 Hugo를 로컬에서 실행할 수 또는, 컴퓨터에 `hugo` 명령을 설치하여 사용한다. -5. [`website/netlify.toml`](https://raw.githubusercontent.com/kubernetes/website/master/netlify.toml)에 지정된 [Hugo](https://gohugo.io/getting-started/installing/) 버전을 설치한다. +1. [`website/netlify.toml`](https://raw.githubusercontent.com/kubernetes/website/master/netlify.toml)에 지정된 [Hugo](https://gohugo.io/getting-started/installing/) 버전을 설치한다. -6. 터미널에서, 쿠버네티스 website 리포지터리로 이동하여 Hugo 서버를 시작한다. +2. website 리포지터리를 업데이트하지 않았다면, `website/themes/docsy` 디렉터리가 비어 있다. +테마의 로컬 복제본이 없으면 사이트를 빌드할 수 없다. website 테마를 업데이트하려면, 다음을 실행한다. + + ```bash + git submodule update --init --recursive --depth 1 + ``` + +3. 터미널에서, 쿠버네티스 website 리포지터리로 이동하여 Hugo 서버를 시작한다. ```bash cd /website - hugo server + hugo server --buildFuture ``` -7. 브라우저의 주소 표시줄에 `https://localhost:1313` 을 입력한다. +4. 웹 브라우저에서 `https://localhost:1313` 으로 이동한다. Hugo는 + 변경 사항을 보고 필요에 따라 사이트를 다시 구축한다. -8. 로컬의 Hugo 인스턴스를 중지하려면, 터미널로 돌아가서 `Ctrl+C` 를 입력하거나, +5. 로컬의 Hugo 인스턴스를 중지하려면, 터미널로 돌아가서 `Ctrl+C` 를 입력하거나,     터미널 창을 닫는다. {{% /tab %}} @@ -286,7 +314,7 @@ PR을 연 후, GitHub는 자동 테스트를 실행하고 [Netlify](https://www. - Netlify 빌드가 실패하면, 자세한 정보를 위해 **Details** 를 선택한다. - Netlify 빌드가 성공하면, **Details** 를 선택하면 변경 사항이 적용된 쿠버네티스 website의 커밋하기 직전의 버전(staged version)이 열린다. 리뷰어가 변경 사항을 확인하는 방법이다. -또한 GitHub는 리뷰어에게 도움을 주기 위해 PR에 레이블을 자동으로 할당한다. 필요한 경우 직접 추가할 수도 있다. 자세한 내용은 [이슈 레이블 추가와 제거](/docs/contribute/review/for-approvers/#adding-and-removing-issue-labels)를 참고한다. +또한 GitHub는 리뷰어에게 도움을 주기 위해 PR에 레이블을 자동으로 할당한다. 필요한 경우 직접 추가할 수도 있다. 자세한 내용은 [이슈 레이블 추가와 제거](/ko/docs/contribute/review/for-approvers/#이슈-레이블-추가와-제거)를 참고한다. 
### 로컬에서 피드백 해결 @@ -480,6 +508,4 @@ PR에 여러 커밋이 있는 경우, PR을 병합하기 전에 해당 커밋을 ## {{% heading "whatsnext" %}} -- 리뷰 프로세스에 대한 자세한 내용은 [리뷰하기](/ko/docs/contribute/reviewing/revewing-prs)를 읽어본다. - - +- 리뷰 프로세스에 대한 자세한 내용은 [리뷰하기](/ko/docs/contribute/review/reviewing-prs)를 읽어본다. diff --git a/content/ko/docs/contribute/participate/_index.md b/content/ko/docs/contribute/participate/_index.md new file mode 100644 index 0000000000000..610815e65befe --- /dev/null +++ b/content/ko/docs/contribute/participate/_index.md @@ -0,0 +1,120 @@ +--- +title: SIG Docs에 참여하기 +content_type: concept +weight: 60 +card: + name: contribute + weight: 60 +--- + + + +SIG Docs는 쿠버네티스 프로젝트의 +[분과회(special interest group)](https://github.com/kubernetes/community/blob/master/sig-list.md) +중 하나로, 쿠버네티스 전반에 대한 문서를 작성하고, 업데이트하며 유지보수하는 일을 주로 수행한다. +분과회에 대한 보다 자세한 정보는 +[커뮤니티 GitHub 저장소 내 SIG Docs](https://github.com/kubernetes/community/tree/master/sig-docs) +를 참조한다. + +SIG Docs는 모든 컨트리뷰터의 콘텐츠와 리뷰를 환영한다. +누구나 풀 리퀘스트(PR)를 요청할 수 있고, +누구나 콘텐츠에 대해 이슈를 등록하거나 진행 중인 풀 리퀘스트에 코멘트를 등록할 수 있다. + +[멤버](/ko/docs/contribute/participating/roles-and-responsibilities/#멤버), [리뷰어](/ko/docs/contribute/participating/roles-and-responsibilities/#리뷰어), 또는 [승인자](/ko/docs/contribute/participating/roles-and-responsibilities/#승인자)가 될 수 있다. +이런 역할은 변경을 승인하고 커밋할 수 있도록 보다 많은 접근 권한과 이에 상응하는 책임이 수반된다. +쿠버네티스 커뮤니티 내에서 멤버십이 운영되는 방식에 대한 보다 많은 정보를 확인하려면 +[커뮤니티 멤버십](https://github.com/kubernetes/community/blob/master/community-membership.md) +문서를 확인한다. + +문서의 나머지에서는 대외적으로 쿠버네티스를 가장 잘 드러내는 수단 중 하나인 쿠버네티스 웹사이트와 +문서를 관리하는 책임을 가지는 SIG Docs에서, +이런 체계가 작동하는 특유의 방식에 대한 윤곽을 잡아보겠다. + + + + + +## SIG Docs 의장 + +SIG Docs를 포함한 각 SIG는, 한 명 이상의 SIG 멤버가 의장 역할을 하도록 선정한다. 이들은 SIG Docs와 +다른 쿠버네티스 조직 간 연락책(point of contact)이 된다. 이들은 쿠버네티스 프로젝트 전반의 조직과 +그 안에서 SIG Docs가 어떻게 운영되는지에 대한 폭넓은 지식을 갖추어야한다. +현재 의장의 목록을 확인하려면 +[리더십](https://github.com/kubernetes/community/tree/master/sig-docs#leadership) +문서를 참조한다. + +## SIG Docs 팀과 자동화 + +SIG Docs의 자동화는 다음의 두 가지 메커니즘에 의존한다. +GitHub 팀과 OWNERS 파일이다. + +### GitHub 팀 + +GitHub의 SIG Docs [팀]에는 두 분류가 있다. + +- 승인자와 리더를 위한 `@sig-docs-{language}-owners` +- 리뷰어를 위한 `@sig-docs-{language}-reviewers` + +그룹의 전원과 의사소통하기 위해서 +각각 GitHub 코멘트에서 그룹의 `@name`으로 참조할 수 있다. + +가끔은 Prow와 GitHub 팀은 정확히 일치하지 않고 중복된다. 이슈, 풀 리퀘스트를 할당하고, PR 승인을 지원하기 위해서 +자동화 시스템이 `OWNERS` 파일의 정보를 활용한다. + +### OWNERS 파일과 전문(front-matter) + +쿠버네티스 프로젝트는 GitHub 이슈와 풀 리퀘스트 자동화와 관련해서 prow라고 부르는 자동화 툴을 사용한다. +[쿠버네티스 웹사이트 리포지터리](https://github.com/kubernetes/website)는 +다음의 두개의 [prow 플러그인](https://github.com/kubernetes/test-infra/tree/master/prow/plugins)을 +사용한다. + +- blunderbuss +- approve + +이 두 플러그인은 `kubernetes/website` GitHub 리포지터리 최상위 수준에 있는 +[OWNERS](https://github.com/kubernetes/website/blob/master/OWNERS)와 +[OWNERS_ALIASES](https://github.com/kubernetes/website/blob/master/OWNERS_ALIASES) +파일을 사용해서 +해당 리포지터리에 대해 prow가 작동하는 방식을 제어한다. + +OWNERS 파일은 SIG Docs 리뷰어와 승인자의 목록을 포함한다. OWNERS 파일은 하위 디렉터리에 있을 수 +있고, 해당 하위 디렉터리와 그 이하의 파일에 대해 리뷰어와 승인자 역할을 수행할 사람을 새로 지정할 수 있다. +일반적인 OWNERS 파일에 대한 보다 많은 정보는 +[OWNERS](https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md) +문서를 참고한다. + +추가로, 개별 마크다운(Markdown) 파일 내 전문에 +리뷰어와 승인자를 개별 GitHub 사용자 이름이나 GitHub 그룹으로 열거할 수 있다. + +OWNERS 파일과 마크다운 파일 내 전문의 조합은 +자동화 시스템이 누구에게 기술적, 편집적 리뷰를 요청해야 할지를 +PR 소유자에게 조언하는데 활용된다. + +## 병합 작업 방식 + +풀 리퀘스트 요청이 콘텐츠를 발행하는데 사용하는 +브랜치에 병합되면, 해당 콘텐츠는 http://kubernetes.io 에 공개된다. 게시된 콘텐츠의 +품질을 높히기 위해 SIG Docs 승인자가 풀 리퀘스트를 병합하는 것을 제한한다. +작동 방식은 다음과 같다. 
+ +- 풀 리퀘스트에 `lgtm` 과 `approve` 레이블이 있고, `hold` 레이블이 없고, + 모든 테스트를 통과하면 풀 리퀘스트는 자동으로 병합된다. +- 쿠버네티스 조직의 멤버와 SIG Docs 승인자들은 지정된 풀 리퀘스트의 + 자동 병합을 방지하기 위해 코멘트를 추가할 수 있다(코멘트에 `/hold` 추가 또는 + `/lgtm` 코멘트 보류). +- 모든 쿠버네티스 멤버는 코멘트에 `/lgtm` 을 추가해서 `lgtm` 레이블을 추가할 수 있다. +- SIG Docs 승인자들만이 코멘트에 `/approve` 를 + 추가해서 풀 리퀘스트를 병합할 수 있다. 일부 승인자들은 + [PR Wrangler](/ko/docs/contribute/advanced/#일주일-동안-pr-랭글러-wrangler-되기) 또는 [SIG Docs 의장](#sig-docs-의장)과 + 같은 특정 역할도 수행한다. + + + +## {{% heading "whatsnext" %}} + + +쿠버네티스 문서화에 기여하는 일에 대한 보다 많은 정보는 다음 문서를 참고한다. + +- [신규 콘텐츠 기여하기](/ko/docs/contribute/new-content/overview/) +- [콘텐츠 검토하기](/ko/docs/contribute/review/reviewing-prs/) +- [문서 스타일 가이드](/ko/docs/contribute/style/) diff --git a/content/ko/docs/contribute/participate/pr-wranglers.md b/content/ko/docs/contribute/participate/pr-wranglers.md new file mode 100644 index 0000000000000..4581400ea3803 --- /dev/null +++ b/content/ko/docs/contribute/participate/pr-wranglers.md @@ -0,0 +1,70 @@ +--- +title: PR 랭글러(PR Wrangler) +content_type: concept +weight: 20 +--- + + + +SIG Docs [승인자](/ko/docs/contribute/participating/roles-and-responsibilites/#승인자)는 리포지터리에 대해 일주일 동안 교대로 [풀 리퀘스트 관리](https://github.com/kubernetes/website/wiki/PR-Wranglers)를 수행한다. + +이 섹션은 PR 랭글러의 의무에 대해 다룬다. 좋은 리뷰 제공에 대한 자세한 내용은 [Reviewing changes](/ko/docs/contribute/review/)를 참고한다. + + + +## 의무 + +PR 랭글러는 일주일 간 매일 다음의 일을 해야 한다. + +- 매일 새로 올라오는 이슈를 심사하고 태그를 지정한다. SIG Docs가 메타데이터를 사용하는 방법에 대한 지침은 [이슈 심사 및 분류](/docs/contribute/review/for-approvers/#triage-and-categorize-issues)를 참고한다. +- [스타일](/docs/contribute/style/style-guide/)과 [콘텐츠](/docs/contribute/style/content-guide/) 가이드를 준수하는지에 대해 [열린(open) 풀 리퀘스트](https://github.com/kubernetes/website/pulls)를 매일 리뷰한다. + - 가장 작은 PR(`size/XS`)부터 시작하고, 가장 큰(`size/XXL`) PR까지 리뷰한다. 가능한 한 많은 PR을 리뷰한다. +- PR 기여자들이 [CLA]()에 서명했는지 확인한다. + - CLA에 서명하지 않은 기여자에게 CLA에 서명하도록 알리려면 [이](https://github.com/zparnold/k8s-docs-pr-botherer) 스크립트를 사용한다. +- 제안된 변경 사항에 대한 피드백을 제공하고 다른 SIG의 멤버에게 기술 리뷰를 요청한다. + - 제안된 콘텐츠 변경에 대해 PR에 인라인 제안(inline suggestion)을 제공한다. + - 내용을 확인해야 하는 경우, PR에 코멘트를 달고 자세한 내용을 요청한다. + - 관련 `sig/` 레이블을 할당한다. + - 필요한 경우, 파일의 머리말(front matter)에 있는 `reviewers:` 블록의 리뷰어를 할당한다. +- PR을 병합하려면 승인을 위한 `approve` 코멘트를 사용한다. 준비가 되면 PR을 병합한다. + - 병합하기 전에 PR은 다른 멤버의 `/lgtm` 코멘트를 받아야 한다. + - [스타일 지침]을 충족하지 않지만 기술적으로는 정확한 PR은 수락하는 것을 고려한다. 스타일 문제를 해결하는 `good first issue` 레이블의 새로운 이슈를 올리면 된다. + +### 랭글러를 위해 도움이 되는 GitHub 쿼리 + +다음의 쿼리는 랭글러에게 도움이 된다. +이 쿼리들을 수행하여 작업한 후에는, 리뷰할 나머지 PR 목록은 일반적으로 작다. +이 쿼리들은 특히 현지화 PR을 제외한다. 모든 쿼리는 마지막 쿼리를 제외하고 메인 브렌치를 대상으로 한다. + +- [CLA 서명 없음, 병합할 수 없음](https://github.com/kubernetes/website/pulls?q=is%3Aopen+is%3Apr+label%3A%22cncf-cla%3A+no%22+-label%3Ado-not-merge+label%3Alanguage%2Fen): + CLA에 서명하도록 기여자에게 상기시킨다. 봇과 사람이 이미 알렸다면, PR을 닫고 + CLA에 서명한 후 PR을 열 수 있음을 알린다. + **작성자가 CLA에 서명하지 않은 PR은 리뷰하지 않는다!** +- [LGTM 필요](https://github.com/kubernetes/website/pulls?utf8=%E2%9C%93&q=is%3Aopen+is%3Apr+-label%3Ado-not-merge+label%3Alanguage%2Fen+-label%3Algtm+): + 멤버의 LGTM이 필요한 PR을 나열한다. PR에 기술 리뷰가 필요한 경우, 봇이 제안한 리뷰어 중 한 명을 + 지정한다. 콘텐츠에 대한 작업이 필요하다면, 제안하거나 인라인 피드백을 추가한다. +- [LGTM 보유, 문서 승인 필요](https://github.com/kubernetes/website/pulls?q=is%3Aopen+is%3Apr+-label%3Ado-not-merge+label%3Alanguage%2Fen+label%3Algtm): + 병합을 위해 `/approve` 코멘트가 필요한 PR을 나열한다. 
+- [퀵윈(Quick Wins)](https://github.com/kubernetes/website/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aopen+base%3Amaster+-label%3A%22do-not-merge%2Fwork-in-progress%22+-label%3A%22do-not-merge%2Fhold%22+label%3A%22cncf-cla%3A+yes%22+label%3A%22size%2FXS%22+label%3A%22language%2Fen%22+): 명확한 결격 사유가 없는 메인 브랜치에 대한 PR을 나열한다. ([XS, S, M, L, XL, XXL] 크기의 PR을 작업할 때 크기 레이블에서 "XS"를 변경한다) +- [메인 브랜치이외의 브랜치에 대한 PR](https://github.com/kubernetes/website/pulls?utf8=%E2%9C%93&q=is%3Aopen+is%3Apr+-label%3Ado-not-merge+label%3Alanguage%2Fen+-base%3Amaster): `dev-` 브랜치에 대한 것일 경우, 곧 출시될 예정인 릴리스이다. `/assign @` 을 사용하여 [문서 릴리스 관리자](https://github.com/kubernetes/sig-release/tree/master/release-team#kubernetes-release-team-roles)를 할당한다. 오래된 브랜치에 대한 PR인 경우, PR 작성자가 가장 적합한 브랜치를 대상으로 하고 있는지 여부를 파악할 수 있도록 도와준다. + +### 풀 리퀘스트를 종료하는 시기 + +리뷰와 승인은 PR 대기열을 최신 상태로 유지하는 도구 중 하나이다. 또 다른 도구는 종료(closure)이다. + +다음의 상황에서 PR을 닫는다. +- 작성자가 CLA에 2주 동안 서명하지 않았다. + + 작성자는 CLA에 서명한 후 PR을 다시 열 수 있다. 이는 어떤 것도 CLA 서명없이 병합되지 않게 하는 위험이 적은 방법이다. + +- 작성자가 2주 이상 동안 코멘트나 피드백에 응답하지 않았다. + +풀 리퀘스트를 닫는 것을 두려워하지 말자. 기여자는 진행 중인 작업을 쉽게 다시 열고 다시 시작할 수 있다. 종종 종료 통지는 작성자가 기여를 재개하고 끝내도록 자극하는 것이다. + +풀 리퀘스트를 닫으려면, PR에 `/close` 코멘트를 남긴다. + +{{< note >}} + +[`fejta-bot`](https://github.com/fejta-bot)이라는 봇은 90일 동안 활동이 없으면 이슈를 오래된 것(stale)으로 표시한다. 30일이 더 지나면 rotten으로 표시하고 종료한다. PR 랭글러는 14-30일 동안 활동이 없으면 이슈를 닫아야 한다. + +{{< /note >}} diff --git a/content/ko/docs/contribute/participate/roles-and-responsibilties.md b/content/ko/docs/contribute/participate/roles-and-responsibilties.md new file mode 100644 index 0000000000000..e5dbfb85ffe3f --- /dev/null +++ b/content/ko/docs/contribute/participate/roles-and-responsibilties.md @@ -0,0 +1,195 @@ +--- +title: 역할과 책임 +content_type: concept +weight: 10 +--- + + + +누구나 쿠버네티스에 기여할 수 있다. SIG Docs에 대한 기여가 커짐에 따라, 커뮤니티의 다양한 멤버십을 신청할 수 있다. +이러한 역할을 통해 커뮤니티 내에서 더 많은 책임을 질 수 있다. +각 역할마다 많은 시간과 노력이 필요하다. 역할은 다음과 같다. + +- 모든 사람: 쿠버네티스 문서에 정기적으로 기여하는 기여자 +- 멤버: 이슈를 할당, 심사하고 풀 리퀘스트에 대한 구속력 없는 리뷰를 제공할 수 있다. +- 리뷰어: 문서의 풀 리퀘스트에 대한 리뷰를 리딩할 수 있으며 변경 사항에 대한 품질을 보증할 수 있다. +- 승인자: 문서에 대한 리뷰를 리딩하고 변경 사항을 병합할 수 있다 + + + +## 모든 사람 + +GitHub 계정을 가진 누구나 쿠버네티스에 기여할 수 있다. SIG Docs는 모든 새로운 기여자를 환영한다! + +모든 사람은 다음의 작업을 할 수 있다. + +- [`kubernetes/website`](https://github.com/kubernetes/website)를 포함한 모든 [쿠버네티스] 리포지터리에서 이슈를 올린다. +- 풀 리퀘스트에 대해 구속력 없는 피드백을 제공한다. +- 현지화에 기여한다. +- [슬랙](http://slack.k8s.io/) 또는 [SIG docs 메일링 리스트](https://groups.google.com/forum/#!forum/kubernetes-sig-docs)에 개선을 제안한다. + +[CLA에 서명](/ko/docs/contribute/new-content/overview/#sign-the-cla) 후에 누구나 다음을 할 수 있다. + +- 기존 콘텐츠를 개선하거나, 새 콘텐츠를 추가하거나, 블로그 게시물 또는 사례연구 작성을 위해 풀 리퀘스트를 연다. +- 다이어그램, 그래픽 자산 그리고 포함할 수 있는 스크린캐스트와 비디오를 제작한다. + +자세한 내용은 [새로운 콘텐츠 기여하기](/ko/docs/contribute/new-content/)를 참고한다. + +## 멤버 + +멤버는 `kubernetes/website` 에 여러 개의 풀 리퀘스트를 제출한 사람이다. 멤버는 [쿠버네티스 GitHub 조직](https://github.com/kubernetes)의 회원이다. + +멤버는 다음의 작업을 할 수 있다. + +- [모든 사람](#모든-사람)에 나열된 모든 것을 한다. +- 풀 리퀘스트에 `/lgtm` 코멘트를 사용하여 LGTM(looks good to me) 레이블을 추가한다. + + {{< note >}} + `/lgtm` 사용은 자동화를 트리거한다. 만약 구속력 없는 승인을 제공하려면, 단순히 "LGTM" 코멘트를 남기는 것도 좋다! + {{< /note >}} +- `/hold` 코멘트를 사용하여 풀 리퀘스트에 대한 병합을 차단한다. +- `/assign` 코멘트를 사용하여 풀 리퀘스트에 리뷰어를 지정한다. +- 풀 리퀘스트에 구속력 없는 리뷰를 제공한다. +- 자동화를 사용하여 이슈를 심사하고 분류한다. +- 새로운 기능에 대한 문서를 작성한다. + +### 멤버 되기 + +최소 5개의 실질적인 풀 리퀘스트를 제출하고 다른 [요구 사항](https://github.com/kubernetes/community/blob/master/community-membership.md#member)을 충족시킨 후, 다음의 단계를 따른다. + +1. 멤버십을 [후원](/docs/contribute/advanced#sponsor-a-new-contributor)해 줄 두 명의 [리뷰어](#리뷰어) 또는 [승인자](#승인자)를 찾는다. 
+ + [슬랙의 #sig-docs 채널](https://kubernetes.slack.com) 또는 + [SIG Docs 메일링 리스트](https://groups.google.com/forum/#!forum/kubernetes-sig-docs)에서 후원을 요청한다. + + {{< note >}} + SIG Docs 멤버 개인에게 직접 email을 보내거나 + 슬랙 다이렉트 메시지를 보내지 않는다. 반드시 지원서를 제출하기 전에 후원을 요청해야 한다. + {{< /note >}} + +2. [`kubernetes/org`](https://github.com/kubernetes/org/) 리포지터리에 GitHub 이슈를 등록한다. **Organization Membership Request** 이슈 템플릿을 사용한다. + +3. 후원자에게 GitHub 이슈를 알린다. 다음 중 하나를 수행할 수 있다. + - 이슈에서 후원자의 GitHub 사용자 이름을 코멘트로 추가한다. (`@`) + - 슬랙 또는 이메일을 사용해 이슈 링크를 후원자에게 보낸다. + + 후원자는 `+1` 투표로 여러분의 요청을 승인할 것이다. 후원자가 요청을 승인하면, 쿠버네티스 GitHub 관리자가 여러분을 멤버로 추가한다. 축하한다! + + 만약 멤버십이 수락되지 않으면 피드백을 받게 될 것이다. 피드백의 내용을 해결한 후, 다시 지원하자. + +4. 여러분의 이메일 계정으로 수신된 쿠버네티스 GitHub 조직으로의 초대를 수락한다. + + {{< note >}} + GitHub은 초대를 여러분 계정의 기본 이메일 주소로 보낸다. + {{< /note >}} + +## 리뷰어 + +리뷰어는 열린 풀 리퀘스트를 리뷰할 책임이 있다. 멤버 피드백과는 달리, 여러분은 리뷰어의 피드백을 반드시 해결해야 한다. 리뷰어는 [@kubernetes/sig-docs-{language}-reviews](https://github.com/orgs/kubernetes/teams?query=sig-docs) GitHub 팀의 멤버이다. + +리뷰어는 다음의 작업을 수행할 수 있다. + +- [모든 사람](#모든-사람)과 [멤버](#멤버)에 나열된 모든 것을 수행한다. +- 풀 리퀘스트 리뷰와 구속력 있는 피드백을 제공한다. + + {{< note >}} + 구속력 없는 피드백을 제공하려면, 코멘트에 "선택 사항: "과 같은 문구를 접두어로 남긴다. + {{< /note >}} + +- 코드에서 사용자 화면 문자열 편집 +- 코드 코멘트 개선 + +여러분은 SIG DOcs 리뷰어이거나, 특정 주제 영역의 문서에 대한 리뷰어일 수 있다. + +### 풀 리퀘스트에 대한 리뷰어 할당 + +자동화 시스템은 모든 풀 리퀘스트에 대해 리뷰어를 할당한다. `/assign +[@_github_handle]` 코멘트를 남겨 특정 사람에게 리뷰를 요청할 수 +있다. + +지정된 리뷰어가 PR에 코멘트를 남기지 않는다면, 다른 리뷰어가 개입할 수 있다. 필요에 따라 기술 리뷰어를 지정할 수도 있다. + +### `/lgtm` 사용하기 + +LGTM은 "Looks good to me"의 약자이며 풀 리퀘스트가 기술적으로 정확하고 병합할 준비가 되었음을 나타낸다. 모든 PR은 리뷰어의 `/lgtm` 코멘트가 필요하고 병합을 위해 승인자의 `/approve` 코멘트가 필요하다. + +리뷰어의 `/lgtm` 코멘트는 구속력 있고 자동화 시스템이 `lgtm` 레이블을 추가하도록 트리거한다. + +### 리뷰어 되기 + +[요건](https://github.com/kubernetes/community/blob/master/community-membership.md#reviewer)을 +충족하면, SIG Docs 리뷰어가 될 수 있다. 다른 SIG의 리뷰어는 SIG Docs의 리뷰어 자격에 반드시 별도로 지원해야 한다. + +지원하려면, 다음을 수행한다. + +1. `kubernetes/website` 리포지터리 내 +[OWNERS_ALIASES](https://github.com/kubernetes/website/blob/master/OWNERS) 파일의 섹션에 +여러분의 GitHub 사용자 이름을 추가하는 풀 리퀘스트를 연다. + + {{< note >}} + 자신을 추가할 위치가 확실하지 않으면, `sig-docs-ko-reviews` 에 추가한다. + {{< /note >}} + +2. PR을 하나 이상의 SIG-Docs 승인자(`sig-docs-{language}-owners` 에 나열된 사용자 이름)에게 지정한다. + +승인되면, SIG Docs 리더가 적당한 GitHub 팀에 여러분을 추가한다. 일단 추가되면, [K8s-ci-robot](https://github.com/kubernetes/test-infra/tree/master/prow#bots-home)이 새로운 풀 리퀘스트에서 리뷰어로 여러분을 할당하고 제안한다. + +## 승인자 + +승인자는 병합하기 위해 풀 리퀘스트를 리뷰하고 승인한다. 승인자는 +[@kubernetes/sig-docs-{language}-owners](https://github.com/orgs/kubernetes/teams/?query=sig-docs) GitHub 팀의 멤버이다. + +승인자는 다음의 작업을 할 수 있다. + +- [모든 사람](#모든-사람), [멤버](#멤버) 그리고 [리뷰어](#리뷰어) 하위의 모든 목록을 할 수 있다. +- 코멘트에 `/approve` 를 사용해서 풀 리퀘스트를 승인하고, 병합해서 기여자의 컨텐츠를 게시한다. +- 스타일 가이드 개선을 제안한다. +- 문서 테스트 개선을 제안한다. +- 쿠버네티스 웹사이트 또는 다른 도구 개선을 제안한다. + +PR에 이미 `/lgtm` 이 있거나, 승인자도 `/lgtm` 코멘트를 남긴다면, PR은 자동으로 병합된다. SIG Docs 승인자는 추가적인 기술 리뷰가 필요치 않는 변경에 대해서만 `/lgtm` 을 남겨야 한다. + + +### 풀 리퀘스트 승인 + +승인자와 SIG Docs 리더는 website 리포지터리로 풀 리퀘스트를 병합할 수 있는 유일한 사람들이다. 이것은 특정한 책임이 따른다. + +- 승인자는 PR들을 리포지터리에 병합하는 `/approve` 명령을 사용할 수 있다. + + {{< warning >}} + 부주의한 머지로 인해 사이트를 파괴할 수 있으므로, 머지할 때에 그 의미를 확인해야 한다. + {{< /warning >}} + +- 제안된 변경이 [컨트리뷰션 가이드 라인](/docs/contribute/style/content-guide/#contributing-content)에 적합한지 확인한다. + + 질문이 생기거나 확실하지 않다면 자유롭게 추가 리뷰를 요청한다. + +- PR을 `/approve` 하기 전에 Netlify 테스트 결과를 검토한다. + + 승인 전에 반드시 Netlify 테스트를 통과해야 한다 + +- 승인 전에 PR에 대한 Netlify 프리뷰 페이지를 방문하여, 제대로 보이는지 확인한다. 
+ +- 주간 로테이션을 위해 [PR Wrangler 로테이션 스케줄](https://github.com/kubernetes/website/wiki/PR-Wranglers)에 참여한다. SIG Docs는 모든 승인자들이 이 로테이션에 참여할 +것으로 기대한다. 자세한 내용은 [PR 랭글러(PR wrangler)](/ko/docs/contribute/participating/pr-wranglers/)를 +참고한다. + +## 승인자 되기 + +[요구 사항](https://github.com/kubernetes/community/blob/master/community-membership.md#approver)을 충족하면 SIG Docs 승인자가 될 수 있다. 다른 SIG의 승인자는 SIG Docs의 승인자 자격에 대해 별도로 신청해야 한다. + +지원하려면 다음을 수행한다. + +1. `kubernetes/website` 리포지터리 내 [OWNERS_ALIASES](https://github.com/kubernetes/website/blob/master/OWNERS) 파일의 섹션에 자신을 추가하는 풀 리퀘스트를 연다. + + {{< note >}} + 자신을 추가할 위치가 확실하지 않으면, `sig-docs-ko-owners` 에 추가한다. + {{< /note >}} + +2. PR에 한 명 이상의 현재 SIG Docs 승인자를 지정한다. + +승인되면, SIG Docs 리더가 적당한 GitHub 팀에 여러분을 추가한다. 일단 추가되면, [@k8s-ci-robot](https://github.com/kubernetes/test-infra/tree/master/prow#bots-home)이 새로운 풀 리퀘스트에서 승인자로 여러분을 할당하고 제안한다. + +## {{% heading "whatsnext" %}} + +- 모든 승인자가 교대로 수행하는 역할인 [PR 랭글러](/ko/docs/contribute/participating/pr-wranglers)에 대해 읽어보기 diff --git a/content/ko/docs/contribute/participating.md b/content/ko/docs/contribute/participating.md deleted file mode 100644 index a1897f350c1a5..0000000000000 --- a/content/ko/docs/contribute/participating.md +++ /dev/null @@ -1,314 +0,0 @@ ---- -title: SIG Docs에 참여하기 -content_type: concept -weight: 60 -card: - name: contribute - weight: 60 ---- - - - -SIG Docs는 쿠버네티스 프로젝트의 -[분과회(special interest group)](https://github.com/kubernetes/community/blob/master/sig-list.md) -중 하나로, 쿠버네티스 전반에 대한 문서를 작성하고, 업데이트하며 유지보수하는 일을 주로 수행한다. -분과회에 대한 보다 자세한 정보는 -[커뮤니티 GitHub 저장소 내 SIG Docs](https://github.com/kubernetes/community/tree/master/sig-docs) -를 참조한다. - -SIG Docs는 모든 컨트리뷰터의 콘텐츠와 리뷰를 환영한다. -누구나 풀 리퀘스트(PR)를 요청할 수 있고, -누구나 콘텐츠에 대해 이슈를 등록하거나 진행 중인 풀 리퀘스트에 코멘트를 등록할 수 있다. - -[멤버](#멤버), [리뷰어](#리뷰어), 또는 [승인자](#승인자)가 될 수 있다. -이런 역할은 변경을 승인하고 커밋할 수 있도록 보다 많은 접근 권한과 이에 상응하는 책임이 수반된다. -쿠버네티스 커뮤니티 내에서 멤버십이 운영되는 방식에 대한 보다 많은 정보를 확인하려면 -[커뮤니티 멤버십](https://github.com/kubernetes/community/blob/master/community-membership.md) -문서를 확인한다. - -문서의 나머지에서는 대외적으로 쿠버네티스를 가장 잘 드러내는 수단 중 하나인 쿠버네티스 웹사이트와 -문서를 관리하는 책임을 가지는 SIG Docs에서, -이런 체계가 작동하는 특유의 방식에 대한 윤곽을 잡아보겠다. - - - - - -## 역할과 책임 - -- **모든 사람** 은 쿠버네티스 문서에 기여할 수 있다. 기여 시 [CLA에 서명](/docs/contribute/new-content/overview/#sign-the-cla)하고 GitHub 계정을 가지고 있어야 한다. -- 쿠버네티스 조직의 **멤버** 는 쿠버네티스 프로젝트에 시간과 노력을 투자한 기여자이다. 일반적으로 승인되는 변경이 되는 풀 리퀘스트를 연다. 멤버십 기준은 [커뮤니티 멤버십](https://github.com/kubernetes/community/blob/master/community-membership.md)을 참조한다. -- SIG Docs의 **리뷰어** 는 쿠버네티스 조직의 일원으로 - 문서 풀 리퀘스트에 관심을 표명했고, SIG Docs 승인자에 - 의해 GitHub 리포지터리에 있는 GitHub - 그룹과 `OWNER` 파일에 추가되었다. -- SIG Docs의 **승인자** 는 프로젝트에 대한 지속적인 헌신을 보여준 - 좋은 멤버이다. 승인자는 쿠버네티스 조직을 대신해서 - 풀 리퀘스트를 병합하고 컨텐츠를 게시할 수 있다. - 또한 승인자는 더 큰 쿠버네티스 커뮤니티의 SIG Docs를 대표할 수 있다. - 릴리즈 조정과 같은 SIG Docs 승인자의 일부 의무에는 - 상당한 시간 투입이 필요하다. - -## 모든 사람 - -누구나 다음 작업을 할 수 있다. - -- 문서를 포함한 쿠버네티스의 모든 부분에 대해 GitHub 이슈 열기. -- 풀 리퀘스트에 대한 구속력 없는 피드백 제공 -- 기존 컨텐츠를 현지화하는데 도움주는 것 -- [슬랙](http://slack.k8s.io/) 또는 [SIG docs 메일링 리스트](https://groups.google.com/forum/#!forum/kubernetes-sig-docs)에 개선할 아이디어를 제시한다. -- `/lgtm` Prow 명령 ("looks good to me" 의 줄임말)을 사용해서 병합을 위한 풀 리퀘스트의 변경을 추천한다. - {{< note >}} - 만약 쿠버네티스 조직의 멤버가 아니라면, `/lgtm` 을 사용하는 것은 자동화된 시스템에 아무런 영향을 주지 않는다. - {{< /note >}} - -[CLA에 서명](/docs/contribute/new-content/overview/#sign-the-cla)) 후에 누구나 다음을 할 수 있다. -- 기존 콘텐츠를 개선하거나, 새 콘텐츠를 추가하거나, 블로그 게시물 또는 사례연구 작성을 위해 풀 리퀘스트를 연다. 
- -## 멤버 - -멤버는 [멤버 기준](https://github.com/kubernetes/community/blob/master/community-membership.md#member)을 충족하는 쿠버네티스 프로젝트에 기여한 사람들이다. SIG Docs는 쿠버네티스 커뮤니티의 모든 멤버로부터 기여를 환경하며, -기술적 정확성에 대한 다른 SIG 멤버들의 검토를 수시로 요청한다. - -쿠버네티스 조직의 모든 멤버는 다음 작업을 할 수 있다. - -- [모든 사람](#모든-사람) 하위에 나열된 모든 것 -- 풀 리퀘스트 코멘트에 `/lgtm` 을 사용해서 LGTM(looks good to me) 레이블을 붙일 수 있다. -- 풀 리퀘스트에 이미 LGTM 과 승인 레이블이 있는 경우에 풀 리퀘스트가 병합되지 않도록 코멘트에 `/hold` 를 사용할 수 있다. -- 코멘트에 `/assign` 을 사용해서 풀 리퀘스트에 리뷰어를 배정한다. - -### 멤버 되기 - -최소 5개의 실질적인 풀 리퀘스트를 성공적으로 제출한 경우, 쿠버네티스 조직의 -[멤버십](https://github.com/kubernetes/community/blob/master/community-membership.md#member)을 -요청할 수 있다. 다음의 단계를 따른다. - -1. 멤버십을 [후원](/docs/contribute/advanced#sponsor-a-new-contributor)해 줄 두 명의 리뷰어 또는 승인자를 - 찾는다. - - [쿠버네티스 Slack 인스턴스의 #sig-docs 채널](https://kubernetes.slack.com) 또는 - [SIG Docs 메일링 리스트](https://groups.google.com/forum/#!forum/kubernetes-sig-docs)에서 - 후원을 요청한다. - - {{< note >}} - SIG Docs 멤버 개인에게 직접 email을 보내거나 - Slack 다이렉트 메시지를 보내지 않는다. - {{< /note >}} - -2. `kubernetes/org` 리포지터리에 멤버십을 요청하는 GitHub 이슈를 등록한다. - [커뮤니티 멤버십](https://github.com/kubernetes/community/blob/master/community-membership.md) - 문서의 가이드라인을 따라서 양식을 채운다. - -3. 해당 GitHub 이슈에 후원자를 at-mentioning(`@`을 포함한 코멘트를 추가)하거나 - 링크를 직접 보내주어서 - 후원자가 해당 GitHub 이슈를 확인하고 `+1` 표를 줄 수 있도록 한다. - -4. 멤버십이 승인되면, 요청에 할당된 GitHub 관리자 팀 멤버가 승인되었음을 업데이트해주고 - 해당 GitHub 이슈를 종료한다. - 축하한다, 이제 멤버가 되었다! - -만약 멤버십 요청이 받아들여지지 않으면, -멤버십 위원회에서 재지원 전에 -필요한 정보나 단계를 알려준다. - -## 리뷰어 - -리뷰어는 -[@kubernetes/sig-docs-pr-reviews](https://github.com/orgs/kubernetes/teams/sig-docs-pr-reviews) -GitHub 그룹의 멤버이다. 리뷰어는 문서 풀 리퀘스트를 리뷰하고 제안받은 변경에 대한 피드백을 -제공한다. 리뷰어는 다음 작업을 수행할 수 있다. - -- [모든 사람](#모든-사람)과 [멤버](#멤버)에 나열된 모든 것을 수행 -- 새 기능의 문서화 -- 이슈 해결 및 분류 -- 풀 리퀘스트 리뷰와 구속력있는 피드백 제공 -- 다이어그램, 그래픽 자산과 포함가능한 스크린샷과 비디오를 생성 -- 코드에서 사용자 화면 문자열 편집 -- 코드 코멘트 개선 - -### 풀 리퀘스트에 대한 리뷰어 할당 - -자동화 시스템은 풀 리퀘스트에 대해 리뷰어를 할당하고, 사용자는 해당 풀 리퀘스트에 -`/assign [@_github_handle]` 코멘트를 남겨서 특정 리뷰어에게 리뷰를 요청할 수 있다. -풀 리퀘스트가 기술적으로 정확하고 더 변경이 필요하지 않다는 의미로, -리뷰어는 `/lgtm` 코멘트를 -해당 풀 리퀘스트에 추가할 수 있다. - -할당된 리뷰어가 내용을 아직 리뷰하지 않은 경우, -다른 리뷰어가 나설 수 있다. 추가로, 기술 리뷰어를 -할당해서 그들이 `/lgtm`을 주기를 기다릴 수도 있다. - -사소한 변경이나 기술적 리뷰가 필요한 PR의 경우, SIG Docs [승인자](#승인자)가 `/lgtm`을 줄 -수도 있다. - -리뷰어의 `/approve` 코멘트는 자동화 시스템에서 무시된다. - -### 리뷰어 되기 - -[요건](https://github.com/kubernetes/community/blob/master/community-membership.md#reviewer)을 -충족하면, SIG Docs 리뷰어가 될 수 있다. -다른 SIG의 리뷰어는 SIG Docs의 리뷰어 자격에 -반드시 별도로 지원해야 한다. - -지원하려면, `kubernetes/website` 저장소의 -[최상위 OWNERS 파일](https://github.com/kubernetes/website/blob/master/OWNERS) -내 `reviewers` 섹션에 자신을 추가하는 풀 리퀘스트를 연다. PR을 한 명 이상의 현재 SIG Docs -승인자에게 할당한다. - -풀 리퀘스트가 승인되면, 이제 SIG Docs 리뷰어가 된다. -[K8s-ci-robot](https://github.com/kubernetes/test-infra/tree/master/prow#bots-home)이 -새로운 풀 리퀘스트에 대한 리뷰어로 당신을 추천하게 된다. - -일단 승인되면, 현재 SIG Docs 승인자가 -[@kubernetes/sig-docs-pr-reviews](https://github.com/orgs/kubernetes/teams/sig-docs-pr-reviews) -GitHub 그룹에 당신을 추가하기를 요청한다. `kubernetes-website-admins` GitHub 그룹의 -멤버만이 신규 멤버를 GitHub 그룹에 추가할 수 있다. - -## 승인자 - -승인자는 -[@kubernetes/sig-docs-maintainers](https://github.com/orgs/kubernetes/teams/sig-docs-maintainers) -GitHub 그룹의 멤버이다. [SIG Docs 팀과 자동화](#sig-docs-팀과-자동화) 문서를 참조한다. - -승인자는 다음의 작업을 할 수 있다. - -- [모든 사람](#모든-사람), [멤버](#멤버) 그리고 [리뷰어](#리뷰어) 하위의 모든 목록을 할 수 있다. -- 코멘트에 `/approve` 를 사용해서 풀 리퀘스트를 승인하고, 병합해서 기여자의 컨텐츠를 게시한다. - 만약 승인자가 아닌 사람이 코멘트에 승인을 남기면 자동화 시스템에서 이를 무시한다. 
-- 쿠버네티스 릴리즈팀에 문서 담당자로 참여 -- 스타일 가이드 개선 제안 -- 문서 테스트 개선 제안 -- 쿠버네티스 웹사이트 또는 다른 도구 개선 제안 - -PR이 이미 `/lgtm`을 받았거나, 승인자가 `/lgtm`을 포함한 코멘트를 남긴 경우에는 -해당 PR이 자동으로 머지된다. SIG Docs 승인자는 추가적인 기술 리뷰가 필요하지 않은 변경에 대해서만 -`/lgtm`을 남겨야한다. - -### 승인자 되기 - -[요건](https://github.com/kubernetes/community/blob/master/community-membership.md#approver)을 -충족하면, SIG Docs 승인자가 될 수 있다. -다른 SIG의 승인자는 SIG Docs의 승인자 자격에 -반드시 별도로 지원해야 한다. - -지원하려면, `kubernetes/website` 저장소의 -[최상위 OWNERS 파일](https://github.com/kubernetes/website/blob/master/OWNERS) -내 `approvers` 섹션에 자신을 추가하는 풀 리퀘스트를 연다. PR을 한 명 이상의 현재 SIG Docs -승인자에게 할당한다. - -풀 리퀘스트가 승인되면, 이제 SIG Docs 승인자가 된다. -[K8s-ci-robot](https://github.com/kubernetes/test-infra/tree/master/prow#bots-home)이 -새로운 풀 리퀘스트에 대한 리뷰어로 당신을 추천하게 된다. - -일단 승인되면, 현재 SIG Docs 승인자가 -[@kubernetes/sig-docs-maintainers](https://github.com/orgs/kubernetes/teams/sig-docs-maintainers) -GitHub 그룹에 당신을 추가하기를 요청한다. `kubernetes-website-admins` GitHub 그룹의 -멤버만이 신규 멤버를 GitHub 그룹에 추가할 수 있다. - -### 승인자의 책임 - -승인자는 리뷰와 풀리퀘스트를 웹사이트 리포지터리에 머지하여 문서를 개선한다. 이 역할에는 추가적인 권한이 필요하므로, 승인자에게는 별도의 책임이 부여된다. - -- 승인자는 PR들을 리포에 머지하는 `/approve` 명령을 사용할 수 있다. - - 부주의한 머지로 인해 사이트를 파괴할 수 있으므로, 머지할 때에 그 의미를 확인해야 한다. - -- 제안된 변경이 [컨트리뷰션 가이드 라인](/docs/contribute/style/content-guide/#contributing-content)에 적합한지 확인한다. - - 질문이 생기거나 확실하지 않다면 자유롭게 추가 리뷰를 요청한다. - -- PR을 `/approve` 하기 전에 Netlify 테스트 결과를 검토한다. - - 승인 전에 반드시 Netlify 테스트를 통과해야 한다 - -- 승인 전에 PR에 대한 Netlify 프리뷰 페이지를 방문하여, 제대로 보이는지 확인한다. - -- 주간 로테이션을 위해 [PR Wrangler 로테이션 스케줄](https://github.com/kubernetes/website/wiki/PR-Wranglers)에 참여한다. SIG Docs는 모든 승인자들이 이 로테이션에 참여할 -것으로 기대한다. [일주일 간 PR Wrangler 되기](/docs/contribute/advanced#be-the-pr-wrangler-for-a-week) -문서를 참고한다. - -## SIG Docs 의장 - -SIG Docs를 포함한 각 SIG는, 한 명 이상의 SIG 멤버가 의장 역할을 하도록 선정한다. 이들은 SIG Docs와 -다른 쿠버네티스 조직 간 연락책(point of contact)이 된다. 이들은 쿠버네티스 프로젝트 전반의 조직과 -그 안에서 SIG Docs가 어떻게 운영되는지에 대한 폭넓은 지식을 갖추어야한다. -현재 의장의 목록을 확인하려면 -[리더십](https://github.com/kubernetes/community/tree/master/sig-docs#leadership) -문서를 참조한다. - -## SIG Docs 팀과 자동화 - -SIG Docs의 자동화는 다음의 두 가지 자동화 메커니즘에 의존한다. -GitHub 그룹과 OWNERS 파일이다. - -### GitHub 그룹 - -GitHub의 SIG Docs 그룹은 두 팀을 정의한다. - - - [@kubernetes/sig-docs-maintainers](https://github.com/orgs/kubernetes/teams/sig-docs-maintainers) - - [@kubernetes/sig-docs-pr-reviews](https://github.com/orgs/kubernetes/teams/sig-docs-pr-reviews) - -그룹의 전원과 의사소통하기 위해서 -각각 GitHub 코멘트에서 그룹의 `@name`으로 참조할 수 있다. - -이 팀은 중복되지만, 정확히 일치하지는 않으며, 이 그룹은 자동화 툴에서 사용된다. -이슈, 풀 리퀘스트를 할당하고, -PR 승인을 지원하기 위해서 자동화 시스템이 OWNERS 파일의 정보를 활용한다. - -### OWNERS 파일과 전문(front-matter) - -쿠버네티스 프로젝트는 GitHub 이슈와 풀 리퀘스트 자동화와 관련해서 prow라고 부르는 자동화 툴을 사용한다. -[쿠버네티스 웹사이트 리포지터리](https://github.com/kubernetes/website)는 -다음의 두개의 [prow 플러그인](https://github.com/kubernetes/test-infra/tree/master/prow/plugins)을 -사용한다. - -- blunderbuss -- approve - -이 두 플러그인은 `kubernetes/website` GitHub 리포지터리 최상위 수준에 있는 -[OWNERS](https://github.com/kubernetes/website/blob/master/OWNERS)와 -[OWNERS_ALIASES](https://github.com/kubernetes/website/blob/master/OWNERS_ALIASES) -파일을 사용해서 -해당 리포지터리에 대해 prow가 작동하는 방식을 제어한다. - -OWNERS 파일은 SIG Docs 리뷰어와 승인자의 목록을 포함한다. OWNERS 파일은 하위 디렉터리에 있을 수 -있고, 해당 하위 디렉터리와 그 이하의 파일에 대해 리뷰어와 승인자 역할을 수행할 사람을 새로 지정할 수 있다. -일반적인 OWNERS 파일에 대한 보다 많은 정보는 -[OWNERS](https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md) -문서를 참고한다. - -추가로, 개별 마크다운(Markdown) 파일 내 전문에 -리뷰어와 승인자를 개별 GitHub 사용자 이름이나 GitHub 그룹으로 열거할 수 있다. - -OWNERS 파일과 마크다운 파일 내 전문의 조합은 -자동화 시스템이 누구에게 기술적, 편집적 리뷰를 요청해야 할지를 -PR 소유자에게 조언하는데 활용된다. 
- -## 병합 작업 방식 - -풀 리퀘스트 요청이 콘텐츠(현재 `master`)를 발행하는데 사용하는 -브랜치에 병합되면 그 내용이 전 세계에 공개된다. 게시된 콘텐츠의 -품질을 높히기 위해 SIG Docs 승인자가 풀 리퀘스트를 병합하는 것을 제한한다. -작동 방식은 다음과 같다. - -- 풀 리퀘스트에 `lgtm` 과 `approve` 레이블이 있고, `hold` 레이블이 없고, - 모든 테스트를 통과하면 풀 리퀘스트는 자동으로 병합된다. -- 쿠버네티스 조직의 멤버와 SIG Docs 승인자들은 지정된 풀 리퀘스트의 - 자동 병합을 방지하기 위해 코멘트를 추가할 수 있다(코멘트에 `/hold` 추가 또는 - `/lgtm` 코멘트 보류). -- 모든 쿠버네티스 멤버는 코멘트에 `/lgtm` 을 추가해서 `lgtm` 레이블을 추가할 수 있다. -- SIG Docs 승인자들만이 코멘트에 `/approve` 를 - 추가해서 풀 리퀘스트를 병합할 수 있다. 일부 승인자들은 - [PR Wrangler](/docs/contribute/advanced#be-the-pr-wrangler-for-a-week) 또는 [SIG Docs 의장](#sig-docs-의장)과 - 같은 특정 역할도 수행한다. - - - -## {{% heading "whatsnext" %}} - - -쿠버네티스 문서화에 기여하는 일에 대한 보다 많은 정보는 다음 문서를 참고한다. - -- [신규 콘텐츠 기여하기](/ko/docs/contribute/new-content/overview/) -- [콘텐츠 검토하기](/ko/docs/contribute/review/reviewing-prs/) -- [문서 스타일 가이드](/ko/docs/contribute/style/) diff --git a/content/ko/docs/contribute/review/reviewing-prs.md b/content/ko/docs/contribute/review/reviewing-prs.md index 06c2b81f21223..ca3d38a752917 100644 --- a/content/ko/docs/contribute/review/reviewing-prs.md +++ b/content/ko/docs/contribute/review/reviewing-prs.md @@ -17,7 +17,7 @@ weight: 10 - 적합한 코멘트를 남길 수 있도록 [콘텐츠 가이드](/docs/contribute/style/content-guide/)와 [스타일 가이드](/docs/contribute/style/style-guide/)를 읽는다. -- 쿠버네티스 문서화 커뮤니티의 다양한 [역할과 책임](/docs/contribute/participating/#roles-and-responsibilities)을 이해한다. +- 쿠버네티스 문서화 커뮤니티의 다양한 [역할과 책임](/ko/docs/contribute/participating/#역할과-책임)을 이해한다. @@ -44,7 +44,7 @@ weight: 10 표시된다. 2. 다음 레이블 중 하나 또는 모두를 사용하여 열린 PR을 필터링한다. - - `cncf-cla: yes`(권장): CLA에 서명하지 않은 기여자가 제출한 PR은 병합할 수 없다. 자세한 내용은 [CLA 서명](/docs/contribute/new-content/overview/#sign-the-cla)을 참고한다. + - `cncf-cla: yes`(권장): CLA에 서명하지 않은 기여자가 제출한 PR은 병합할 수 없다. 자세한 내용은 [CLA 서명](/ko/docs/contribute/new-content/overview/#sign-the-cla)을 참고한다. - `language/en`(권장): 영어 문서에 대한 PR 전용 필터이다. - `size/`: 특정 크기의 PR을 필터링한다. 새로 시작하는 사람이라면, 더 작은 PR로 시작한다. @@ -94,5 +94,3 @@ weight: 10 ### 기타 오타나 공백과 같은 작은 이슈의 PR인 경우, 코멘트 앞에 `nit:` 를 추가한다. 이를 통해 문서의 저자는 이슈가 긴급하지 않다는 것을 알 수 있다. - - diff --git a/content/ko/docs/contribute/style/write-new-topic.md b/content/ko/docs/contribute/style/write-new-topic.md index ac7252a50f393..9bff308df1597 100644 --- a/content/ko/docs/contribute/style/write-new-topic.md +++ b/content/ko/docs/contribute/style/write-new-topic.md @@ -28,9 +28,17 @@ weight: 20 튜토리얼 | 튜토리얼 페이지는 여러 쿠버네티스의 특징들을 하나로 묶어서 목적을 달성하는 방법을 보여준다. 튜토리얼은 독자들이 페이지를 읽을 때 실제로 할 수 있는 몇 가지 단계의 순서를 제공한다. 또는 관련 코드 일부에 대한 설명을 제공할 수도 있다. 예를 들어 튜토리얼은 코드 샘플의 연습을 제공할 수 있다. 튜토리얼에는 쿠버네티스의 특징에 대한 간략한 설명이 포함될 수 있지만 개별 기능에 대한 자세한 설명은 관련 개념 문서과 연결지어야 한다. {{< /table >}} +### 새 페이지 작성 + 작성하는 각각의 새 페이지에 대해 [콘텐츠 타입](/docs/contribute/style/page-content-types/)을 -사용하자. 페이지 타입을 사용하면 -지정된 타입의 문서 간에 일관성을 보장할 수 있다. +사용하자. 문서 사이트는 새 콘텐츠 페이지를 작성하기 위한 템플리트 또는 +[Hugo archetypes](https://gohugo.io/content-management/archetypes/)을 +제공한다. 새로운 타입의 페이지를 작성하려면, 작성하려는 파일의 경로로 `hugo new` 를 +실행한다. 예를 들면, 다음과 같다. + +``` +hugo new docs/concepts/my-first-concept.md +``` ## 제목과 파일 이름 선택 @@ -55,30 +63,30 @@ YAML 블록이다. 여기 예시가 있다. title: HTTP 프록시를 사용하여 쿠버네티스 API에 접근 --- -## 디렉토리 선택 +## 디렉터리 선택 -페이지 타입에 따라 새로운 파일을 다음 중 하나의 하위 디렉토리에 넣자. +페이지 타입에 따라 새로운 파일을 다음 중 하나의 하위 디렉터리에 넣자. * /content/en/docs/tasks/ * /content/en/docs/tutorials/ * /content/en/docs/concepts/ -파일을 기존 하위 디렉토리에 넣거나 새 하위 디렉토리에 +파일을 기존 하위 디렉터리에 넣거나 새 하위 디렉터리에 넣을 수 있다. ## 목차에 항목 배치 -목차는 문서 소스의 디렉토리 구조를 사용하여 -동적으로 작성된다. `/content/en/docs/` 아래의 최상위 디렉토리는 최상위 레벨 탐색 기능을 -생성하고, 하위 디렉토리는 각각 목차에 항목을 +목차는 문서 소스의 디렉터리 구조를 사용하여 +동적으로 작성된다. 
`/content/en/docs/` 아래의 최상위 디렉터리는 최상위 레벨 탐색 기능을 +생성하고, 하위 디렉터리는 각각 목차에 항목을 갖는다. -각 하위 디렉토리에는 `_index.md` 파일이 있으며 이는 해당 하위 디렉토리의 컨텐츠에 대한 +각 하위 디렉터리에는 `_index.md` 파일이 있으며 이는 해당 하위 디렉터리의 컨텐츠에 대한 "홈" 페이지를 나타낸다. `_index.md`에는 템플릿이 필요없다. 그것은 -하위 디렉토리의 항목에 대한 개요 내용을 포함할 수 있다. +하위 디렉터리의 항목에 대한 개요 내용을 포함할 수 있다. -디렉토리의 다른 파일들은 기본적으로 알파벳순으로 정렬된다. 이것은 거의 -최적의 순서가 아니다. 하위 디렉토리에서 항목의 상대적 정렬을 제어하려면 +디렉터리의 다른 파일들은 기본적으로 알파벳순으로 정렬된다. 이것은 거의 +최적의 순서가 아니다. 하위 디렉터리에서 항목의 상대적 정렬을 제어하려면 `가중치:` 전문의 키를 정수로 설정하자. 일반적으로 우리는 나중에 항목을 추가하기 위해 10의 배수를 사용한다. 예를 들어 가중치가 `10`인 항목은 가중치가 `20`인 항목보다 우선한다. @@ -112,13 +120,13 @@ YAML 블록이다. 여기 예시가 있다. 샘플 YAML 파일을 포함시키려면 이 방법을 사용하자. YAML 파일과 같은 새로운 독립형 샘플 파일을 추가할 때 -`/examples/` 의 하위 디렉토리 중 하나에 코드를 배치하자. 여기서 ``은 +`/examples/` 의 하위 디렉터리 중 하나에 코드를 배치하자. 여기서 ``은 주제에 관한 언어이다. 문서 파일에서 `codenew` 단축 코드(shortcode)를 사용하자. ```none {{/my-example-yaml>" */>}} ``` -여기서 `` 는 `examples` 디렉토리와 관련하여 포함될 +여기서 `` 는 `examples` 디렉터리와 관련하여 포함될 파일의 경로이다. 다음 Hugo 단축 코드(shortcode)는 `/content/en/examples/pods/storage/gce-volume.yaml` 에 있는 YAML 파일을 참조한다. @@ -135,7 +143,7 @@ YAML 파일과 같은 새로운 독립형 샘플 파일을 추가할 때 ## 구성 파일에서 API 오브젝트를 작성하는 방법 표시 구성 파일을 기반으로 API 오브젝트를 생성하는 방법을 보여주려면 -`/examples` 아래의 하위 디렉토리 중 하나에 +`/examples` 아래의 하위 디렉터리 중 하나에 구성 파일을 배치하자. 문서에서 이 명령을 띄워보자. @@ -145,18 +153,18 @@ kubectl create -f https://k8s.io/examples/pods/storage/gce-volume.yaml ``` {{< note >}} -`/examples` 디렉토리에 새 YAMl 파일을 추가할 때 파일이 +`/examples` 디렉터리에 새 YAMl 파일을 추가할 때 파일이 `/examples_test.go` 파일에도 포함되어 있는지 확인하자. 웹 사이트의 Travis CI 는 PR이 제출될 때 이 예제를 자동으로 실행하여 모든 예제가 테스트를 통과하도록 한다. {{< /note >}} 이 기술을 사용하는 문서의 예로 -[단일 인스턴스 스테이트풀 어플리케이션 실행](/docs/tutorials/stateful-application/run-stateful-application/)을 참조하자. +[단일 인스턴스 스테이트풀 어플리케이션 실행](/ko/docs/tasks/run-application/run-single-instance-stateful-application/)을 참조하자. ## 문서에 이미지 추가 -이미지 파일을 `/images` 디렉토리에 넣는다. 기본 +이미지 파일을 `/images` 디렉터리에 넣는다. 기본 이미지 형식은 SVG 이다. @@ -164,5 +172,4 @@ kubectl create -f https://k8s.io/examples/pods/storage/gce-volume.yaml ## {{% heading "whatsnext" %}} * [페이지 콘텐츠 타입 사용](/docs/contribute/style/page-content-types/)에 대해 알아보기. -* [풀 리퀘스트 작성](/docs/contribute/new-content/open-a-pr/)에 대해 알아보기. - +* [풀 리퀘스트 작성](/ko/docs/contribute/new-content/new-content/)에 대해 알아보기. diff --git a/content/ko/docs/home/_index.md b/content/ko/docs/home/_index.md index b47bbf5c14846..20377994f1eb1 100644 --- a/content/ko/docs/home/_index.md +++ b/content/ko/docs/home/_index.md @@ -57,6 +57,8 @@ cards: - name: release-notes title: 릴리스 노트 description: 쿠버네티스를 설치하거나 최신의 버전으로 업그레이드하는 경우, 현재 릴리스 노트를 참고한다. + button: "쿠버네티스 다운로드" + button_path: "/docs/setup/release/notes" - name: about title: 문서에 대하여 description: 이 웹사이트는 현재 버전과 이전 4개 버전의 쿠버네티스 문서를 포함한다. diff --git a/content/ko/docs/reference/command-line-tools-reference/_index.md b/content/ko/docs/reference/command-line-tools-reference/_index.md new file mode 100644 index 0000000000000..0639d05b1596c --- /dev/null +++ b/content/ko/docs/reference/command-line-tools-reference/_index.md @@ -0,0 +1,5 @@ +--- +title: 커맨드 라인 도구 레퍼런스 +weight: 60 +toc-hide: true +--- diff --git a/content/ko/docs/reference/command-line-tools-reference/feature-gates.md b/content/ko/docs/reference/command-line-tools-reference/feature-gates.md new file mode 100644 index 0000000000000..1e2796b484a94 --- /dev/null +++ b/content/ko/docs/reference/command-line-tools-reference/feature-gates.md @@ -0,0 +1,521 @@ +--- +weight: 10 +title: 기능 게이트 +content_type: concept +--- + + +이 페이지에는 관리자가 다른 쿠버네티스 컴포넌트에서 지정할 수 있는 다양한 +기능 게이트에 대한 개요가 포함되어 있다. 
+ +기능의 단계(stage)에 대한 설명은 [기능 단계](#기능-단계)를 참고한다. + + + +## 개요 + +기능 게이트는 쿠버네티스 기능을 설명하는 일련의 키=값 쌍이다. +각 쿠버네티스 컴포넌트에서 `--feature-gates` 커맨드 라인 플래그를 사용하여 +이러한 기능을 켜거나 끌 수 있다. + + +각 쿠버네티스 컴포넌트를 사용하면 해당 컴포넌트와 관련된 기능 게이트 집합을 +활성화 또는 비활성화할 수 있다. +모든 컴포넌트에 대한 전체 기능 게이트 집합을 보려면 `-h` 플래그를 사용한다. +kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 쌍 목록에 지정된 `--feature-gates` 플래그를 사용한다. + +```shell +--feature-gates="...,DynamicKubeletConfig=true" +``` + +다음 표는 다른 쿠버네티스 컴포넌트에서 설정할 수 있는 기능 게이트를 +요약한 것이다. + +- "도입" 열에는 기능이 소개되거나 릴리스 단계가 변경될 때의 + 쿠버네티스 릴리스가 포함된다. +- "종료" 열이 비어 있지 않으면, 여전히 기능 게이트를 사용할 수 있는 마지막 + 쿠버네티스 릴리스가 포함된다. +- 기능이 알파 또는 베타 상태인 경우, + [알파/베타 기능 게이트 테이블](#알파-또는-베타-기능을-위한-기능-게이트)에서 나열된 기능을 찾을 수 있다. +- 기능이 안정된 경우 해당 기능에 대한 모든 단계를 + [GA(graduated)/사용 중단(deprecated) 기능 게이트 테이블](#GA-또는-사용-중단된-기능을-위한-기능-게이트)에 나열할 수 있다. +- [GA/사용 중단 기능 게이트 테이블](#GA-또는-사용-중단된-기능을-위한-기능-게이트)에는 + 사용 중단된 기능과 철회(withdrawn) 기능의 목록도 있다. + +### 알파 또는 베타 기능을 위한 기능 게이트 + +{{< table caption="알파 또는 베타 단계에 있는 기능을 위한 기능 게이트" >}} + +| 기능 | 디폴트 | 단계 | 도입 | 종료 | +|---------|---------|-------|-------|-------| +| `AnyVolumeDataSource` | `false` | 알파 | 1.18 | | +| `APIListChunking` | `false` | 알파 | 1.8 | 1.8 | +| `APIListChunking` | `true` | 베타 | 1.9 | | +| `APIPriorityAndFairness` | `false` | 알파 | 1.17 | | +| `APIResponseCompression` | `false` | 알파 | 1.7 | | +| `AppArmor` | `true` | 베타 | 1.4 | | +| `BalanceAttachedNodeVolumes` | `false` | 알파 | 1.11 | | +| `BoundServiceAccountTokenVolume` | `false` | 알파 | 1.13 | | +| `CPUManager` | `false` | 알파 | 1.8 | 1.9 | +| `CPUManager` | `true` | 베타 | 1.10 | | +| `CRIContainerLogRotation` | `false` | 알파 | 1.10 | 1.10 | +| `CRIContainerLogRotation` | `true` | 베타| 1.11 | | +| `CSIInlineVolume` | `false` | 알파 | 1.15 | 1.15 | +| `CSIInlineVolume` | `true` | 베타 | 1.16 | - | +| `CSIMigration` | `false` | 알파 | 1.14 | 1.16 | +| `CSIMigration` | `true` | 베타 | 1.17 | | +| `CSIMigrationAWS` | `false` | 알파 | 1.14 | | +| `CSIMigrationAWS` | `false` | 베타 | 1.17 | | +| `CSIMigrationAWSComplete` | `false` | 알파 | 1.17 | | +| `CSIMigrationAzureDisk` | `false` | 알파 | 1.15 | | +| `CSIMigrationAzureDiskComplete` | `false` | 알파 | 1.17 | | +| `CSIMigrationAzureFile` | `false` | 알파 | 1.15 | | +| `CSIMigrationAzureFileComplete` | `false` | 알파 | 1.17 | | +| `CSIMigrationGCE` | `false` | 알파 | 1.14 | 1.16 | +| `CSIMigrationGCE` | `false` | 베타 | 1.17 | | +| `CSIMigrationGCEComplete` | `false` | 알파 | 1.17 | | +| `CSIMigrationOpenStack` | `false` | 알파 | 1.14 | | +| `CSIMigrationOpenStackComplete` | `false` | 알파 | 1.17 | | +| `ConfigurableFSGroupPolicy` | `false` | 알파 | 1.18 | | +| `CustomCPUCFSQuotaPeriod` | `false` | 알파 | 1.12 | | +| `CustomResourceDefaulting` | `false` | 알파| 1.15 | 1.15 | +| `CustomResourceDefaulting` | `true` | 베타 | 1.16 | | +| `DevicePlugins` | `false` | 알파 | 1.8 | 1.9 | +| `DevicePlugins` | `true` | 베타 | 1.10 | | +| `DryRun` | `false` | 알파 | 1.12 | 1.12 | +| `DryRun` | `true` | 베타 | 1.13 | | +| `DynamicAuditing` | `false` | 알파 | 1.13 | | +| `DynamicKubeletConfig` | `false` | 알파 | 1.4 | 1.10 | +| `DynamicKubeletConfig` | `true` | 베타 | 1.11 | | +| `EndpointSlice` | `false` | 알파 | 1.16 | 1.16 | +| `EndpointSlice` | `false` | 베타 | 1.17 | | +| `EndpointSlice` | `true` | 베타 | 1.18 | | +| `EndpointSliceProxying` | `false` | 알파 | 1.18 | | +| `EphemeralContainers` | `false` | 알파 | 1.16 | | +| `ExpandCSIVolumes` | `false` | 알파 | 1.14 | 1.15 | +| `ExpandCSIVolumes` | `true` | 베타 | 1.16 | | +| `ExpandInUsePersistentVolumes` | `false` | 알파 | 1.11 | 1.14 | +| `ExpandInUsePersistentVolumes` | `true` | 베타 | 1.15 | | +| 
`ExpandPersistentVolumes` | `false` | 알파 | 1.8 | 1.10 | +| `ExpandPersistentVolumes` | `true` | 베타 | 1.11 | | +| `ExperimentalHostUserNamespaceDefaulting` | `false` | 베타 | 1.5 | | +| `EvenPodsSpread` | `false` | 알파 | 1.16 | 1.17 | +| `EvenPodsSpread` | `true` | 베타 | 1.18 | | +| `HPAScaleToZero` | `false` | 알파 | 1.16 | | +| `HugePageStorageMediumSize` | `false` | 알파 | 1.18 | | +| `HyperVContainer` | `false` | 알파 | 1.10 | | +| `ImmutableEphemeralVolumes` | `false` | 알파 | 1.18 | | +| `IPv6DualStack` | `false` | 알파 | 1.16 | | +| `KubeletPodResources` | `false` | 알파 | 1.13 | 1.14 | +| `KubeletPodResources` | `true` | 베타 | 1.15 | | +| `LegacyNodeRoleBehavior` | `true` | 알파 | 1.16 | | +| `LocalStorageCapacityIsolation` | `false` | 알파 | 1.7 | 1.9 | +| `LocalStorageCapacityIsolation` | `true` | 베타 | 1.10 | | +| `LocalStorageCapacityIsolationFSQuotaMonitoring` | `false` | 알파 | 1.15 | | +| `MountContainers` | `false` | 알파 | 1.9 | | +| `NodeDisruptionExclusion` | `false` | 알파 | 1.16 | | +| `NonPreemptingPriority` | `false` | 알파 | 1.15 | | +| `PodDisruptionBudget` | `false` | 알파 | 1.3 | 1.4 | +| `PodDisruptionBudget` | `true` | 베타 | 1.5 | | +| `PodOverhead` | `false` | 알파 | 1.16 | - | +| `ProcMountType` | `false` | 알파 | 1.12 | | +| `QOSReserved` | `false` | 알파 | 1.11 | | +| `RemainingItemCount` | `false` | 알파 | 1.15 | | +| `ResourceLimitsPriorityFunction` | `false` | 알파 | 1.9 | | +| `RotateKubeletClientCertificate` | `true` | 베타 | 1.8 | | +| `RotateKubeletServerCertificate` | `false` | 알파 | 1.7 | 1.11 | +| `RotateKubeletServerCertificate` | `true` | 베타 | 1.12 | | +| `RunAsGroup` | `true` | 베타 | 1.14 | | +| `RuntimeClass` | `false` | 알파 | 1.12 | 1.13 | +| `RuntimeClass` | `true` | 베타 | 1.14 | | +| `SCTPSupport` | `false` | 알파 | 1.12 | | +| `ServerSideApply` | `false` | 알파 | 1.14 | 1.15 | +| `ServerSideApply` | `true` | 베타 | 1.16 | | +| `ServiceAccountIssuerDiscovery` | `false` | Alpha | 1.18 | | +| `ServiceAppProtocol` | `false` | 알파 | 1.18 | | +| `ServiceNodeExclusion` | `false` | 알파 | 1.8 | | +| `ServiceTopology` | `false` | 알파 | 1.17 | | +| `StartupProbe` | `false` | 알파 | 1.16 | 1.17 | +| `StartupProbe` | `true` | 베타 | 1.18 | | +| `StorageVersionHash` | `false` | 알파 | 1.14 | 1.14 | +| `StorageVersionHash` | `true` | 베타 | 1.15 | | +| `StreamingProxyRedirects` | `false` | 베타 | 1.5 | 1.5 | +| `StreamingProxyRedirects` | `true` | 베타 | 1.6 | | +| `SupportNodePidsLimit` | `false` | 알파 | 1.14 | 1.14 | +| `SupportNodePidsLimit` | `true` | 베타 | 1.15 | | +| `SupportPodPidsLimit` | `false` | 알파 | 1.10 | 1.13 | +| `SupportPodPidsLimit` | `true` | 베타 | 1.14 | | +| `Sysctls` | `true` | 베타 | 1.11 | | +| `TokenRequest` | `false` | 알파 | 1.10 | 1.11 | +| `TokenRequest` | `true` | 베타 | 1.12 | | +| `TokenRequestProjection` | `false` | 알파 | 1.11 | 1.11 | +| `TokenRequestProjection` | `true` | 베타 | 1.12 | | +| `TTLAfterFinished` | `false` | 알파 | 1.12 | | +| `TopologyManager` | `false` | 알파 | 1.16 | | +| `ValidateProxyRedirects` | `false` | 알파 | 1.12 | 1.13 | +| `ValidateProxyRedirects` | `true` | 베타 | 1.14 | | +| `VolumeSnapshotDataSource` | `false` | 알파 | 1.12 | 1.16 | +| `VolumeSnapshotDataSource` | `true` | 베타 | 1.17 | - | +| `WindowsGMSA` | `false` | 알파 | 1.14 | | +| `WindowsGMSA` | `true` | 베타 | 1.16 | | +| `WinDSR` | `false` | 알파 | 1.14 | | +| `WinOverlay` | `false` | 알파 | 1.14 | | +{{< /table >}} + +### GA 또는 사용 중단된 기능을 위한 기능 게이트 + +{{< table caption="GA 또는 사용 중단 기능을 위한 기능 게이트" >}} + +| 기능 | 디폴트 | 단계 | 도입 | 종료 | +|---------|---------|-------|-------|-------| +| `Accelerators` | `false` | 알파 | 1.6 | 1.10 | +| 
`Accelerators` | - | 사용 중단 | 1.11 | - | +| `AdvancedAuditing` | `false` | 알파 | 1.7 | 1.7 | +| `AdvancedAuditing` | `true` | 베타 | 1.8 | 1.11 | +| `AdvancedAuditing` | `true` | GA | 1.12 | - | +| `AffinityInAnnotations` | `false` | 알파 | 1.6 | 1.7 | +| `AffinityInAnnotations` | - | 사용 중단 | 1.8 | - | +| `AllowExtTrafficLocalEndpoints` | `false` | 베타 | 1.4 | 1.6 | +| `AllowExtTrafficLocalEndpoints` | `true` | GA | 1.7 | - | +| `BlockVolume` | `false` | 알파 | 1.9 | 1.12 | +| `BlockVolume` | `true` | 베타 | 1.13 | 1.17 | +| `BlockVolume` | `true` | GA | 1.18 | - | +| `CSIBlockVolume` | `false` | 알파 | 1.11 | 1.13 | +| `CSIBlockVolume` | `true` | 베타 | 1.14 | 1.17 | +| `CSIBlockVolume` | `true` | GA | 1.18 | - | +| `CSIDriverRegistry` | `false` | 알파 | 1.12 | 1.13 | +| `CSIDriverRegistry` | `true` | 베타 | 1.14 | 1.17 | +| `CSIDriverRegistry` | `true` | GA | 1.18 | | +| `CSINodeInfo` | `false` | 알파 | 1.12 | 1.13 | +| `CSINodeInfo` | `true` | 베타 | 1.14 | 1.16 | +| `CSINodeInfo` | `true` | GA | 1.17 | | +| `AttachVolumeLimit` | `false` | 알파 | 1.11 | 1.11 | +| `AttachVolumeLimit` | `true` | 베타 | 1.12 | 1.16 | +| `AttachVolumeLimit` | `true` | GA | 1.17 | - | +| `CSIPersistentVolume` | `false` | 알파 | 1.9 | 1.9 | +| `CSIPersistentVolume` | `true` | 베타 | 1.10 | 1.12 | +| `CSIPersistentVolume` | `true` | GA | 1.13 | - | +| `CustomPodDNS` | `false` | 알파 | 1.9 | 1.9 | +| `CustomPodDNS` | `true` | 베타| 1.10 | 1.13 | +| `CustomPodDNS` | `true` | GA | 1.14 | - | +| `CustomResourcePublishOpenAPI` | `false` | 알파| 1.14 | 1.14 | +| `CustomResourcePublishOpenAPI` | `true` | 베타| 1.15 | 1.15 | +| `CustomResourcePublishOpenAPI` | `true` | GA | 1.16 | - | +| `CustomResourceSubresources` | `false` | 알파 | 1.10 | 1.10 | +| `CustomResourceSubresources` | `true` | 베타 | 1.11 | 1.15 | +| `CustomResourceSubresources` | `true` | GA | 1.16 | - | +| `CustomResourceValidation` | `false` | 알파 | 1.8 | 1.8 | +| `CustomResourceValidation` | `true` | 베타 | 1.9 | 1.15 | +| `CustomResourceValidation` | `true` | GA | 1.16 | - | +| `CustomResourceWebhookConversion` | `false` | 알파 | 1.13 | 1.14 | +| `CustomResourceWebhookConversion` | `true` | 베타 | 1.15 | 1.15 | +| `CustomResourceWebhookConversion` | `true` | GA | 1.16 | - | +| `DynamicProvisioningScheduling` | `false` | 알파 | 1.11 | 1.11 | +| `DynamicProvisioningScheduling` | - | 사용 중단| 1.12 | - | +| `DynamicVolumeProvisioning` | `true` | 알파 | 1.3 | 1.7 | +| `DynamicVolumeProvisioning` | `true` | GA | 1.8 | - | +| `EnableEquivalenceClassCache` | `false` | 알파 | 1.8 | 1.14 | +| `EnableEquivalenceClassCache` | - | 사용 중단 | 1.15 | - | +| `ExperimentalCriticalPodAnnotation` | `false` | 알파 | 1.5 | 1.12 | +| `ExperimentalCriticalPodAnnotation` | `false` | 사용 중단 | 1.13 | - | +| `GCERegionalPersistentDisk` | `true` | 베타 | 1.10 | 1.12 | +| `GCERegionalPersistentDisk` | `true` | GA | 1.13 | - | +| `HugePages` | `false` | 알파 | 1.8 | 1.9 | +| `HugePages` | `true` | 베타| 1.10 | 1.13 | +| `HugePages` | `true` | GA | 1.14 | - | +| `Initializers` | `false` | 알파 | 1.7 | 1.13 | +| `Initializers` | - | 사용 중단 | 1.14 | - | +| `KubeletConfigFile` | `false` | 알파 | 1.8 | 1.9 | +| `KubeletConfigFile` | - | 사용 중단 | 1.10 | - | +| `KubeletPluginsWatcher` | `false` | 알파 | 1.11 | 1.11 | +| `KubeletPluginsWatcher` | `true` | 베타 | 1.12 | 1.12 | +| `KubeletPluginsWatcher` | `true` | GA | 1.13 | - | +| `MountPropagation` | `false` | 알파 | 1.8 | 1.9 | +| `MountPropagation` | `true` | 베타 | 1.10 | 1.11 | +| `MountPropagation` | `true` | GA | 1.12 | - | +| `NodeLease` | `false` | 알파 | 1.12 | 1.13 | +| `NodeLease` | `true` | 베타 | 1.14 | 1.16 
| +| `NodeLease` | `true` | GA | 1.17 | - | +| `PersistentLocalVolumes` | `false` | 알파 | 1.7 | 1.9 | +| `PersistentLocalVolumes` | `true` | 베타 | 1.10 | 1.13 | +| `PersistentLocalVolumes` | `true` | GA | 1.14 | - | +| `PodPriority` | `false` | 알파 | 1.8 | 1.10 | +| `PodPriority` | `true` | 베타 | 1.11 | 1.13 | +| `PodPriority` | `true` | GA | 1.14 | - | +| `PodReadinessGates` | `false` | 알파 | 1.11 | 1.11 | +| `PodReadinessGates` | `true` | 베타 | 1.12 | 1.13 | +| `PodReadinessGates` | `true` | GA | 1.14 | - | +| `PodShareProcessNamespace` | `false` | 알파 | 1.10 | 1.11 | +| `PodShareProcessNamespace` | `true` | 베타 | 1.12 | 1.16 | +| `PodShareProcessNamespace` | `true` | GA | 1.17 | - | +| `PVCProtection` | `false` | 알파 | 1.9 | 1.9 | +| `PVCProtection` | - | 사용 중단 | 1.10 | - | +| `RequestManagement` | `false` | 알파 | 1.15 | 1.16 | +| `ResourceQuotaScopeSelectors` | `false` | 알파 | 1.11 | 1.11 | +| `ResourceQuotaScopeSelectors` | `true` | 베타 | 1.12 | 1.16 | +| `ResourceQuotaScopeSelectors` | `true` | GA | 1.17 | - | +| `ScheduleDaemonSetPods` | `false` | 알파 | 1.11 | 1.11 | +| `ScheduleDaemonSetPods` | `true` | 베타 | 1.12 | 1.16 | +| `ScheduleDaemonSetPods` | `true` | GA | 1.17 | - | +| `ServiceLoadBalancerFinalizer` | `false` | 알파 | 1.15 | 1.15 | +| `ServiceLoadBalancerFinalizer` | `true` | 베타 | 1.16 | 1.16 | +| `ServiceLoadBalancerFinalizer` | `true` | GA | 1.17 | - | +| `StorageObjectInUseProtection` | `true` | 베타 | 1.10 | 1.10 | +| `StorageObjectInUseProtection` | `true` | GA | 1.11 | - | +| `SupportIPVSProxyMode` | `false` | 알파 | 1.8 | 1.8 | +| `SupportIPVSProxyMode` | `false` | 베타 | 1.9 | 1.9 | +| `SupportIPVSProxyMode` | `true` | 베타 | 1.10 | 1.10 | +| `SupportIPVSProxyMode` | `true` | GA | 1.11 | - | +| `TaintBasedEvictions` | `false` | 알파 | 1.6 | 1.12 | +| `TaintBasedEvictions` | `true` | 베타 | 1.13 | 1.17 | +| `TaintBasedEvictions` | `true` | GA | 1.18 | - | +| `TaintNodesByCondition` | `false` | 알파 | 1.8 | 1.11 | +| `TaintNodesByCondition` | `true` | 베타 | 1.12 | 1.16 | +| `TaintNodesByCondition` | `true` | GA | 1.17 | - | +| `VolumePVCDataSource` | `false` | 알파 | 1.15 | 1.15 | +| `VolumePVCDataSource` | `true` | 베타 | 1.16 | 1.17 | +| `VolumePVCDataSource` | `true` | GA | 1.18 | - | +| `VolumeScheduling` | `false` | 알파 | 1.9 | 1.9 | +| `VolumeScheduling` | `true` | 베타 | 1.10 | 1.12 | +| `VolumeScheduling` | `true` | GA | 1.13 | - | +| `VolumeSubpath` | `true` | GA | 1.13 | - | +| `VolumeSubpathEnvExpansion` | `false` | 알파 | 1.14 | 1.14 | +| `VolumeSubpathEnvExpansion` | `true` | 베타 | 1.15 | 1.16 | +| `VolumeSubpathEnvExpansion` | `true` | GA | 1.17 | - | +| `WatchBookmark` | `false` | 알파 | 1.15 | 1.15 | +| `WatchBookmark` | `true` | 베타 | 1.16 | 1.16 | +| `WatchBookmark` | `true` | GA | 1.17 | - | +| `WindowsGMSA` | `false` | 알파 | 1.14 | 1.15 | +| `WindowsGMSA` | `true` | 베타 | 1.16 | 1.17 | +| `WindowsGMSA` | `true` | GA | 1.18 | - | +| `WindowsRunAsUserName` | `false` | 알파 | 1.16 | 1.16 | +| `WindowsRunAsUserName` | `true` | 베타 | 1.17 | 1.17 | +| `WindowsRunAsUserName` | `true` | GA | 1.18 | - | +{{< /table >}} + +## 기능 사용 + +### 기능 단계 + +기능은 *알파*, *베타* 또는 *GA* 단계일 수 있다. +*알파* 기능은 다음을 의미한다. + +* 기본적으로 비활성화되어 있다. +* 버그가 있을 수 있다. 이 기능을 사용하면 버그에 노출될 수 있다. +* 기능에 대한 지원은 사전 통지없이 언제든지 중단될 수 있다. +* API는 이후 소프트웨어 릴리스에서 예고없이 호환되지 않는 방식으로 변경될 수 있다. +* 버그의 위험이 증가하고 장기 지원이 부족하여, 단기 테스트 + 클러스터에서만 사용하는 것이 좋다. + +*베타* 기능은 다음을 의미한다. + +* 기본적으로 활성화되어 있다. +* 이 기능은 잘 테스트되었다. 이 기능을 활성화하면 안전한 것으로 간주된다. +* 세부 내용은 변경될 수 있지만, 전체 기능에 대한 지원은 중단되지 않는다. +* 오브젝트의 스키마 및/또는 시맨틱은 후속 베타 또는 안정 릴리스에서 + 호환되지 않는 방식으로 변경될 수 있다. 
이러한 상황이 발생하면, 다음 버전으로 마이그레이션하기 위한 + 지침을 제공한다. API 오브젝트를 삭제, 편집 및 재작성해야 + 할 수도 있다. 편집 과정에서 약간의 생각이 필요할 수 있다. + 해당 기능에 의존하는 애플리케이션의 경우 다운타임이 필요할 수 있다. +* 후속 릴리스에서 호환되지 않는 변경이 발생할 수 있으므로 + 업무상 중요하지 않은(non-business-critical) 용도로만 + 권장한다. 독립적으로 업그레이드할 수 있는 여러 클러스터가 있는 경우, 이 제한을 완화할 수 있다. + +{{< note >}} +*베타* 기능을 사용해 보고 의견을 보내주길 바란다! +베타 기간이 종료된 후에는, 더 많은 변경을 하는 것이 실용적이지 않을 수 있다. +{{< /note >}} + +*GA*(General Availability) 기능은 *안정* 기능이라고도 한다. 이 의미는 다음과 같다. + +* 이 기능은 항상 활성화되어 있다. 비활성화할 수 없다. +* 해당 기능 게이트는 더 이상 필요하지 않다. +* 여러 후속 버전의 릴리스된 소프트웨어에 안정적인 기능의 버전이 포함된다. + +## 기능 게이트 목록 {#feature-gates} + +각 기능 게이트는 특정 기능을 활성화/비활성화하도록 설계되었다. + +- `Accelerators`: 도커 사용 시 Nvidia GPU 지원 활성화한다. +- `AdvancedAuditing`: [고급 감사](/docs/tasks/debug-application-cluster/audit/#advanced-audit) 기능을 활성화한다. +- `AffinityInAnnotations`(*사용 중단됨*): [파드 어피니티 또는 안티-어피니티](/ko/docs/concepts/scheduling-eviction/assign-pod-node/#어피니티-affinity-와-안티-어피니티-anti-affinity) 설정을 활성화한다. +- `AllowExtTrafficLocalEndpoints`: 서비스가 외부 요청을 노드의 로컬 엔드포인트로 라우팅할 수 있도록 한다. +- `AnyVolumeDataSource`: {{< glossary_tooltip text="PVC" term_id="persistent-volume-claim" >}}의 + `DataSource` 로 모든 사용자 정의 리소스 사용을 활성화한다. +- `APIListChunking`: API 클라이언트가 API 서버에서 (`LIST` 또는 `GET`) 리소스를 청크(chunks)로 검색할 수 있도록 한다. +- `APIPriorityAndFairness`: 각 서버의 우선 순위와 공정성을 통해 동시 요청을 관리할 수 ​​있다. (`RequestManagement` 에서 이름이 변경됨) +- `APIResponseCompression`: `LIST` 또는 `GET` 요청에 대한 API 응답을 압축한다. +- `AppArmor`: 도커를 사용할 때 리눅스 노드에서 AppArmor 기반의 필수 접근 제어를 활성화한다. + 자세한 내용은 [AppArmor 튜토리얼](/ko/docs/tutorials/clusters/apparmor/)을 참고한다. +- `AttachVolumeLimit`: 볼륨 플러그인이 노드에 연결될 수 있는 볼륨 수에 + 대한 제한을 보고하도록 한다. + 자세한 내용은 [동적 볼륨 제한](/docs/concepts/storage/storage-limits/#dynamic-volume-limits)을 참고한다. +- `BalanceAttachedNodeVolumes`: 스케줄링 시 균형 잡힌 리소스 할당을 위해 고려할 노드의 볼륨 수를 + 포함한다. 스케줄러가 결정을 내리는 동안 CPU, 메모리 사용률 및 볼륨 수가 + 더 가까운 노드가 선호된다. +- `BlockVolume`: 파드에서 원시 블록 장치의 정의와 사용을 활성화한다. + 자세한 내용은 [원시 블록 볼륨 지원](/ko/docs/concepts/storage/persistent-volumes/#원시-블록-볼륨-지원)을 + 참고한다. +- `BoundServiceAccountTokenVolume`: ServiceAccountTokenVolumeProjection으로 구성된 프로젝션 볼륨을 사용하도록 서비스어카운트 볼륨을 + 마이그레이션한다. + 자세한 내용은 [서비스 어카운트 토큰 볼륨](https://git.k8s.io/community/contributors/design-proposals/storage/svcacct-token-volume-source.md)을 + 확인한다. +- `ConfigurableFSGroupPolicy`: 파드에 볼륨을 마운트할 때 fsGroups에 대한 볼륨 권한 변경 정책을 구성할 수 있다. 자세한 내용은 [파드에 대한 볼륨 권한 및 소유권 변경 정책 구성](/docs/tasks/configure-pod-container/security-context/#configure-volume-permission-and-ownership-change-policy-for-pods)을 참고한다. +- `CPUManager`: 컨테이너 수준의 CPU 어피니티 지원을 활성화한다. [CPU 관리 정책](/docs/tasks/administer-cluster/cpu-management-policies/)을 참고한다. +- `CRIContainerLogRotation`: cri 컨테이너 런타임에 컨테이너 로그 로테이션을 활성화한다. +- `CSIBlockVolume`: 외부 CSI 볼륨 드라이버가 블록 스토리지를 지원할 수 있게 한다. 자세한 내용은 [`csi` 원시 블록 볼륨 지원](/ko/docs/concepts/storage/volumes/#csi-원시-raw-블록-볼륨-지원) 문서를 참고한다. +- `CSIDriverRegistry`: csi.storage.k8s.io에서 CSIDriver API 오브젝트와 관련된 모든 로직을 활성화한다. +- `CSIInlineVolume`: 파드에 대한 CSI 인라인 볼륨 지원을 활성화한다. +- `CSIMigration`: shim 및 변환 로직을 통해 볼륨 작업을 인-트리 플러그인에서 사전 설치된 해당 CSI 플러그인으로 라우팅할 수 있다. +- `CSIMigrationAWS`: shim 및 변환 로직을 통해 볼륨 작업을 AWS-EBS 인-트리 플러그인에서 EBS CSI 플러그인으로 라우팅할 수 있다. 노드에 EBS CSI 플러그인이 설치와 구성이 되어 있지 않은 경우 인-트리 EBS 플러그인으로 폴백(falling back)을 지원한다. CSIMigration 기능 플래그가 필요하다. +- `CSIMigrationAWSComplete`: kubelet 및 볼륨 컨트롤러에서 EBS 인-트리 플러그인 등록을 중지하고 shim 및 변환 로직을 사용하여 볼륨 작업을 AWS-EBS 인-트리 플러그인에서 EBS CSI 플러그인으로 라우팅할 수 있다. 클러스터의 모든 노드에 CSIMigration과 CSIMigrationAWS 기능 플래그가 활성화되고 EBS CSI 플러그인이 설치 및 구성이 되어 있어야 한다. 
+- `CSIMigrationAzureDisk`: shim 및 변환 로직을 통해 볼륨 작업을 Azure-Disk 인-트리 플러그인에서 AzureDisk CSI 플러그인으로 라우팅할 수 있다. 노드에 AzureDisk CSI 플러그인이 설치와 구성이 되어 있지 않은 경우 인-트리 AzureDisk 플러그인으로 폴백을 지원한다. CSIMigration 기능 플래그가 필요하다. +- `CSIMigrationAzureDiskComplete`: kubelet 및 볼륨 컨트롤러에서 Azure-Disk 인-트리 플러그인 등록을 중지하고 shim 및 변환 로직을 사용하여 볼륨 작업을 Azure-Disk 인-트리 플러그인에서 AzureDisk CSI 플러그인으로 라우팅할 수 있다. 클러스터의 모든 노드에 CSIMigration과 CSIMigrationAzureDisk 기능 플래그가 활성화되고 AzureDisk CSI 플러그인이 설치 및 구성이 되어 있어야 한다. +- `CSIMigrationAzureFile`: shim 및 변환 로직을 통해 볼륨 작업을 Azure-File 인-트리 플러그인에서 AzureFile CSI 플러그인으로 라우팅할 수 있다. 노드에 AzureFile CSI 플러그인이 설치 및 구성이 되어 있지 않은 경우 인-트리 AzureFile 플러그인으로 폴백을 지원한다. CSIMigration 기능 플래그가 필요하다. +- `CSIMigrationAzureFileComplete`: kubelet 및 볼륨 컨트롤러에서 Azure 파일 인-트리 플러그인 등록을 중지하고 shim 및 변환 로직을 통해 볼륨 작업을 Azure 파일 인-트리 플러그인에서 AzureFile CSI 플러그인으로 라우팅할 수 있다. 클러스터의 모든 노드에 CSIMigration과 CSIMigrationAzureFile 기능 플래그가 활성화되고 AzureFile CSI 플러그인이 설치 및 구성이 되어 있어야 한다. +- `CSIMigrationGCE`: shim 및 변환 로직을 통해 볼륨 작업을 GCE-PD 인-트리 플러그인에서 PD CSI 플러그인으로 라우팅할 수 있다. 노드에 PD CSI 플러그인이 설치 및 구성이 되어 있지 않은 경우 인-트리 GCE 플러그인으로 폴백을 지원한다. CSIMigration 기능 플래그가 필요하다. +- `CSIMigrationGCEComplete`: kubelet 및 볼륨 컨트롤러에서 GCE-PD 인-트리 플러그인 등록을 중지하고 shim 및 변환 로직을 통해 볼륨 작업을 GCE-PD 인-트리 플러그인에서 PD CSI 플러그인으로 라우팅할 수 있다. CSIMigration과 CSIMigrationGCE 기능 플래그가 필요하다. +- `CSIMigrationOpenStack`: shim 및 변환 로직을 통해 볼륨 작업을 Cinder 인-트리 플러그인에서 Cinder CSI 플러그인으로 라우팅할 수 있다. 노드에 Cinder CSI 플러그인이 설치 및 구성이 되어 있지 않은 경우 인-트리 Cinder 플러그인으로 폴백을 지원한다. CSIMigration 기능 플래그가 필요하다. +- `CSIMigrationOpenStackComplete`: kubelet 및 볼륨 컨트롤러에서 Cinder 인-트리 플러그인 등록을 중지하고 shim 및 변환 로직이 Cinder 인-트리 플러그인에서 Cinder CSI 플러그인으로 볼륨 작업을 라우팅할 수 있도록 한다. 클러스터의 모든 노드에 CSIMigration과 CSIMigrationOpenStack 기능 플래그가 활성화되고 Cinder CSI 플러그인이 설치 및 구성이 되어 있어야 한다. +- `CSINodeInfo`: csi.storage.k8s.io에서 CSINodeInfo API 오브젝트와 관련된 모든 로직을 활성화한다. +- `CSIPersistentVolume`: [CSI (Container Storage Interface)](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/storage/container-storage-interface.md) + 호환 볼륨 플러그인을 통해 프로비저닝된 볼륨을 감지하고 + 마운트할 수 있다. + 자세한 내용은 [`csi` 볼륨 유형](/ko/docs/concepts/storage/volumes/#csi) 문서를 확인한다. +- `CustomCPUCFSQuotaPeriod`: 노드가 CPUCFSQuotaPeriod를 변경하도록 한다. +- `CustomPodDNS`: `dnsConfig` 속성을 사용하여 파드의 DNS 설정을 사용자 정의할 수 있다. + 자세한 내용은 [파드의 DNS 설정](/ko/docs/concepts/services-networking/dns-pod-service/#pod-dns-config)을 + 확인한다. +- `CustomResourceDefaulting`: OpenAPI v3 유효성 검사 스키마에서 기본값에 대한 CRD 지원을 활성화한다. +- `CustomResourcePublishOpenAPI`: CRD OpenAPI 사양을 게시할 수 있다. +- `CustomResourceSubresources`: [커스텀리소스데피니션](/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources/)에서 + 생성된 리소스에서 `/status` 및 `/scale` 하위 리소스를 활성화한다. +- `CustomResourceValidation`: [커스텀리소스데피니션](/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources/)에서 + 생성된 리소스에서 스키마 기반 유효성 검사를 활성화한다. +- `CustomResourceWebhookConversion`: [커스텀리소스데피니션](/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources/)에서 + 생성된 리소스에 대해 웹 훅 기반의 변환을 활성화한다. + 실행 중인 파드 문제를 해결한다. +- `DevicePlugins`: 노드에서 [장치 플러그인](/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) + 기반 리소스 프로비저닝을 활성화한다. +- `DryRun`: 서버 측의 [dry run](/docs/reference/using-api/api-concepts/#dry-run) 요청을 + 요청을 활성화하여 커밋하지 않고 유효성 검사, 병합 및 변화를 테스트할 수 있다. +- `DynamicAuditing`: [동적 감사](/docs/tasks/debug-application-cluster/audit/#dynamic-backend) 기능을 활성화한다. +- `DynamicKubeletConfig`: kubelet의 동적 구성을 활성화한다. [kubelet 재구성](/docs/tasks/administer-cluster/reconfigure-kubelet/)을 참고한다. 
+- `DynamicProvisioningScheduling`: 볼륨 스케줄을 인식하고 PV 프로비저닝을 처리하도록 기본 스케줄러를 확장한다. + 이 기능은 v1.12의 `VolumeScheduling` 기능으로 대체되었다. +- `DynamicVolumeProvisioning`(*사용 중단됨*): 파드에 퍼시스턴트 볼륨의 [동적 프로비저닝](/ko/docs/concepts/storage/dynamic-provisioning/)을 활성화한다. +- `EnableAggregatedDiscoveryTimeout` (*사용 중단됨*): 수집된 검색 호출에서 5초 시간 초과를 활성화한다. +- `EnableEquivalenceClassCache`: 스케줄러가 파드를 스케줄링할 때 노드의 동등성을 캐시할 수 있게 한다. +- `EphemeralContainers`: 파드를 실행하기 위한 {{< glossary_tooltip text="임시 컨테이너" + term_id="ephemeral-container" >}}를 추가할 수 있다. +- `EvenPodsSpread`: 토폴로지 도메인 간에 파드를 균등하게 스케줄링할 수 있다. [파드 토폴로지 분배 제약 조건](/ko/docs/concepts/workloads/pods/pod-topology-spread-constraints/)을 참고한다. +- `ExpandInUsePersistentVolumes`: 사용 중인 PVC를 확장할 수 있다. [사용 중인 퍼시스턴트볼륨클레임 크기 조정](/ko/docs/concepts/storage/persistent-volumes/#사용-중인-퍼시스턴트볼륨클레임-크기-조정)을 참고한다. +- `ExpandPersistentVolumes`: 퍼시스턴트 볼륨 확장을 활성화한다. [퍼시스턴트 볼륨 클레임 확장](/ko/docs/concepts/storage/persistent-volumes/#퍼시스턴트-볼륨-클레임-확장)을 참고한다. +- `ExperimentalCriticalPodAnnotation`: 특정 파드에 *critical* 로 어노테이션을 달아서 [스케줄링이 보장되도록](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/) 한다. + 이 기능은 v1.13부터 파드 우선 순위 및 선점으로 인해 사용 중단되었다. +- `ExperimentalHostUserNamespaceDefaultingGate`: 사용자 네임스페이스를 호스트로 + 기본 활성화한다. 이것은 다른 호스트 네임스페이스, 호스트 마운트, + 권한이 있는 컨테이너 또는 특정 비-네임스페이스(non-namespaced) 기능(예: `MKNODE`, `SYS_MODULE` 등)을 + 사용하는 컨테이너를 위한 것이다. 도커 데몬에서 사용자 네임스페이스 + 재 매핑이 활성화된 경우에만 활성화해야 한다. +- `EndpointSlice`: 보다 스케일링 가능하고 확장 가능한 네트워크 엔드포인트에 대한 + 엔드포인트 슬라이스를 활성화한다. [엔드포인트 슬라이스 활성화](/docs/tasks/administer-cluster/enabling-endpointslices/)를 참고한다. +- `EndpointSliceProxying`: 이 기능 게이트가 활성화되면, kube-proxy는 + 엔드포인트슬라이스를 엔드포인트 대신 기본 데이터 소스로 사용하여 + 확장성과 성능을 향상시킨다. [엔드포인트 슬라이스 활성화](/docs/tasks/administer-cluster/enabling-endpointslices/)를 참고한다. +- `GCERegionalPersistentDisk`: GCE에서 지역 PD 기능을 활성화한다. +- `HugePages`: 사전 할당된 [huge page](/ko/docs/tasks/manage-hugepages/scheduling-hugepages/)의 할당 및 사용을 활성화한다. +- `HugePageStorageMediumSize`: 사전 할당된 [huge page](/ko/docs/tasks/manage-hugepages/scheduling-hugepages/)의 여러 크기를 지원한다. +- `HyperVContainer`: 윈도우 컨테이너를 위한 [Hyper-V 격리](https://docs.microsoft.com/ko-kr/virtualization/windowscontainers/manage-containers/hyperv-container) 기능을 활성화한다. +- `HPAScaleToZero`: 사용자 정의 또는 외부 메트릭을 사용할 때 `HorizontalPodAutoscaler` 리소스에 대해 `minReplicas` 를 0으로 설정한다. +- `ImmutableEphemeralVolumes`: 안정성과 성능 향상을 위해 개별 시크릿(Secret)과 컨피그맵(ConfigMap)을 변경할 수 없는(immutable) 것으로 표시할 수 있다. +- `KubeletConfigFile`: 구성 파일을 사용하여 지정된 파일에서 kubelet 구성을 로드할 수 있다. + 자세한 내용은 [구성 파일을 통해 kubelet 파라미터 설정](/docs/tasks/administer-cluster/kubelet-config-file/)을 참고한다. +- `KubeletPluginsWatcher`: kubelet이 [CSI 볼륨 드라이버](/ko/docs/concepts/storage/volumes/#csi)와 같은 + 플러그인을 검색할 수 있도록 프로브 기반 플러그인 감시자(watcher) 유틸리티를 사용한다. +- `KubeletPodResources`: kubelet의 파드 리소스 grpc 엔드포인트를 활성화한다. + 자세한 내용은 [장치 모니터링 지원](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/compute-device-assignment.md)을 참고한다. +- `LegacyNodeRoleBehavior`: 비활성화되면, 서비스 로드 밸런서 및 노드 중단의 레거시 동작은 기능별 레이블을 대신하여 `node-role.kubernetes.io/master` 레이블을 무시한다. +- `LocalStorageCapacityIsolation`: [로컬 임시 스토리지](/ko/docs/concepts/configuration/manage-resources-containers/)와 [emptyDir 볼륨](/ko/docs/concepts/storage/volumes/#emptydir)의 `sizeLimit` 속성을 사용할 수 있게 한다. 
+- `LocalStorageCapacityIsolationFSQuotaMonitoring`: [로컬 임시 스토리지](/ko/docs/concepts/configuration/manage-resources-containers/)에 대해 `LocalStorageCapacityIsolation`이 활성화되고 [emptyDir 볼륨](/ko/docs/concepts/storage/volumes/#emptydir)에 대한 백업 파일시스템이 프로젝트 쿼터를 지원하고 활성화된 경우, 프로젝트 쿼터를 사용하여 파일시스템 사용보다는 [emptyDir 볼륨](/ko/docs/concepts/storage/volumes/#emptydir) 스토리지 사용을 모니터링하여 성능과 정확성을 향상시킨다. +- `MountContainers`: 호스트의 유틸리티 컨테이너를 볼륨 마운터로 사용할 수 있다. +- `MountPropagation`: 한 컨테이너에서 다른 컨테이너 또는 파드로 마운트된 볼륨을 공유할 수 있다. + 자세한 내용은 [마운트 전파(propagation)](/ko/docs/concepts/storage/volumes/#마운트-전파-propagation)을 참고한다. +- `NodeDisruptionExclusion`: 영역(zone) 장애 시 노드가 제외되지 않도록 노드 레이블 `node.kubernetes.io/exclude-disruption` 사용을 활성화한다. +- `NodeLease`: 새로운 리스(Lease) API가 노드 상태 신호로 사용될 수 있는 노드 하트비트(heartbeats)를 보고할 수 있게 한다. +- `NonPreemptingPriority`: 프라이어리티클래스(PriorityClass)와 파드에 NonPreempting 옵션을 활성화한다. +- `PersistentLocalVolumes`: 파드에서 `local` 볼륨 유형의 사용을 활성화한다. + `local` 볼륨을 요청하는 경우 파드 어피니티를 지정해야 한다. +- `PodDisruptionBudget`: [PodDisruptionBudget](/docs/tasks/run-application/configure-pdb/) 기능을 활성화한다. +- `PodOverhead`: 파드 오버헤드를 판단하기 위해 [파드오버헤드(PodOverhead)](/ko/docs/concepts/configuration/pod-overhead/) 기능을 활성화한다. +- `PodPriority`: [우선 순위](/ko/docs/concepts/configuration/pod-priority-preemption/)를 기반으로 파드의 스케줄링 취소와 선점을 활성화한다. +- `PodReadinessGates`: 파드 준비성 평가를 확장하기 위해 + `PodReadinessGate` 필드 설정을 활성화한다. 자세한 내용은 [파드의 준비성 게이트](/ko/docs/concepts/workloads/pods/pod-lifecycle/#pod-readiness-gate)를 + 참고한다. +- `PodShareProcessNamespace`: 파드에서 실행되는 컨테이너 간에 단일 프로세스 네임스페이스를 + 공유하기 위해 파드에서 `shareProcessNamespace` 설정을 활성화한다. 자세한 내용은 + [파드의 컨테이너 간 프로세스 네임스페이스 공유](/docs/tasks/configure-pod-container/share-process-namespace/)에서 확인할 수 있다. +- `ProcMountType`: 컨테이너의 ProcMountType 제어를 활성화한다. +- `PVCProtection`: 파드에서 사용 중일 때 퍼시스턴트볼륨클레임(PVC)이 + 삭제되지 않도록 한다. +- `QOSReserved`: QoS 수준에서 리소스 예약을 허용하여 낮은 QoS 수준의 파드가 더 높은 QoS 수준에서 + 요청된 리소스로 파열되는 것을 방지한다(현재 메모리만 해당). +- `ResourceLimitsPriorityFunction`: 입력 파드의 CPU 및 메모리 한도 중 + 하나 이상을 만족하는 노드에 가능한 최저 점수 1을 할당하는 + 스케줄러 우선 순위 기능을 활성화한다. 의도는 동일한 점수를 가진 + 노드 사이의 관계를 끊는 것이다. +- `ResourceQuotaScopeSelectors`: 리소스 쿼터 범위 셀렉터를 활성화한다. +- `RotateKubeletClientCertificate`: kubelet에서 클라이언트 TLS 인증서의 로테이션을 활성화한다. + 자세한 내용은 [kubelet 구성](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#kubelet-configuration)을 참고한다. +- `RotateKubeletServerCertificate`: kubelet에서 서버 TLS 인증서의 로테이션을 활성화한다. + 자세한 내용은 [kubelet 구성](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#kubelet-configuration)을 참고한다. +- `RunAsGroup`: 컨테이너의 init 프로세스에 설정된 기본 그룹 ID 제어를 활성화한다. +- `RuntimeClass`: 컨테이너 런타임 구성을 선택하기 위해 [런타임클래스(RuntimeClass)](/ko/docs/concepts/containers/runtime-class/) 기능을 활성화한다. +- `ScheduleDaemonSetPods`: 데몬셋(DaemonSet) 컨트롤러 대신 기본 스케줄러로 데몬셋 파드를 스케줄링할 수 있다. +- `SCTPSupport`: SCTP를 `Service`, `Endpoint`, `NetworkPolicy` 및 `Pod` 정의에서 `protocol` 값으로 사용하는 것을 활성화한다. +- `ServerSideApply`: API 서버에서 [SSA(Sever Side Apply)](/docs/reference/using-api/api-concepts/#server-side-apply) 경로를 활성화한다. +- `ServiceAccountIssuerDiscovery`: API 서버에서 서비스 어카운트 발행자에 대해 OIDC 디스커버리 엔드포인트(발급자 및 JWKS URL)를 활성화한다. 자세한 내용은 [파드의 서비스 어카운트 구성](/docs/tasks/configure-pod-container/configure-service-account/#service-account-issuer-discovery)을 참고한다. +- `ServiceAppProtocol`: 서비스와 엔드포인트에서 `AppProtocol` 필드를 활성화한다. +- `ServiceLoadBalancerFinalizer`: 서비스 로드 밸런서에 대한 Finalizer 보호를 활성화한다. +- `ServiceNodeExclusion`: 클라우드 제공자가 생성한 로드 밸런서에서 노드를 제외할 수 있다. 
+ "`alpha.service-controller.kubernetes.io/exclude-balancer`" 키 또는 `node.kubernetes.io/exclude-from-external-load-balancers` 로 레이블이 지정된 경우 노드를 제외할 수 있다. +- `ServiceTopology`: 서비스가 클러스터의 노드 토폴로지를 기반으로 트래픽을 라우팅할 수 있도록 한다. 자세한 내용은 [서비스토폴로지(ServiceTopology)](/ko/docs/concepts/services-networking/service-topology/)를 참고한다. +- `StartupProbe`: kubelet에서 [스타트업](/ko/docs/concepts/workloads/pods/pod-lifecycle/#언제-스타트업-프로브를-사용해야-하는가) 프로브를 활성화한다. +- `StorageObjectInUseProtection`: 퍼시스턴트볼륨 또는 퍼시스턴트볼륨클레임 오브젝트가 여전히 + 사용 중인 경우 삭제를 연기한다. +- `StorageVersionHash`: API 서버가 디스커버리에서 스토리지 버전 해시를 노출하도록 허용한다. +- `StreamingProxyRedirects`: 스트리밍 요청을 위해 백엔드(kubelet)에서 리디렉션을 + 가로채서 따르도록 API 서버에 지시한다. + 스트리밍 요청의 예로는 `exec`, `attach` 및 `port-forward` 요청이 있다. +- `SupportIPVSProxyMode`: IPVS를 사용하여 클러스터 내 서비스 로드 밸런싱을 제공한다. + 자세한 내용은 [서비스 프록시](/ko/docs/concepts/services-networking/service/#가상-ip와-서비스-프록시)를 참고한다. +- `SupportPodPidsLimit`: 파드의 PID 제한을 지원한다. +- `Sysctls`: 각 파드에 설정할 수 있는 네임스페이스 커널 파라미터(sysctl)를 지원한다. + 자세한 내용은 [sysctl](/docs/tasks/administer-cluster/sysctl-cluster/)을 참고한다. +- `TaintBasedEvictions`: 노드의 테인트(taint) 및 파드의 톨러레이션(toleration)을 기반으로 노드에서 파드를 축출할 수 있다. + 자세한 내용은 [테인트와 톨러레이션](/ko/docs/concepts/scheduling-eviction/taint-and-toleration/)을 참고한다. +- `TaintNodesByCondition`: [노드 컨디션](/ko/docs/concepts/architecture/nodes/#condition)을 기반으로 자동 테인트 노드를 활성화한다. +- `TokenRequest`: 서비스 어카운트 리소스에서 `TokenRequest` 엔드포인트를 활성화한다. +- `TokenRequestProjection`: [`projected` 볼륨](/ko/docs/concepts/storage/volumes/#projected)을 통해 서비스 어카운트 + 토큰을 파드에 주입할 수 있다. +- `TopologyManager`: 쿠버네티스의 다른 컴포넌트에 대한 세분화된 하드웨어 리소스 할당을 조정하는 메커니즘을 활성화한다. [노드의 토폴로지 관리 정책 제어](/docs/tasks/administer-cluster/topology-manager/)를 참고한다. +- `TTLAfterFinished`: [TTL 컨트롤러](/ko/docs/concepts/workloads/controllers/ttlafterfinished/)가 실행이 끝난 후 리소스를 정리하도록 허용한다. +- `VolumePVCDataSource`: 기존 PVC를 데이터 소스로 지정하는 기능을 지원한다. +- `VolumeScheduling`: 볼륨 토폴로지 인식 스케줄링을 활성화하고 + 퍼시스턴트볼륨클레임(PVC) 바인딩이 스케줄링 결정을 인식하도록 한다. 또한 + `PersistentLocalVolumes` 기능 게이트와 함께 사용될 때 + [`local`](/ko/docs/concepts/storage/volumes/#local) 볼륨 유형을 사용할 수 있다. +- `VolumeSnapshotDataSource`: 볼륨 스냅샷 데이터 소스 지원을 활성화한다. +- `VolumeSubpathEnvExpansion`: 환경 변수를 `subPath`로 확장하기 위해 `subPathExpr` 필드를 활성화한다. +- `WatchBookmark`: 감시자 북마크(watch bookmark) 이벤트 지원을 활성화한다. +- `WindowsGMSA`: 파드에서 컨테이너 런타임으로 GMSA 자격 증명 스펙을 전달할 수 있다. +- `WindowsRunAsUserName` : 기본 사용자가 아닌(non-default) 사용자로 윈도우 컨테이너에서 애플리케이션을 실행할 수 있도록 지원한다. + 자세한 내용은 [RunAsUserName 구성](/docs/tasks/configure-pod-container/configure-runasusername)을 참고한다. +- `WinDSR`: kube-proxy가 윈도우용 DSR 로드 밸런서를 생성할 수 있다. +- `WinOverlay`: kube-proxy가 윈도우용 오버레이 모드에서 실행될 수 있도록 한다. + + +## {{% heading "whatsnext" %}} + +* [사용 중단 정책](/docs/reference/using-api/deprecation-policy/)은 쿠버네티스에 대한 + 기능과 컴포넌트를 제거하는 프로젝트의 접근 방법을 설명한다. diff --git a/content/ko/docs/reference/glossary/cni.md b/content/ko/docs/reference/glossary/cni.md index a88ac5277ae4a..28fc3602f3f53 100644 --- a/content/ko/docs/reference/glossary/cni.md +++ b/content/ko/docs/reference/glossary/cni.md @@ -2,17 +2,17 @@ title: 컨테이너 네트워크 인터페이스(Container network interface, CNI) id: cni date: 2018-05-25 -full_link: /docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/#cni +full_link: /ko/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/#cni short_description: > 컨테이너 네트워크 인터페이스(CNI) 플러그인은 appc/CNI 스팩을 따르는 네트워크 플러그인의 일종이다. -aka: +aka: tags: -- networking +- networking --- 컨테이너 네트워크 인터페이스(CNI) 플러그인은 appc/CNI 스팩을 따르는 네트워크 플러그인의 일종이다. 
- + -* 쿠버네티스와 CNI에 대한 정보는 [여기](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/#cni)를 참고한다. -* 쿠버네티스와 CNI에 대한 정보는 ["네트워크 플러그인"](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/#cni)에서 볼 수 있다. +* 쿠버네티스와 CNI에 대한 정보는 [여기](/ko/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/#cni)를 참고한다. +* 쿠버네티스와 CNI에 대한 정보는 ["네트워크 플러그인"](/ko/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/#cni)에서 볼 수 있다. diff --git a/content/ko/docs/reference/glossary/cronjob.md b/content/ko/docs/reference/glossary/cronjob.md index b0f8342b66f77..453bb6b652206 100755 --- a/content/ko/docs/reference/glossary/cronjob.md +++ b/content/ko/docs/reference/glossary/cronjob.md @@ -11,9 +11,9 @@ tags: - core-object - workload --- - 주기적인 일정에 따라 실행되는 [잡](/ko/docs/concepts/workloads/controllers/jobs-run-to-completion/)을 관리. + 주기적인 일정에 따라 실행되는 [잡](/ko/docs/concepts/workloads/controllers/job/)을 관리. -*crontab* 파일의 라인과 유사하게, 크론잡 오브젝트는 [크론](https://en.wikipedia.org/wiki/Cron) 형식을 사용하여 일정을 지정한다. +*crontab* 파일의 라인과 유사하게, 크론잡 오브젝트는 [크론](https://ko.wikipedia.org/wiki/Cron) 형식을 사용하여 일정을 지정한다. diff --git a/content/ko/docs/reference/glossary/device-plugin.md b/content/ko/docs/reference/glossary/device-plugin.md index 85fe177e6b0c3..4d7ec8debe78e 100644 --- a/content/ko/docs/reference/glossary/device-plugin.md +++ b/content/ko/docs/reference/glossary/device-plugin.md @@ -24,6 +24,6 @@ tags: 장치 플러그인을 {{< glossary_tooltip term_id="daemonset" >}}으로 배포하거나, 각 대상 노드에 직접 장치 플러그인 소프트웨어를 설치할 수 있다. -[장치 플러그인](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) +[장치 플러그인](/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) 의 더 자세한 정보를 본다 diff --git a/content/ko/docs/reference/glossary/docker.md b/content/ko/docs/reference/glossary/docker.md index 64d264479dce6..4e0d7a305dea4 100755 --- a/content/ko/docs/reference/glossary/docker.md +++ b/content/ko/docs/reference/glossary/docker.md @@ -14,5 +14,5 @@ tags: -Docker는 Linux 커널의 리소스 격리 기능을 사용하며, 그 격리 기능의 예는 cgroups, 커널 네임스페이스, OverlayFS와 같은 조합 가능한 파일 시스템, 컨테이너가 단일 Linux 인스턴스에서 독립적으로 실행되게 하여 가상 머신(VM)을 시작하고 관리하는 오버헤드를 피할 수 있도록 하는 기타 기능 등이 있다. +Docker는 리눅스 커널의 리소스 격리 기능을 사용하며, 그 격리 기능의 예는 cgroups, 커널 네임스페이스, OverlayFS와 같은 조합 가능한 파일 시스템, 컨테이너가 단일 리눅스 인스턴스에서 독립적으로 실행되게 하여 가상 머신(VM)을 시작하고 관리하는 오버헤드를 피할 수 있도록 하는 기타 기능 등이 있다. diff --git a/content/ko/docs/reference/glossary/job.md b/content/ko/docs/reference/glossary/job.md index a2c5a83466dd1..8f227f76ac2d3 100755 --- a/content/ko/docs/reference/glossary/job.md +++ b/content/ko/docs/reference/glossary/job.md @@ -2,7 +2,7 @@ title: 잡(Job) id: job date: 2018-04-12 -full_link: /docs/concepts/workloads/controllers/jobs-run-to-completion +full_link: /docs/concepts/workloads/controllers/job short_description: > 완료를 목표로 실행되는 유한 또는 배치 작업. diff --git a/content/ko/docs/reference/glossary/minikube.md b/content/ko/docs/reference/glossary/minikube.md index 57267cddd44fa..f43966260e748 100755 --- a/content/ko/docs/reference/glossary/minikube.md +++ b/content/ko/docs/reference/glossary/minikube.md @@ -17,5 +17,4 @@ tags: Minikube는 VM이나 사용자 컴퓨터에서 단일 노드 클러스터를 실행한다. Minikube를 사용하여 -[학습 환경에서 쿠버네티스 시도하기](/docs/setup/learning-environment/)를 할 수 있다. - +[학습 환경에서 쿠버네티스 시도하기](/ko/docs/setup/learning-environment/)를 할 수 있다. 
diff --git a/content/ko/docs/reference/glossary/volume.md b/content/ko/docs/reference/glossary/volume.md index bbe78369a21d8..6aa9985eb063e 100755 --- a/content/ko/docs/reference/glossary/volume.md +++ b/content/ko/docs/reference/glossary/volume.md @@ -4,18 +4,17 @@ id: volume date: 2018-04-12 full_link: /ko/docs/concepts/storage/volumes/ short_description: > - 데이터를 포함하고 있는 디렉토리이며, 파드의 컨테이너에서 접근 가능하다. + 데이터를 포함하고 있는 디렉터리이며, 파드의 컨테이너에서 접근 가능하다. -aka: +aka: tags: - core-object - fundamental --- - 데이터를 포함하고 있는 디렉토리이며, {{< glossary_tooltip text="파드" term_id="pod" >}}의 {{< glossary_tooltip text="컨테이너" term_id="container" >}}에서 접근 가능하다. + 데이터를 포함하고 있는 디렉터리이며, {{< glossary_tooltip text="파드" term_id="pod" >}}의 {{< glossary_tooltip text="컨테이너" term_id="container" >}}에서 접근 가능하다. - + 쿠버네티스 볼륨은 그것을 포함하고 있는 파드만큼 오래 산다. 결과적으로, 볼륨은 파드 안에서 실행되는 모든 컨테이너 보다 오래 지속되며, 데이터는 컨테이너의 재시작 간에도 보존된다. -더 많은 정보는 [스토리지](https://kubernetes.io/ko/docs/concepts/storage/)를 본다. - +더 많은 정보는 [스토리지](/ko/docs/concepts/storage/)를 본다. diff --git a/content/ko/docs/reference/kubectl/cheatsheet.md b/content/ko/docs/reference/kubectl/cheatsheet.md index bcf654b0bd597..42d761ee93fe2 100644 --- a/content/ko/docs/reference/kubectl/cheatsheet.md +++ b/content/ko/docs/reference/kubectl/cheatsheet.md @@ -255,7 +255,7 @@ KUBE_EDITOR="nano" kubectl edit svc/docker-registry # 다른 편집기 사용 ## 리소스 스케일링 ```bash -kubectl scale --replicas=3 rs/foo # 'foo'라는 레플리카 셋을 3으로 스케일 +kubectl scale --replicas=3 rs/foo # 'foo'라는 레플리카셋을 3으로 스케일 kubectl scale --replicas=3 -f foo.yaml # "foo.yaml"에 지정된 리소스의 크기를 3으로 스케일 kubectl scale --current-replicas=2 --replicas=3 deployment/mysql # mysql이라는 디플로이먼트의 현재 크기가 2인 경우, mysql을 3으로 스케일 kubectl scale --replicas=5 rc/foo rc/bar rc/baz # 여러 개의 레플리케이션 컨트롤러 스케일 @@ -286,6 +286,11 @@ kubectl logs -f my-pod # 실시간 스트림 파드 kubectl logs -f my-pod -c my-container # 실시간 스트림 파드 로그(stdout, 멀티-컨테이너 경우) kubectl logs -f -l name=myLabel --all-containers # name이 myLabel인 모든 파드의 로그 스트리밍 (stdout) kubectl run -i --tty busybox --image=busybox -- sh # 대화형 셸로 파드를 실행 +kubectl run nginx --image=nginx -n +mynamespace # 특정 네임스페이스에서 nginx 파드 실행 +kubectl run nginx --image=nginx # nginx 파드를 실행하고 해당 스펙을 pod.yaml 파일에 기록 +--dry-run=client -o yaml > pod.yaml + kubectl attach my-pod -i # 실행중인 컨테이너에 연결 kubectl port-forward my-pod 5000:6000 # 로컬 머신의 5000번 포트를 리스닝하고, my-pod의 6000번 포트로 전달 kubectl exec my-pod -- ls / # 기존 파드에서 명령 실행(한 개 컨테이너 경우) @@ -310,7 +315,7 @@ kubectl taint nodes foo dedicated=special-user:NoSchedule ### 리소스 타입 -단축명, [API 그룹](/ko/docs/concepts/overview/kubernetes-api/#api-groups)과 함께 지원되는 모든 리소스 유형들, 그것들의 [네임스페이스](/ko/docs/concepts/overview/working-with-objects/namespaces)와 [종류(Kind)](/ko/docs/concepts/overview/working-with-objects/kubernetes-objects)를 나열: +단축명, [API 그룹](/ko/docs/concepts/overview/kubernetes-api/#api-그룹)과 함께 지원되는 모든 리소스 유형들, 그것들의 [네임스페이스](/ko/docs/concepts/overview/working-with-objects/namespaces)와 [종류(Kind)](/ko/docs/concepts/overview/working-with-objects/kubernetes-objects)를 나열: ```bash kubectl api-resources @@ -385,5 +390,3 @@ Kubectl 로그 상세 레벨(verbosity)은 `-v` 또는`--v` 플래그와 로그 * 재사용 스크립트에서 kubectl 사용 방법을 이해하기 위해 [kubectl 사용법](/docs/reference/kubectl/conventions/)을 참고한다. 
* 더 많은 [kubectl 치트 시트](https://github.com/dennyzhang/cheatsheet-kubernetes-A4) 커뮤니티 확인 - - diff --git a/content/ko/docs/reference/kubectl/overview.md b/content/ko/docs/reference/kubectl/overview.md index 0a4a3fefbeb8d..6eab3cbafe0f4 100644 --- a/content/ko/docs/reference/kubectl/overview.md +++ b/content/ko/docs/reference/kubectl/overview.md @@ -1,6 +1,6 @@ --- title: kubectl 개요 -content_template: templates/concept +content_type: concept weight: 20 card: name: reference @@ -8,7 +8,7 @@ card: --- -Kubectl은 쿠버네티스 클러스터를 제어하기 위한 커맨드 라인 도구이다. `kubectl` 은 config 파일을 $HOME/.kube 에서 찾는다. KUBECONFIG 환경 변수를 설정하거나 [`--kubeconfig`](/ko/docs/concepts/configuration/organize-cluster-access-kubeconfig/) 플래그를 설정하여 다른 [kubeconfig](/ko/docs/concepts/configuration/organize-cluster-access-kubeconfig/) 파일을 지정할 수 있다. +Kubectl은 쿠버네티스 클러스터를 제어하기 위한 커맨드 라인 도구이다. 구성을 위해, `kubectl` 은 config 파일을 $HOME/.kube 에서 찾는다. KUBECONFIG 환경 변수를 설정하거나 [`--kubeconfig`](/ko/docs/concepts/configuration/organize-cluster-access-kubeconfig/) 플래그를 설정하여 다른 [kubeconfig](/ko/docs/concepts/configuration/organize-cluster-access-kubeconfig/) 파일을 지정할 수 있다. 이 개요는 `kubectl` 구문을 다루고, 커맨드 동작을 설명하며, 일반적인 예제를 제공한다. 지원되는 모든 플래그 및 하위 명령을 포함한 각 명령에 대한 자세한 내용은 [kubectl](/docs/reference/generated/kubectl/kubectl-commands/) 참조 문서를 참고한다. 설치 방법에 대해서는 [kubectl 설치](/ko/docs/tasks/tools/install-kubectl/)를 참고한다. @@ -29,11 +29,11 @@ kubectl [command] [TYPE] [NAME] [flags] * `TYPE`: [리소스 타입](#리소스-타입)을 지정한다. 리소스 타입은 대소문자를 구분하지 않으며 단수형, 복수형 또는 약어 형식을 지정할 수 있다. 예를 들어, 다음의 명령은 동일한 출력 결과를 생성한다. - ```shell - kubectl get pod pod1 - kubectl get pods pod1 - kubectl get po pod1 - ``` + ```shell + kubectl get pod pod1 + kubectl get pods pod1 + kubectl get po pod1 + ``` * `NAME`: 리소스 이름을 지정한다. 이름은 대소문자를 구분한다. 이름을 생략하면, 모든 리소스에 대한 세부 사항이 표시된다. 예: `kubectl get pods` @@ -110,13 +110,13 @@ kubectl [command] [TYPE] [NAME] [flags] `version` | `kubectl version [--client] [flags]` | 클라이언트와 서버에서 실행 중인 쿠버네티스 버전을 표시한다. `wait` | kubectl wait ([-f FILENAME] | resource.group/resource.name | resource.group [(-l label | --all)]) [--for=delete|--for condition=available] [options] | 실험(experimental) 기능: 하나 이상의 리소스에서 특정 조건을 기다린다. -기억하기: 명령 동작에 대한 자세한 내용은 [kubectl](/docs/user-guide/kubectl/) 참조 문서를 참고한다. +명령 동작에 대한 자세한 내용을 배우려면 [kubectl](/docs/reference/kubectl/kubectl/) 참조 문서를 참고한다. ## 리소스 타입 다음 표에는 지원되는 모든 리소스 타입과 해당 약어가 나열되어 있다. -(이 출력은 `kubectl api-resources` 에서 확인할 수 있으며, 쿠버네티스 1.13.3 부터 일치한다.) +(이 출력은 `kubectl api-resources` 에서 확인할 수 있으며, 쿠버네티스 1.13.3 부터 일치했다.) | 리소스 이름 | 짧은 이름 | API 그룹 | 네임스페이스 | 리소스 종류 | |---|---|---|---|---| @@ -172,7 +172,7 @@ kubectl [command] [TYPE] [NAME] [flags] ## 출력 옵션 -특정 명령의 출력을 서식화하거나 정렬하는 방법에 대한 정보는 다음 섹션을 참고한다. 다양한 출력 옵션을 지원하는 명령에 대한 자세한 내용은 [kubectl](/docs/user-guide/kubectl/) 참조 문서를 참고한다. +특정 명령의 출력을 서식화하거나 정렬하는 방법에 대한 정보는 다음 섹션을 참고한다. 다양한 출력 옵션을 지원하는 명령에 대한 자세한 내용은 [kubectl](/docs/reference/kubectl/kubectl/) 참조 문서를 참고한다. ### 출력 서식화 @@ -231,9 +231,9 @@ kubectl get pods -o custom-columns-file=template.txt NAME RSRC metadata.name metadata.resourceVersion ``` -두 명령 중 하나를 실행한 결과는 다음과 같다. +두 명령 중 하나를 실행한 결과는 다음과 비슷하다. -```shell +``` NAME RSRC submit-queue 610995 ``` @@ -244,7 +244,7 @@ submit-queue 610995 이는 클라이언트가 출력할 수 있도록, 주어진 리소스에 대해 서버가 해당 리소스와 관련된 열과 행을 반환한다는 것을 의미한다. 이는 서버가 출력의 세부 사항을 캡슐화하도록 하여, 동일한 클러스터에 대해 사용된 클라이언트에서 사람이 읽을 수 있는 일관된 출력을 허용한다. -이 기능은 기본적으로 `kubectl` 1.11 이상에서 활성화되어 있다. 사용하지 않으려면, +이 기능은 기본적으로 활성화되어 있다. 사용하지 않으려면, `kubectl get` 명령에 `--server-print=false` 플래그를 추가한다. 
##### 예제 @@ -255,9 +255,9 @@ submit-queue 610995 kubectl get pods --server-print=false ``` -출력 결과는 다음과 같다. +출력 결과는 다음과 비슷하다. -```shell +``` NAME AGE pod-name 1m ``` @@ -402,16 +402,20 @@ cat service.yaml | kubectl diff -f - # 어떤 언어로든 간단한 플러그인을 만들고 "kubectl-" 접두사로 # 시작하도록 실행 파일의 이름을 지정한다. cat ./kubectl-hello -#!/bin/bash +``` +```shell +#!/bin/sh # 이 플러그인은 "hello world"라는 단어를 출력한다 echo "hello world" - -# 작성한 플러그인을 실행 가능하게 한다 -sudo chmod +x ./kubectl-hello +``` +작성한 플러그인을 실행 가능하게 한다 +```bash +chmod a+x ./kubectl-hello # 그리고 PATH의 위치로 옮긴다 sudo mv ./kubectl-hello /usr/local/bin +sudo chown root:root /usr/local/bin # 이제 kubectl 플러그인을 만들고 "설치했다". # kubectl에서 플러그인을 일반 명령처럼 호출하여 플러그인을 사용할 수 있다 @@ -422,16 +426,18 @@ hello world ``` ```shell -# PATH에서 플러그인 파일을 간단히 삭제하여, 플러그인을 "제거"할 수 있다 +# 플러그인을 배치한 $PATH의 폴더에서 플러그인을 삭제하여, +# 플러그인을 "제거"할 수 있다 sudo rm /usr/local/bin/kubectl-hello ``` `kubectl` 에 사용할 수 있는 모든 플러그인을 보려면, -`kubectl plugin list` 하위 명령을 사용할 수 있다. +`kubectl plugin list` 하위 명령을 사용한다. ```shell kubectl plugin list ``` +출력 결과는 다음과 비슷하다. ``` The following kubectl-compatible plugins are available: @@ -439,11 +445,11 @@ The following kubectl-compatible plugins are available: /usr/local/bin/kubectl-foo /usr/local/bin/kubectl-bar ``` + +`kubectl plugin list` 는 또한 실행 가능하지 않거나, +다른 플러그인에 의해 차단된 플러그인에 대해 경고한다. 예를 들면 다음과 같다. ```shell -# 또한, 이 명령은 예를 들어 실행 불가능한 파일이거나, -# 다른 플러그인에 의해 가려진 플러그인에 대해 -# 경고할 수 있다 -sudo chmod -x /usr/local/bin/kubectl-foo +sudo chmod -x /usr/local/bin/kubectl-foo # 실행 권한 제거 kubectl plugin list ``` ``` @@ -462,6 +468,10 @@ error: one plugin warning was found ```shell cat ./kubectl-whoami +``` +다음 몇 가지 예는 이미 `kubectl-whoami` 에 +다음 내용이 있다고 가정한다. +```shell #!/bin/bash # 이 플러그인은 현재 선택된 컨텍스트를 기반으로 현재 사용자에 대한 @@ -469,7 +479,7 @@ cat ./kubectl-whoami kubectl config view --template='{{ range .contexts }}{{ if eq .name "'$(kubectl config current-context)'" }}Current user: {{ printf "%s\n" .context.user }}{{ end }}{{ end }}' ``` -위의 플러그인을 실행하면 KUBECONFIG 파일에서 현재 선택된 컨텍스트에 대한 +위의 플러그인을 실행하면 KUBECONFIG 파일에서 현재의 컨텍스트에 대한 사용자가 포함된 출력이 제공된다. ```shell @@ -483,11 +493,10 @@ kubectl whoami Current user: plugins-user ``` -플러그인에 대한 자세한 내용은 [cli plugin 예제](https://github.com/kubernetes/sample-cli-plugin)를 참고한다. ## {{% heading "whatsnext" %}} -[kubectl](/docs/reference/generated/kubectl/kubectl-commands/) 명령을 사용하여 시작한다. - +* [kubectl](/docs/reference/generated/kubectl/kubectl-commands/) 명령을 사용하여 시작한다. +* 플러그인에 대한 자세한 내용은 [cli plugin 예제](https://github.com/kubernetes/sample-cli-plugin)를 참고한다. diff --git a/content/ko/docs/reference/tools.md b/content/ko/docs/reference/tools.md index f9a9836bdc2c7..ceac0a510131b 100644 --- a/content/ko/docs/reference/tools.md +++ b/content/ko/docs/reference/tools.md @@ -12,7 +12,7 @@ content_type: concept ## Kubectl -[`kubectl`](/docs/tasks/tools/install-kubectl/)은 쿠버네티스를 위한 커맨드라인 툴이며, 쿠버네티스 클러스터 매니저을 제어한다. +[`kubectl`](/ko/docs/tasks/tools/install-kubectl/)은 쿠버네티스를 위한 커맨드라인 툴이며, 쿠버네티스 클러스터 매니저을 제어한다. ## Kubeadm @@ -20,7 +20,7 @@ content_type: concept ## Minikube -[`minikube`](/ko/docs/tasks/tools/install-minikube/)는 개발과 테스팅 목적으로 하는 +[`minikube`](/ko/docs/tasks/tools/install-minikube/)는 개발과 테스팅 목적으로 하는 단일 노드 쿠버네티스 클러스터를 로컬 워크스테이션에서 쉽게 구동시키는 도구이다. 
@@ -51,4 +51,3 @@ Kompose의 용도 * 도커 컴포즈 파일을 쿠버네티스 오브젝트로 변환 * 로컬 도커 개발 환경에서 나의 애플리케이션을 쿠버네티스를 통해 관리하도록 이전 * V1 또는 V2 도커 컴포즈 `yaml` 파일 또는 [분산 애플리케이션 번들](https://docs.docker.com/compose/bundles/)을 변환 - diff --git a/content/ko/docs/reference/using-api/api-overview.md b/content/ko/docs/reference/using-api/api-overview.md index 098e9ad9bff6b..d961283b99545 100644 --- a/content/ko/docs/reference/using-api/api-overview.md +++ b/content/ko/docs/reference/using-api/api-overview.md @@ -78,9 +78,9 @@ API 버전의 차이는 수준의 안정성과 지원의 차이를 나타낸다. * *핵심* (또는 *레거시*라고 불리는) 그룹은 `apiVersion: v1`와 같이 `apiVersion` 필드에 명시되지 않고 REST 경로 `/api/v1`에 있다. * 이름이 있는 그룹은 REST 경로 `/apis/$GROUP_NAME/$VERSION`에 있으며 `apiVersion: $GROUP_NAME/$VERSION`을 사용한다 - (예를 들어 `apiVersion: batch/v1`). 지원되는 API 그룹 전체의 목록은 [쿠버네티스 API 참조 문서](/docs/reference/)에서 확인할 수 있다. + (예를 들어 `apiVersion: batch/v1`). 지원되는 API 그룹 전체의 목록은 [쿠버네티스 API 참조 문서](/ko/docs/reference/)에서 확인할 수 있다. -[사용자 정의 리소스](/docs/concepts/api-extension/custom-resources/)로 API를 확장하는 경우에는 다음 두 종류의 경로가 지원된다. +[사용자 정의 리소스](/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources/)로 API를 확장하는 경우에는 다음 두 종류의 경로가 지원된다. - 기본적인 CRUD 요구에는 [커스텀리소스데피니션(CustomResourceDefinition)](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) @@ -109,6 +109,3 @@ API 버전의 차이는 수준의 안정성과 지원의 차이를 나타낸다. `--runtime-config=extensions/v1beta1/deployments=true,extensions/v1beta1/daemonsets=true` 를 입력한다. {{< note >}}개별 리소스의 활성화/비활성화는 레거시 문제로 `extensions/v1beta1` API 그룹에서만 지원된다. {{< /note >}} - - - diff --git a/content/ko/docs/reference/using-api/client-libraries.md b/content/ko/docs/reference/using-api/client-libraries.md index 321cf5ed2f9f6..a0a87e08375fe 100644 --- a/content/ko/docs/reference/using-api/client-libraries.md +++ b/content/ko/docs/reference/using-api/client-libraries.md @@ -16,7 +16,7 @@ API 호출 또는 요청/응답 타입을 직접 구현할 필요는 없다. 클라이언트 라이브러리는 대체로 인증과 같은 공통의 태스크를 처리한다. 대부분의 클라이언트 라이브러리들은 API 클라이언트가 쿠버네티스 클러스터 내부에서 동작하는 경우 인증 -또는 [kubeconfig 파일](/docs/tasks/access-application-cluster/authenticate-across-clusters-kubeconfig/) 포맷을 통해 +또는 [kubeconfig 파일](/ko/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) 포맷을 통해 자격증명과 API 서버 주소를 읽을 수 있게 쿠버네티스 서비스 어카운트를 발견하고 사용할 수 있다. diff --git a/content/ko/docs/setup/_index.md b/content/ko/docs/setup/_index.md index 21cd27976483d..b09963d0e2d09 100644 --- a/content/ko/docs/setup/_index.md +++ b/content/ko/docs/setup/_index.md @@ -16,35 +16,20 @@ card: -본 섹션에서는 쿠버네티스를 구축하고 실행하는 여러가지 옵션을 다룬다. - -각각의 쿠버네티스 솔루션은 유지보수의 용이성, 보안, 제어, 가용 자원, 클러스터를 운영하고 관리하기 위해 필요한 전문성과 같은 제각각의 요구사항을 충족한다. - -쿠버네티스 클러스터를 로컬 머신에, 클라우드에, 온-프레미스 데이터센터에 배포할 수 있고, 아니면 매니지드 쿠버네티스 클러스터를 선택할 수도 있다. 넓은 범위의 클라우드 프로바이더에 걸치거나 베어 메탈 환경을 사용하는 커스텀 솔루션을 만들 수도 있다. - -더 간단하게 정리하면, 쿠버네티스 클러스터를 학습 환경과 운영 환경에 만들 수 있다. - +본 섹션에는 쿠버네티스를 설정하고 실행하는 다양한 방법이 나열되어 있다. +쿠버네티스를 설치할 때는 유지보수의 용이성, 보안, 제어, 사용 가능한 리소스, 그리고 +클러스터를 운영하고 관리하기 위해 필요한 전문성을 기반으로 설치 유형을 선택한다. +쿠버네티스 클러스터를 로컬 머신에, 클라우드에, 온-프레미스 데이터센터에 배포할 수 있고, 아니면 매니지드 쿠버네티스 클러스터를 선택할 수도 있다. 광범위한 클라우드 제공 업체 또는 베어 메탈 환경에 걸쳐 사용할 수 있는 맞춤형 솔루션도 있다. ## 학습 환경 -쿠버네티스를 배우고 있다면, 쿠버네티스 커뮤니티에서 지원하는 도구나, 로컬 머신에서 쿠버네티스를 설치하기 위한 생태계 내의 도구와 같은 도커 기반의 솔루션을 사용하자. - -{{< table caption="쿠버네티스를 배포하기 위해 커뮤니티와 생태계에서 지원하는 도구를 나열한 로컬 머신 솔루션 표." 
>}} - -|커뮤니티 |생태계 | -| ------------ | -------- | -| [Minikube](/docs/setup/learning-environment/minikube/) | [Docker Desktop](https://www.docker.com/products/docker-desktop)| -| [kind (Kubernetes IN Docker)](/docs/setup/learning-environment/kind/) | [Minishift](https://docs.okd.io/latest/minishift/)| -| | [MicroK8s](https://microk8s.io/)| - +쿠버네티스를 배우고 있다면, 쿠버네티스 커뮤니티에서 지원하는 도구나, 로컬 머신에서 쿠버네티스를 설치하기 위한 생태계 내의 도구를 사용하자. ## 운영 환경 운영 환경을 위한 솔루션을 평가할 때에는, 쿠버네티스 클러스터 운영에 대한 어떤 측면(또는 _추상적인 개념_)을 스스로 관리하기를 원하는지, 제공자에게 넘기기를 원하는지 고려하자. [쿠버네티스 파트너](https://kubernetes.io/partners/#conformance)에는 [공인 쿠버네티스](https://github.com/cncf/k8s-conformance/#certified-kubernetes) 공급자 목록이 포함되어 있다. - - diff --git a/content/ko/docs/setup/best-practices/certificates.md b/content/ko/docs/setup/best-practices/certificates.md index f5d1486f39146..e152e378c5412 100644 --- a/content/ko/docs/setup/best-practices/certificates.md +++ b/content/ko/docs/setup/best-practices/certificates.md @@ -36,7 +36,7 @@ etcd 역시 클라이언트와 피어 간에 상호 TLS 인증을 구현한다. ## 인증서를 저장하는 위치 -만약 쿠버네티스를 kubeadm으로 설치했다면 인증서는 `/etc/kubernets/pki`에 저장된다. 이 문서에 언급된 모든 파일 경로는 그 디렉토리에 상대적이다. +만약 쿠버네티스를 kubeadm으로 설치했다면 인증서는 `/etc/kubernets/pki`에 저장된다. 이 문서에 언급된 모든 파일 경로는 그 디렉터리에 상대적이다. ## 인증서 수동 설정 diff --git a/content/ko/docs/setup/best-practices/cluster-large.md b/content/ko/docs/setup/best-practices/cluster-large.md index df95e1dc5cf46..d29c8f49c20de 100644 --- a/content/ko/docs/setup/best-practices/cluster-large.md +++ b/content/ko/docs/setup/best-practices/cluster-large.md @@ -12,9 +12,6 @@ weight: 20 * 전체 컨테이너 300000개 이하 * 노드 당 파드 100개 이하 -
- -{{< toc >}} ## 설치 @@ -112,7 +109,7 @@ AWS에서, 마스터 노드의 크기는 클러스터 시작 시에 설정된 [#22940](http://issue.k8s.io/22940) 참조). 힙스터에 리소스가 부족한 경우라면, 힙스터 메모리 요청량(상세내용은 해당 PR 참조)을 계산하는 공식을 적용해보자. -애드온 컨테이너가 리소스 상한에 걸리는 것을 탐지하는 방법에 대해서는 [컴퓨트 리소스의 트러블슈팅 섹션](/docs/concepts/configuration/manage-compute-resources-container/#troubleshooting)을 참고하라. +애드온 컨테이너가 리소스 상한에 걸리는 것을 탐지하는 방법에 대해서는 [컴퓨트 리소스의 트러블슈팅 섹션](/ko/docs/concepts/configuration/manage-resources-containers/#문제-해결)을 참고하라. [미래](http://issue.k8s.io/13048)에는 모든 클러스터 애드온의 리소스 상한을 클러스터 크기에 맞게 설정해주고 클러스터를 키우거나 줄일 때 동적으로 조절해줄 수 있기를 기대한다. 이런 기능들에 대한 PR은 언제든 환영한다. diff --git a/content/ko/docs/setup/best-practices/multiple-zones.md b/content/ko/docs/setup/best-practices/multiple-zones.md index 13bdaa04a923b..2ccd3873a04da 100644 --- a/content/ko/docs/setup/best-practices/multiple-zones.md +++ b/content/ko/docs/setup/best-practices/multiple-zones.md @@ -6,7 +6,7 @@ content_type: concept -이 페이지는 여러 영역에서 어떻게 클러스터를 구동하는지 설명한다. +이 페이지는 여러 영역에서 어떻게 클러스터를 구동하는지 설명한다. @@ -77,7 +77,7 @@ located in a single zone. Users that want a highly available control plane should follow the [high availability](/docs/admin/high-availability) instructions. ### Volume limitations -The following limitations are addressed with [topology-aware volume binding](/docs/concepts/storage/storage-classes/#volume-binding-mode). +The following limitations are addressed with [topology-aware volume binding](/ko/docs/concepts/storage/storage-classes/#볼륨-바인딩-모드). * StatefulSet volume zone spreading when using dynamic provisioning is currently not compatible with pod affinity or anti-affinity policies. @@ -396,5 +396,3 @@ KUBERNETES_PROVIDER=aws KUBE_USE_EXISTING_MASTER=true KUBE_AWS_ZONE=us-west-2c k KUBERNETES_PROVIDER=aws KUBE_USE_EXISTING_MASTER=true KUBE_AWS_ZONE=us-west-2b kubernetes/cluster/kube-down.sh KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2a kubernetes/cluster/kube-down.sh ``` - - diff --git a/content/ko/docs/setup/best-practices/node-conformance.md b/content/ko/docs/setup/best-practices/node-conformance.md index 3aa27d96ead86..d4579e2f896f1 100644 --- a/content/ko/docs/setup/best-practices/node-conformance.md +++ b/content/ko/docs/setup/best-practices/node-conformance.md @@ -3,7 +3,6 @@ title: 노드 구성 검증하기 weight: 30 --- -{{< toc >}} ## 노드 적합성 테스트 diff --git a/content/ko/docs/setup/learning-environment/minikube.md b/content/ko/docs/setup/learning-environment/minikube.md index 7cc0eb9874d72..16ea2ad587a2c 100644 --- a/content/ko/docs/setup/learning-environment/minikube.md +++ b/content/ko/docs/setup/learning-environment/minikube.md @@ -194,7 +194,7 @@ Minikube는 다음과 같은 쿠버네티스의 기능을 제공한다. 클러스터를 시작하기 위해서 `minikube start` 커멘드를 사용할 수 있다. 이 커멘드는 단일 노드 쿠버네티스 클러스터를 구동하는 가상 머신을 생성하고 구성한다. -이 커멘드는 또한 [kubectl](/docs/user-guide/kubectl-overview/)도 설정해서 클러스터와 통신할 수 있도록 한다. +이 커멘드는 또한 [kubectl](/ko/docs/reference/kubectl/overview/)도 설정해서 클러스터와 통신할 수 있도록 한다. {{< note >}} 웹 프록시 뒤에 있다면, `minikube start` 커맨드에 해당 정보를 전달해야 한다. @@ -447,9 +447,9 @@ spec: | Driver | OS | HostFolder | VM | | --- | --- | --- | --- | -| VirtualBox | Linux | /home | /hosthome | +| VirtualBox | 리눅스 | /home | /hosthome | | VirtualBox | macOS | /Users | /Users | -| VirtualBox | Windows | C://Users | /c/Users | +| VirtualBox | 윈도우 | C://Users | /c/Users | | VMware Fusion | macOS | /Users | /mnt/hgfs/Users | | Xhyve | macOS | /Users | /Users | @@ -505,7 +505,7 @@ Minikube에 대한 더 자세한 정보는, [제안](https://git.k8s.io/communit * **Minikube 빌드**: Minikube를 소스에서 빌드/테스트하는 방법은 [빌드 가이드](https://minikube.sigs.k8s.io/docs/contrib/building/)를 살펴보자. 
* **새 의존성 추가하기**: Minikube에 새 의존성을 추가하는 방법에 대해서는, [의존성 추가 가이드](https://minikube.sigs.k8s.io/docs/contrib/drivers/)를 보자. * **새 애드온 추가하기**: Minikube에 새 애드온을 추가하는 방법에 대해서는, [애드온 추가 가이드](https://minikube.sigs.k8s.io/docs/contrib/addons/)를 보자. -* **MicroK8s**: 가상 머신을 사용하지 않으려는 Linux 사용자는 대안으로 [MicroK8s](https://microk8s.io/)를 고려할 수 있다. +* **MicroK8s**: 가상 머신을 사용하지 않으려는 리눅스 사용자는 대안으로 [MicroK8s](https://microk8s.io/)를 고려할 수 있다. ## 커뮤니티 diff --git a/content/ko/docs/setup/production-environment/container-runtimes.md b/content/ko/docs/setup/production-environment/container-runtimes.md index f14834ff25de2..39c2d0746448c 100644 --- a/content/ko/docs/setup/production-environment/container-runtimes.md +++ b/content/ko/docs/setup/production-environment/container-runtimes.md @@ -25,7 +25,7 @@ weight: 10 ### 적용 가능성 {{< note >}} -이 문서는 Linux에 CRI를 설치하는 사용자를 위해 작성되었다. +이 문서는 리눅스에 CRI를 설치하는 사용자를 위해 작성되었다. 다른 운영 체제의 경우, 해당 플랫폼과 관련된 문서를 찾아보자. {{< /note >}} @@ -34,7 +34,7 @@ weight: 10 ### Cgroup 드라이버 -Linux 배포판의 init 시스템이 systemd인 경우, init 프로세스는 +리눅스 배포판의 init 시스템이 systemd인 경우, init 프로세스는 root control group(`cgroup`)을 생성 및 사용하는 cgroup 관리자로 작동한다. Systemd는 cgroup과의 긴밀한 통합을 통해 프로세스당 cgroup을 할당한다. 컨테이너 런타임과 kubelet이 `cgroupfs`를 사용하도록 설정할 수 있다. @@ -62,7 +62,7 @@ kubelet을 재시작 하는 것은 에러를 해결할 수 없을 것이다. ## 도커 각 머신들에 대해서, 도커를 설치한다. -버전 19.03.8이 추천된다. 그러나 1.13.1, 17.03, 17.06, 17.09, 18.06 그리고 18.09도 동작하는 것으로 알려져 있다. +버전 19.03.11이 추천된다. 그러나 1.13.1, 17.03, 17.06, 17.09, 18.06 그리고 18.09도 동작하는 것으로 알려져 있다. 쿠버네티스 릴리스 노트를 통해서, 최신에 검증된 도커 버전의 지속적인 파악이 필요하다. 시스템에 도커를 설치하기 위해서 아래의 커맨드들을 사용한다. @@ -94,9 +94,9 @@ add-apt-repository \ ```shell # 도커 CE 설치. apt-get update && apt-get install -y \ - containerd.io=1.2.13-1 \ - docker-ce=5:19.03.8~3-0~ubuntu-$(lsb_release -cs) \ - docker-ce-cli=5:19.03.8~3-0~ubuntu-$(lsb_release -cs) + containerd.io=1.2.13-2 \ + docker-ce=5:19.03.11~3-0~ubuntu-$(lsb_release -cs) \ + docker-ce-cli=5:19.03.11~3-0~ubuntu-$(lsb_release -cs) ``` ```shell @@ -142,8 +142,8 @@ yum-config-manager --add-repo \ # 도커 CE 설치. yum update -y && yum install -y \ containerd.io-1.2.13 \ - docker-ce-19.03.8 \ - docker-ce-cli-19.03.8 + docker-ce-19.03.11 \ + docker-ce-cli-19.03.11 ``` ```shell @@ -180,6 +180,12 @@ systemctl restart docker {{< /tab >}} {{< /tabs >}} +부팅 시 도커 서비스를 시작하려면, 다음 명령을 실행한다. + +```shell +sudo systemctl enable docker +``` + 자세한 내용은 [공식 도커 설치 가이드](https://docs.docker.com/engine/installation/) 를 참고한다. diff --git a/content/ko/docs/setup/production-environment/tools/kops.md b/content/ko/docs/setup/production-environment/tools/kops.md index 29716b44e19b4..644ca5dae48d2 100644 --- a/content/ko/docs/setup/production-environment/tools/kops.md +++ b/content/ko/docs/setup/production-environment/tools/kops.md @@ -9,7 +9,7 @@ weight: 20 이곳 빠른 시작에서는 사용자가 얼마나 쉽게 AWS에 쿠버네티스 클러스터를 설치할 수 있는지 보여준다. [`kops`](https://github.com/kubernetes/kops)라는 이름의 툴을 이용할 것이다. -kops는 자동화된 프로비저닝 시스템인데, +kops는 자동화된 프로비저닝 시스템인데, * 완전 자동화된 설치 * DNS를 통해 클러스터들의 신원 확인 @@ -23,11 +23,11 @@ kops는 자동화된 프로비저닝 시스템인데, ## {{% heading "prerequisites" %}} -* [kubectl](/docs/tasks/tools/install-kubectl/)을 반드시 설치해야 한다. +* [kubectl](/ko/docs/tasks/tools/install-kubectl/)을 반드시 설치해야 한다. * 반드시 64-bit (AMD64 그리고 Intel 64)디바이스 아키텍쳐 위에서 `kops` 를 [설치](https://github.com/kubernetes/kops#installing) 한다. 
-* [AWS 계정](https://docs.aws.amazon.com/polly/latest/dg/setting-up.html)이 있고 [IAM 키](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys)를 생성하고 [구성](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html#cli-quick-configuration) 해야 한다. +* [AWS 계정](https://docs.aws.amazon.com/polly/latest/dg/setting-up.html)이 있고 [IAM 키](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys)를 생성하고 [구성](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html#cli-quick-configuration)해야 한다. IAM 사용자는 [적절한 권한](https://github.com/kubernetes/kops/blob/master/docs/getting_started/aws.md#setup-iam-user)이 필요하다. @@ -82,7 +82,7 @@ brew update && brew install kops ``` {{% /tab %}} -{{% tab name="Linux" %}} +{{% tab name="리눅스" %}} 최신 릴리즈를 다운로드 받는 명령어: @@ -127,7 +127,7 @@ brew update && brew install kops kops는 클러스터 내부와 외부 모두에서 검색을 위해 DNS을 사용하기에 클라이언트에서 쿠버네티스 API 서버에 연결할 수 있다. -이런 클러스터 이름에 kops는 명확한 견해을 가지는데: 반드시 유효한 DNS 이름이어야 한다. 이렇게 함으로써 +이런 클러스터 이름에 kops는 명확한 견해을 가지는데: 반드시 유효한 DNS 이름이어야 한다. 이렇게 함으로써 사용자는 클러스터를 헷갈리지 않을것이고, 동료들과 혼선없이 공유할 수 있으며, IP를 기억할 필요없이 접근할 수 있다. @@ -140,7 +140,7 @@ Route53 hosted zone은 서브도메인도 지원한다. 여러분의 hosted zone `example.com`하위에는 그렇지 않을 수 있다). `dev.example.com`을 hosted zone으로 사용하고 있다고 가정해보자. -보통 사용자는 [일반적인 방법](http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingNewSubdomain.html) 에 따라 생성하거나 +보통 사용자는 [일반적인 방법](http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingNewSubdomain.html) 에 따라 생성하거나 `aws route53 create-hosted-zone --name dev.example.com --caller-reference 1` 와 같은 커맨드를 이용한다. 그 후 도메인 내 레코드들을 확인할 수 있도록 상위 도메인내에 NS 레코드를 생성해야 한다. 여기서는, @@ -175,7 +175,7 @@ S3 버킷 이름으로 정하자. * `aws s3 mb s3://clusters.dev.example.com`를 이용해 S3 버킷을 생성한다. -* `export KOPS_STATE_STORE=s3://clusters.dev.example.com` 하면, kops는 이 위치를 기본값으로 인식할 것이다. +* `export KOPS_STATE_STORE=s3://clusters.dev.example.com` 하면, kops는 이 위치를 기본값으로 인식할 것이다. 이 부분을 bash profile등에 넣어두는것을 권장한다. @@ -185,7 +185,7 @@ S3 버킷 이름으로 정하자. `kops create cluster --zones=us-east-1c useast1.dev.example.com` -kops는 클러스터에 사용될 설정을 생성할것이다. 여기서 주의할 점은 실제 클러스트 리소스가 아닌 _설정_ +kops는 클러스터에 사용될 설정을 생성할것이다. 여기서 주의할 점은 실제 클러스트 리소스가 아닌 _설정_ 만을 생성한다는 것에 주의하자 - 이 부분은 다음 단계에서 `kops update cluster` 으로 구성해볼 것이다. 그 때 만들어진 설정을 점검하거나 변경할 수 있다. @@ -220,7 +220,7 @@ kops는 클러스터에 사용될 설정을 생성할것이다. 여기서 주의 ### 다른 애드온 탐험 -[애드온 리스트](/docs/concepts/cluster-administration/addons/) 에서 쿠버네티스 클러스터용 로깅, 모니터링, 네트워크 정책, 시각화 & 제어 등을 포함한 다른 애드온을 확인해본다. +[애드온 리스트](/ko/docs/concepts/cluster-administration/addons/) 에서 쿠버네티스 클러스터용 로깅, 모니터링, 네트워크 정책, 시각화 & 제어 등을 포함한 다른 애드온을 확인해본다. ## 정리하기 @@ -231,9 +231,7 @@ kops는 클러스터에 사용될 설정을 생성할것이다. 여기서 주의 ## {{% heading "whatsnext" %}} -* 쿠버네티스 [개념](/docs/concepts/) 과 [`kubectl`](/docs/user-guide/kubectl-overview/)에 대해 더 알아보기. +* 쿠버네티스 [개념](/ko/docs/concepts/) 과 [`kubectl`](/ko/docs/reference/kubectl/overview/)에 대해 더 알아보기. * 튜토리얼, 모범사례 및 고급 구성 옵션에 대한 `kops` [고급 사용법](https://kops.sigs.k8s.io/)에 대해 더 자세히 알아본다. * 슬랙(Slack)에서 `kops` 커뮤니티 토론을 할 수 있다: [커뮤니티 토론](https://github.com/kubernetes/kops#other-ways-to-communicate-with-the-contributors) * 문제를 해결하거나 이슈를 제기하여 `kops` 에 기여한다. 
[깃헙 이슈](https://github.com/kubernetes/kops/issues) - - diff --git a/content/ko/docs/setup/release/notes.md b/content/ko/docs/setup/release/notes.md index 3bc3dad135c9d..a0cd9168a1ba8 100644 --- a/content/ko/docs/setup/release/notes.md +++ b/content/ko/docs/setup/release/notes.md @@ -86,7 +86,7 @@ card: ### SIG CLI의 kubectl 디버그 소개 -SIG CLI는 이미 오랫동안 디버그 유틸리티의 필요성에 대해 논의하고 있었다. [임시(ephemeral) 컨테이너](https://kubernetes.io/ko/docs/concepts/workloads/pods/ephemeral-containers/)가 개발되면서, `kubectl exec` 위에 구축된 도구를 통해 개발자를 지원할 수 있는 방법이 더욱 분명해졌다. `kubectl debug` [커맨드](https://github.com/kubernetes/enhancements/blob/master/keps/sig-cli/20190805-kubectl-debug.md) 추가(알파이지만 피드백은 언제나 환영)로 개발자는 클러스터 내에서 파드를 쉽게 디버깅할 수 있다. 우리는 이 추가 기능이 매우 유용하다고 생각한다. 이 커맨드를 사용하면 검사하려는 파드 바로 옆에서 실행되는 임시 컨테이너를 만들 수 있고, 대화식 문제 해결을 위해 콘솔에 연결할 수도 있다. +SIG CLI는 이미 오랫동안 디버그 유틸리티의 필요성에 대해 논의하고 있었다. [임시(ephemeral) 컨테이너](/ko/docs/concepts/workloads/pods/ephemeral-containers/)가 개발되면서, `kubectl exec` 위에 구축된 도구를 통해 개발자를 지원할 수 있는 방법이 더욱 분명해졌다. `kubectl debug` [커맨드](https://github.com/kubernetes/enhancements/blob/master/keps/sig-cli/20190805-kubectl-debug.md) 추가(알파이지만 피드백은 언제나 환영)로 개발자는 클러스터 내에서 파드를 쉽게 디버깅할 수 있다. 우리는 이 추가 기능이 매우 유용하다고 생각한다. 이 커맨드를 사용하면 검사하려는 파드 바로 옆에서 실행되는 임시 컨테이너를 만들 수 있고, 대화식 문제 해결을 위해 콘솔에 연결할 수도 있다. ### 쿠버네티스를 위한 윈도우 CSI 지원 알파 소개 diff --git a/content/ko/docs/tasks/_index.md b/content/ko/docs/tasks/_index.md index 359ec82a9cd15..e6c80f32ed1c1 100644 --- a/content/ko/docs/tasks/_index.md +++ b/content/ko/docs/tasks/_index.md @@ -5,80 +5,11 @@ weight: 50 content_type: concept --- -{{< toc >}} - -쿠버네티스 문서에서 이 섹션은 개별의 태스크를 수행하는 방법을 -보여준다. 한 태스크 페이지는 일반적으로 여러 단계로 이루어진 짧은 +쿠버네티스 문서에서 이 섹션은 개별의 태스크를 수행하는 방법을 +보여준다. 한 태스크 페이지는 일반적으로 여러 단계로 이루어진 짧은 시퀀스를 제공함으로써, 하나의 일을 수행하는 방법을 보여준다. - - - - -## 웹 UI (대시보드) - -쿠버네티스 클러스터에서 컨테이너화 된 애플리케이션을 관리 및 모니터하는 것을 돕기 위해서 대시보드 웹 유저 인터페이스를 디플로이하고 접속한다. - -## kubectl 커맨드라인 사용하기 - -쿠버네티스 클러스터를 직접 관리하기 위해서 사용되는 `kubectl` 커맨드라인 툴을 설치 및 설정한다. - -## 파드 및 컨테이너 구성하기 - -파드 및 컨테이너에 대한 일반적인 구성 태스크를 수행한다. - -## 애플리케이션 동작시키기 - -롤링 업데이트, 파드에 정보 주입하기, 파드 수평적 오토스케일링 등, 일반적인 애플리케이션 관리 태스크를 수행한다. - -## 잡 동작시키기 - -병렬 프로세싱을 사용하는 잡을 동작시킨다. - -## 클러스터의 애플리케이션에 접근하기 - -클러스터 내에 있는 애플리케이션에 접근할 수 있도록 로드 밸런싱, 포트 포워딩, 방화벽 또는 DNS 구성 등을 구성한다. - -## 모니터링, 로깅, 디버깅 - -클러스터 문제를 해결하거나 컨테이너화 된 애플리케이션을 디버깅하기 위해서 모니터링과 로깅을 설정한다. - -## 쿠버네티스 API에 접근하기 - -쿠버네티스 API에 직접 접근하는 다양한 방법을 배운다. - -## TLS 사용하기 - -클러스터 루트 인증 기관(CA)을 신뢰 및 사용하도록 애플리케이션을 구성한다. - -## 클러스터 운영하기(administering) - -클러스터를 운영하기 위한 일반적인 태스크를 배운다. - -## 스테이트풀 애플리케이션 관리하기 - -스테이트풀셋(StatefulSet)의 스케일링, 삭제하기, 디버깅을 포함하는 스테이트풀 애플리케이션 관리를 위한 일반적인 태스크를 수행한다. - -## 클러스터 데몬 - -롤링 업데이트를 수행과 같은, 데몬 셋 관리를 위한 일반적인 태스크를 수행한다. - -## GPU 관리하기 - -클러스터의 노드들에 의해서 리소스로 사용될 NVIDIA GPU들을 구성 및 스케줄한다. - -## HugePage 관리하기 - -클러스터에서 스케줄 가능한 리소스로서 Huge Page들을 구성 및 스케줄한다. - - - -## {{% heading "whatsnext" %}} - - -만약 태스크 페이지를 작성하고 싶다면, -[문서 풀 리퀘스트(Pull Request) 생성하기](/docs/home/contribute/create-pull-request/)를 참조한다. - - +만약 태스크 페이지를 작성하고 싶다면, +[문서 풀 리퀘스트(Pull Request) 생성하기](/ko/docs/contribute/new-content/new-content/)를 참조한다. diff --git a/content/ko/docs/tasks/access-application-cluster/_index.md b/content/ko/docs/tasks/access-application-cluster/_index.md index 4cb552677c830..603b186517923 100755 --- a/content/ko/docs/tasks/access-application-cluster/_index.md +++ b/content/ko/docs/tasks/access-application-cluster/_index.md @@ -1,5 +1,6 @@ --- title: "클러스터 내 어플리케이션 액세스" +description: 클러스터의 애플리케이션에 접근하기 위해 로드 밸런싱, 포트 포워딩, 방화벽 설정 또는 DNS 구성을 설정한다. 
weight: 60 --- diff --git a/content/ko/docs/tasks/access-application-cluster/access-cluster.md b/content/ko/docs/tasks/access-application-cluster/access-cluster.md index ded8f15aad860..c17458a9124eb 100644 --- a/content/ko/docs/tasks/access-application-cluster/access-cluster.md +++ b/content/ko/docs/tasks/access-application-cluster/access-cluster.md @@ -145,7 +145,7 @@ curl $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure 위 예제에서는 `--insecure` flag를 사용했다. 이는 MITM 공격을 받을 수 있는 상태로 두는 것이다. kubectl로 클러스터에 접속할 때 저장된 root 인증서와 클라이언트 인증서들을 서버 접속에 사용한다. -(이들은 `~/.kube` 디렉토리에 설치된다.) +(이들은 `~/.kube` 디렉터리에 설치된다.) 일반적으로 self-signed 인증서가 클러스터 인증서로 사용되므로 당신의 http 클라이언트가 root 인증서를 사용하려면 특수한 설정을 필요로 할 것이다. diff --git a/content/ko/docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume.md b/content/ko/docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume.md index 0218324ff2ef5..5a585eddd45bd 100644 --- a/content/ko/docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume.md +++ b/content/ko/docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume.md @@ -139,7 +139,7 @@ Debian 컨테이너에서 nginx 웹 서버가 호스팅하는 문서의 루트 * [모듈 구조를 위한 합성 컨테이너 구조](http://www.slideshare.net/Docker/slideshare-burns)에 관하여 더 공부한다. -* [파드에서 저장소로 볼룸을 사용하도록 구성하기](/docs/tasks/configure-pod-container/configure-volume-storage/)에 관하여 +* [파드에서 저장소로 볼룸을 사용하도록 구성하기](/ko/docs/tasks/configure-pod-container/configure-volume-storage/)에 관하여 확인한다. * [파드에서 컨테이너 간에 프로세스 네임스페이스를 공유하는 파드 구성하는 방법](/docs/tasks/configure-pod-container/share-process-namespace/)을 참고한다. @@ -147,8 +147,3 @@ Debian 컨테이너에서 nginx 웹 서버가 호스팅하는 문서의 루트 * [볼륨](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#volume-v1-core)을 확인한다. * [파드](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#pod-v1-core)을 확인한다. - - - - - diff --git a/content/ko/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md b/content/ko/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md index e279148e33db8..9f99c6acbd9de 100644 --- a/content/ko/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md +++ b/content/ko/docs/tasks/access-application-cluster/configure-access-multiple-clusters.md @@ -46,7 +46,7 @@ card: 네임스페이스들을 생성하고 있다. development 클러스터에 접근하려면 인증서로 인증을 해야 하고, scratch 클러스터에 접근하려면 사용자네임과 패스워드로 인증을 해야 한다. -`config-exercise`라는 디렉토리를 생성한다. `config-exercise` 디렉토리에 +`config-exercise`라는 디렉터리를 생성한다. `config-exercise` 디렉터리에 다음 내용을 가진 `config-demo`라는 파일을 생성한다. ```shell @@ -76,7 +76,7 @@ contexts: 구성 파일은 클러스터들, 사용자들, 컨텍스트들을 기술한다. `config-demo` 파일은 두 클러스터들과 두 사용자들, 세 컨텍스트들을 기술하기 위한 프레임워크를 가진다. -`config-exercise` 디렉토리로 이동한다. 그리고 다음 커맨드들을 실행하여 구성 파일에 클러스터의 +`config-exercise` 디렉터리로 이동한다. 그리고 다음 커맨드들을 실행하여 구성 파일에 클러스터의 세부사항들을 추가한다. ```shell @@ -245,7 +245,7 @@ kubectl config --kubeconfig=config-demo view --minify ## 두 번째 구성 파일 생성 -`config-exercise` 디렉토리에서 다음 내용으로 `config-demo-2`라는 파일을 생성한다. +`config-exercise` 디렉터리에서 다음 내용으로 `config-demo-2`라는 파일을 생성한다. ```shell apiVersion: v1 @@ -268,31 +268,31 @@ contexts: 이후에 복원할 수 있도록 `KUBECONFIG` 환경 변수의 현재 값을 저장한다. 예: -### Linux +### 리눅스 ```shell export KUBECONFIG_SAVED=$KUBECONFIG ``` -### Windows PowerShell +### 윈도우 PowerShell ```shell $Env:KUBECONFIG_SAVED=$ENV:KUBECONFIG ``` `KUBECONFIG` 환경 변수는 구성 파일들의 경로의 리스트이다. 이 리스트는 -Linux와 Mac에서는 콜론으로 구분되며 Windows에서는 세미콜론으로 구분된다. +리눅스와 Mac에서는 콜론으로 구분되며 윈도우에서는 세미콜론으로 구분된다. `KUBECONFIG` 환경 변수를 가지고 있다면, 리스트에 포함된 구성 파일들에 익숙해지길 바란다. 
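예를 들어 리눅스에서는, 아래와 같은 간단한 스크립트로 `KUBECONFIG` 에 나열된 각 구성 파일의 내용을 차례로 확인해 볼 수 있다(콜론으로 구분된 경로를 가정한 예시이며, `kubeconfig_files` 와 `config_file` 변수 이름은 설명을 위한 것이다).

```shell
# KUBECONFIG를 콜론 기준으로 나누어 배열에 담는다(리눅스/Mac의 구분자를 가정한 예시).
IFS=':' read -ra kubeconfig_files <<< "$KUBECONFIG"

# 나열된 각 구성 파일의 내용을 차례로 출력한다.
for config_file in "${kubeconfig_files[@]}"; do
  echo "=== ${config_file} ==="
  kubectl config view --kubeconfig="${config_file}"
done
```

각 파일에 어떤 클러스터, 사용자, 컨텍스트가 정의되어 있는지 미리 확인해 두면, 이후 여러 파일이 병합된 결과를 이해하기 쉽다.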
다음 예와 같이 임시로 `KUBECONFIG` 환경 변수에 두 개의 경로들을 덧붙여보자. -### Linux +### 리눅스 ```shell export KUBECONFIG=$KUBECONFIG:config-demo:config-demo-2 ``` -### Windows PowerShell +### 윈도우 PowerShell ```shell $Env:KUBECONFIG=("config-demo;config-demo-2") ``` -`config-exercise` 디렉토리에서 다음 커맨드를 입력한다. +`config-exercise` 디렉터리에서 다음 커맨드를 입력한다. ```shell kubectl config view @@ -330,14 +330,14 @@ contexts: kubeconfig 파일들을 어떻게 병합하는지에 대한 상세정보는 [kubeconfig 파일을 사용하여 클러스터 접근 구성하기](/ko/docs/concepts/configuration/organize-cluster-access-kubeconfig/)를 참조한다. -## $HOME/.kube 디렉토리 탐색 +## $HOME/.kube 디렉터리 탐색 만약 당신이 이미 클러스터를 가지고 있고 `kubectl`을 사용하여 -해당 클러스터를 제어하고 있다면, 아마 `$HOME/.kube` 디렉토리에 `config`라는 +해당 클러스터를 제어하고 있다면, 아마 `$HOME/.kube` 디렉터리에 `config`라는 파일을 가지고 있을 것이다. `$HOME/.kube`로 가서 어떤 파일들이 존재하는지 보자. -보통 `config`라는 파일이 존재할 것이다. 해당 디렉토리 내에는 다른 구성 파일들도 있을 수 있다. +보통 `config`라는 파일이 존재할 것이다. 해당 디렉터리 내에는 다른 구성 파일들도 있을 수 있다. 간단하게 말하자면 당신은 이 파일들의 컨텐츠에 익숙해져야 한다. ## $HOME/.kube/config를 KUBECONFIG 환경 변수에 추가 @@ -346,17 +346,17 @@ kubeconfig 파일들을 어떻게 병합하는지에 대한 상세정보는 환경 변수에 나타나지 않는다면 `KUBECONFIG` 환경 변수에 추가해보자. 예: -### Linux +### 리눅스 ```shell export KUBECONFIG=$KUBECONFIG:$HOME/.kube/config ``` -### Windows Powershell +### 윈도우 Powershell ```shell $Env:KUBECONFIG="$Env:KUBECONFIG;$HOME\.kube\config" ``` 이제 `KUBECONFIG` 환경 변수에 리스트에 포함된 모든 파일들이 합쳐진 구성 정보를 보자. -config-exercise 디렉토리에서 다음 커맨드를 실행한다. +config-exercise 디렉터리에서 다음 커맨드를 실행한다. ```shell kubectl config view @@ -366,12 +366,12 @@ kubectl config view `KUBECONFIG` 환경 변수를 원래 값으로 되돌려 놓자. 예를 들면:
-### Linux +### 리눅스 ```shell export KUBECONFIG=$KUBECONFIG_SAVED ``` -### Windows PowerShell +### 윈도우 PowerShell ```shell $Env:KUBECONFIG=$ENV:KUBECONFIG_SAVED ``` diff --git a/content/ko/docs/tasks/access-application-cluster/web-ui-dashboard.md b/content/ko/docs/tasks/access-application-cluster/web-ui-dashboard.md index 75bfb61a76c5d..3aa05a92b036e 100644 --- a/content/ko/docs/tasks/access-application-cluster/web-ui-dashboard.md +++ b/content/ko/docs/tasks/access-application-cluster/web-ui-dashboard.md @@ -97,12 +97,12 @@ Kubeconfig 인증 방법은 외부 아이덴티티 프로파이더 또는 x509 예를 들면: -```conf -release=1.0 -tier=frontend -environment=pod -track=stable -``` + ```conf + release=1.0 + tier=frontend + environment=pod + track=stable + ``` - **네임스페이스**: 쿠버네티스는 동일한 물리 클러스터를 바탕으로 여러 가상의 클러스터를 제공한다. 이러한 가상 클러스터들을 [네임스페이스](/docs/tasks/administer-cluster/namespaces/)라고 부른다. 논리적으로 명명된 그룹으로 리소스들을 분할 할 수 있다. @@ -119,15 +119,15 @@ track=stable - **CPU 요구 사항 (cores)** 와 **메모리 요구 사항 (MiB)**: 컨테이너를 위한 최소 [리소스 상한](/docs/tasks/configure-pod-container/limit-range/)을 정의할 수 있다. 기본적으로, 파드는 CPU와 메모리 상한을 두지 않고 동작한다. -- **커맨드 실행** 와 **커맨드 인수 실행**: 기본적으로, 컨테이너는 선택된 도커 이미지의 [기본 엔트리포인트 커맨드](/docs/tasks/inject-data-application/define-command-argument-container/)를 실행한다. 커맨드 옵션과 인자를 기본 옵션에 우선 적용하여 사용할 수 있다. +- **커맨드 실행** 와 **커맨드 인수 실행**: 기본적으로, 컨테이너는 선택된 도커 이미지의 [기본 엔트리포인트 커맨드](/ko/docs/tasks/inject-data-application/define-command-argument-container/)를 실행한다. 커맨드 옵션과 인자를 기본 옵션에 우선 적용하여 사용할 수 있다. -- **특권을 가진(privileged) 상태로 실행**: 다음 세팅은 호스트에서 루트 권한을 가진 프로세스들이 [특권을 가진 컨테이너](/docs/user-guide/pods/#privileged-mode-for-pod-containers)의 프로세스들과 동등한 지 아닌지 정의한다. 특권을 가진(privileged) 컨테이너는 네트워크 스택과 디바이스에 접근하는 것을 조작하도록 활용할 수 있다. +- **특권을 가진(privileged) 상태로 실행**: 다음 세팅은 호스트에서 루트 권한을 가진 프로세스들이 [특권을 가진 컨테이너](/ko/docs/concepts/workloads/pods/pod/#파드-컨테이너의-특권-privileged-모드)의 프로세스들과 동등한 지 아닌지 정의한다. 특권을 가진(privileged) 컨테이너는 네트워크 스택과 디바이스에 접근하는 것을 조작하도록 활용할 수 있다. - **환경 변수**: 쿠버네티스 서비스를 [환경 변수](/docs/tasks/inject-data-application/environment-variable-expose-pod-information/)를 통해 노출한다. 환경 변수 또는 인자를 환경 변수들의 값으로 커맨드를 통해 구성할 수 있다. 애플리케이션들이 서비스를 찾는데 사용된다. 값들은 `$(VAR_NAME)` 구문을 사용하는 다른 변수들로 참조할 수 있다. ### YAML 또는 JSON 파일 업로드 -쿠버네티스는 선언적인 설정을 제공한다. 이 방식으로 모든 설정은 쿠버네티스 [API](/docs/concepts/overview/kubernetes-api/) 리소스 스키마를 이용하여 YAML 또는 JSON 설정 파일에 저장한다. +쿠버네티스는 선언적인 설정을 제공한다. 이 방식으로 모든 설정은 쿠버네티스 [API](/ko/docs/concepts/overview/kubernetes-api/) 리소스 스키마를 이용하여 YAML 또는 JSON 설정 파일에 저장한다. 배포 마법사를 통해 애플리케이션 세부사항들을 지정하는 대신, 애플리케이션을 YAML 또는 JSON 파일로 정의할 수 있고 대시보드를 이용해서 파일을 업로드할 수 있다. @@ -144,9 +144,9 @@ track=stable 클러스터와 네임스페이스 관리자에게 대시보드는 노드, 네임스페이스 그리고 퍼시스턴트 볼륨과 세부사항들이 보여진다. 노드는 모든 노드를 통틀어 CPU와 메모리 사용량을 보여준다. 세부사항은 각 노드들에 대한 사용량, 사양, 상태, 할당된 리소스, 이벤트 그리고 노드에서 돌아가는 파드를 보여준다. #### 워크로드 -선택된 네임스페이스에서 구동되는 모든 애플리케이션을 보여준다. 애플리케이션의 워크로드 종류(예를 들어, 디플로이먼트, 레플리카 셋, 스테이트풀셋(StatefulSet) 등)를 보여주고 각각의 워크로드 종류는 따로 보여진다. 리스트는 예를 들어 레플리카 셋에서 준비된 파드의 숫자 또는 파드의 현재 메모리 사용량과 같은 워크로드에 대한 실용적인 정보를 요약한다. +선택된 네임스페이스에서 구동되는 모든 애플리케이션을 보여준다. 애플리케이션의 워크로드 종류(예를 들어, 디플로이먼트, 레플리카셋(ReplicaSet), 스테이트풀셋(StatefulSet) 등)를 보여주고 각각의 워크로드 종류는 따로 보여진다. 리스트는 예를 들어 레플리카셋에서 준비된 파드의 숫자 또는 파드의 현재 메모리 사용량과 같은 워크로드에 대한 실용적인 정보를 요약한다. -워크로드에 대한 세부적인 것들은 상태와 사양 정보, 오프젝트들 간의 관계를 보여준다. 예를 들어, 레플리카 셋으로 관리하는 파드들 또는 새로운 레플리카 셋과 디플로이먼트를 위한 Horizontal Pod Autoscalers 이다. +워크로드에 대한 세부적인 것들은 상태와 사양 정보, 오프젝트들 간의 관계를 보여준다. 예를 들어, 레플리카셋으로 관리하는 파드들 또는 새로운 레플리카셋과 디플로이먼트를 위한 Horizontal Pod Autoscalers 이다. #### 서비스 외부로 노출되는 서비스들과 클러스터 내에 발견되는 서비스들을 허용하는 쿠버네티스 리소스들을 보여준다. 
이러한 이유로 서비스와 인그레스는 클러스터간의 연결을 위한 내부 엔드포인트들과 외부 사용자를 위한 외부 엔드포인트들에 의해 타게팅된 파드들을 보여준다. @@ -169,5 +169,3 @@ track=stable 더 많은 정보는 [쿠버네티스 대시보드 프로젝트 페이지](https://github.com/kubernetes/dashboard)를 참고한다. - - diff --git a/content/ko/docs/tasks/administer-cluster/_index.md b/content/ko/docs/tasks/administer-cluster/_index.md index 77ca3f2479051..4913ccf73eea3 100755 --- a/content/ko/docs/tasks/administer-cluster/_index.md +++ b/content/ko/docs/tasks/administer-cluster/_index.md @@ -1,5 +1,6 @@ --- title: "클러스터 운영" +description: 클러스터를 운영하기 위한 공통 태스크를 배운다. weight: 20 --- diff --git a/content/ko/docs/tasks/administer-cluster/access-cluster-api.md b/content/ko/docs/tasks/administer-cluster/access-cluster-api.md new file mode 100644 index 0000000000000..1b92c1283ac4e --- /dev/null +++ b/content/ko/docs/tasks/administer-cluster/access-cluster-api.md @@ -0,0 +1,451 @@ +--- +title: 쿠버네티스 API를 사용하여 클러스터에 접근하기 +content_type: task +--- + + +이 페이지는 쿠버네티스 API를 사용하여 클러스터에 접근하는 방법을 보여준다. + + +## {{% heading "prerequisites" %}} + + +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} + + + + +## 쿠버네티스 API에 접근 + +### kubectl을 사용하여 처음으로 접근 + +쿠버네티스 API에 처음 접근하는 경우, 쿠버네티스 +커맨드 라인 도구인 `kubectl` 을 사용한다. + +클러스터에 접근하려면, 클러스터 위치를 알고 접근할 수 있는 자격 증명이 +있어야 한다. 일반적으로, [시작하기 가이드](/ko/docs/setup/)를 +통해 작업하거나, +다른 사람이 클러스터를 설정하고 자격 증명과 위치를 제공할 때 자동으로 설정된다. + +다음의 명령으로 kubectl이 알고 있는 위치와 자격 증명을 확인한다. + +```shell +kubectl config view +``` + +많은 [예제](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/)는 kubectl 사용에 대한 소개를 +제공한다. 전체 문서는 [kubectl 매뉴얼](/ko/docs/reference/kubectl/overview/)에 있다. + +### REST API에 직접 접근 + +kubectl은 API 서버 찾기와 인증을 처리한다. `curl` 이나 `wget` 과 같은 http 클라이언트 또는 브라우저를 사용하여 REST API에 +직접 접근하려는 경우, API 서버를 찾고 인증할 수 있는 여러 가지 방법이 있다. + + 1. 프록시 모드에서 kubectl을 실행한다(권장). 이 방법은 저장된 API 서버 위치를 사용하고 자체 서명된 인증서를 사용하여 API 서버의 ID를 확인하므로 권장한다. 이 방법을 사용하면 중간자(man-in-the-middle, MITM) 공격이 불가능하다. + 1. 또는, 위치와 자격 증명을 http 클라이언트에 직접 제공할 수 있다. 이 방법은 프록시를 혼란스럽게 하는 클라이언트 코드와 동작한다. 중간자 공격으로부터 보호하려면, 브라우저로 루트 인증서를 가져와야 한다. + + Go 또는 Python 클라이언트 라이브러리를 사용하면 프록시 모드에서 kubectl에 접근할 수 있다. + +#### kubectl 프록시 사용 + +다음 명령은 kubectl을 리버스 프록시로 작동하는 모드에서 실행한다. API +서버 찾기와 인증을 처리한다. + +다음과 같이 실행한다. + +```shell +kubectl proxy --port=8080 & +``` + +자세한 내용은 [kubectl 프록시](/docs/reference/generated/kubectl/kubectl-commands/#proxy)를 참고한다. + +그런 다음 curl, wget 또는 브라우저를 사용하여 API를 탐색할 수 있다. + +```shell +curl http://localhost:8080/api/ +``` + +출력은 다음과 비슷하다. + +```json +{ + "versions": [ + "v1" + ], + "serverAddressByClientCIDRs": [ + { + "clientCIDR": "0.0.0.0/0", + "serverAddress": "10.0.1.149:443" + } + ] +} +``` + +#### kubectl 프록시 없이 접근 + +다음과 같이 인증 토큰을 API 서버에 직접 전달하여 kubectl 프록시 +사용을 피할 수 있다. + +`grep/cut` 방식을 사용한다. + +```shell +# .KUBECONFIG에 여러 콘텍스트가 있을 수 있으므로, 가능한 모든 클러스터를 확인한다. +kubectl config view -o jsonpath='{"Cluster name\tServer\n"}{range .clusters[*]}{.name}{"\t"}{.cluster.server}{"\n"}{end}' + +# 위의 출력에서 상호 작용하려는 클러스터의 이름을 선택한다. +export CLUSTER_NAME="some_server_name" + +# 클러스터 이름을 참조하는 API 서버를 가리킨다. +APISERVER=$(kubectl config view -o jsonpath="{.clusters[?(@.name==\"$CLUSTER_NAME\")].cluster.server}") + +# 토큰 값을 얻는다 +TOKEN=$(kubectl get secrets -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='default')].data.token}"|base64 --decode) + +# TOKEN으로 API 탐색 +curl -X GET $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure +``` + +출력은 다음과 비슷하다. 
+ +```json +{ + "kind": "APIVersions", + "versions": [ + "v1" + ], + "serverAddressByClientCIDRs": [ + { + "clientCIDR": "0.0.0.0/0", + "serverAddress": "10.0.1.149:443" + } + ] +} +``` + +`jsonpath` 방식을 사용한다. + +```shell +APISERVER=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}') +TOKEN=$(kubectl get secret $(kubectl get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode ) +curl $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure +{ + "kind": "APIVersions", + "versions": [ + "v1" + ], + "serverAddressByClientCIDRs": [ + { + "clientCIDR": "0.0.0.0/0", + "serverAddress": "10.0.1.149:443" + } + ] +} +``` + +위의 예는 `--insecure` 플래그를 사용한다. 이로 인해 MITM 공격이 +발생할 수 있다. kubectl이 클러스터에 접근하면 저장된 루트 인증서와 +클라이언트 인증서를 사용하여 서버에 접근한다. (`~/.kube` 디렉터리에 +설치된다.) 클러스터 인증서는 일반적으로 자체 서명되므로, +http 클라이언트가 루트 인증서를 사용하도록 하려면 특별한 구성이 +필요할 수 있다. + +일부 클러스터에서, API 서버는 인증이 필요하지 않다. +로컬 호스트에서 제공되거나, 방화벽으로 보호될 수 있다. 이에 대한 표준은 +없다. [API에 대한 접근 구성](/docs/reference/access-authn-authz/controlling-access/)은 +클러스터 관리자가 이를 구성하는 방법에 대해 설명한다. 이러한 접근 방식은 향후 +고 가용성 지원과 충돌할 수 있다. + +### API에 프로그래밍 방식으로 접근 + +쿠버네티스는 공식적으로 [Go](#go-client), [Python](#python-client), [Java](#java-client), [dotnet](#dotnet-client), [Javascript](#javascript-client) 및 [Haskell](#haskell-client) 용 클라이언트 라이브러리를 지원한다. 쿠버네티스 팀이 아닌 작성자가 제공하고 유지 관리하는 다른 클라이언트 라이브러리가 있다. 다른 언어에서 API에 접근하고 인증하는 방법에 대해서는 [클라이언트 라이브러리](/ko/docs/reference/using-api/client-libraries/)를 참고한다. + +#### Go 클라이언트 {#go-client} + +* 라이브러리를 얻으려면, 다음 명령을 실행한다. `go get k8s.io/client-go@kubernetes-` 어떤 버전이 지원되는지를 확인하려면 [https://github.com/kubernetes/client-go/releases](https://github.com/kubernetes/client-go/releases)를 참고한다. +* client-go 클라이언트 위에 애플리케이션을 작성한다. + +{{< note >}} + +client-go는 자체 API 오브젝트를 정의하므로, 필요한 경우, 기본 리포지터리가 아닌 client-go에서 API 정의를 가져온다. 예를 들어, `import "k8s.io/client-go/kubernetes"` 가 맞다. + +{{< /note >}} + +Go 클라이언트는 kubectl CLI가 API 서버를 찾아 인증하기 위해 사용하는 것과 동일한 [kubeconfig 파일](/ko/docs/tasks/access-application-cluster/configure-access-multiple-clusters/)을 +사용할 수 있다. 이 [예제](https://git.k8s.io/client-go/examples/out-of-cluster-client-configuration/main.go)를 참고한다. + +```golang +import ( + "fmt" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" +) + +func main() { + // kubeconfig에서 현재 콘텍스트를 사용한다 + // path-to-kubeconfig -- 예를 들어, /root/.kube/config + config, _ := clientcmd.BuildConfigFromFlags("", "") + // clientset을 생성한다 + clientset, _ := kubernetes.NewForConfig(config) + // 파드를 나열하기 위해 API에 접근한다 + pods, _ := clientset.CoreV1().Pods("").List(v1.ListOptions{}) + fmt.Printf("There are %d pods in the cluster\n", len(pods.Items)) +} +``` + +애플리케이션이 클러스터에서 파드로 배치된 경우, [파드 내에서 API 접근](#accessing-the-api-from-within-a-pod)을 참고한다. + +#### Python 클라이언트 {#python-client} + +[Python 클라이언트](https://github.com/kubernetes-client/python)를 사용하려면, 다음 명령을 실행한다. `pip install kubernetes` 추가 설치 옵션은 [Python Client Library 페이지](https://github.com/kubernetes-client/python)를 참고한다. + +Python 클라이언트는 kubectl CLI가 API 서버를 찾아 인증하기 위해 사용하는 것과 동일한 [kubeconfig 파일](/ko/docs/tasks/access-application-cluster/configure-access-multiple-clusters/)을 +사용할 수 있다. 이 [예제](https://github.com/kubernetes-client/python/blob/master/examples/out_of_cluster_config.py)를 참고한다. 
+ +```python +from kubernetes import client, config + +config.load_kube_config() + +v1=client.CoreV1Api() +print("Listing pods with their IPs:") +ret = v1.list_pod_for_all_namespaces(watch=False) +for i in ret.items: + print("%s\t%s\t%s" % (i.status.pod_ip, i.metadata.namespace, i.metadata.name)) +``` + +#### Java 클라이언트 {#java-client} + +* [Java 클라이언트](https://github.com/kubernetes-client/java)를 설치하려면, 다음을 실행한다. + +```shell +# java 라이브러리를 클론한다 +git clone --recursive https://github.com/kubernetes-client/java + +# 프로젝트 아티팩트, POM 등을 설치한다 +cd java +mvn install +``` + +어떤 버전이 지원되는지를 확인하려면 [https://github.com/kubernetes-client/java/releases](https://github.com/kubernetes-client/java/releases)를 참고한다. + +Java 클라이언트는 kubectl CLI가 API 서버를 찾아 인증하기 위해 사용하는 것과 동일한 [kubeconfig 파일](/ko/docs/tasks/access-application-cluster/configure-access-multiple-clusters/)을 +사용할 수 있다. 이 [예제](https://github.com/kubernetes-client/java/blob/master/examples/src/main/java/io/kubernetes/client/examples/KubeConfigFileClientExample.java)를 참고한다. + +```java +package io.kubernetes.client.examples; + +import io.kubernetes.client.ApiClient; +import io.kubernetes.client.ApiException; +import io.kubernetes.client.Configuration; +import io.kubernetes.client.apis.CoreV1Api; +import io.kubernetes.client.models.V1Pod; +import io.kubernetes.client.models.V1PodList; +import io.kubernetes.client.util.ClientBuilder; +import io.kubernetes.client.util.KubeConfig; +import java.io.FileReader; +import java.io.IOException; + +/** + * 쿠버네티스 클러스터 외부의 애플리케이션에서 Java API를 사용하는 방법에 대한 간단한 예 + * + *

이것을 실행하는 가장 쉬운 방법: mvn exec:java + * -Dexec.mainClass="io.kubernetes.client.examples.KubeConfigFileClientExample" + * + */ +public class KubeConfigFileClientExample { + public static void main(String[] args) throws IOException, ApiException { + + // KubeConfig의 파일 경로 + String kubeConfigPath = "~/.kube/config"; + + // 파일시스템에서 클러스터 외부 구성인 kubeconfig 로드 + ApiClient client = + ClientBuilder.kubeconfig(KubeConfig.loadKubeConfig(new FileReader(kubeConfigPath))).build(); + + // 전역 디폴트 api-client를 위에서 정의한 클러스터 내 클라이언트로 설정 + Configuration.setDefaultApiClient(client); + + // CoreV1Api는 전역 구성에서 디폴트 api-client를 로드 + CoreV1Api api = new CoreV1Api(); + + // CoreV1Api 클라이언트를 호출한다 + V1PodList list = api.listPodForAllNamespaces(null, null, null, null, null, null, null, null, null); + System.out.println("Listing all pods: "); + for (V1Pod item : list.getItems()) { + System.out.println(item.getMetadata().getName()); + } + } +} +``` + +#### dotnet 클라이언트 {#dotnet-client} + +[dotnet 클라이언트](https://github.com/kubernetes-client/csharp)를 사용하려면, 다음 명령을 실행한다. `dotnet add package KubernetesClient --version 1.6.1` 추가 설치 옵션은 [dotnet Client Library 페이지](https://github.com/kubernetes-client/csharp)를 참고한다. 어떤 버전이 지원되는지를 확인하려면 [https://github.com/kubernetes-client/csharp/releases](https://github.com/kubernetes-client/csharp/releases)를 참고한다. + +dotnet 클라이언트는 kubectl CLI가 API 서버를 찾아 인증하기 위해 사용하는 것과 동일한 [kubeconfig 파일](/ko/docs/tasks/access-application-cluster/configure-access-multiple-clusters/)을 +사용할 수 있다. 이 [예제](https://github.com/kubernetes-client/csharp/blob/master/examples/simple/PodList.cs)를 참고한다. + +```csharp +using System; +using k8s; + +namespace simple +{ + internal class PodList + { + private static void Main(string[] args) + { + var config = KubernetesClientConfiguration.BuildDefaultConfig(); + IKubernetes client = new Kubernetes(config); + Console.WriteLine("Starting Request!"); + + var list = client.ListNamespacedPod("default"); + foreach (var item in list.Items) + { + Console.WriteLine(item.Metadata.Name); + } + if (list.Items.Count == 0) + { + Console.WriteLine("Empty!"); + } + } + } +} +``` + +#### JavaScript 클라이언트 {#javascript-client} + +[JavaScript 클라이언트](https://github.com/kubernetes-client/javascript)를 설치하려면, 다음 명령을 실행한다. `npm install @kubernetes/client-node` 어떤 버전이 지원되는지를 확인하려면 [https://github.com/kubernetes-client/javascript/releases](https://github.com/kubernetes-client/javascript/releases)를 참고한다. + +JavaScript 클라이언트는 kubectl CLI가 API 서버를 찾아 인증하기 위해 사용하는 것과 동일한 [kubeconfig 파일](/ko/docs/tasks/access-application-cluster/configure-access-multiple-clusters/)을 +사용할 수 있다. 이 [예제](https://github.com/kubernetes-client/javascript/blob/master/examples/example.js)를 참고한다. + +```javascript +const k8s = require('@kubernetes/client-node'); + +const kc = new k8s.KubeConfig(); +kc.loadFromDefault(); + +const k8sApi = kc.makeApiClient(k8s.CoreV1Api); + +k8sApi.listNamespacedPod('default').then((res) => { + console.log(res.body); +}); +``` + +#### Haskell 클라이언트 {#haskell-client} + +어떤 버전이 지원되는지를 확인하려면 [https://github.com/kubernetes-client/haskell/releases](https://github.com/kubernetes-client/haskell/releases)를 참고한다. + +Haskell 클라이언트는 kubectl CLI가 API 서버를 찾아 인증하기 위해 사용하는 것과 동일한 [kubeconfig 파일](/ko/docs/tasks/access-application-cluster/configure-access-multiple-clusters/)을 +사용할 수 있다. 이 [예제](https://github.com/kubernetes-client/haskell/blob/master/kubernetes-client/example/App.hs)를 참고한다. 
+ +```haskell +exampleWithKubeConfig :: IO () +exampleWithKubeConfig = do + oidcCache <- atomically $ newTVar $ Map.fromList [] + (mgr, kcfg) <- mkKubeClientConfig oidcCache $ KubeConfigFile "/path/to/kubeconfig" + dispatchMime + mgr + kcfg + (CoreV1.listPodForAllNamespaces (Accept MimeJSON)) + >>= print +``` + + +### 파드 내에서 API에 접근 {#accessing-the-api-from-within-a-pod} + +파드 내에서 API에 접근할 때, API 서버를 찾아 인증하는 것은 +위에서 설명한 외부 클라이언트 사례와 약간 다르다. + +파드에서 쿠버네티스 API를 사용하는 가장 쉬운 방법은 +공식 [클라이언트 라이브러리](/ko/docs/reference/using-api/client-libraries/) 중 하나를 사용하는 것이다. 이러한 +라이브러리는 API 서버를 자동으로 감지하고 인증할 수 있다. + +#### 공식 클라이언트 라이브러리 사용 + +파드 내에서, 쿠버네티스 API에 연결하는 권장 방법은 다음과 같다. + + - Go 클라이언트의 경우, 공식 [Go 클라이언트 라이브러리](https://github.com/kubernetes/client-go/)를 사용한다. + `rest.InClusterConfig()` 기능은 API 호스트 검색과 인증을 자동으로 처리한다. + [여기 예제](https://git.k8s.io/client-go/examples/in-cluster-client-configuration/main.go)를 참고한다. + + - Python 클라이언트의 경우, 공식 [Python 클라이언트 라이브러리](https://github.com/kubernetes-client/python/)를 사용한다. + `config.load_incluster_config()` 기능은 API 호스트 검색과 인증을 자동으로 처리한다. + [여기 예제](https://github.com/kubernetes-client/python/blob/master/examples/in_cluster_config.py)를 참고한다. + + - 사용할 수 있는 다른 라이브러리가 많이 있다. [클라이언트 라이브러리](/ko/docs/reference/using-api/client-libraries/) 페이지를 참고한다. + +각각의 경우, 파드의 서비스 어카운트 자격 증명은 API 서버와 +안전하게 통신하는 데 사용된다. + +#### REST API에 직접 접근 + +파드에서 실행되는 동안, 쿠버네티스 apiserver는 `default` 네임스페이스에서 `kubernetes`라는 +서비스를 통해 접근할 수 있다. 따라서, 파드는 `kubernetes.default.svc` +호스트 이름을 사용하여 API 서버를 쿼리할 수 있다. 공식 클라이언트 라이브러리는 +이를 자동으로 수행한다. + +API 서버를 인증하는 권장 방법은 [서비스 어카운트](/docs/user-guide/service-accounts) +자격 증명을 사용하는 것이다. 기본적으로, 파드는 +서비스 어카운트와 연결되어 있으며, 해당 서비스 어카운트에 대한 자격 증명(토큰)은 +해당 파드에 있는 각 컨테이너의 파일시스템 트리의 +`/var/run/secrets/kubernetes.io/serviceaccount/token` 에 있다. + +사용 가능한 경우, 인증서 번들은 각 컨테이너의 +파일시스템 트리의 `/var/run/secrets/kubernetes.io/serviceaccount/ca.crt` 에 배치되며, +API 서버의 제공 인증서를 확인하는 데 사용해야 한다. + +마지막으로, 네임스페이스가 지정된 API 작업에 사용되는 기본 네임스페이스는 각 컨테이너의 +`/var/run/secrets/kubernetes.io/serviceaccount/namespace` 에 있는 파일에 배치된다. + +#### kubectl 프록시 사용 + +공식 클라이언트 라이브러리 없이 API를 쿼리하려면, 파드에서 +새 사이드카 컨테이너의 [명령](/ko/docs/tasks/inject-data-application/define-command-argument-container/)으로 +`kubectl proxy` 를 실행할 수 있다. 이런 식으로, `kubectl proxy` 는 +API를 인증하고 이를 파드의 `localhost` 인터페이스에 노출시켜서, 파드의 +다른 컨테이너가 직접 사용할 수 있도록 한다. + +#### 프록시를 사용하지 않고 접근 + +인증 토큰을 API 서버에 직접 전달하여 kubectl 프록시 사용을 +피할 수 있다. 내부 인증서는 연결을 보호한다. + +```shell +# 내부 API 서버 호스트 이름을 가리킨다 +APISERVER=https://kubernetes.default.svc + +# ServiceAccount 토큰 경로 +SERVICEACCOUNT=/var/run/secrets/kubernetes.io/serviceaccount + +# 이 파드의 네임스페이스를 읽는다 +NAMESPACE=$(cat ${SERVICEACCOUNT}/namespace) + +# ServiceAccount 베어러 토큰을 읽는다 +TOKEN=$(cat ${SERVICEACCOUNT}/token) + +# 내부 인증 기관(CA)을 참조한다 +CACERT=${SERVICEACCOUNT}/ca.crt + +# TOKEN으로 API를 탐색한다 +curl --cacert ${CACERT} --header "Authorization: Bearer ${TOKEN}" -X GET ${APISERVER}/api +``` + +출력은 다음과 비슷하다. + +```json +{ + "kind": "APIVersions", + "versions": [ + "v1" + ], + "serverAddressByClientCIDRs": [ + { + "clientCIDR": "0.0.0.0/0", + "serverAddress": "10.0.1.149:443" + } + ] +} +``` diff --git a/content/ko/docs/tasks/administer-cluster/access-cluster-services.md b/content/ko/docs/tasks/administer-cluster/access-cluster-services.md new file mode 100644 index 0000000000000..0b1cf9540fa62 --- /dev/null +++ b/content/ko/docs/tasks/administer-cluster/access-cluster-services.md @@ -0,0 +1,134 @@ +--- +title: 클러스터에서 실행되는 서비스에 접근 +content_type: task +--- + + +이 페이지는 쿠버네티스 클러스터에서 실행되는 서비스에 연결하는 방법을 보여준다. 
+ + +## {{% heading "prerequisites" %}} + + +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} + + + + +## 클러스터에서 실행되는 서비스에 접근 + +쿠버네티스에서, [노드](/ko/docs/concepts/architecture/nodes/), [파드](/ko/docs/concepts/workloads/pods/pod/) 및 [서비스](/ko/docs/concepts/services-networking/service/)는 모두 +고유한 IP를 가진다. 대부분의 경우, 클러스터의 노드 IP, 파드 IP 및 일부 서비스 IP는 라우팅할 수 +없으므로, 데스크톱 시스템과 같은 클러스터 외부 시스템에서 +도달할 수 없다. + +### 연결하는 방법 + +클러스터 외부에서 노드, 파드 및 서비스에 연결하기 위한 몇 가지 옵션이 있다. + + - 퍼블릭 IP를 통해 서비스에 접근한다. + - `NodePort` 또는 `LoadBalancer` 타입의 서비스를 사용하여 해당 서비스를 클러스터 외부에서 + 접근할 수 있게 한다. [서비스](/ko/docs/concepts/services-networking/service/)와 + [kubectl expose](/docs/reference/generated/kubectl/kubectl-commands/#expose) 문서를 참고한다. + - 클러스터 환경에 따라, 서비스는 단지 회사 네트워크에 노출되기도 하며, + 인터넷에 노출되는 경우도 있다. 노출되는 서비스가 안전한지 생각한다. + 자체 인증을 수행하는가? + - 서비스 뒤에 파드를 배치한다. 디버깅과 같은 목적으로 레플리카 집합에서 특정 파드에 접근하려면, + 파드에 고유한 레이블을 배치하고 이 레이블을 선택하는 새 서비스를 생성한다. + - 대부분의 경우, 애플리케이션 개발자가 nodeIP를 통해 노드에 직접 + 접근할 필요는 없다. + - 프록시 작업(Proxy Verb)을 사용하여 서비스, 노드 또는 파드에 접근한다. + - 원격 서비스에 접근하기 전에 apiserver 인증과 권한 부여를 수행한다. + 서비스가 인터넷에 노출되거나, 노드 IP의 포트에 접근하거나, 디버깅하기에 + 충분히 안전하지 않은 경우 사용한다. + - 프록시는 일부 웹 애플리케이션에 문제를 일으킬 수 있다. + - HTTP/HTTPS에서만 작동한다. + - [여기](#apiserver-프록시-url-수동-구성)에 설명되어 있다. + - 클러스터의 노드 또는 파드에서 접근한다. + - 파드를 실행한 다음, [kubectl exec](/docs/reference/generated/kubectl/kubectl-commands/#exec)를 사용하여 셸에 연결한다. + 해당 셸에서 다른 노드, 파드 및 서비스에 연결한다. + - 일부 클러스터는 클러스터의 노드로 ssh를 통해 접근하는 것을 허용한다. 거기에서 클러스터 서비스에 + 접근할 수 있다. 이것은 비표준 방법이며, 일부 클러스터에서는 작동하지만 다른 클러스터에서는 + 작동하지 않는다. 브라우저 및 기타 도구가 설치되거나 설치되지 않을 수 있다. 클러스터 DNS가 작동하지 않을 수도 있다. + +### 빌트인 서비스 검색 + +일반적으로, kube-system에 의해 클러스터에서 시작되는 몇 가지 서비스가 있다. `kubectl cluster-info` 명령을 +사용하여 이들의 목록을 얻는다. + +```shell +kubectl cluster-info +``` + +출력은 다음과 비슷하다. + +``` +Kubernetes master is running at https://104.197.5.247 +elasticsearch-logging is running at https://104.197.5.247/api/v1/namespaces/kube-system/services/elasticsearch-logging/proxy +kibana-logging is running at https://104.197.5.247/api/v1/namespaces/kube-system/services/kibana-logging/proxy +kube-dns is running at https://104.197.5.247/api/v1/namespaces/kube-system/services/kube-dns/proxy +grafana is running at https://104.197.5.247/api/v1/namespaces/kube-system/services/monitoring-grafana/proxy +heapster is running at https://104.197.5.247/api/v1/namespaces/kube-system/services/monitoring-heapster/proxy +``` + +각 서비스에 접근하기 위한 프록시-작업 URL이 표시된다. +예를 들어, 이 클러스터에는 `https://104.197.5.247/api/v1/namespaces/kube-system/services/elasticsearch-logging/proxy/` 로 +접근할 수 있는 (Elasticsearch를 사용한) 클러스터 수준 로깅이 활성화되어 있다. 적합한 자격 증명이 전달되는 경우나 kubectl proxy를 통해 도달할 수 있다. 예를 들어 다음의 URL에서 확인할 수 있다. +`http://localhost:8080/api/v1/namespaces/kube-system/services/elasticsearch-logging/proxy/`. + +{{< note >}} +자격 증명을 전달하거나 kubectl proxy를 사용하는 방법은 [쿠버네티스 API를 사용하여 클러스터에 접근하기](/ko/docs/tasks/administer-cluster/access-cluster-api/)를 참고한다. +{{< /note >}} + +#### apiserver 프록시 URL 수동 구성 + +위에서 언급한 것처럼, `kubectl cluster-info` 명령을 사용하여 서비스의 프록시 URL을 검색한다. 서비스 엔드포인트, 접미사 및 매개 변수를 포함하는 프록시 URL을 작성하려면, 단순히 서비스의 프록시 URL에 추가하면 된다. +`http://`*`kubernetes_master_address`*`/api/v1/namespaces/`*`namespace_name`*`/services/`*`[https:]service_name[:port_name]`*`/proxy` + +포트에 대한 이름을 지정하지 않은 경우, URL에 *port_name* 을 지정할 필요가 없다. + +##### 예제 + +* Elasticsearch 서비스 엔드포인트 `_search?q=user:kimchy` 에 접근하려면, 다음을 사용한다. 
+ + ``` + http://104.197.5.247/api/v1/namespaces/kube-system/services/elasticsearch-logging/proxy/_search?q=user:kimchy + ``` + +* Elasticsearch 클러스터 상태 정보 `_cluster/health?pretty=true` 에 접근하려면, 다음을 사용한다. + + ``` + https://104.197.5.247/api/v1/namespaces/kube-system/services/elasticsearch-logging/proxy/_cluster/health?pretty=true + ``` + + 상태 정보는 다음과 비슷하다. + + ```json + { + "cluster_name" : "kubernetes_logging", + "status" : "yellow", + "timed_out" : false, + "number_of_nodes" : 1, + "number_of_data_nodes" : 1, + "active_primary_shards" : 5, + "active_shards" : 5, + "relocating_shards" : 0, + "initializing_shards" : 0, + "unassigned_shards" : 5 + } + ``` + +* *https* Elasticsearch 서비스 상태 정보 `_cluster/health?pretty=true` 에 접근하려면, 다음을 사용한다. + + ``` + https://104.197.5.247/api/v1/namespaces/kube-system/services/https:elasticsearch-logging/proxy/_cluster/health?pretty=true + ``` + +#### 웹 브라우저를 사용하여 클러스터에서 실행되는 서비스에 접근 + +브라우저의 주소 표시줄에 apiserver 프록시 URL을 넣을 수 있다. 그러나, + + - 웹 브라우저는 일반적으로 토큰을 전달할 수 없으므로, 기본 (비밀번호) 인증을 사용해야 할 수도 있다. Apiserver는 기본 인증을 수락하도록 구성할 수 있지만, + 클러스터는 기본 인증을 수락하도록 구성되지 않을 수 있다. + - 일부 웹 앱, 특히 프록시 경로 접두사를 인식하지 못하는 방식으로 URL을 구성하는 클라이언트 측 자바스크립트가 있는 + 웹 앱이 작동하지 않을 수 있다. diff --git a/content/ko/docs/tasks/administer-cluster/change-default-storage-class.md b/content/ko/docs/tasks/administer-cluster/change-default-storage-class.md index 9c9341919ad2b..8fd7445fb7dc5 100644 --- a/content/ko/docs/tasks/administer-cluster/change-default-storage-class.md +++ b/content/ko/docs/tasks/administer-cluster/change-default-storage-class.md @@ -4,7 +4,7 @@ content_type: task --- -이 페이지는 특별한 요구사항이 없는 퍼시스턴트볼륨클레임(PersistentVolumeClaim)의 볼륨을 프로비저닝 +이 페이지는 특별한 요구사항이 없는 퍼시스턴트볼륨클레임(PersistentVolumeClaim)의 볼륨을 프로비저닝 하는데 사용되는 기본 스토리지 클래스를 변경하는 방법을 보여준다. @@ -20,21 +20,21 @@ content_type: task ## 왜 기본 스토리지 클래스를 변경하는가? -설치 방법에 따라, 사용자의 쿠버네티스 클러스터는 기본으로 표시된 기존 -스토리지클래스와 함께 배포될 수 있다. 이 기본 스토리지클래스는 특정 -스토리지 클래스가 필요하지 않은 퍼시스턴트볼륨클레임에 대해 스토리지를 +설치 방법에 따라, 사용자의 쿠버네티스 클러스터는 기본으로 표시된 기존 +스토리지클래스와 함께 배포될 수 있다. 이 기본 스토리지클래스는 특정 +스토리지 클래스가 필요하지 않은 퍼시스턴트볼륨클레임에 대해 스토리지를 동적으로 프로비저닝 하기 위해 사용된다. -더 자세한 내용은 [퍼시스턴트볼륨클레임 문서](/ko/docs/concepts/storage/persistent-volumes/#class-1)를 +더 자세한 내용은 [퍼시스턴트볼륨클레임 문서](/ko/docs/concepts/storage/persistent-volumes/#퍼시스턴트볼륨클레임)를 보자. -미리 설치된 기본 스토리지클래스가 사용자의 예상되는 워크로드에 적합하지 -않을수도 있다. 예를 들어, 너무 가격이 높은 스토리지를 프로비저닝 해야할 -수도 있다. 이런 경우에, 기본 스토리지 클래스를 변경하거나 완전히 비활성화 +미리 설치된 기본 스토리지클래스가 사용자의 예상되는 워크로드에 적합하지 +않을수도 있다. 예를 들어, 너무 가격이 높은 스토리지를 프로비저닝 해야할 +수도 있다. 이런 경우에, 기본 스토리지 클래스를 변경하거나 완전히 비활성화 하여 스토리지의 동적 프로비저닝을 방지할 수 있다. -단순하게 기본 스토리지클래스를 삭제하는 경우, 사용자의 클러스터에서 구동중인 -애드온 매니저에 의해 자동으로 다시 생성될 수 있으므로 정상적으로 삭제가 되지 않을 수도 있다. 애드온 관리자 -및 개별 애드온을 비활성화 하는 방법에 대한 자세한 내용은 설치 문서를 참조하자. +단순하게 기본 스토리지클래스를 삭제하는 경우, 사용자의 클러스터에서 구동중인 +애드온 매니저에 의해 자동으로 다시 생성될 수 있으므로 정상적으로 삭제가 되지 않을 수도 있다. 애드온 관리자 +및 개별 애드온을 비활성화 하는 방법에 대한 자세한 내용은 설치 문서를 참조하자. ## 기본 스토리지클래스 변경하기 @@ -56,7 +56,7 @@ content_type: task 1. 기본 스토리지클래스를 기본값이 아닌 것으로 표시한다. - 기본 스토리지클래스에는 + 기본 스토리지클래스에는 `storageclass.kubernetes.io/is-default-class` 의 값이 `true` 로 설정되어 있다. 다른 값이거나 어노테이션이 없을 경우 `false` 로 처리된다. diff --git a/content/ko/docs/tasks/administer-cluster/change-pv-reclaim-policy.md b/content/ko/docs/tasks/administer-cluster/change-pv-reclaim-policy.md new file mode 100644 index 0000000000000..dfd99231139ff --- /dev/null +++ b/content/ko/docs/tasks/administer-cluster/change-pv-reclaim-policy.md @@ -0,0 +1,97 @@ +--- +title: 퍼시스턴트볼륨 반환 정책 변경하기 +content_type: task +--- + + +이 페이지는 쿠버네티스 퍼시트턴트볼륨(PersistentVolume)의 반환 정책을 +변경하는 방법을 보여준다. 
+ + +## {{% heading "prerequisites" %}} + + +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} + + + + + +## 왜 퍼시스턴트볼륨 반환 정책을 변경하는가? + +`PersistentVolumes` 은 "Retain(보존)", "Recycle(재활용)", "Delete(삭제)" 를 포함한 +다양한 반환 정책을 갖는다. 동적으로 프로비저닝 된 `PersistentVolumes` 의 경우 +기본 반환 정책은 "Delete" 이다. 이는 사용자가 해당 `PersistentVolumeClaim` 을 삭제하면, +동적으로 프로비저닝 된 볼륨이 자동적으로 삭제됨을 의미한다. +볼륨에 중요한 데이터가 포함된 경우, 이러한 자동 삭제는 부적절 할 수 있다. +이 경우에는, "Retain" 정책을 사용하는 것이 더 적합하다. +"Retain" 정책에서, 사용자가 `PersistentVolumeClaim` 을 삭제할 경우 해당하는 +`PersistentVolume` 은 삭제되지 않는다. +대신, `Released` 단계로 이동되어, 모든 데이터를 수동으로 복구할 수 있다. + +## 퍼시스턴트볼륨 반환 정책 변경하기 + +1. 사용자의 클러스터에서 퍼시스턴트볼륨을 조회한다. + + ```shell + kubectl get pv + ``` + + 결과는 아래와 같다. + + NAME CAPACITY ACCESSMODES RECLAIMPOLICY STATUS CLAIM STORAGECLASS REASON AGE + pvc-b6efd8da-b7b5-11e6-9d58-0ed433a7dd94 4Gi RWO Delete Bound default/claim1 manual 10s + pvc-b95650f8-b7b5-11e6-9d58-0ed433a7dd94 4Gi RWO Delete Bound default/claim2 manual 6s + pvc-bb3ca71d-b7b5-11e6-9d58-0ed433a7dd94 4Gi RWO Delete Bound default/claim3 manual 3s + + 이 목록은 동적으로 프로비저닝 된 볼륨을 쉽게 식별할 수 있도록 + 각 볼륨에 바인딩 되어 있는 퍼시스턴트볼륨클레임(PersistentVolumeClaim)의 이름도 포함한다. + +1. 사용자의 퍼시스턴트볼륨 중 하나를 선택한 후에 반환 정책을 변경한다. + + ```shell + kubectl patch pv -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}' + ``` + + `` 는 사용자가 선택한 퍼시스턴트볼륨의 이름이다. + + {{< note >}} + 윈도우에서는, 공백이 포함된 모든 JSONPath 템플릿에 _겹_ 따옴표를 사용해야 한다.(bash에 대해 위에서 표시된 홑 따옴표가 아니다.) 따라서 템플릿의 모든 표현식에서 홑 따옴표를 쓰거나, 이스케이프 처리된 겹 따옴표를 써야 한다. 예를 들면 다음과 같다. + +```cmd +kubectl patch pv -p "{\"spec\":{\"persistentVolumeReclaimPolicy\":\"Retain\"}}" +``` + + {{< /note >}} + +1. 선택한 PersistentVolume이 올바른 정책을 갖는지 확인한다. + + ```shell + kubectl get pv + ``` + + 결과는 아래와 같다. + + NAME CAPACITY ACCESSMODES RECLAIMPOLICY STATUS CLAIM STORAGECLASS REASON AGE + pvc-b6efd8da-b7b5-11e6-9d58-0ed433a7dd94 4Gi RWO Delete Bound default/claim1 manual 40s + pvc-b95650f8-b7b5-11e6-9d58-0ed433a7dd94 4Gi RWO Delete Bound default/claim2 manual 36s + pvc-bb3ca71d-b7b5-11e6-9d58-0ed433a7dd94 4Gi RWO Retain Bound default/claim3 manual 33s + + 위 결과에서, `default/claim3` 클레임과 바인딩 되어 있는 볼륨이 `Retain` 반환 정책을 + 갖는 것을 볼 수 있다. 사용자가 `default/claim3` 클레임을 삭제할 경우, + 볼륨은 자동으로 삭제 되지 않는다. + + + +## {{% heading "whatsnext" %}} + +* [퍼시스턴트볼륨](/ko/docs/concepts/storage/persistent-volumes/)에 대해 더 배워 보기. +* [퍼시스턴트볼륨클레임](/ko/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)에 대해 더 배워 보기. + +### Reference + +* [퍼시스턴트볼륨](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolume-v1-core) +* [퍼시스턴트볼륨클레임](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumeclaim-v1-core) +* [PersistentVolumeSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumeclaim-v1-core)의 `persistentVolumeReclaimPolicy` 필드에 대해 보기. + + diff --git a/content/ko/docs/tasks/administer-cluster/cluster-management.md b/content/ko/docs/tasks/administer-cluster/cluster-management.md index b84b42b7e1a2f..440ece1531200 100644 --- a/content/ko/docs/tasks/administer-cluster/cluster-management.md +++ b/content/ko/docs/tasks/administer-cluster/cluster-management.md @@ -5,9 +5,9 @@ content_type: concept -이 문서는 클러스터의 라이프사이클에 관련된 몇 가지 주제들을 설명한다. 신규 클러스터 생성, -클러스터의 마스터와 워커 노드들의 업그레이드, -노드 유지보수(예. 커널 업그레이드) 수행, 운영 중인 클러스터의 +이 문서는 클러스터의 라이프사이클에 관련된 몇 가지 주제들을 설명한다. 신규 클러스터 생성, +클러스터의 마스터와 워커 노드들의 업그레이드, +노드 유지보수(예. 커널 업그레이드) 수행, 운영 중인 클러스터의 쿠버네티스 API 버전 업그레이드. 
@@ -25,17 +25,17 @@ content_type: concept ### Azure Kubernetes Service (AKS) 클러스터 업그레이드 -Azure Kubernetes Service는 클러스터의 컨트롤 플레인과 노드를 손쉽게 셀프 서비스 업그레이드할 수 있게 해준다. 프로세스는 +Azure Kubernetes Service는 클러스터의 컨트롤 플레인과 노드를 손쉽게 셀프 서비스 업그레이드할 수 있게 해준다. 프로세스는 현재 사용자가 직접 시작하는 방식이며 [Azure AKS 문서](https://docs.microsoft.com/en-us/azure/aks/upgrade-cluster)에 설명되어 있다. ### Google Compute Engine 클러스터 업그레이드 -Google Compute Engine Open Source (GCE-OSS)는 마스터를 삭제하고 -재생성하는 방식으로 마스터 업그레이드를 지원한다. 하지만 업그레이드 간에 데이터를 보존하기 위해 +Google Compute Engine Open Source (GCE-OSS)는 마스터를 삭제하고 +재생성하는 방식으로 마스터 업그레이드를 지원한다. 하지만 업그레이드 간에 데이터를 보존하기 위해 동일한 Persistent Disk(PD)를 유지한다. -GCE의 노드 업그레이드는 [관리형 인스턴스 그룹](https://cloud.google.com/compute/docs/instance-groups/)을 사용하며, 각 노드는 -순차적으로 제거된 후에 신규 소프트웨어를 가지고 재생성된다. 해당 노드에서 동작하는 파드들은 +GCE의 노드 업그레이드는 [관리형 인스턴스 그룹](https://cloud.google.com/compute/docs/instance-groups/)을 사용하며, 각 노드는 +순차적으로 제거된 후에 신규 소프트웨어를 가지고 재생성된다. 해당 노드에서 동작하는 파드들은 레플리케이션 컨트롤러에 의해서 제어되거나, 롤 아웃 후에 수작업으로 재생성되어야 한다. open source Google Compute Engine(GCE) 클러스터 업그레이드는 `cluster/gce/upgrade.sh` 스크립트로 제어한다. @@ -81,7 +81,7 @@ Oracle은 당신이 고가용성의 관리형 쿠버네티스 컨트롤 플레 ## 클러스터 크기 재조정 -[노드 자가 등록 모드](/ko/docs/concepts/architecture/nodes/#노드에-대한-자체-등록)로 운영 중인 클러스터가 리소스가 부족하다면 쉽게 머신들을 더 추가할 수 있다. GCE나 Google Kubernetes Engine을 사용하고 있다면 노드들을 관리하는 인스턴스 그룹의 크기를 재조정하여 이를 수행할 수 있다. +[노드 자가 등록 모드](/ko/docs/concepts/architecture/nodes/#노드에-대한-자체-등록)로 운영 중인 클러스터가 리소스가 부족하다면 쉽게 머신들을 더 추가할 수 있다. GCE나 Google Kubernetes Engine을 사용하고 있다면 노드들을 관리하는 인스턴스 그룹의 크기를 재조정하여 이를 수행할 수 있다. [Google Cloud 콘솔 페이지](https://console.developers.google.com)를 사용한다면 `Compute > Compute Engine > Instance groups > your group > Edit group`에서 인스턴스들의 숫자를 고쳐서 이를 수행할 수 있으며 gcloud CLI를 사용한다면 다음 커맨드를 사용하여 이를 수행할 수 있다. ```shell @@ -99,23 +99,23 @@ Azure Kubernetes Service는 사용자가 CLI나 Azure 포털에서 클러스터 ### 클러스터 오토스케일링 -GCE나 Google Kubernetes Engine을 사용한다면, 파드가 필요로하는 리소스를 기반으로 클러스터의 크기를 자동으로 +GCE나 Google Kubernetes Engine을 사용한다면, 파드가 필요로하는 리소스를 기반으로 클러스터의 크기를 자동으로 재조정하도록 클러스터를 구성할 수 있다. -[컴퓨트 리소스](/docs/concepts/configuration/manage-compute-resources-container/)에 기술된 것처럼 사용자들은 파드에 얼마만큼의 CPU와 메모리를 할당할 것인지 예약할 수 있다. -이 정보는 쿠버네티스 스케줄러가 해당 파드를 어디에서 실행시킬 것인지를 결정할 때 사용된다. -여유 용량이 넉넉한 노드가 없다면 (또는 다른 파드 요구조건을 충족하지 못한다면) 해당 파드는 +[컴퓨트 리소스](/ko/docs/concepts/configuration/manage-resources-containers/)에 기술된 것처럼 사용자들은 파드에 얼마만큼의 CPU와 메모리를 할당할 것인지 예약할 수 있다. +이 정보는 쿠버네티스 스케줄러가 해당 파드를 어디에서 실행시킬 것인지를 결정할 때 사용된다. +여유 용량이 넉넉한 노드가 없다면 (또는 다른 파드 요구조건을 충족하지 못한다면) 해당 파드는 다른 파드들이 종료될 때까지 기다리거나 신규 노드가 추가될 때까지 기다린다. -Cluster autoscaler는 스케줄링될 수 없는 파드들을 검색하여 클러스터 내의 다른 노드들과 유사한 신규 노드를 +Cluster autoscaler는 스케줄링될 수 없는 파드들을 검색하여 클러스터 내의 다른 노드들과 유사한 신규 노드를 추가하는 것이 도움이 되는지를 체크한다. 만약 도움이 된다면 대기중인 파드들을 수용하기 위해 클러스터의 크기를 재조정한다. -Cluster autoscaler는 또한 하나 이상의 노드들이 장기간(10분, 하지만 미래에는 변경될 수 있다.)동안 +Cluster autoscaler는 또한 하나 이상의 노드들이 장기간(10분, 하지만 미래에는 변경될 수 있다.)동안 더 이상 필요하지 않다는 것을 확인했을 때 클러스터를 스케일 다운하기도 한다. Cluster autoscaler는 인스턴스 그룹(GCE)이나 노드 풀(Google Kubernetes Engine) 단위로 구성된다. -GCE를 사용한다면 kube-up.sh 스크립트로 클러스터를 생성할 때 Cluster autoscaler를 활성화할 수 있다. +GCE를 사용한다면 kube-up.sh 스크립트로 클러스터를 생성할 때 Cluster autoscaler를 활성화할 수 있다. cluster autoscaler를 구성하려면 다음 세 가지 환경 변수들을 설정해야 한다. * `KUBE_ENABLE_CLUSTER_AUTOSCALER` - true로 설정되면 cluster autoscaler를 활성화한다. 
@@ -128,8 +128,8 @@ cluster autoscaler를 구성하려면 다음 세 가지 환경 변수들을 설 KUBE_ENABLE_CLUSTER_AUTOSCALER=true KUBE_AUTOSCALER_MIN_NODES=3 KUBE_AUTOSCALER_MAX_NODES=10 NUM_NODES=5 ./cluster/kube-up.sh ``` -Google Kubernetes Engine에서는 클러스터 생성이나 업데이트, 또는 (오토스케일하려고 하는) 특정 노드 풀의 -생성 시기에 해당 `gcloud` 커맨드에 `--enable-autoscaling` `--minnodes` `--maxnodes` 플래그들을 +Google Kubernetes Engine에서는 클러스터 생성이나 업데이트, 또는 (오토스케일하려고 하는) 특정 노드 풀의 +생성 시기에 해당 `gcloud` 커맨드에 `--enable-autoscaling` `--minnodes` `--maxnodes` 플래그들을 전달하여 cluster autoscaler를 구성할 수 있다. 예제: @@ -144,17 +144,17 @@ gcloud container clusters update mytestcluster --enable-autoscaling --min-nodes= **Cluster autoscaler는 노드가 수작업으로 변경(예. kubectl을 통해 레이블을 추가)되는 경우를 예상하지 않는데, 동일한 인스턴스 그룹 내의 신규 노드들에 이 속성들이 전파되지 않을 것이기 때문이다.** -cluster autoscaler가 클러스터 스케일 여부와 언제 어떻게 클러스터 스케일하는지에 대한 상세 사항은 -autoscaler 프로젝트의 [FAQ](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md) +cluster autoscaler가 클러스터 스케일 여부와 언제 어떻게 클러스터 스케일하는지에 대한 상세 사항은 +autoscaler 프로젝트의 [FAQ](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md) 문서를 참조하기를 바란다. ## 노드 유지보수 -(커널 업그레이드, libc 업그레이드, 하드웨어 수리 등으로) 한 노드를 리부트해야하는데 다운타임이 짧다면, -Kubelet이 재시작할 때 해당 노드에 스케줄된 파드들을 재시작하려고 할 것이다. 만약 리부트가 길게 걸린다면 -(컨트롤러 관리자의 `--pod-eviction-timeout`으로 제어되는 기본 시간은 5분이다.) -노드 컨트롤러는 사용불가한 노드에 묶여져 있는 파드들을 종료 시킬 것이다. 만약 상응하는 -레플리카 셋 (또는 레플리케이션 컨트롤러)가 존재한다면, 해당 파드의 신규 복제본을 다른 노드에서 기동시킬 것이다. 따라서, 모든 파드들이 +(커널 업그레이드, libc 업그레이드, 하드웨어 수리 등으로) 한 노드를 리부트해야하는데 다운타임이 짧다면, +Kubelet이 재시작할 때 해당 노드에 스케줄된 파드들을 재시작하려고 할 것이다. 만약 리부트가 길게 걸린다면 +(컨트롤러 관리자의 `--pod-eviction-timeout`으로 제어되는 기본 시간은 5분이다.) +노드 컨트롤러는 사용불가한 노드에 묶여져 있는 파드들을 종료 시킬 것이다. 만약 상응하는 +레플리카셋(ReplicaSet) (또는 레플리케이션 컨트롤러)가 존재한다면, 해당 파드의 신규 복제본을 다른 노드에서 기동시킬 것이다. 따라서, 모든 파드들이 복제된 상황에서 모든 노드들이 동시에 다운되지 않는다고 가정했을 때, 별다른 조작없이 업데이트를 진행할 수 있다. 만약 업그레이드 과정을 상세하게 통제하기를 원한다면, 다음 워크플로우를 사용할 수 있다. @@ -167,9 +167,9 @@ kubectl drain $NODENAME 이렇게하면 파드가 종료되는 동안 신규 파드들이 해당 노드에 스케줄되는 것을 방지한다. -레플리카 셋의 파드들은 신규 노드에 스케줄되는 신규 파드로 교체될 것이다. 추가적으로 해당 파드가 한 서비스의 일부라면, 클라이언트들은 자동으로 신규 파드로 재전송될 것이다. +레플리카셋의 파드들은 신규 노드에 스케줄되는 신규 파드로 교체될 것이다. 추가적으로 해당 파드가 한 서비스의 일부라면, 클라이언트들은 자동으로 신규 파드로 재전송될 것이다. -레플리카 셋이 아닌 파드들은 직접 해당 파드의 새로운 복제본을 올려야 하며, 해당 파드가 한 서비스의 일부가 아니라면 클라이언트들을 신규 복제본으로 재전송해야 한다. +레플리카셋이 아닌 파드들은 직접 해당 파드의 새로운 복제본을 올려야 하며, 해당 파드가 한 서비스의 일부가 아니라면 클라이언트들을 신규 복제본으로 재전송해야 한다. 해당 노드에 유지보수 작업을 수행한다. @@ -179,8 +179,8 @@ kubectl drain $NODENAME kubectl uncordon $NODENAME ``` -해당 노드의 VM 인스턴스를 삭제하고 신규로 생성했다면, 신규로 스케줄 가능한 노드 리소스가 -자동으로 생성될 것이다.(당신이 노드 디스커버리를 지원하는 클라우드 제공자를 사용한다면; +해당 노드의 VM 인스턴스를 삭제하고 신규로 생성했다면, 신규로 스케줄 가능한 노드 리소스가 +자동으로 생성될 것이다.(당신이 노드 디스커버리를 지원하는 클라우드 제공자를 사용한다면; 이는 현재 Google Compute Engine만 지원되며 Google Compute Engine 상에서 kube-register를 사용하는 CoreOS를 포함하지는 않는다.) 상세 내용은 [노드](/ko/docs/concepts/architecture/nodes)를 참조하라. ## 고급 주제들 @@ -199,15 +199,15 @@ kubectl uncordon $NODENAME ### 클러스터에서 API 버전을 ON/OFF 하기 -특정 API 버전들은 API 서버가 올라오는 동안 `--runtime-config=api/` 플래그를 전달하여 ON/OFF 시킬 수 있다. 예를 들어, v1 API를 OFF 시키려면, `--runtime-config=api/v1=false`를 -전달한다. runtime-config는 모든 API들과 레거시 API들을 각각 제어하는 api/all과 api/legacy 2가지 특수 키도 지원한다. -예를 들어, v1을 제외한 모든 API 버전들을 OFF하려면 `--runtime-config=api/all=false,api/v1=true`를 전달한다. +특정 API 버전들은 API 서버가 올라오는 동안 `--runtime-config=api/` 플래그를 전달하여 ON/OFF 시킬 수 있다. 예를 들어, v1 API를 OFF 시키려면, `--runtime-config=api/v1=false`를 +전달한다. runtime-config는 모든 API들과 레거시 API들을 각각 제어하는 api/all과 api/legacy 2가지 특수 키도 지원한다. +예를 들어, v1을 제외한 모든 API 버전들을 OFF하려면 `--runtime-config=api/all=false,api/v1=true`를 전달한다. 
이 플래그들을 위해 레거시 API들은 명확하게 사용중단된 API들이다.(예. `v1beta3`) ### 클러스터에서 스토리지 API 버전을 변경 클러스터 내에서 활성화된 쿠버네티스 리소스들의 클러스터의 내부 표현을 위해 디스크에 저장된 객체들은 특정 버전의 API를 사용하여 작성된다. -지원되는 API가 변경될 때, 이 객체들은 새로운 API로 재작성되어야 할 수도 있다. 이것이 실패하면 결과적으로 리소스들이 +지원되는 API가 변경될 때, 이 객체들은 새로운 API로 재작성되어야 할 수도 있다. 이것이 실패하면 결과적으로 리소스들이 쿠버네티스 API 서버에서 더 이상 해독되거나 사용할 수 없게 될 것이다. ### 구성 파일을 신규 API 버전으로 변경 @@ -219,5 +219,3 @@ kubectl convert -f pod.yaml --output-version v1 ``` 옵션에 대한 상세 정보는 [kubectl convert](/docs/reference/generated/kubectl/kubectl-commands#convert) 커맨드의 사용법을 참조하기를 바란다. - - diff --git a/content/ko/docs/tasks/administer-cluster/declare-network-policy.md b/content/ko/docs/tasks/administer-cluster/declare-network-policy.md new file mode 100644 index 0000000000000..58865f944343a --- /dev/null +++ b/content/ko/docs/tasks/administer-cluster/declare-network-policy.md @@ -0,0 +1,145 @@ +--- +title: 네트워크 폴리시(Network Policy) 선언하기 +min-kubernetes-server-version: v1.8 +content_type: task +--- + +이 문서는 사용자가 쿠버네티스 [네트워크폴리시 API](/ko/docs/concepts/services-networking/network-policies/)를 사용하여 파드(Pod)가 서로 통신하는 방법을 제어하는 네트워크 폴리시를 선언하는데 도움을 준다. + + +## {{% heading "prerequisites" %}} + + +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} + +네트워크 폴리시를 지원하는 네트워크 제공자를 구성하였는지 확인해야 한다. 다음과 같이 네트워크폴리시를 제공하는 많은 네트워크 제공자들이 있다. + +* [캘리코(Calico)](/ko/docs/tasks/administer-cluster/network-policy-provider/calico-network-policy/) +* [실리움(Cilium)](/ko/docs/tasks/administer-cluster/network-policy-provider/cilium-network-policy/) +* [Kube-router](/ko/docs/tasks/administer-cluster/network-policy-provider/kube-router-network-policy/) +* [로마나(Romana)](/ko/docs/tasks/administer-cluster/network-policy-provider/romana-network-policy/) +* [위브넷(Weave Net)](/ko/docs/tasks/administer-cluster/network-policy-provider/weave-network-policy/) + +{{< note >}} +위 목록은 추천순이나 선호도순이 아닌, 제품 이름의 알파벳 순으로 정렬되어 있다. 이 예제는 이러한 제공자 중 하나를 사용하는 쿠버네티스 클러스터에 유효하다. +{{< /note >}} + + + + +## `nginx` 디플로이먼트(Deployment)를 생성하고 서비스(Service)를 통해 노출하기 + +쿠버네티스 네트워크 폴리시가 어떻게 동작하는지 확인하기 위해서, `nginx` 디플로이먼트를 생성한다. + +```console +kubectl create deployment nginx --image=nginx +``` +```none +deployment.apps/nginx created +``` + +`nginx` 라는 이름의 서비스를 통해 디플로이먼트를 노출한다. + +```console +kubectl expose deployment nginx --port=80 +``` + +```none +service/nginx exposed +``` + +위 명령어들은 nginx 파드에 대한 디플로이먼트를 생성하고, `nginx` 라는 이름의 서비스를 통해 디플로이먼트를 노출한다. `nginx` 파드와 디플로이먼트는 `default` 네임스페이스(namespace)에 존재한다. + +```console +kubectl get svc,pod +``` + +```none +NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/kubernetes 10.100.0.1 443/TCP 46m +service/nginx 10.100.0.16 80/TCP 33s + +NAME READY STATUS RESTARTS AGE +pod/nginx-701339712-e0qfq 1/1 Running 0 35s +``` + +## 다른 파드에서 접근하여 서비스 테스트하기 + +사용자는 다른 파드에서 새 `nginx` 서비스에 접근할 수 있어야 한다. `default` 네임스페이스에 있는 다른 파드에서 `nginx` 서비스에 접근하기 위하여, busybox 컨테이너를 생성한다. + +```console +kubectl run busybox --rm -ti --image=busybox -- /bin/sh +``` + +사용자 쉘에서, 다음의 명령을 실행한다. + +```shell +wget --spider --timeout=1 nginx +``` + +```none +Connecting to nginx (10.100.0.16:80) +remote file exists +``` + +## `nginx` 서비스에 대해 접근 제한하기 + +`access: true` 레이블을 가지고 있는 파드만 `nginx` 서비스에 접근할 수 있도록 하기 위하여, 다음과 같은 네트워크폴리시 오브젝트를 생성한다. + +{{< codenew file="service/networking/nginx-policy.yaml" >}} + +네트워크폴리시 오브젝트의 이름은 유효한 +[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names#dns-subdomain-names)이어야 한다. + +{{< note >}} +네트워크폴리시는 정책이 적용되는 파드의 그룹을 선택하는 `podSelector` 를 포함한다. 사용자는 이 정책이 `app=nginx` 레이블을 갖는 파드를 선택하는 것을 볼 수 있다. 레이블은 `nginx` 디플로이먼트에 있는 파드에 자동으로 추가된다. 
빈 `podSelector` 는 네임스페이스의 모든 파드를 선택한다. +{{< /note >}} + +## 서비스에 정책 할당하기 + +kubectl을 사용하여 위 `nginx-policy.yaml` 파일로부터 네트워크폴리시를 생성한다. + +```console +kubectl apply -f https://k8s.io/examples/service/networking/nginx-policy.yaml +``` + +```none +networkpolicy.networking.k8s.io/access-nginx created +``` + +## access 레이블이 정의되지 않은 서비스에 접근 테스트 +올바른 레이블이 없는 파드에서 `nginx` 서비스에 접근하려 할 경우, 요청 타임 아웃이 발생한다. + +```console +kubectl run busybox --rm -ti --image=busybox -- /bin/sh +``` + +사용자 쉘에서, 다음의 명령을 실행한다. + +```shell +wget --spider --timeout=1 nginx +``` + +```none +Connecting to nginx (10.100.0.16:80) +wget: download timed out +``` + +## 접근 레이블을 정의하고 다시 테스트 + +사용자는 요청이 허용되도록 하기 위하여 올바른 레이블을 갖는 파드를 생성한다. + +```console +kubectl run busybox --rm -ti --labels="access=true" --image=busybox -- /bin/sh +``` + +사용자 쉘에서, 다음의 명령을 실행한다. + +```shell +wget --spider --timeout=1 nginx +``` + +```none +Connecting to nginx (10.100.0.16:80) +remote file exists +``` diff --git a/content/ko/docs/tasks/administer-cluster/dns-custom-nameservers.md b/content/ko/docs/tasks/administer-cluster/dns-custom-nameservers.md new file mode 100644 index 0000000000000..5af4ea94b0f45 --- /dev/null +++ b/content/ko/docs/tasks/administer-cluster/dns-custom-nameservers.md @@ -0,0 +1,261 @@ +--- +title: DNS 서비스 사용자 정의하기 +content_type: task +min-kubernetes-server-version: v1.12 +--- + + +이 페이지는 클러스터 안에서 사용자의 +DNS {{< glossary_tooltip text="파드(Pod)" term_id="pod" >}} 를 설정하고 +DNS 변환(DNS resolution) 절차를 사용자 정의하는 방법을 설명한다. + +## {{% heading "prerequisites" %}} + +{{< include "task-tutorial-prereqs.md" >}} + +클러스터는 CoreDNS 애드온을 구동하고 있어야 한다. +[CoreDNS로 이관하기](/ko/docs/tasks/administer-cluster/coredns/#coredns로-이관하기) +는 `kubeadm` 을 이용하여 `kube-dns` 로부터 이관하는 방법을 설명한다. + +{{% version-check %}} + + + +## 소개 + +DNS는 _애드온 관리자_ 인 [클러스터 애드온](http://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/README.md)을 +사용하여 자동으로 시작되는 쿠버네티스 +내장 서비스이다. + +쿠버네티스 v1.12 부터, CoreDNS는 kube-dns를 대체하여 권장되는 DNS 서버이다. 만약 사용자의 클러스터가 원래 kube-dns를 사용하였을 경우, +CoreDNS 대신 `kube-dns` 를 계속 사용할 수도 있다. + +{{< note >}} +CoreDNS와 kube-dns 서비스 모두 `metadata.name` 필드에 `kube-dns` 로 이름이 지정된다. +이를 통해, 기존의 `kube-dns` 서비스 이름을 사용하여 클러스터 내부의 주소를 확인하는 워크로드에 대한 상호 운용성이 증가된다. `kube-dns` 로 서비스 이름을 사용하면, 해당 DNS 공급자가 어떤 공통 이름으로 실행되고 있는지에 대한 구현 세부 정보를 추상화한다. +{{< /note >}} + +CoreDNS를 디플로이먼트(Deployment)로 실행하고 있을 경우, 일반적으로 고정 IP 주소를 갖는 쿠버네티스 서비스로 노출된다. +Kubelet 은 `--cluster-dns=` 플래그를 사용하여 DNS 확인자 정보를 각 컨테이너에 전달한다. + +DNS 이름에도 도메인이 필요하다. 사용자는 kubelet 에 있는 `--cluster-domain=` 플래그를 +통하여 로컬 도메인을 설정할 수 있다. + +DNS 서버는 정방향 조회(A 및 AAAA 레코드), 포트 조회(SRV 레코드), 역방향 IP 주소 조회(PTR 레코드) 등을 지원한다. +더 자세한 내용은 [서비스 및 파드용 DNS](/ko/docs/concepts/services-networking/dns-pod-service/)를 참고한다. + +만약 파드의 `dnsPolicy` 가 `default` 로 지정되어 있는 경우, +파드는 자신이 실행되는 노드의 이름 변환(name resolution) 구성을 상속한다. +파드의 DNS 변환도 노드와 동일하게 작동해야 한다. +그 외에는 [알려진 이슈](/docs/tasks/debug-application-cluster/dns-debugging-resolution/#known-issues)를 참고한다. + +만약 위와 같은 방식을 원하지 않거나, 파드를 위해 다른 DNS 설정이 필요한 경우, +사용자는 kubelet 의 `--resolv-conf` 플래그를 사용할 수 있다. +파드가 DNS를 상속받지 못하도록 하기 위해 이 플래그를 ""로 설정한다. +DNS 상속을 위해 `/etc/resolv.conf` 이외의 파일을 지정할 경우 유효한 파일 경로를 설정한다. + +## CoreDNS + +CoreDNS는 [dns 명세](https://github.com/kubernetes/dns/blob/master/docs/specification.md)를 준수하며 클러스터 DNS 역할을 할 수 있는, 범용적인 권한을 갖는 DNS 서버이다. + +### CoreDNS 컨피그맵(ConfigMap) 옵션 + +CoreDNS는 모듈형이자 플러그인이 가능한 DNS 서버이며, 각 플러그인들은 CoreDNS에 새로운 기능을 부가한다. +이는 CoreDNS 구성 파일인 [Corefile](https://coredns.io/2017/07/23/corefile-explained/)을 관리하여 구성할 수 있다. 
+클러스터 관리자는 CoreDNS Corefile에 대한 {{< glossary_tooltip text="컨피그맵" term_id="configmap" >}}을 수정하여 +해당 클러스터에 대한 DNS 서비스 검색 동작을 +변경할 수 있다. + +쿠버네티스에서 CoreDNS는 아래의 기본 Corefile 구성으로 설치된다. + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: coredns + namespace: kube-system +data: + Corefile: | + .:53 { + errors + health { + lameduck 5s + } + ready + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + prometheus :9153 + forward . /etc/resolv.conf + cache 30 + loop + reload + loadbalance + } +``` + +Corefile의 구성은 CoreDNS의 아래 [플러그인](https://coredns.io/plugins)을 포함한다. + +* [errors](https://coredns.io/plugins/errors/): 오류가 표준 출력(stdout)에 기록된다. +* [health](https://coredns.io/plugins/health/): CoreDNS의 상태(healthy)가 `http://localhost:8080/health` 에 기록된다. 이 확장 구문에서 `lameduck` 은 프로세스를 비정상 상태(unhealthy)로 만들고, 프로세스가 종료되기 전에 5초 동안 기다린다. +* [ready](https://coredns.io/plugins/ready/): 8181 포트의 HTTP 엔드포인트가, 모든 플러그인이 준비되었다는 신호를 보내면 200 OK 를 반환한다. +* [kubernetes](https://coredns.io/plugins/kubernetes/): CoreDNS가 쿠버네티스의 서비스 및 파드의 IP를 기반으로 DNS 쿼리에 대해 응답한다. 해당 플러그인에 대한 [세부 사항](https://coredns.io/plugins/kubernetes/)은 CoreDNS 웹사이트에서 확인할 수 있다. `ttl` 을 사용하면 응답에 대한 사용자 정의 TTL 을 지정할 수 있으며, 기본값은 5초이다. 허용되는 최소 TTL은 0초이며, 최대값은 3600초이다. 레코드가 캐싱되지 않도록 할 경우, TTL을 0으로 설정한다. + `pods insecure` 옵션은 _kube-dns_ 와의 하위 호환성을 위해 제공된다. `pods verified` 옵션을 사용하여, 일치하는 IP의 동일 네임스페이스(Namespace)에 파드가 존재하는 경우에만 A 레코드를 반환하게 할 수 있다. `pods disabled` 옵션은 파드 레코드를 사용하지 않을 경우 사용된다. +* [prometheus](https://coredns.io/plugins/metrics/): CoreDNS의 메트릭은 [프로메테우스](https://prometheus.io/) 형식(OpenMetrics 라고도 알려진)의 `http://localhost:9153/metrics` 에서 사용 가능하다. +* [forward](https://coredns.io/plugins/forward/): 쿠버네티스 클러스터 도메인에 없는 쿼리들은 모두 사전에 정의된 리졸버(/etc/resolv.conf)로 전달된다. +* [cache](https://coredns.io/plugins/cache/): 프론트 엔드 캐시를 활성화한다. +* [loop](https://coredns.io/plugins/loop/): 간단한 전달 루프(loop)를 감지하고, 루프가 발견되면 CoreDNS 프로세스를 중단(halt)한다. +* [reload](https://coredns.io/plugins/reload): 변경된 Corefile을 자동으로 다시 로드하도록 한다. 컨피그맵 설정을 변경한 후에 변경 사항이 적용되기 위하여 약 2분정도 소요된다. +* [loadbalance](https://coredns.io/plugins/loadbalance): 응답에 대하여 A, AAAA, MX 레코드의 순서를 무작위로 선정하는 라운드-로빈 DNS 로드밸런서이다. + +사용자는 컨피그맵을 변경하여 기본 CoreDNS 동작을 변경할 수 있다. + +### CoreDNS를 사용하는 스텁 도메인(Stub-domain)과 업스트림 네임서버(nameserver)의 설정 + +CoreDNS는 [포워드 플러그인](https://coredns.io/plugins/forward/)을 사용하여 스텁 도메인 및 업스트림 네임서버를 구성할 수 있다. + +#### 예시 +만약 클러스터 운영자가 10.150.0.1 에 위치한 [Consul](https://www.consul.io/) 도메인 서버를 가지고 있고, 모든 Consul 이름의 접미사가 .consul.local 인 경우, CoreDNS에서 이를 구성하기 위해 클러스터 관리자는 CoreDNS 컨피그맵에서 다음 구문을 생성한다. + +``` +consul.local:53 { + errors + cache 30 + forward . 10.150.0.1 + } +``` + +모든 비 클러스터의 DNS 조회가 172.16.0.1 의 특정 네임서버를 통과하도록 할 경우, `/etc/resolv.conf` 대신 `forward` 를 네임서버로 지정한다. + +``` +forward . 172.16.0.1 +``` + +기본 `Corefile` 구성에 따른 최종 컨피그맵은 다음과 같다. + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: coredns + namespace: kube-system +data: + Corefile: | + .:53 { + errors + health + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + } + prometheus :9153 + forward . 172.16.0.1 + cache 30 + loop + reload + loadbalance + } + consul.local:53 { + errors + cache 30 + forward . 10.150.0.1 + } +``` + +`Kubeadm` 툴은 kube-dns 컨피그맵에서 동일한 설정의 CoreDNS 컨피그맵으로의 +자동 변환을 지원한다. + +{{< note >}} +kube-dns는 스텁 도메인 및 네임서버(예: ns.foo.com)에 대한 FQDN을 허용하지만 CoreDNS에서는 이 기능을 지원하지 않는다. +변환 과정에서, 모든 FQDN 네임서버는 CoreDNS 설정에서 생략된다. 
+{{< /note >}} + +## kube-dns에 대응되는 CoreDNS 설정 + +CoreDNS는 kube-dns 이상의 기능을 지원한다. +`StubDomains` 과 `upstreamNameservers` 를 지원하도록 생성된 kube-dns의 컨피그맵은 CoreDNS의 `forward` 플러그인으로 변환된다. +마찬가지로, kube-dns의 `Federations` 플러그인은 CoreDNS의 `federation` 플러그인으로 변환된다. + +### 예시 + +kube-dns에 대한 이 컨피그맵 예제는 federations, stubDomains 및 upstreamNameservers를 지정한다. + +```yaml +apiVersion: v1 +data: + federations: | + {"foo" : "foo.feddomain.com"} + stubDomains: | + {"abc.com" : ["1.2.3.4"], "my.cluster.local" : ["2.3.4.5"]} + upstreamNameservers: | + ["8.8.8.8", "8.8.4.4"] +kind: ConfigMap +``` + +CoreDNS에서는 동등한 설정으로 Corefile을 생성한다. + +* federations 에 대응하는 설정: +``` +federation cluster.local { + foo foo.feddomain.com +} +``` + +* stubDomains 에 대응하는 설정: +```yaml +abc.com:53 { + errors + cache 30 + forward . 1.2.3.4 +} +my.cluster.local:53 { + errors + cache 30 + forward . 2.3.4.5 +} +``` + +기본 플러그인으로 구성된 완전한 Corefile. + +``` +.:53 { + errors + health + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + } + federation cluster.local { + foo foo.feddomain.com + } + prometheus :9153 + forward . 8.8.8.8 8.8.4.4 + cache 30 +} +abc.com:53 { + errors + cache 30 + forward . 1.2.3.4 +} +my.cluster.local:53 { + errors + cache 30 + forward . 2.3.4.5 +} +``` + +## CoreDNS로의 이관 + +kube-dns에서 CoreDNS로 이관하기 위하여, +kube-dns를 CoreDNS로 교체하여 적용하는 방법에 대한 상세 정보는 +[블로그 기사](https://coredns.io/2018/05/21/migration-from-kube-dns-to-coredns/)를 참고한다. + +또한 공식적인 CoreDNS [배포 스크립트](https://github.com/coredns/deployment/blob/master/kubernetes/deploy.sh)를 +사용하여 이관할 수도 있다. + + +## {{% heading "whatsnext" %}} + +- [DNS 변환 디버깅하기](/docs/tasks/debug-application-cluster/dns-debugging-resolution/) 읽기 diff --git a/content/ko/docs/tasks/administer-cluster/extended-resource-node.md b/content/ko/docs/tasks/administer-cluster/extended-resource-node.md new file mode 100644 index 0000000000000..c9aed072638b6 --- /dev/null +++ b/content/ko/docs/tasks/administer-cluster/extended-resource-node.md @@ -0,0 +1,206 @@ +--- +title: 노드에 대한 확장 리소스 알리기 +content_type: task +--- + + + + +이 페이지는 노드의 확장 리소스를 지정하는 방법을 보여준다. +확장 리소스를 통해 클러스터 관리자는 쿠버네티스에게 +알려지지 않은 노드-레벨 리소스를 알릴 수 있다. + + + + +## {{% heading "prerequisites" %}} + + +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} + + + + + + +## 노드의 이름을 확인한다 + +```shell +kubectl get nodes +``` + +이 연습에 사용할 노드 중 하나를 선택한다. + +## 노드 중 하나에 새로운 확장 리소스를 알린다 + +노드에서 새로운 확장 리소스를 알리려면, 쿠버네티스 API 서버에 +HTTP PATCH 요청을 보낸다. 예를 들어, 노드 중 하나에 4개의 동글(dongle)이 있다고 +가정한다. 다음은 노드에 4개의 동글 리소스를 알리는 PATCH 요청의 +예이다. + +```shell +PATCH /api/v1/nodes//status HTTP/1.1 +Accept: application/json +Content-Type: application/json-patch+json +Host: k8s-master:8080 + +[ + { + "op": "add", + "path": "/status/capacity/example.com~1dongle", + "value": "4" + } +] +``` + +참고로 쿠버네티스는 동글이 무엇인지 또는 동글이 무엇을 위한 것인지 알 필요가 없다. +위의 PATCH 요청은 노드에 동글이라고 하는 네 가지 항목이 있음을 +쿠버네티스에 알려준다. + +쿠버네티스 API 서버에 요청을 쉽게 보낼 수 있도록 프록시를 시작한다. + +```shell +kubectl proxy +``` + +다른 명령 창에서 HTTP PATCH 요청을 보낸다. +`` 을 노드의 이름으로 바꾼다. + +```shell +curl --header "Content-Type: application/json-patch+json" \ +--request PATCH \ +--data '[{"op": "add", "path": "/status/capacity/example.com~1dongle", "value": "4"}]' \ +http://localhost:8001/api/v1/nodes//status +``` + +{{< note >}} +이전 요청에서 `~1` 은 패치 경로의 / 문자에 대한 +인코딩이다. JSON-Patch의 작업 경로값은 JSON-Pointer로 +해석된다. 자세한 내용은 [IETF RFC 6901](https://tools.ietf.org/html/rfc6901)의 +섹션 3을 참고한다. +{{< /note >}} + +출력은 노드가 4개의 동글 용량을 가졌음을 나타낸다. 
+
+```
+"capacity": {
+  "cpu": "2",
+  "memory": "2049008Ki",
+  "example.com/dongle": "4",
+```
+
+노드의 정보를 확인한다.
+
+```
+kubectl describe node
+```
+
+다시 한 번, 출력에 동글 리소스가 표시된다.
+
+```yaml
+Capacity:
+  cpu: 2
+  memory: 2049008Ki
+  example.com/dongle: 4
+```
+
+이제, 애플리케이션 개발자는 특정 개수의 동글을 요청하는 파드를
+만들 수 있다. [컨테이너에 확장 리소스 할당하기](/docs/tasks/configure-pod-container/extended-resource/)를
+참고한다.
+
+## 토론
+
+확장 리소스는 메모리 및 CPU 리소스와 비슷하다. 예를 들어,
+노드에서 실행 중인 모든 컴포넌트가 공유할 특정 양의 메모리와 CPU가
+노드에 있는 것처럼, 노드에서 실행 중인 모든 컴포넌트가
+특정 동글을 공유할 수 있다. 또한 애플리케이션 개발자가
+특정 양의 메모리와 CPU를 요청하는 파드를 생성할 수 있는 것처럼, 특정
+동글을 요청하는 파드를 생성할 수 있다.
+
+확장 리소스는 쿠버네티스에게 불투명하다. 쿠버네티스는 그것들이
+무엇인지 전혀 모른다. 쿠버네티스는 노드에 특정 개수의 동글이
+있다는 것만 알고 있다. 확장 리소스는 정수로 알려야
+한다. 예를 들어, 노드는 4.5개의 동글이 아닌, 4개의 동글을 알릴 수 있다.
+
+### 스토리지 예제
+
+노드에 800GiB의 특별한 종류의 디스크 스토리지가 있다고 가정한다.
+example.com/special-storage와 같은 특별한 스토리지의 이름을 생성할 수 있다.
+그런 다음 특정 크기, 100GiB의 청크로 알릴 수 있다. 이 경우,
+노드에는 example.com/special-storage 유형의 리소스가 8개 있다고
+알린다.
+
+```yaml
+Capacity:
+ ...
+ example.com/special-storage: 8
+```
+
+이 특별한 스토리지에 대한 임의 요청을 허용하려면,
+1바이트 크기의 청크로 특별한 스토리지를 알릴 수 있다. 이 경우, example.com/special-storage 유형의
+800Gi 리소스를 알린다.
+
+```yaml
+Capacity:
+ ...
+ example.com/special-storage: 800Gi
+```
+
+그런 다음 컨테이너는 최대 800Gi의 임의 바이트 수의 특별한 스토리지를 요청할 수 있다.
+
+## 정리
+
+다음은 노드에서 동글 알림을 제거하는 PATCH 요청이다.
+
+```
+PATCH /api/v1/nodes//status HTTP/1.1
+Accept: application/json
+Content-Type: application/json-patch+json
+Host: k8s-master:8080
+
+[
+  {
+    "op": "remove",
+    "path": "/status/capacity/example.com~1dongle"
+  }
+]
+```
+
+쿠버네티스 API 서버에 요청을 쉽게 보낼 수 있도록 프록시를 시작한다.
+
+```shell
+kubectl proxy
+```
+
+다른 명령 창에서 HTTP PATCH 요청을 보낸다.
+``을 노드의 이름으로 바꾼다.
+
+```shell
+curl --header "Content-Type: application/json-patch+json" \
+--request PATCH \
+--data '[{"op": "remove", "path": "/status/capacity/example.com~1dongle"}]' \
+http://localhost:8001/api/v1/nodes//status
+```
+
+동글 알림이 제거되었는지 확인한다.
+
+```
+kubectl describe node | grep dongle
+```
+
+(출력이 보이지 않아야 함)
+
+
+
+## {{% heading "whatsnext" %}}
+
+
+### 애플리케이션 개발자를 위한 문서
+
+* [컨테이너에 확장 리소스 할당하기](/docs/tasks/configure-pod-container/extended-resource/)
+
+### 클러스터 관리자를 위한 문서
+
+* [네임스페이스에 대한 메모리의 최소 및 최대 제약 조건 구성](/ko/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace/)
+* [네임스페이스에 대한 CPU의 최소 및 최대 제약 조건 구성](/ko/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace/)
diff --git a/content/ko/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md b/content/ko/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md
index 0388e2542bc73..2cf5a8b6e5289 100644
--- a/content/ko/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md
+++ b/content/ko/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md
@@ -1,5 +1,5 @@
---
-title: Windows 노드 추가
+title: 윈도우 노드 추가
min-kubernetes-server-version: 1.17
content_type: tutorial
weight: 30
@@ -9,7 +9,7 @@ weight: 30

{{< feature-state for_k8s_version="v1.18" state="beta" >}}

-쿠버네티스를 사용하여 리눅스와 Windows 노드를 혼합하여 실행할 수 있으므로, 리눅스에서 실행되는 파드와 Windows에서 실행되는 파드를 혼합할 수 있다. 이 페이지는 Windows 노드를 클러스터에 등록하는 방법을 보여준다.
+쿠버네티스를 사용하여 리눅스와 윈도우 노드를 혼합하여 실행할 수 있으므로, 리눅스에서 실행되는 파드와 윈도우에서 실행되는 파드를 혼합할 수 있다. 이 페이지는 윈도우 노드를 클러스터에 등록하는 방법을 보여준다.



@@ -17,8 +17,8 @@ weight: 30
## {{% heading "prerequisites" %}}
{{< version-check >}}
-* Windows 컨테이너를 호스팅하는 Windows 노드를 구성하려면
-[Windows Server 2019 라이선스](https://www.microsoft.com/en-us/cloud-platform/windows-server-pricing) 이상이 필요하다.
+* 윈도우 컨테이너를 호스팅하는 윈도우 노드를 구성하려면 +[윈도우 서버 2019 라이선스](https://www.microsoft.com/en-us/cloud-platform/windows-server-pricing) 이상이 필요하다. VXLAN/오버레이 네트워킹을 사용하는 경우 [KB4489899](https://support.microsoft.com/help/4489899)도 설치되어 있어야 한다. * 컨트롤 플레인에 접근할 수 있는 리눅스 기반의 쿠버네티스 kubeadm 클러스터([kubeadm을 사용하여 단일 컨트롤 플레인 클러스터 생성](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) 참고)가 필요하다. @@ -29,15 +29,15 @@ VXLAN/오버레이 네트워킹을 사용하는 경우 [KB4489899](https://suppo ## {{% heading "objectives" %}} -* 클러스터에 Windows 노드 등록 -* 리눅스 및 Windows의 파드와 서비스가 서로 통신할 수 있도록 네트워킹 구성 +* 클러스터에 윈도우 노드 등록 +* 리눅스 및 윈도우의 파드와 서비스가 서로 통신할 수 있도록 네트워킹 구성 -## 시작하기: 클러스터에 Windows 노드 추가 +## 시작하기: 클러스터에 윈도우 노드 추가 ### 네트워킹 구성 @@ -75,7 +75,7 @@ VXLAN/오버레이 네트워킹을 사용하는 경우 [KB4489899](https://suppo } ``` - {{< note >}}리눅스의 플란넬이 Windows의 플란넬과 상호 운용되도록 하려면 VNI를 4096으로, 포트를 4789로 설정해야 한다. 이 필드들에 대한 설명은 [VXLAN 문서](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan)를 + {{< note >}}리눅스의 플란넬이 윈도우의 플란넬과 상호 운용되도록 하려면 VNI를 4096으로, 포트를 4789로 설정해야 한다. 이 필드들에 대한 설명은 [VXLAN 문서](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan)를 참고한다.{{< /note >}} {{< note >}}L2Bridge/Host-gateway 모드를 대신 사용하려면 `Type` 의 값을 `"host-gw"` 로 변경하고 `VNI` 와 `Port` 를 생략한다.{{< /note >}} @@ -102,9 +102,9 @@ VXLAN/오버레이 네트워킹을 사용하는 경우 [KB4489899](https://suppo kube-system kube-flannel-ds-54954 1/1 Running 0 1m ``` -1. Windows 플란넬 및 kube-proxy 데몬셋 추가 +1. 윈도우 플란넬 및 kube-proxy 데몬셋 추가 - 이제 Windows 호환 버전의 플란넬과 kube-proxy를 추가할 수 있다. 호환 가능한 + 이제 윈도우 호환 버전의 플란넬과 kube-proxy를 추가할 수 있다. 호환 가능한 kube-proxy 버전을 얻으려면, 이미지의 태그를 대체해야 한다. 다음의 예시는 쿠버네티스 {{< param "fullversion" >}}의 사용법을 보여주지만, 사용자의 배포에 맞게 버전을 조정해야 한다. @@ -118,7 +118,7 @@ VXLAN/오버레이 네트워킹을 사용하는 경우 [KB4489899](https://suppo {{< /note >}} {{< note >}} -Windows 노드에서 이더넷이 아닌 다른 인터페이스(예: "Ethernet0 2")를 사용하는 경우, flannel-host-gw.yml이나 flannel-overlay.yml 파일에서 다음 라인을 수정한다. +윈도우 노드에서 이더넷이 아닌 다른 인터페이스(예: "Ethernet0 2")를 사용하는 경우, flannel-host-gw.yml이나 flannel-overlay.yml 파일에서 다음 라인을 수정한다. ```powershell wins cli process run --path /k/flannel/setup.exe --args "--mode=overlay --interface=Ethernet" @@ -134,14 +134,14 @@ curl -L https://github.com/kubernetes-sigs/sig-windows-tools/releases/latest/dow -### Windows 워커 노드 조인(joining) +### 윈도우 워커 노드 조인(joining) {{< note >}} `Containers` 기능을 설치하고 도커를 설치해야 한다. -[Windows Server에 Docker Engine - Enterprise 설치](https://docs.docker.com/ee/docker-ee/windows/docker-ee/#install-docker-engine---enterprise)에서 설치에 대한 내용을 참고할 수 있다. +[윈도우 서버에 Docker Engine - Enterprise 설치](https://docs.docker.com/ee/docker-ee/windows/docker-ee/#install-docker-engine---enterprise)에서 설치에 대한 내용을 참고할 수 있다. {{< /note >}} {{< note >}} -Windows 섹션의 모든 코드 스니펫(snippet)은 Windows 워커 노드의 +윈도우 섹션의 모든 코드 스니펫(snippet)은 윈도우 워커 노드의 높은 권한(관리자)이 있는 PowerShell 환경에서 실행해야 한다. {{< /note >}} @@ -160,7 +160,7 @@ Windows 섹션의 모든 코드 스니펫(snippet)은 Windows 워커 노드의 #### 설치 확인 -이제 다음을 실행하여 클러스터에서 Windows 노드를 볼 수 있다. +이제 다음을 실행하여 클러스터에서 윈도우 노드를 볼 수 있다. 
```bash kubectl get nodes -o wide @@ -180,6 +180,6 @@ flannel 파드가 실행되면, 노드는 `Ready` 상태가 되고 워크로드 ## {{% heading "whatsnext" %}} -- [Windows kubeadm 노드 업그레이드](/ko/docs/tasks/administer-cluster/kubeadm/upgrading-windows-nodes) +- [윈도우 kubeadm 노드 업그레이드](/ko/docs/tasks/administer-cluster/kubeadm/upgrading-windows-nodes) diff --git a/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md b/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md index b6b1c878c6bc6..9e48ea900a657 100644 --- a/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md +++ b/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md @@ -242,7 +242,7 @@ min-kubernetes-server-version: 1.18 - CNI 제공자 플러그인을 수동으로 업그레이드한다. CNI(컨테이너 네트워크 인터페이스) 제공자는 자체 업그레이드 지침을 따를 수 있다. - [애드온](/docs/concepts/cluster-administration/addons/) 페이지에서 + [애드온](/ko/docs/concepts/cluster-administration/addons/) 페이지에서 사용하는 CNI 제공자를 찾고 추가 업그레이드 단계가 필요한지 여부를 확인한다. CNI 제공자가 데몬셋(DaemonSet)으로 실행되는 경우 추가 컨트롤 플레인 노드에는 이 단계가 필요하지 않다. diff --git a/content/ko/docs/tasks/administer-cluster/kubeadm/upgrading-windows-nodes.md b/content/ko/docs/tasks/administer-cluster/kubeadm/upgrading-windows-nodes.md index 779e6fe86a88c..66adcb6a9dff5 100644 --- a/content/ko/docs/tasks/administer-cluster/kubeadm/upgrading-windows-nodes.md +++ b/content/ko/docs/tasks/administer-cluster/kubeadm/upgrading-windows-nodes.md @@ -1,5 +1,5 @@ --- -title: Windows 노드 업그레이드 +title: 윈도우 노드 업그레이드 min-kubernetes-server-version: 1.17 content_type: task weight: 40 @@ -9,7 +9,7 @@ weight: 40 {{< feature-state for_k8s_version="v1.18" state="beta" >}} -이 페이지는 [kubeadm으로 생성된](/ko/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes) Windows 노드를 업그레이드하는 방법을 설명한다. +이 페이지는 [kubeadm으로 생성된](/ko/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes) 윈도우 노드를 업그레이드하는 방법을 설명한다. @@ -18,7 +18,7 @@ weight: 40 {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} * [남은 kubeadm 클러스터를 업그레이드하는 프로세스](/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade)에 -익숙해져야 한다. Windows 노드를 +익숙해져야 한다. 윈도우 노드를 업그레이드하기 전에 컨트롤 플레인 노드를 업그레이드해야 한다. @@ -30,7 +30,7 @@ weight: 40 ### kubeadm 업그레이드 -1. Windows 노드에서, kubeadm을 업그레이드한다. +1. 윈도우 노드에서, kubeadm을 업그레이드한다. ```powershell # replace {{< param "fullversion" >}} with your desired version @@ -56,7 +56,7 @@ weight: 40 ### kubelet 구성 업그레이드 -1. Windows 노드에서, 다음의 명령을 호출하여 새 kubelet 구성을 동기화한다. +1. 윈도우 노드에서, 다음의 명령을 호출하여 새 kubelet 구성을 동기화한다. ```powershell kubeadm upgrade node @@ -64,7 +64,7 @@ weight: 40 ### kubelet 업그레이드 -1. Windows 노드에서, kubelet을 업그레이드하고 다시 시작한다. +1. 윈도우 노드에서, kubelet을 업그레이드하고 다시 시작한다. 
```powershell stop-service kubelet diff --git a/content/ko/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace.md b/content/ko/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace.md index 494a09418c3b6..43160db786645 100644 --- a/content/ko/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace.md +++ b/content/ko/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace.md @@ -265,7 +265,4 @@ kubectl delete namespace constraints-cpu-example * [컨테이너와 파드 CPU 리소스 할당](/docs/tasks/configure-pod-container/assign-cpu-resource/) -* [파드에 대한 서비스 품질(QoS) 구성](/docs/tasks/configure-pod-container/quality-service-pod/) - - - +* [파드에 대한 서비스 품질(QoS) 구성](/ko/docs/tasks/configure-pod-container/quality-service-pod/) diff --git a/content/ko/docs/tasks/administer-cluster/manage-resources/cpu-default-namespace.md b/content/ko/docs/tasks/administer-cluster/manage-resources/cpu-default-namespace.md index 769f0bfb09bb7..ab77c226b0acf 100644 --- a/content/ko/docs/tasks/administer-cluster/manage-resources/cpu-default-namespace.md +++ b/content/ko/docs/tasks/administer-cluster/manage-resources/cpu-default-namespace.md @@ -188,6 +188,4 @@ kubectl delete namespace default-cpu-example * [컨테이너 및 파드 CPU 리소스 할당](/docs/tasks/configure-pod-container/assign-cpu-resource/) -* [파드에 대한 서비스 품질(QoS) 구성](/docs/tasks/configure-pod-container/quality-service-pod/) - - +* [파드에 대한 서비스 품질(QoS) 구성](/ko/docs/tasks/configure-pod-container/quality-service-pod/) diff --git a/content/ko/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace.md b/content/ko/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace.md index cf3cd826f6b40..19163fd7e7427 100644 --- a/content/ko/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace.md +++ b/content/ko/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace.md @@ -265,6 +265,4 @@ kubectl delete namespace constraints-mem-example * [컨테이너 및 파드 CPU 리소스 할당](/docs/tasks/configure-pod-container/assign-cpu-resource/) -* [파드에 대한 서비스 품질(QoS) 구성](/docs/tasks/configure-pod-container/quality-service-pod/) - - +* [파드에 대한 서비스 품질(QoS) 구성](/ko/docs/tasks/configure-pod-container/quality-service-pod/) diff --git a/content/ko/docs/tasks/administer-cluster/manage-resources/memory-default-namespace.md b/content/ko/docs/tasks/administer-cluster/manage-resources/memory-default-namespace.md index 7127a7f235f57..a74d492e5afe3 100644 --- a/content/ko/docs/tasks/administer-cluster/manage-resources/memory-default-namespace.md +++ b/content/ko/docs/tasks/administer-cluster/manage-resources/memory-default-namespace.md @@ -196,8 +196,4 @@ kubectl delete namespace default-mem-example * [컨테이너 및 파드 CPU 리소스 할당](/docs/tasks/configure-pod-container/assign-cpu-resource/) -* [파드에 대한 서비스 품질(QoS) 구성](/docs/tasks/configure-pod-container/quality-service-pod/) - - - - +* [파드에 대한 서비스 품질(QoS) 구성](/ko/docs/tasks/configure-pod-container/quality-service-pod/) diff --git a/content/ko/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace.md b/content/ko/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace.md index ce16eaeef1c0e..f17a387a5bc5b 100644 --- a/content/ko/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace.md +++ b/content/ko/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace.md @@ -172,6 +172,4 @@ kubectl delete namespace quota-mem-cpu-example * [컨테이너 및 파드 CPU 리소스 
할당](/docs/tasks/configure-pod-container/assign-cpu-resource/) -* [파드에 대한 서비스 품질(QoS) 구성](/docs/tasks/configure-pod-container/quality-service-pod/) - - +* [파드에 대한 서비스 품질(QoS) 구성](/ko/docs/tasks/configure-pod-container/quality-service-pod/) diff --git a/content/ko/docs/tasks/administer-cluster/manage-resources/quota-pod-namespace.md b/content/ko/docs/tasks/administer-cluster/manage-resources/quota-pod-namespace.md index 880444cefd35a..9c4d78742934b 100644 --- a/content/ko/docs/tasks/administer-cluster/manage-resources/quota-pod-namespace.md +++ b/content/ko/docs/tasks/administer-cluster/manage-resources/quota-pod-namespace.md @@ -133,11 +133,4 @@ kubectl delete namespace quota-pod-example * [컨테이너 및 파드 CPU 리소스 할당](/docs/tasks/configure-pod-container/assign-cpu-resource/) -* [파드에 대한 서비스 품질(QoS) 구성](/docs/tasks/configure-pod-container/quality-service-pod/) - - - - - - - +* [파드에 대한 서비스 품질(QoS) 구성](/ko/docs/tasks/configure-pod-container/quality-service-pod/) diff --git a/content/ko/docs/tasks/administer-cluster/network-policy-provider/cilium-network-policy.md b/content/ko/docs/tasks/administer-cluster/network-policy-provider/cilium-network-policy.md index 5435bcf67ada7..fed25bc169ae2 100644 --- a/content/ko/docs/tasks/administer-cluster/network-policy-provider/cilium-network-policy.md +++ b/content/ko/docs/tasks/administer-cluster/network-policy-provider/cilium-network-policy.md @@ -48,7 +48,7 @@ Minikube에서 실리움의 데몬셋 구성과 적절한 RBAC 설정을 포함 간단한 ``올인원`` YAML 파일로 배포할 수 있다. ```shell -kubectl create -f https://raw.githubusercontent.com/cilium/cilium/v1.6/install/kubernetes/quick-install.yaml +kubectl create -f https://raw.githubusercontent.com/cilium/cilium/v1.8/install/kubernetes/quick-install.yaml ``` ``` configmap/cilium-config created diff --git a/content/ko/docs/tasks/administer-cluster/network-policy-provider/romana-network-policy.md b/content/ko/docs/tasks/administer-cluster/network-policy-provider/romana-network-policy.md index 462e482bff60a..70f0ec1aae2ce 100644 --- a/content/ko/docs/tasks/administer-cluster/network-policy-provider/romana-network-policy.md +++ b/content/ko/docs/tasks/administer-cluster/network-policy-provider/romana-network-policy.md @@ -39,7 +39,3 @@ Kubeadm을 위한 [컨테이너화된 설치 안내서](https://github.com/roman 로마나를 설치한 후에는, 쿠버네티스 네트워크 폴리시를 시도하기 위해 [네트워크 폴리시 선언하기](/docs/tasks/administer-cluster/declare-network-policy/)를 따라 할 수 있다. - - - - diff --git a/content/ko/docs/tasks/configure-pod-container/_index.md b/content/ko/docs/tasks/configure-pod-container/_index.md index 560261ecad440..e43910867f3d0 100644 --- a/content/ko/docs/tasks/configure-pod-container/_index.md +++ b/content/ko/docs/tasks/configure-pod-container/_index.md @@ -1,4 +1,5 @@ --- title: "파드와 컨테이너 설정" +description: 파드와 컨테이너에 대한 공통 구성 태스크들을 수행한다. weight: 20 --- diff --git a/content/ko/docs/tasks/configure-pod-container/assign-memory-resource.md b/content/ko/docs/tasks/configure-pod-container/assign-memory-resource.md index 7083bdea675b1..980d71d9b5566 100644 --- a/content/ko/docs/tasks/configure-pod-container/assign-memory-resource.md +++ b/content/ko/docs/tasks/configure-pod-container/assign-memory-resource.md @@ -25,7 +25,7 @@ weight: 10 서비스 실행이 필요하다. 이미 실행중인 metrics-server가 있다면 다음 단계를 건너뛸 수 있다. -Minikube를 사용 중이라면, 다음 명령어를 실행해 metric-server를 +Minikube를 사용 중이라면, 다음 명령어를 실행해 metric-server를 활성화할 수 있다. ```shell @@ -53,7 +53,7 @@ v1beta1.metrics.k8s.io ## 네임스페이스 생성 -이 예제에서 생성할 자원과 클러스터 내 나머지를 분리하기 위해 +이 예제에서 생성할 자원과 클러스터 내 나머지를 분리하기 위해 네임스페이스를 생성한다. 
```shell @@ -113,7 +113,7 @@ kubectl top pod memory-demo --namespace=mem-example ``` 출력은 파드가 약 150 MiB 해당하는 약 162,900,000 바이트 메모리를 사용하는 것을 보여준다. -이는 파드의 100 MiB 요청 보다 많으나 +이는 파드의 100 MiB 요청 보다 많으나 파드의 200 MiB 상한보다는 적다. ``` @@ -245,7 +245,7 @@ kubectl delete pod memory-demo-2 --namespace=mem-example 이 예제에서는 메모리 요청량이 너무 커 클러스터 내 모든 노드의 용량을 초과하는 파드를 생성한다. 다음은 클러스터 내 모든 노드의 용량을 초과할 수 있는 1000 GiB 메모리 요청을 포함하는 -컨테이너를 갖는 +컨테이너를 갖는 파드의 구성 파일이다. {{< codenew file="pods/resource/memory-request-limit-3.yaml" >}} @@ -340,25 +340,20 @@ kubectl delete namespace mem-example * [CPU 리소스를 컨테이너와 파드에 할당](/docs/tasks/configure-pod-container/assign-cpu-resource/) -* [파드에 서비스 품질 설정](/docs/tasks/configure-pod-container/quality-service-pod/) +* [파드에 서비스 품질 설정](/ko/docs/tasks/configure-pod-container/quality-service-pod/) ### 클러스터 관리자들을 위한 -* [네임스페이스에 기본 메모리 요청량 및 상한을 구성](/docs/tasks/administer-cluster/memory-default-namespace/) +* [네임스페이스에 기본 메모리 요청량 및 상한을 구성](/ko/docs/tasks/administer-cluster/manage-resources/memory-default-namespace/) -* [네임스페이스에 기본 CPU 요청량 및 상한을 구성](/docs/tasks/administer-cluster/cpu-default-namespace/) - -* [네임스페이스에 최소 및 최대 메모리 제약 조건 구성](/docs/tasks/administer-cluster/memory-constraint-namespace/) - -* [네임스페이스에 최소 및 최대 CPU 제약 조건 구성](/docs/tasks/administer-cluster/cpu-constraint-namespace/) - -* [네임스페이스에 메모리 및 CPU 할당량 구성](/docs/tasks/administer-cluster/quota-memory-cpu-namespace/) - -* [네임스페이스에 파드 할당량 구성](/docs/tasks/administer-cluster/quota-pod-namespace/) - -* [API 오브젝트에 할당량 구성 ](/docs/tasks/administer-cluster/quota-api-object/) +* [네임스페이스에 기본 CPU 요청량 및 상한을 구성](/ko/docs/tasks/administer-cluster/manage-resources/cpu-default-namespace/) +* [네임스페이스에 최소 및 최대 메모리 제약 조건 구성](/ko/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace/) +* [네임스페이스에 최소 및 최대 CPU 제약 조건 구성](/ko/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace/) +* [네임스페이스에 메모리 및 CPU 할당량 구성](/ko/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace/) +* [네임스페이스에 파드 할당량 구성](/ko/docs/tasks/administer-cluster/manage-resources/quota-pod-namespace/) +* [API 오브젝트에 할당량 구성](/docs/tasks/administer-cluster/quota-api-object/) diff --git a/content/ko/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity.md b/content/ko/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity.md index bc1446946dc27..5ad8b72d52178 100644 --- a/content/ko/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity.md +++ b/content/ko/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity.md @@ -117,6 +117,5 @@ weight: 120 ## {{% heading "whatsnext" %}} -[노드 어피니티](/ko/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity)에 +[노드 어피니티](/ko/docs/concepts/scheduling-eviction/assign-pod-node/#노드-어피니티)에 대해 더 알아보기. - diff --git a/content/ko/docs/tasks/configure-pod-container/configure-pod-initialization.md b/content/ko/docs/tasks/configure-pod-container/configure-pod-initialization.md index cff2ae900dc72..d973b42f99d82 100644 --- a/content/ko/docs/tasks/configure-pod-container/configure-pod-initialization.md +++ b/content/ko/docs/tasks/configure-pod-container/configure-pod-initialization.md @@ -89,7 +89,3 @@ init-demo 파드 내 실행 중인 nginx 컨테이너의 셸을 실행한다. * [초기화 컨테이너](/ko/docs/concepts/workloads/pods/init-containers/)에 대해 배우기. * [볼륨](/ko/docs/concepts/storage/volumes/)에 대해 배우기. * [초기화 컨테이너 디버깅](/docs/tasks/debug-application-cluster/debug-init-containers/)에 대해 배우기. 
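+
+참고로, 아래는 이 태스크에서 다루는 init-demo 파드와 유사한 형태를 가정한 간단한 파드 명세 스케치이다.
+초기화 컨테이너가 공유 볼륨에 콘텐츠를 준비해 두면, 애플리케이션 컨테이너(nginx)가 같은 볼륨을
+마운트하여 이를 제공한다. 여기에 사용한 busybox 이미지와 다운로드 URL은 설명을 위해 가정한 값이다.
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: init-demo
+spec:
+  initContainers:
+  - name: install
+    image: busybox
+    # 공유 볼륨에 index.html 을 미리 받아 둔다
+    command: ["wget", "-O", "/work-dir/index.html", "http://info.cern.ch"]
+    volumeMounts:
+    - name: workdir
+      mountPath: /work-dir
+  containers:
+  - name: nginx
+    image: nginx
+    ports:
+    - containerPort: 80
+    volumeMounts:
+    - name: workdir
+      mountPath: /usr/share/nginx/html
+  volumes:
+  - name: workdir
+    emptyDir: {}
+```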
- - - - diff --git a/content/ko/docs/tasks/configure-pod-container/static-pod.md b/content/ko/docs/tasks/configure-pod-container/static-pod.md index a6ddeae8fb3f6..8eb1c0a68ff55 100644 --- a/content/ko/docs/tasks/configure-pod-container/static-pod.md +++ b/content/ko/docs/tasks/configure-pod-container/static-pod.md @@ -9,10 +9,10 @@ content_template: task -*스태틱 파드* 는 {{< glossary_tooltip text="API 서버" term_id="kube-apiserver" >}} -없이 특정 노드에 있는 kubelet 데몬에 의해 +*스태틱 파드* 는 {{< glossary_tooltip text="API 서버" term_id="kube-apiserver" >}} +없이 특정 노드에 있는 kubelet 데몬에 의해 직접 관리된다. -컨트롤 플레인에 의해 관리되는 파드(예를 들어 {{< glossary_tooltip text="디플로이먼트(Deployment)" term_id="deployment" >}})와는 달리, +컨트롤 플레인에 의해 관리되는 파드(예를 들어 {{< glossary_tooltip text="디플로이먼트(Deployment)" term_id="deployment" >}})와는 달리, kubelet 이 각각의 스태틱 파드를 감시한다. (만약 충돌이 날 경우 다시 구동한다.) @@ -20,13 +20,13 @@ kubelet 이 각각의 스태틱 파드를 감시한다. Kubelet 은 각각의 스태틱 파드에 대하여 쿠버네티스 API 서버에서 {{< glossary_tooltip text="미러 파드(mirror pod)" term_id="mirror-pod" >}}를 생성하려고 자동으로 시도한다. -즉, 노드에서 구동되는 파드는 API 서버에 의해서 볼 수 있지만, +즉, 노드에서 구동되는 파드는 API 서버에 의해서 볼 수 있지만, API 서버에서 제어될 수는 없다. {{< note >}} -만약 클러스터로 구성된 쿠버네티스를 구동하고 있고, 스태틱 파드를 사용하여 +만약 클러스터로 구성된 쿠버네티스를 구동하고 있고, 스태틱 파드를 사용하여 모든 노드에서 파드를 구동하고 있다면, -스태틱 파드를 사용하는 대신 {{< glossary_tooltip text="데몬셋(DaemonSet)" term_id="daemonset" >}} +스태틱 파드를 사용하는 대신 {{< glossary_tooltip text="데몬셋(DaemonSet)" term_id="daemonset" >}} 을 사용하는 것이 바람직하다. {{< /note >}} @@ -37,7 +37,7 @@ API 서버에서 제어될 수는 없다. {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -이 페이지는 파드를 실행하기 위해 {{< glossary_tooltip term_id="docker" >}}를 사용하며, +이 페이지는 파드를 실행하기 위해 {{< glossary_tooltip term_id="docker" >}}를 사용하며, 노드에서 Fedora 운영 체제를 구동하고 있다고 가정한다. 다른 배포판이나 쿠버네티스 설치 지침과는 다소 상이할 수 있다. @@ -49,12 +49,12 @@ API 서버에서 제어될 수는 없다. ## 스태틱 파드 생성하기 {#static-pod-creation} -[파일 시스템이 호스팅 하는 구성 파일](/ko/docs/tasks/configure-pod-container/static-pod/#configuration-files)이나 [웹이 호스팅 하는 구성 파일](/ko/docs/tasks/configure-pod-container/static-pod/#pods-created-via-http)을 사용하여 스태틱 파드를 구성할 수 있다. +[파일 시스템이 호스팅하는 구성 파일](/ko/docs/tasks/configure-pod-container/static-pod/#configuration-files)이나 [웹이 호스팅하는 구성 파일](/ko/docs/tasks/configure-pod-container/static-pod/#pods-created-via-http)을 사용하여 스태틱 파드를 구성할 수 있다. ### 파일시스템이 호스팅 하는 스태틱 파드 매니페스트 {#configuration-files} -매니페스트는 특정 디렉토리에 있는 JSON 이나 YAML 형식의 표준 파드 정의이다. [kubelet 구성 파일](/docs/tasks/administer-cluster/kubelet-config-file)의 `staticPodPath: ` 필드를 사용하자. 이 디렉토리를 정기적으로 스캔하여, 디렉토리 안의 YAML/JSON 파일이 생성되거나 삭제되었을 때 스태틱 파드를 생성하거나 삭제한다. -Kubelet 이 특정 디렉토리를 스캔할 때 점(.)으로 시작하는 단어를 무시한다는 점을 유의하자. +매니페스트는 특정 디렉터리에 있는 JSON 이나 YAML 형식의 표준 파드 정의이다. [kubelet 구성 파일](/docs/tasks/administer-cluster/kubelet-config-file)의 `staticPodPath: ` 필드를 사용하자. 이 디렉터리를 정기적으로 스캔하여, 디렉터리 안의 YAML/JSON 파일이 생성되거나 삭제되었을 때 스태틱 파드를 생성하거나 삭제한다. +Kubelet 이 특정 디렉터리를 스캔할 때 점(.)으로 시작하는 단어를 무시한다는 점을 유의하자. 예를 들어, 다음은 스태틱 파드로 간단한 웹 서버를 구동하는 방법을 보여준다. @@ -64,7 +64,7 @@ Kubelet 이 특정 디렉토리를 스캔할 때 점(.)으로 시작하는 단 ssh my-node1 ``` -2. `/etc/kubelet.d` 와 같은 디렉토리를 선택하고 웹 서버 파드의 정의를 해당 위치에, 예를 들어 `/etc/kubelet.d/static-web.yaml` 에 배치한다. +2. `/etc/kubelet.d` 와 같은 디렉터리를 선택하고 웹 서버 파드의 정의를 해당 위치에, 예를 들어 `/etc/kubelet.d/static-web.yaml` 에 배치한다. ```shell # kubelet 이 동작하고 있는 노드에서 이 명령을 수행한다. @@ -87,7 +87,7 @@ Kubelet 이 특정 디렉토리를 스캔할 때 점(.)으로 시작하는 단 EOF ``` -3. 노드에서 kubelet 실행 시에 `--pod-manifest-path=/etc/kubelet.d/` 와 같이 인자를 제공하여 해당 디렉토리를 사용하도록 구성한다. Fedora 의 경우 이 줄을 포함하기 위하여 `/etc/kubernetes/kubelet` 파일을 다음과 같이 수정한다. +3. 
노드에서 kubelet 실행 시에 `--pod-manifest-path=/etc/kubelet.d/` 와 같이 인자를 제공하여 해당 디렉터리를 사용하도록 구성한다. Fedora 의 경우 이 줄을 포함하기 위하여 `/etc/kubernetes/kubelet` 파일을 다음과 같이 수정한다. ``` KUBELET_ARGS="--cluster-dns=10.254.0.10 --cluster-domain=kube.local --pod-manifest-path=/etc/kubelet.d/" @@ -103,10 +103,10 @@ Kubelet 이 특정 디렉토리를 스캔할 때 점(.)으로 시작하는 단 ### 웹이 호스팅 하는 스태틱 파드 매니페스트 {#pods-created-via-http} -Kubelet은 `--manifest-url=` 의 인수로 지정된 파일을 주기적으로 다운로드하여 +Kubelet은 `--manifest-url=` 의 인수로 지정된 파일을 주기적으로 다운로드하여 해당 파일을 파드의 정의가 포함된 JSON/YAML 파일로 해석한다. -[파일시스템이 호스팅 하는 매니페스트](#configuration-files) 의 작동 방식과 -유사하게 kubelet은 스케줄에 맞춰 매니페스트 파일을 다시 가져온다. 스태틱 파드의 목록에 +[파일시스템이 호스팅 하는 매니페스트](#configuration-files) 의 작동 방식과 +유사하게 kubelet은 스케줄에 맞춰 매니페스트 파일을 다시 가져온다. 스태틱 파드의 목록에 변경된 부분이 있을 경우, kubelet 은 이를 적용한다. 이 방법을 사용하기 위하여 다음을 수행한다. @@ -130,7 +130,7 @@ Kubelet은 `--manifest-url=` 의 인수로 지정된 파일을 주기적으 protocol: TCP ``` -2. 선택한 노드에서 `--manifest-url=` 을 실행하여 웹 메니페스트를 사용하도록 kubelet을 구성한다. Fedora 의 경우 이 줄을 포함하기 위하여 `/etc/kubernetes/kubelet` 파일을 수정한다. +2. 선택한 노드에서 `--manifest-url=` 을 실행하여 웹 메니페스트를 사용하도록 kubelet을 구성한다. Fedora 의 경우 이 줄을 포함하기 위하여 `/etc/kubernetes/kubelet` 파일을 수정한다. ``` KUBELET_ARGS="--cluster-dns=10.254.0.10 --cluster-domain=kube.local --manifest-url=" @@ -145,7 +145,7 @@ Kubelet은 `--manifest-url=` 의 인수로 지정된 파일을 주기적으 ## 스태틱 파드 행동 관찰하기 {#behavior-of-static-pods} -Kubelet 을 시작하면, 정의된 모든 스태틱 파드가 자동으로 시작된다. +Kubelet 을 시작하면, 정의된 모든 스태틱 파드가 자동으로 시작된다. 스태틱 파드를 정의하고, kubelet을 재시작했으므로, 새로운 스태틱 파드가 이미 실행 중이어야 한다. @@ -174,15 +174,15 @@ static-web-my-node1 1/1 Running 0 2m {{< note >}} Kubelet에 API 서버에서 미러 파드를 생성할 수 있는 권한이 있는지 미리 확인해야 한다. 그렇지 않을 경우 API 서버에 의해서 생성 요청이 거부된다. -[파드시큐리티폴리시(PodSecurityPolicy)](/docs/concepts/policy/pod-security-policy/) 에 대해 보기. +[파드시큐리티폴리시(PodSecurityPolicy)](/ko/docs/concepts/policy/pod-security-policy/) 에 대해 보기. {{< /note >}} -스태틱 파드에 있는 {{< glossary_tooltip term_id="label" text="레이블" >}} 은 -미러 파드로 전파된다. {{< glossary_tooltip term_id="selector" text="셀렉터" >}} 등을 +스태틱 파드에 있는 {{< glossary_tooltip term_id="label" text="레이블" >}} 은 +미러 파드로 전파된다. {{< glossary_tooltip term_id="selector" text="셀렉터" >}} 등을 통하여 이러한 레이블을 사용할 수 있다. -만약 API 서버로부터 미러 파드를 지우기 위하여 `kubectl` 을 사용하려 해도, +만약 API 서버로부터 미러 파드를 지우기 위하여 `kubectl` 을 사용하려 해도, kubelet 은 스태틱 파드를 지우지 _않는다._ ```shell @@ -200,9 +200,9 @@ NAME READY STATUS RESTARTS AGE static-web-my-node1 1/1 Running 0 12s ``` -kubelet 이 구동 중인 노드로 돌아가서 도커 컨테이너를 수동으로 +kubelet 이 구동 중인 노드로 돌아가서 도커 컨테이너를 수동으로 중지할 수 있다. -일정 시간이 지나면, kubelet이 파드를 자동으로 인식하고 다시 시작하는 +일정 시간이 지나면, kubelet이 파드를 자동으로 인식하고 다시 시작하는 것을 볼 수 있다. ```shell @@ -218,7 +218,7 @@ CONTAINER ID IMAGE COMMAND CREATED ... ## 스태틱 파드의 동적 추가 및 제거 -실행 중인 kubelet 은 주기적으로, 설정된 디렉토리(예제에서는 `/etc/kubelet.d`)에서 변경 사항을 스캔하고, 이 디렉토리에 새로운 파일이 생성되거나 삭제될 경우, 파드를 생성/삭제 한다. +실행 중인 kubelet 은 주기적으로, 설정된 디렉터리(예제에서는 `/etc/kubelet.d`)에서 변경 사항을 스캔하고, 이 디렉터리에 새로운 파일이 생성되거나 삭제될 경우, 파드를 생성/삭제 한다. ```shell # 예제를 수행하는 사용자가 파일시스템이 호스팅하는 스태틱 파드 설정을 사용한다고 가정한다. @@ -227,7 +227,7 @@ CONTAINER ID IMAGE COMMAND CREATED ... mv /etc/kubelet.d/static-web.yaml /tmp sleep 20 docker ps -# 구동 중인 nginx 컨테이너가 없는 것을 확인한다. +# 구동 중인 nginx 컨테이너가 없는 것을 확인한다. 
mv /tmp/static-web.yaml /etc/kubelet.d/ sleep 20 docker ps diff --git a/content/ko/docs/tasks/debug-application-cluster/_index.md b/content/ko/docs/tasks/debug-application-cluster/_index.md index 0613fed1ef989..d0bda0ea0f05c 100755 --- a/content/ko/docs/tasks/debug-application-cluster/_index.md +++ b/content/ko/docs/tasks/debug-application-cluster/_index.md @@ -1,5 +1,6 @@ --- title: "모니터링, 로깅, 그리고 디버깅" +description: 모니터링 및 로깅을 설정하여 클러스터 문제를 해결하거나, 컨테이너화된 애플리케이션을 디버깅한다. weight: 80 --- diff --git a/content/ko/docs/tasks/debug-application-cluster/debug-init-containers.md b/content/ko/docs/tasks/debug-application-cluster/debug-init-containers.md new file mode 100644 index 0000000000000..dc774b9ce365b --- /dev/null +++ b/content/ko/docs/tasks/debug-application-cluster/debug-init-containers.md @@ -0,0 +1,125 @@ +--- +title: 초기화 컨테이너(Init Containers) 디버그하기 +content_type: task +--- + + + +이 페이지는 초기화 컨테이너의 실행과 관련된 문제를 +조사하는 방법에 대해 보여준다. 아래 예제의 커맨드 라인은 파드(Pod)를 `` 으로, +초기화 컨테이너를 `` 과 +`` 로 표시한다. + + + +## {{% heading "prerequisites" %}} + + +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} + +* 사용자는 [초기화 컨테이너](/ko/docs/concepts/workloads/pods/init-containers/)의 + 기본 사항에 익숙해야 한다. +* 사용자는 [초기화 컨테이너를 구성](/ko/docs/tasks/configure-pod-container/configure-pod-initialization/#초기화-컨테이너를-갖는-파드-생성)해야 한다. + + + + + +## 초기화 컨테이너의 상태 체크하기 + +사용자 파드의 상태를 표시한다. + +```shell +kubectl get pod +``` + +예를 들어, `Init:1/2` 상태는 두 개의 초기화 컨테이너 중 +하나가 성공적으로 완료되었음을 나타낸다. + +``` +NAME READY STATUS RESTARTS AGE + 0/1 Init:1/2 0 7s +``` + +상태값과 그 의미에 대한 추가 예제는 +[파드 상태 이해하기](#파드의-상태-이해하기)를 참조한다. + +## 초기화 컨테이너에 대한 상세 정보 조회하기 + +초기화 컨테이너의 실행에 대한 상세 정보를 확인한다. + +```shell +kubectl describe pod +``` + +예를 들어, 2개의 초기화 컨테이너가 있는 파드는 다음과 같이 표시될 수 있다. + +``` +Init Containers: + : + Container ID: ... + ... + State: Terminated + Reason: Completed + Exit Code: 0 + Started: ... + Finished: ... + Ready: True + Restart Count: 0 + ... + : + Container ID: ... + ... + State: Waiting + Reason: CrashLoopBackOff + Last State: Terminated + Reason: Error + Exit Code: 1 + Started: ... + Finished: ... + Ready: False + Restart Count: 3 + ... +``` + +파드 스펙의 `status.initContainerStatuses` 필드를 읽어서 +프로그래밍 방식으로 초기화 컨테이너의 상태를 조회할 수도 있다. + + +```shell +kubectl get pod nginx --template '{{.status.initContainerStatuses}}' +``` + + +이 명령은 원시 JSON 방식으로 위와 동일한 정보를 반환한다. + +## 초기화 컨테이너의 로그 조회하기 + +초기화 컨테이너의 로그를 확인하기 위해 +파드의 이름과 초기화 컨테이너의 이름을 같이 전달한다. + +```shell +kubectl logs -c +``` + +셸 스크립트를 실행하는 초기화 컨테이너는, 초기화 컨테이너가 +실행될 때 명령어를 출력한다. 예를 들어, 스크립트의 시작 부분에 +`set -x` 를 추가하고 실행하여 Bash에서 명령어를 출력할 수 있도록 수행할 수 있다. + + + + + +## 파드의 상태 이해하기 + +`Init:` 으로 시작하는 파드 상태는 초기화 컨테이너의 +실행 상태를 요약한다. 아래 표는 초기화 컨테이너를 디버깅하는 +동안 사용자가 확인할 수 있는 몇 가지 상태값의 예이다. + +상태 | 의미 +------ | ------- +`Init:N/M` | 파드가 `M` 개의 초기화 컨테이너를 갖고 있으며, 현재까지 `N` 개가 완료. +`Init:Error` | 초기화 컨테이너 실행 실패. +`Init:CrashLoopBackOff` | 초기화 컨테이너가 반복적으로 실행 실패. +`Pending` | 파드가 아직 초기화 컨테이너를 실행하지 않음. +`PodInitializing` or `Running` | 파드가 이미 초기화 컨테이너 실행을 완료. diff --git a/content/ko/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md b/content/ko/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md index 246e6af3bba8d..3c8df08ede363 100644 --- a/content/ko/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md +++ b/content/ko/docs/tasks/debug-application-cluster/determine-reason-pod-failure.md @@ -1,19 +1,19 @@ --- title: 파드 실패의 원인 검증하기 -content_template: templates/task +content_type: task --- -이 페이지는 컨테이너 종료 메시지를 읽고 쓰는 +이 페이지는 컨테이너 종료 메시지를 읽고 쓰는 방법을 보여준다. 
-종료 메시지는 컨테이너가 치명적인 이벤트에 대한 정보를, +종료 메시지는 컨테이너가 치명적인 이벤트에 대한 정보를, 대시보드나 모니터링 소프트웨어 도구와 같이 -쉽게 조회 및 표시할 수 있는 위치에 +쉽게 조회 및 표시할 수 있는 위치에 기록하는 방법을 제공한다. -대부분의 경우에 종료 메시지에 넣는 정보는 -일반 +대부분의 경우에 종료 메시지에 넣는 정보는 +일반 [쿠버네티스 로그](/ko/docs/concepts/cluster-administration/logging/)에도 쓰여져야 한다. @@ -32,7 +32,7 @@ content_template: templates/task ## 종료 메시지 읽기 및 쓰기 이 예제에서는, 하나의 컨테이너를 실행하는 파드를 생성한다. -하단의 설정 파일은 컨테이너가 시작될 때 수행하는 +하단의 설정 파일은 컨테이너가 시작될 때 수행하는 명령어를 지정한다. {{< codenew file="debug/termination.yaml" >}} @@ -41,8 +41,8 @@ content_template: templates/task kubectl apply -f https://k8s.io/examples/debug/termination.yaml - YAML 파일에 있는 `cmd` 와 `args` 필드에서 컨테이너가 10초 간 잠든 뒤에 - "Sleep expired" 문자열을 `/dev/termination-log` 파일에 기록하는 + YAML 파일에 있는 `cmd` 와 `args` 필드에서 컨테이너가 10초 간 잠든 뒤에 + "Sleep expired" 문자열을 `/dev/termination-log` 파일에 기록하는 것을 확인할 수 있다. 컨테이너는 "Sleep expired" 메시지를 기록한 후에 종료된다. @@ -70,25 +70,25 @@ content_template: templates/task Sleep expired ... -1. 종료 메시지만을 포함하는 출력 결과를 보기 +1. 종료 메시지만을 포함하는 출력 결과를 보기 위해서는 Go 템플릿을 사용한다. kubectl get pod termination-demo -o go-template="{{range .status.containerStatuses}}{{.lastState.terminated.message}}{{end}}" ## 종료 메시지 사용자 정의하기 -쿠버네티스는 컨테이너의 `terminationMessagePath` 필드에 지정된 -종료 메시지 파일에서 종료 메시지를 검색하며, 이 필드의 기본값은 -`/dev/termination-log` 이다. 이 필드를 사용자 정의 함으로써 -쿠버네티스가 종료 메시지를 검색할 때 다른 파일을 사용하도록 조정할 수 있다. +쿠버네티스는 컨테이너의 `terminationMessagePath` 필드에 지정된 +종료 메시지 파일에서 종료 메시지를 검색하며, 이 필드의 기본값은 +`/dev/termination-log` 이다. 이 필드를 사용자 정의 함으로써 +쿠버네티스가 종료 메시지를 검색할 때 다른 파일을 사용하도록 조정할 수 있다. 쿠버네티스는 지정된 파일의 내용을 사용하여 컨테이너의 성공 및 실패에 대한 상태 메시지를 채운다. 종료 메시지는 assertion failure 메세지처럼 간결한 최종 상태로 생성된다. -kubelet은 4096 바이트보다 긴 메시지를 자른다. 모든 컨테이너의 총 메시지 길이는 -12KiB로 제한된다. 기본 종료 메시지 경로는 `/dev/termination-log`이다. +kubelet은 4096 바이트보다 긴 메시지를 자른다. 모든 컨테이너의 총 메시지 길이는 +12KiB로 제한된다. 기본 종료 메시지 경로는 `/dev/termination-log`이다. 파드가 시작된 후에는 종료 메시지 경로를 설정할 수 없다. -다음의 예제에서 컨테이너는, 쿠버네티스가 조회할 수 있도록 +다음의 예제에서 컨테이너는, 쿠버네티스가 조회할 수 있도록 `/tmp/my-log` 파일에 종료 메시지를 기록한다. ```yaml @@ -103,12 +103,12 @@ spec: terminationMessagePath: "/tmp/my-log" ``` -또한 사용자는 추가적인 사용자 정의를 위해 컨테이너의 `terminationMessagePolicy` -필드를 설정할 수 있다. 이 필드의 기본 값은 `File` 이며, -이는 오직 종료 메시지 파일에서만 종료 메시지가 조회되는 것을 의미한다. -`terminationMessagePolicy` 필드의 값을 "`FallbackToLogsOnError` 으로 -설정함으로써, 종료 메시지 파일이 비어 있고 컨테이너가 오류와 함께 종료 되었을 경우 -쿠버네티스가 컨테이너 로그 출력의 마지막 청크를 사용하도록 지시할 수 있다. +또한 사용자는 추가적인 사용자 정의를 위해 컨테이너의 `terminationMessagePolicy` +필드를 설정할 수 있다. 이 필드의 기본 값은 `File` 이며, +이는 오직 종료 메시지 파일에서만 종료 메시지가 조회되는 것을 의미한다. +`terminationMessagePolicy` 필드의 값을 "`FallbackToLogsOnError` 으로 +설정함으로써, 종료 메시지 파일이 비어 있고 컨테이너가 오류와 함께 종료 되었을 경우 +쿠버네티스가 컨테이너 로그 출력의 마지막 청크를 사용하도록 지시할 수 있다. 로그 출력은 2048 바이트나 80 행 중 더 작은 값으로 제한된다. @@ -118,9 +118,5 @@ spec: * [컨테이너](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#container-v1-core) 에 있는 `terminationMessagePath` 에 대해 읽어보기. -* [로그 검색](/docs/concepts/cluster-administration/logging/)에 대해 배워보기. +* [로그 검색](/ko/docs/concepts/cluster-administration/logging/)에 대해 배워보기. * [Go 템플릿](https://golang.org/pkg/text/template/)에 대해 배워보기. - - - - diff --git a/content/ko/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md b/content/ko/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md index d52a6127becf2..ee6991ef30636 100644 --- a/content/ko/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md +++ b/content/ko/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md @@ -5,10 +5,10 @@ content_type: concept -컨테이너 CPU 및 메모리 사용량과 같은 리소스 사용량 메트릭은 -쿠버네티스의 메트릭 API를 통해 사용할 수 있다. 
이 메트릭은 -`kubectl top` 커맨드 사용과 같이 사용자가 직접적으로 액세스하거나, -Horizontal Pod Autoscaler 같은 클러스터의 컨트롤러에서 결정을 내릴 때 사용될 수 있다. +컨테이너 CPU 및 메모리 사용량과 같은 리소스 사용량 메트릭은 +쿠버네티스의 메트릭 API를 통해 사용할 수 있다. 이 메트릭은 +`kubectl top` 커맨드 사용과 같이 사용자가 직접적으로 액세스하거나, +Horizontal Pod Autoscaler 같은 클러스터의 컨트롤러에서 결정을 내릴 때 사용될 수 있다. @@ -17,9 +17,9 @@ Horizontal Pod Autoscaler 같은 클러스터의 컨트롤러에서 결정을 ## 메트릭 API -메트릭 API를 통해 주어진 노드나 파드에서 현재 사용중인 -리소스의 양을 알 수 있다. 이 API는 메트릭 값을 저장하지 -않으므로 지정된 노드에서 10분 전에 사용된 리소스의 양을 +메트릭 API를 통해 주어진 노드나 파드에서 현재 사용중인 +리소스의 양을 알 수 있다. 이 API는 메트릭 값을 저장하지 +않으므로 지정된 노드에서 10분 전에 사용된 리소스의 양을 가져오는 것과 같은 일을 할 수는 없다. 이 API와 다른 API는 차이가 없다. @@ -27,7 +27,7 @@ Horizontal Pod Autoscaler 같은 클러스터의 컨트롤러에서 결정을 - 다른 쿠버네티스 API의 엔드포인트와 같이 `/apis/metrics.k8s.io/` 하위 경로에서 발견될 수 있다 - 동일한 보안, 확장성 및 신뢰성 보장을 제공한다 -[k8s.io/metrics](https://github.com/kubernetes/metrics/blob/master/pkg/apis/metrics/v1beta1/types.go) +[k8s.io/metrics](https://github.com/kubernetes/metrics/blob/master/pkg/apis/metrics/v1beta1/types.go) 리포지터리에서 이 API를 정의하고 있다. 여기에서 이 API에 대한 더 상세한 정보를 찾을 수 있다. {{< note >}} @@ -38,7 +38,7 @@ Horizontal Pod Autoscaler 같은 클러스터의 컨트롤러에서 결정을 ### CPU -CPU는 일정 기간 동안 [CPU 코어](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu)에서 평균 사용량으로 리포트된다. 이 값은 커널(리눅스와 윈도우 커널 모두)에서 제공하는 누적 CPU 카운터보다 높은 비율을 적용해서 얻는다. kubelet은 비율 계산에 사용할 윈도우를 선택한다. +CPU는 일정 기간 동안 [CPU 코어](/ko/docs/concepts/configuration/manage-resources-containers/#cpu의-의미)에서 평균 사용량으로 리포트된다. 이 값은 커널(리눅스와 윈도우 커널 모두)에서 제공하는 누적 CPU 카운터보다 높은 비율을 적용해서 얻는다. kubelet은 비율 계산에 사용할 윈도우를 선택한다. ### 메모리 @@ -47,15 +47,13 @@ CPU는 일정 기간 동안 [CPU 코어](https://kubernetes.io/docs/concepts/con ## 메트릭 서버 [메트릭 서버](https://github.com/kubernetes-incubator/metrics-server)는 클러스터 전역에서 리소스 사용량 데이터를 집계한다. -`kube-up.sh` 스크립트에 의해 생성된 클러스터에는 기본적으로 메트릭 서버가 -디플로이먼트 오브젝트로 배포된다. 만약 다른 쿠버네티스 설치 메커니즘을 사용한다면, 제공된 +`kube-up.sh` 스크립트에 의해 생성된 클러스터에는 기본적으로 메트릭 서버가 +디플로이먼트 오브젝트로 배포된다. 만약 다른 쿠버네티스 설치 메커니즘을 사용한다면, 제공된 [디플로이먼트 components.yaml](https://github.com/kubernetes-sigs/metrics-server/releases) 파일을 사용하여 메트릭 서버를 배포할 수 있다. 메트릭 서버는 각 노드에서 [Kubelet](/docs/admin/kubelet/)에 의해 노출된 Summary API에서 메트릭을 수집한다. -메트릭 서버는 [쿠버네티스 aggregator](/docs/concepts/api-extension/apiserver-aggregation/)를 +메트릭 서버는 [쿠버네티스 aggregator](/ko/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/)를 통해 메인 API 서버에 등록된다. [설계 문서](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/instrumentation/metrics-server.md)에서 메트릭 서버에 대해 자세하게 배울 수 있다. - - diff --git a/content/ko/docs/tasks/extend-kubectl/kubectl-plugins.md b/content/ko/docs/tasks/extend-kubectl/kubectl-plugins.md index 1a72dbf241811..d3b06d27034ba 100644 --- a/content/ko/docs/tasks/extend-kubectl/kubectl-plugins.md +++ b/content/ko/docs/tasks/extend-kubectl/kubectl-plugins.md @@ -1,6 +1,6 @@ --- title: 플러그인으로 kubectl 확장 -description: kubectl 플러그인을 사용하면, 새로운 하위 명령을 추가하여 kubectl 명령의 기능을 확장할 수 있다. +description: kubectl 플러그인을 작성하고 설치해서 kubectl을 확장한다. content_type: task --- diff --git a/content/ko/docs/tasks/inject-data-application/_index.md b/content/ko/docs/tasks/inject-data-application/_index.md index e7ae5375f4748..c5bddeca7e0c0 100644 --- a/content/ko/docs/tasks/inject-data-application/_index.md +++ b/content/ko/docs/tasks/inject-data-application/_index.md @@ -1,5 +1,5 @@ --- title: "애플리케이션에 데이터 주입하기" +description: 워크로드를 실행하는 파드에 대한 구성과 기타 데이터를 지정한다. 
weight: 30 - ---- \ No newline at end of file +--- diff --git a/content/ko/docs/tasks/inject-data-application/define-command-argument-container.md b/content/ko/docs/tasks/inject-data-application/define-command-argument-container.md index 8f821c7cf575e..68ee4726a5238 100644 --- a/content/ko/docs/tasks/inject-data-application/define-command-argument-container.md +++ b/content/ko/docs/tasks/inject-data-application/define-command-argument-container.md @@ -30,9 +30,9 @@ weight: 10 파일에 `args` 필드를 포함시킨다. 정의한 커맨드와 인자들은 파드가 생성되고 난 이후에는 변경될 수 없다. -구성 파일 안에서 정의하는 커맨드와 인자들은 컨테이너 이미지가 -제공하는 기본 커맨드와 인자들보다 우선시 된다. 만약 인자들을 -정의하고 커맨드를 정의하지 않는다면, 기본 커맨드가 새로운 인자와 +구성 파일 안에서 정의하는 커맨드와 인자들은 컨테이너 이미지가 +제공하는 기본 커맨드와 인자들보다 우선시 된다. 만약 인자들을 +정의하고 커맨드를 정의하지 않는다면, 기본 커맨드가 새로운 인자와 함께 사용된다. {{< note >}} @@ -103,7 +103,7 @@ args: ["$(MESSAGE)"] ## 셸 안에서 커맨드 실행하기 일부 경우들에서는 커맨드를 셸 안에서 실행해야할 필요가 있다. 예를 들어, 실행할 커맨드가 -서로 연결되어 있는 여러 개의 커맨드들로 구성되어 있거나, 셸 스크립트일 수도 있다. 셸 안에서 +서로 연결되어 있는 여러 개의 커맨드들로 구성되어 있거나, 셸 스크립트일 수도 있다. 셸 안에서 커맨드를 실행하려고 한다면, 이런 방식으로 감싸주면 된다. ```shell @@ -122,18 +122,18 @@ args: ["-c", "while true; do echo hello; sleep 10;done"] 기본 Entrypoint와 Cmd 값을 덮어쓰려고 한다면, 아래의 규칙들이 적용된다. -* 만약 컨테이너를 위한 `command` 값이나 `args` 값을 제공하지 않는다면, 도커 이미지 안에 +* 만약 컨테이너를 위한 `command` 값이나 `args` 값을 제공하지 않는다면, 도커 이미지 안에 제공되는 기본 값들이 사용된다. -* 만약 컨테이너를 위한 `command` 값을 제공하고, `args` 값을 제공하지 않는다면, -제공된 `command` 값만이 사용된다. 도커 이미지 안에 정의된 기본 EntryPoint 값과 기본 +* 만약 컨테이너를 위한 `command` 값을 제공하고, `args` 값을 제공하지 않는다면, +제공된 `command` 값만이 사용된다. 도커 이미지 안에 정의된 기본 EntryPoint 값과 기본 Cmd 값은 덮어쓰여진다. -* 만약 컨테이너를 위한 `args` 값만 제공한다면, 도커 이미지 안에 정의된 기본 EntryPoint +* 만약 컨테이너를 위한 `args` 값만 제공한다면, 도커 이미지 안에 정의된 기본 EntryPoint 값이 정의한 `args` 값들과 함께 실행된다. -* `command` 값과 `args` 값을 동시에 정의한다면, 도커 이미지 안에 정의된 기본 -EntryPoint 값과 기본 Cmd 값이 덮어쓰여진다. `command`가 `args` 값과 함께 +* `command` 값과 `args` 값을 동시에 정의한다면, 도커 이미지 안에 정의된 기본 +EntryPoint 값과 기본 Cmd 값이 덮어쓰여진다. `command`가 `args` 값과 함께 실행된다. 여기 몇 가지 예시들이 있다. @@ -154,7 +154,3 @@ EntryPoint 값과 기본 Cmd 값이 덮어쓰여진다. `command`가 `args` 값 * [파드와 컨테이너를 구성하는 방법](/ko/docs/tasks/)에 대해 더 알아본다. * [컨테이너 안에서 커맨드를 실행하는 방법](/docs/tasks/debug-application-cluster/get-shell-running-container/)에 대해 더 알아본다. * [컨테이너](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#container-v1-core)를 확인한다. - - - - diff --git a/content/ko/docs/tasks/manage-daemon/_index.md b/content/ko/docs/tasks/manage-daemon/_index.md index 58b87271c9754..1ff595ef61bd4 100644 --- a/content/ko/docs/tasks/manage-daemon/_index.md +++ b/content/ko/docs/tasks/manage-daemon/_index.md @@ -1,4 +1,5 @@ --- title: "클러스터 데몬 관리" +description: 롤링 업데이트 수행과 같은 데몬셋 관리를 위한 일반적인 작업을 수행한다. weight: 130 --- diff --git a/content/ko/docs/tasks/manage-daemon/rollback-daemon-set.md b/content/ko/docs/tasks/manage-daemon/rollback-daemon-set.md index 60aa4f4d1a069..0c79ae8d66aaa 100644 --- a/content/ko/docs/tasks/manage-daemon/rollback-daemon-set.md +++ b/content/ko/docs/tasks/manage-daemon/rollback-daemon-set.md @@ -2,27 +2,20 @@ title: 데몬셋(DaemonSet)에서 롤백 수행 content_type: task weight: 20 +min-kubernetes-server-version: 1.7 --- - - -이 페이지는 데몬셋에서 롤백을 수행하는 방법을 보여준다. - - +이 페이지는 {{< glossary_tooltip text="데몬셋" term_id="daemonset" >}}에서 롤백을 수행하는 방법을 보여준다. ## {{% heading "prerequisites" %}} +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -* 데몬셋 롤아웃 기록과 데몬셋 롤백 기능은 - 쿠버네티스 버전 1.7 이상의 `kubectl` 에서만 지원된다. -* [데몬셋에서 롤링 업데이트를 - 수행](/ko/docs/tasks/manage-daemon/update-daemon-set/)하는 방법을 알고 있어야 한다. - - - +[데몬셋에서 롤링 업데이트를 + 수행](/ko/docs/tasks/manage-daemon/update-daemon-set/)하는 방법을 이미 알고 있어야 한다. 
@@ -40,7 +33,7 @@ kubectl rollout history daemonset 이 명령은 데몬셋 리비전 목록을 반환한다. -```shell +``` daemonsets "" REVISION CHANGE-CAUSE 1 ... @@ -60,17 +53,17 @@ kubectl rollout history daemonset --revision=1 이 명령은 해당 리비전의 세부 사항을 반환한다. -```shell +``` daemonsets "" with revision #1 Pod Template: Labels: foo=bar Containers: app: - Image: ... - Port: ... - Environment: ... - Mounts: ... -Volumes: ... + Image: ... + Port: ... + Environment: ... + Mounts: ... +Volumes: ... ``` ### 2단계: 특정 리비전으로 롤백 @@ -82,16 +75,19 @@ kubectl rollout undo daemonset --to-revision= 성공하면, 명령은 다음을 반환한다. -```shell +``` daemonset "" rolled back ``` -`--to-revision` 플래그를 지정하지 않은 경우, 마지막 리비전이 선택된다. +{{< note >}} +`--to-revision` 플래그를 지정하지 않은 경우, kubectl은 가장 최신의 리비전을 선택한다. +{{< /note >}} ### 3단계: 데몬셋 롤백 진행 상황 확인 `kubectl rollout undo daemonset` 은 서버에 데몬셋 롤백을 시작하도록 -지시한다. 실제 롤백은 서버 측에서 비동기적으로 수행된다. +지시한다. 실제 롤백은 클러스터 {{< glossary_tooltip term_id="control-plane" text="컨트롤 플레인" >}} +내에서 비동기적으로 수행된다. 롤백 진행 상황을 보려면 다음의 명령을 수행한다. @@ -101,21 +97,17 @@ kubectl rollout status ds/ 롤백이 완료되면, 출력 결과는 다음과 비슷하다. -```shell +``` daemonset "" successfully rolled out ``` - - ## 데몬셋 리비전의 이해 이전 `kubectl rollout history` 단계에서, 데몬셋 리비전 목록을 -얻었다. 각 리비전은 `ControllerRevision` 이라는 리소스에 저장된다. -`ControllerRevision` 은 쿠버네티스 릴리스 1.7 이상에서만 사용할 수 있는 -리소스이다. +얻었다. 각 리비전은 ControllerRevision이라는 리소스에 저장된다. 각 리비전에 저장된 내용을 보려면, 데몬셋 리비전 원시 리소스를 찾는다. @@ -124,30 +116,29 @@ daemonset "" successfully rolled out kubectl get controllerrevision -l = ``` -이 명령은 `ControllerRevisions` 의 목록을 반환한다. +이 명령은 ControllerRevision의 목록을 반환한다. -```shell +``` NAME CONTROLLER REVISION AGE - DaemonSet/ 1 1h - DaemonSet/ 2 1h ``` -각 `ControllerRevision` 은 데몬셋 리비전의 어노테이션과 템플릿을 -저장한다. ControllerRevision 오브젝트의 이름은 유효한 -[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름)이어야 한다. +각 ControllerRevision은 데몬셋 리비전의 어노테이션과 템플릿을 +저장한다. -`kubectl rollout undo` 는 특정 `ControllerRevision` 을 가져와 데몬셋 -템플릿을 `ControllerRevision` 에 저장된 템플릿으로 바꾼다. +`kubectl rollout undo` 는 특정 ControllerRevision을 가져와 데몬셋 +템플릿을 ControllerRevision에 저장된 템플릿으로 바꾼다. `kubectl rollout undo` 는 `kubectl edit` 또는 `kubectl apply` 와 같은 다른 명령을 통해 데몬셋 템플릿을 이전 리비전으로 업데이트하는 것과 같다. {{< note >}} 데몬셋 리비전은 롤 포워드만 한다. 즉, 롤백이 -완료된 후, 롤백될 `ControllerRevision` 의 +완료된 후, 롤백될 ControllerRevision의 리비전 번호(`.revision` 필드)가 증가한다. 예를 들어, 시스템에 리비전 1과 2가 있고, 리비전 2에서 리비전 1으로 롤백하면, -`ControllerRevision` 은 `.revision: 1` 에서 `.revision: 3` 이 된다. +ControllerRevision은 `.revision: 1` 에서 `.revision: 3` 이 된다. {{< /note >}} ## 문제 해결 @@ -157,4 +148,3 @@ NAME CONTROLLER REVISION AGE - diff --git a/content/ko/docs/tasks/manage-gpus/scheduling-gpus.md b/content/ko/docs/tasks/manage-gpus/scheduling-gpus.md index 7ea6b51d93eee..78c5dc2cbdb49 100644 --- a/content/ko/docs/tasks/manage-gpus/scheduling-gpus.md +++ b/content/ko/docs/tasks/manage-gpus/scheduling-gpus.md @@ -1,6 +1,7 @@ --- content_type: concept title: GPU 스케줄링 +description: 클러스터의 노드별로 리소스로 사용할 GPU를 구성하고 스케줄링한다. --- @@ -140,7 +141,7 @@ Google은 GKE에서 NVIDIA GPU 사용에 대한 자체 [설명서](https://cloud 만약 클러스터의 노드들이 서로 다른 타입의 GPU를 가지고 있다면, 사용자는 파드를 적합한 노드에 스케줄 하기 위해서 -[노드 레이블과 노드 셀렉터](/docs/tasks/configure-pod-container/assign-pods-nodes/)를 사용할 수 있다. +[노드 레이블과 노드 셀렉터](/ko/docs/tasks/configure-pod-container/assign-pods-nodes/)를 사용할 수 있다. 예를 들면, @@ -215,5 +216,3 @@ spec: 이것은 파드가 사용자가 지정한 GPU 타입을 가진 노드에 스케줄 되도록 만든다. 
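+
+다음은 이러한 방식을 보여주는 간단한 예시 파드 명세이다. 여기에 사용된 파드 이름,
+컨테이너 이미지(k8s.gcr.io/cuda-vector-add:v0.1), 노드 레이블 값(accelerator: nvidia-tesla-p100)은
+설명을 위해 가정한 값이므로, 실제 클러스터의 노드 레이블과 사용 가능한 이미지에 맞게 바꾸어야 한다.
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: cuda-vector-add
+spec:
+  restartPolicy: OnFailure
+  containers:
+    - name: cuda-vector-add
+      image: "k8s.gcr.io/cuda-vector-add:v0.1"
+      resources:
+        limits:
+          nvidia.com/gpu: 1 # GPU 1개를 요청한다
+  nodeSelector:
+    accelerator: nvidia-tesla-p100 # 이 레이블이 붙은 노드에만 스케줄된다
+```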
- - diff --git a/content/ko/docs/tasks/manage-hugepages/scheduling-hugepages.md b/content/ko/docs/tasks/manage-hugepages/scheduling-hugepages.md index 9d4e4ca2b38bb..edb0edb08f96d 100644 --- a/content/ko/docs/tasks/manage-hugepages/scheduling-hugepages.md +++ b/content/ko/docs/tasks/manage-hugepages/scheduling-hugepages.md @@ -1,16 +1,14 @@ --- title: HugePages 관리 content_type: task +description: 클러스터에서 huge page를 스케줄할 수 있는 리소스로 구성하고 관리한다. --- {{< feature-state state="stable" >}} -쿠버네티스는 **GA** 기능으로 파드의 애플리케이션에 미리 할당된 -huge page의 할당과 사용을 지원한다. 이 페이지에서는 사용자가 -huge page를 사용하는 방법과 현재의 제약 사항에 대해 설명한다. - - +쿠버네티스는 파드의 애플리케이션에 미리 할당된 +huge page의 할당과 사용을 지원한다. 이 페이지에서는 사용자가 huge page를 사용하는 방법에 대해 설명한다. ## {{% heading "prerequisites" %}} @@ -118,11 +116,3 @@ glossary_tooltip text="kube-apiserver" term_id="kube-apiserver" >}} (`--feature-gates=HugePageStorageMediumSize=true`)의 `HugePageStorageMediumSize` [기능 게이트](/docs/reference/command-line-tools-reference/feature-gates/)를 사용하여 활성화할 수 있다. - -## 향후 버전 - -- NUMA 지역성(locality)은 서비스 품질(QoS)의 기능으로 보장할 예정이다. -- 리밋레인지(LimitRange)를 지원할 예정이다. - - - diff --git a/content/ko/docs/tasks/manage-kubernetes-objects/_index.md b/content/ko/docs/tasks/manage-kubernetes-objects/_index.md index 3c6566741c72e..ebb3c90272b1c 100644 --- a/content/ko/docs/tasks/manage-kubernetes-objects/_index.md +++ b/content/ko/docs/tasks/manage-kubernetes-objects/_index.md @@ -1,4 +1,5 @@ --- title: "쿠버네티스 오브젝트 관리" +description: 쿠버네티스 API와 상호 작용하기 위한 선언적이고 명령적인 패러다임 weight: 25 --- diff --git a/content/ko/docs/tasks/manage-kubernetes-objects/declarative-config.md b/content/ko/docs/tasks/manage-kubernetes-objects/declarative-config.md index 87469281c99d2..f3b15d3206092 100644 --- a/content/ko/docs/tasks/manage-kubernetes-objects/declarative-config.md +++ b/content/ko/docs/tasks/manage-kubernetes-objects/declarative-config.md @@ -16,7 +16,7 @@ weight: 10 ## {{% heading "prerequisites" %}} -[`kubectl`](/docs/tasks/tools/install-kubectl/)를 설치한다. +[`kubectl`](/ko/docs/tasks/tools/install-kubectl/)를 설치한다. {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} @@ -1007,5 +1007,3 @@ template: * [구성 파일 사용하여 쿠버네티스 오브젝트 관리하기](/ko/docs/tasks/manage-kubernetes-objects/imperative-config/) * [Kubectl 명령어 참조](/docs/reference/generated/kubectl/kubectl/) * [쿠버네티스 API 참조](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) - - diff --git a/content/ko/docs/tasks/manage-kubernetes-objects/imperative-command.md b/content/ko/docs/tasks/manage-kubernetes-objects/imperative-command.md index ddf2c2f1b9274..ac7690a325425 100644 --- a/content/ko/docs/tasks/manage-kubernetes-objects/imperative-command.md +++ b/content/ko/docs/tasks/manage-kubernetes-objects/imperative-command.md @@ -12,7 +12,7 @@ weight: 30 ## {{% heading "prerequisites" %}} -[`kubectl`](/docs/tasks/tools/install-kubectl/)을 설치한다. +[`kubectl`](/ko/docs/tasks/tools/install-kubectl/)을 설치한다. 
{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} @@ -169,5 +169,3 @@ kubectl create --edit -f /tmp/srv.yaml * [오브젝트 구성을 이용하여 쿠버네티스 관리하기(선언형)](/ko/docs/tasks/manage-kubernetes-objects/declarative-config/) * [Kubectl 커맨드 참조](/docs/reference/generated/kubectl/kubectl/) * [쿠버네티스 API 참조](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) - - diff --git a/content/ko/docs/tasks/manage-kubernetes-objects/imperative-config.md b/content/ko/docs/tasks/manage-kubernetes-objects/imperative-config.md index 927c6df079394..49691c341babf 100644 --- a/content/ko/docs/tasks/manage-kubernetes-objects/imperative-config.md +++ b/content/ko/docs/tasks/manage-kubernetes-objects/imperative-config.md @@ -13,7 +13,7 @@ weight: 40 ## {{% heading "prerequisites" %}} -[`kubectl`](/docs/tasks/tools/install-kubectl/)을 설치한다. +[`kubectl`](/ko/docs/tasks/tools/install-kubectl/)을 설치한다. {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} @@ -152,5 +152,3 @@ template: * [오브젝트 구성을 이용하여 쿠버네티스 오브젝트 관리하기 (선언형)](/ko/docs/tasks/manage-kubernetes-objects/declarative-config/) * [Kubectl 커멘드 참조](/docs/reference/generated/kubectl/kubectl/) * [쿠버네티스 API 참조](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) - - diff --git a/content/ko/docs/tasks/manage-kubernetes-objects/kustomization.md b/content/ko/docs/tasks/manage-kubernetes-objects/kustomization.md index 20435a48813cd..6a45b5a73bcb4 100644 --- a/content/ko/docs/tasks/manage-kubernetes-objects/kustomization.md +++ b/content/ko/docs/tasks/manage-kubernetes-objects/kustomization.md @@ -6,12 +6,12 @@ weight: 20 -[Kustomize](https://github.com/kubernetes-sigs/kustomize)는 -[kustomization 파일](https://github.com/kubernetes-sigs/kustomize/blob/master/docs/glossary.md#kustomization)을 +[Kustomize](https://github.com/kubernetes-sigs/kustomize)는 +[kustomization 파일](https://github.com/kubernetes-sigs/kustomize/blob/master/docs/glossary.md#kustomization)을 통해 쿠버네티스 오브젝트를 사용자가 원하는 대로 변경하는(customize) 독립형 도구이다. -1.14 이후로, kubectl도 -kustomization 파일을 사용한 쿠버네티스 오브젝트의 관리를 지원한다. +1.14 이후로, kubectl도 +kustomization 파일을 사용한 쿠버네티스 오브젝트의 관리를 지원한다. kustomization 파일을 포함하는 디렉터리 내의 리소스를 보려면 다음 명령어를 실행한다. ```shell @@ -29,7 +29,7 @@ kubectl apply -k ## {{% heading "prerequisites" %}} -[`kubectl`](/docs/tasks/tools/install-kubectl/)을 설치한다. +[`kubectl`](/ko/docs/tasks/tools/install-kubectl/)을 설치한다. {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} @@ -47,7 +47,7 @@ Kustomize는 쿠버네티스 구성을 사용자 정의화하는 도구이다. ### 리소스 생성 -컨피그 맵과 시크릿은 파드같은 다른 쿠버네티스 오브젝트에서 사용되는 설정이나 민감한 데이터를 가지고 있다. +컨피그 맵과 시크릿은 파드같은 다른 쿠버네티스 오브젝트에서 사용되는 설정이나 민감한 데이터를 가지고 있다. 컨피그 맵이나 시크릿의 실질적인 소스는 일반적으로 `.properties` 파일이나 ssh key 파일과 같은 것들은 클러스터 외부에 있다. Kustomize는 시크릿과 컨피그 맵을 파일이나 문자열에서 생성하는 `secretGenerator`와 `configMapGenerator`를 가지고 있다. @@ -207,7 +207,7 @@ metadata: ### 교차 편집 필드 설정 -프로젝트 내 모든 쿠버네티스 리소스에 교차 편집 필드를 설정하는 것은 꽤나 일반적이다. +프로젝트 내 모든 쿠버네티스 리소스에 교차 편집 필드를 설정하는 것은 꽤나 일반적이다. 교차 편집 필드를 설정하는 몇 가지 사용 사례는 다음과 같다. * 모든 리소스에 동일한 네임스페이스를 설정 @@ -283,13 +283,13 @@ spec: ### 리소스 구성과 사용자 정의 -프로젝트 내 리소스의 집합을 구성하여 이들을 동일한 파일이나 디렉터리 내에서 -관리하는 것은 일반적이다. +프로젝트 내 리소스의 집합을 구성하여 이들을 동일한 파일이나 디렉터리 내에서 +관리하는 것은 일반적이다. Kustomize는 서로 다른 파일들로 리소스를 구성하고 패치나 다른 사용자 정의를 이들에 적용하는 것을 제공한다. #### 구성 -Kustomize는 서로 다른 리소스들의 구성을 지원한다. `kustomization.yaml` 파일 내 `resources` 필드는 구성 내에 포함하려는 리소스들의 리스트를 정의한다. `resources` 리스트 내에 리소스의 구성 파일의 경로를 설정한다. +Kustomize는 서로 다른 리소스들의 구성을 지원한다. `kustomization.yaml` 파일 내 `resources` 필드는 구성 내에 포함하려는 리소스들의 리스트를 정의한다. `resources` 리스트 내에 리소스의 구성 파일의 경로를 설정한다. 
다음 예제는 디플로이먼트와 서비스로 구성된 NGINX 애플리케이션이다. ```shell @@ -344,7 +344,7 @@ EOF #### 사용자 정의 -패치는 리소스에 다른 사용자 정의를 적용하는 데 사용할 수 있다. Kustomize는 +패치는 리소스에 다른 사용자 정의를 적용하는 데 사용할 수 있다. Kustomize는 `patchesStrategicMerge`와 `patchesJson6902`를 통해 서로 다른 패치 메커니즘을 지원한다. `patchesStrategicMerge`는 파일 경로들의 리스트이다. 각각의 파일은 [전략적 병합 패치](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md)로 분석될 수 있어야 한다. 패치 내부의 네임은 반드시 이미 읽혀진 리소스 네임과 일치해야 한다. 한 가지 일을 하는 작은 패치가 권장된다. 예를 들기 위해 디플로이먼트 레플리카 숫자를 증가시키는 하나의 패치와 메모리 상한을 설정하는 다른 패치를 생성한다. ```shell @@ -432,10 +432,10 @@ spec: - containerPort: 80 ``` -모든 리소스 또는 필드가 전략적 병합 패치를 지원하는 것은 아니다. 임의의 리소스 내 임의의 필드의 수정을 지원하기 위해, -Kustomize는 `patchesJson6902`를 통한 [JSON 패치](https://tools.ietf.org/html/rfc6902) 적용을 제공한다. -Json 패치의 정확한 리소스를 찾기 위해, 해당 리소스의 group, version, kind, name이 -`kustomization.yaml` 내에 명시될 필요가 있다. 예를 들면, `patchesJson6902`를 통해 +모든 리소스 또는 필드가 전략적 병합 패치를 지원하는 것은 아니다. 임의의 리소스 내 임의의 필드의 수정을 지원하기 위해, +Kustomize는 `patchesJson6902`를 통한 [JSON 패치](https://tools.ietf.org/html/rfc6902) 적용을 제공한다. +Json 패치의 정확한 리소스를 찾기 위해, 해당 리소스의 group, version, kind, name이 +`kustomization.yaml` 내에 명시될 필요가 있다. 예를 들면, `patchesJson6902`를 통해 디플로이먼트 오브젝트의 레플리카 개수를 증가시킬 수 있다. ```shell @@ -508,7 +508,7 @@ spec: - containerPort: 80 ``` -패치 기능에 추가로 Kustomize는 패치를 생성하지 않고 컨테이너 이미지를 사용자 정의하거나 다른 오브젝트의 필드 값을 컨테이너에 주입하는 +패치 기능에 추가로 Kustomize는 패치를 생성하지 않고 컨테이너 이미지를 사용자 정의하거나 다른 오브젝트의 필드 값을 컨테이너에 주입하는 기능도 제공한다. 예를 들어 `kustomization.yaml`의 `images` 필드에 신규 이미지를 지정하여 컨테이너에서 사용되는 이미지를 변경할 수 있다. ```shell @@ -566,9 +566,9 @@ spec: - containerPort: 80 ``` -가끔, 파드 내에서 실행되는 애플리케이션이 다른 오브젝트의 설정 값을 사용해야 할 수도 있다. 예를 들어, -디플로이먼트 오브젝트의 파드는 Env 또는 커맨드 인수로 해당 서비스 네임을 읽어야 한다고 하자. -`kustomization.yaml` 파일에 `namePrefix` 또는 `nameSuffix`가 추가되면 서비스 네임이 변경될 수 있다. +가끔, 파드 내에서 실행되는 애플리케이션이 다른 오브젝트의 설정 값을 사용해야 할 수도 있다. 예를 들어, +디플로이먼트 오브젝트의 파드는 Env 또는 커맨드 인수로 해당 서비스 네임을 읽어야 한다고 하자. +`kustomization.yaml` 파일에 `namePrefix` 또는 `nameSuffix`가 추가되면 서비스 네임이 변경될 수 있다. 커맨드 인수 내에 서비스 네임을 하드 코딩하는 것을 권장하지 않는다. 이 용도에서 Kustomize는 `vars`를 통해 containers에 서비스 네임을 삽입할 수 있다. ```shell @@ -655,11 +655,11 @@ spec: ## Base와 Overlay -Kustomize는 **base**와 **overlay**의 개념을 가지고 있다. **base**는 `kustomization.yaml`과 함께 사용되는 디렉터리다. 이는 -사용자 정의와 관련된 리소스들의 집합을 포함한다. `kustomization.yaml`의 내부에 표시되는 base는 로컬 디렉터리이거나 원격 리포지터리의 디렉터리가 -될 수 있다. **overlay**는 `kustomization.yaml`이 있는 디렉터리로 -다른 kustomization 디렉터리들을 `bases`로 참조한다. **base**는 overlay에 대해서 알지 못하며 여러 overlay들에서 사용될 수 있다. -한 overlay는 다수의 base들을 가질 수 있고, base들에서 모든 리소스를 구성할 수 있으며, +Kustomize는 **base**와 **overlay**의 개념을 가지고 있다. **base**는 `kustomization.yaml`과 함께 사용되는 디렉터리다. 이는 +사용자 정의와 관련된 리소스들의 집합을 포함한다. `kustomization.yaml`의 내부에 표시되는 base는 로컬 디렉터리이거나 원격 리포지터리의 디렉터리가 +될 수 있다. **overlay**는 `kustomization.yaml`이 있는 디렉터리로 +다른 kustomization 디렉터리들을 `bases`로 참조한다. **base**는 overlay에 대해서 알지 못하며 여러 overlay들에서 사용될 수 있다. +한 overlay는 다수의 base들을 가질 수 있고, base들에서 모든 리소스를 구성할 수 있으며, 이들의 위에 사용자 정의도 가질 수 있다. 다음은 base에 대한 예이다. @@ -711,7 +711,7 @@ resources: EOF ``` -이 base는 다수의 overlay에서 사용될 수 있다. 다른 `namePrefix` 또는 다른 교차 편집 필드들을 +이 base는 다수의 overlay에서 사용될 수 있다. 다른 `namePrefix` 또는 다른 교차 편집 필드들을 서로 다른 overlay에 추가할 수 있다. 다음 예제는 동일한 base를 사용하는 두 overlay들이다. ```shell @@ -732,7 +732,7 @@ EOF ## Kustomize를 이용하여 오브젝트를 적용/확인/삭제하는 방법 -`kustomization.yaml`에서 관리되는 리소스를 인식하려면 `kubectl` 명령어에 `--kustomize` 나 `-k`를 사용한다. +`kustomization.yaml`에서 관리되는 리소스를 인식하려면 `kubectl` 명령어에 `--kustomize` 나 `-k`를 사용한다. `-k`는 다음과 같이 kustomization 디렉터리를 가리키고 있어야 한다는 것을 주의한다. 
```shell @@ -835,5 +835,3 @@ deployment.apps "dev-my-nginx" deleted * [Kubectl Book](https://kubectl.docs.kubernetes.io) * [Kubectl Command Reference](/docs/reference/generated/kubectl/kubectl/) * [Kubernetes API Reference](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) - - diff --git a/content/ko/docs/tasks/network/_index.md b/content/ko/docs/tasks/network/_index.md index 26317241ec603..e7ab0fae52f39 100644 --- a/content/ko/docs/tasks/network/_index.md +++ b/content/ko/docs/tasks/network/_index.md @@ -1,4 +1,5 @@ --- -title: "네트워크" +title: "네트워킹" +description: 클러스터에 대한 네트워킹 설정 방법에 대해 배운다. weight: 160 --- diff --git a/content/ko/docs/tasks/network/validate-dual-stack.md b/content/ko/docs/tasks/network/validate-dual-stack.md index 0bbb20b99dc0d..5364e7bebba2b 100644 --- a/content/ko/docs/tasks/network/validate-dual-stack.md +++ b/content/ko/docs/tasks/network/validate-dual-stack.md @@ -12,7 +12,7 @@ content_type: task * 이중 스택 네트워킹을 위한 제공자 지원 (클라우드 제공자 또는 기타 제공자들은 라우팅 가능한 IPv4/IPv6 네트워크 인터페이스를 제공하는 쿠버네티스 노드들을 제공해야 한다.) -* 이중 스택을 지원하는 [네트워크 플러그인](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) (예. Kubenet 또는 Calico) +* 이중 스택을 지원하는 [네트워크 플러그인](/ko/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) (예. Kubenet 또는 Calico) * IPVS 모드로 구동되는 Kube-proxy * [이중 스택 활성화](/ko/docs/concepts/services-networking/dual-stack/) 클러스터 @@ -155,5 +155,3 @@ my-service ClusterIP fe80:20d::d06b 80/TCP 9s NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE my-service ClusterIP fe80:20d::d06b 2001:db8:f100:4002::9d37:c0d7 80:31868/TCP 30s ``` - - diff --git a/content/ko/docs/tasks/run-application/_index.md b/content/ko/docs/tasks/run-application/_index.md index 203d2eb39c60c..13039181be221 100644 --- a/content/ko/docs/tasks/run-application/_index.md +++ b/content/ko/docs/tasks/run-application/_index.md @@ -1,4 +1,5 @@ --- title: "애플리케이션 실행" +description: 스테이트리스와 스테이트풀 애플리케이션 모두를 실행하고 관리한다. weight: 40 --- diff --git a/content/ko/docs/tasks/run-application/delete-stateful-set.md b/content/ko/docs/tasks/run-application/delete-stateful-set.md index ce5f030622afa..4c50079dc3dc5 100644 --- a/content/ko/docs/tasks/run-application/delete-stateful-set.md +++ b/content/ko/docs/tasks/run-application/delete-stateful-set.md @@ -52,7 +52,7 @@ kubectl delete pods -l app=myapp ### 퍼시스턴트볼륨(PersistentVolume) -스테이트풀셋의 파드들을 삭제하는 것이 연결된 볼륨을 삭제하는 것은 아니다. 이것은 볼륨을 삭제하기 전에 볼륨에서 데이터를 복사할 수 있는 기회를 준다. 파드들이 [terminating 상태](/ko/docs/concepts/workloads/pods/pod/#termination-of-pods)가 된 후 PVC를 삭제하는 것은 스토리지클래스(StorageClass) 와 반환 정책에 따라 백업 퍼시스턴트볼륨이 삭제될 수도 있다. 클레임 삭제 후 볼륨에 접근할 수 있다고 가정하면 안된다. +스테이트풀셋의 파드들을 삭제하는 것이 연결된 볼륨을 삭제하는 것은 아니다. 이것은 볼륨을 삭제하기 전에 볼륨에서 데이터를 복사할 수 있는 기회를 준다. 파드들이 [terminating 상태](/ko/docs/concepts/workloads/pods/pod/#파드의-종료)가 된 후 PVC를 삭제하는 것은 스토리지클래스(StorageClass) 와 반환 정책에 따라 백업 퍼시스턴트볼륨이 삭제될 수도 있다. 클레임 삭제 후 볼륨에 접근할 수 있다고 가정하면 안된다. {{< note >}} PVC를 삭제할 때 데이터 손실될 수 있음에 주의하자. @@ -82,7 +82,3 @@ kubectl delete pvc -l app=myapp [스테이트풀셋 파드 강제 삭제하기](/docs/tasks/run-application/force-delete-stateful-set-pod/)에 대해 더 알아보기. 
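+
+아래는 위에서 설명한 정리 단계를 순서대로 모아 놓은 간단한 예시이다. 스테이트풀셋 이름(myapp)은
+설명을 위해 가정한 것이며, 레이블(app=myapp)은 이 문서의 예시를 따른다.
+
+```shell
+# 스테이트풀셋을 삭제한다. 연결된 파드도 함께 종료된다.
+kubectl delete statefulset myapp
+
+# 파드가 모두 종료된 후, 남아 있는 PVC를 삭제한다.
+# PVC를 삭제하면 볼륨의 데이터가 손실될 수 있다는 점에 주의한다.
+kubectl delete pvc -l app=myapp
+```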
- - - - diff --git a/content/ko/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md b/content/ko/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md index 6b294b78509e2..ee5db9d3f2288 100644 --- a/content/ko/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md +++ b/content/ko/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md @@ -8,7 +8,7 @@ weight: 100 Horizontal Pod Autoscaler는 CPU 사용량(또는 베타 지원의 다른 애플리케이션 지원 메트릭)을 관찰하여 -레플리케이션 컨트롤러, 디플로이먼트, 레플리카 셋 또는 스테이트풀셋(StatefulSet)의 파드 개수를 자동으로 스케일한다. +레플리케이션 컨트롤러, 디플로이먼트, 레플리카셋(ReplicaSet) 또는 스테이트풀셋(StatefulSet)의 파드 개수를 자동으로 스케일한다. 이 문서는 php-apache 서버를 대상으로 Horizontal Pod Autoscaler를 동작해보는 예제이다. Horizontal Pod Autoscaler 동작과 관련된 더 많은 정보를 위해서는 [Horizontal Pod Autoscaler 사용자 가이드](/ko/docs/tasks/run-application/horizontal-pod-autoscale/)를 참고하기 바란다. diff --git a/content/ko/docs/tasks/run-application/horizontal-pod-autoscale.md b/content/ko/docs/tasks/run-application/horizontal-pod-autoscale.md index 2362c7f47519f..4afcb6927de81 100644 --- a/content/ko/docs/tasks/run-application/horizontal-pod-autoscale.md +++ b/content/ko/docs/tasks/run-application/horizontal-pod-autoscale.md @@ -179,9 +179,9 @@ CPU에 대한 오토스케일링 지원만 포함하는 안정된 버전은 새로운 필드는 `autoscaling/v1`로 작업할 때 어노테이션으로 보존된다. HorizontalPodAutoscaler API 오브젝트 생성시 지정된 이름이 유효한 -[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름들)인지 확인해야 한다. +[DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름)인지 확인해야 한다. API 오브젝트에 대한 자세한 내용은 -[HorizontalPodAutoscaler 오브젝트](https://git.k8s.io/community/contributors/design-proposals/autoscaling/horizontal-pod-autoscaler.md#horizontalpodautoscaler-object)에서 찾을 수 있다. +[HorizontalPodAutoscaler 오브젝트](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#horizontalpodautoscaler-v1-autoscaling)에서 찾을 수 있다. ## kubectl에서 Horizontal Pod Autoscaler 지원 @@ -199,9 +199,9 @@ Horizontal Pod Autoscaler는 모든 API 리소스와 마찬가지로 `kubectl` ## 롤링 업데이트 중 오토스케일링 -현재 쿠버네티스에서는 기본 레플리카 셋를 관리하는 디플로이먼트 오브젝트를 사용하여 롤링 업데이트를 수행할 수 있다. +현재 쿠버네티스에서는 기본 레플리카셋를 관리하는 디플로이먼트 오브젝트를 사용하여 롤링 업데이트를 수행할 수 있다. Horizontal Pod Autoscaler는 후자의 방법을 지원한다. Horizontal Pod Autoscaler는 디플로이먼트 오브젝트에 바인딩되고, -디플로이먼트 오브젝트를 위한 크기를 설정하며, 디플로이먼트는 기본 레플리카 셋의 크기를 결정한다. +디플로이먼트 오브젝트를 위한 크기를 설정하며, 디플로이먼트는 기본 레플리카셋의 크기를 결정한다. Horizontal Pod Autoscaler는 레플리케이션 컨트롤러를 직접 조작하는 롤링 업데이트에서 작동하지 않는다. 즉, Horizontal Pod Autoscaler를 레플리케이션 컨트롤러에 바인딩하고 롤링 업데이트를 수행할 수 없다. (예 : `kubectl rolling-update`) @@ -229,7 +229,7 @@ v1.12부터는 새로운 알고리즘 업데이트가 업스케일 지연에 대 이러한 파라미터 값을 조정할 때 클러스터 운영자는 가능한 결과를 알아야 한다. 지연(쿨-다운) 값이 너무 길면, Horizontal Pod Autoscaler가 워크로드 변경에 반응하지 않는다는 불만이 있을 수 있다. 그러나 지연 값을 -너무 짧게 설정하면, 레플리카 셋의 크기가 평소와 같이 계속 스래싱될 수 +너무 짧게 설정하면, 레플리카셋의 크기가 평소와 같이 계속 스래싱될 수 있다. {{< /note >}} @@ -439,5 +439,3 @@ behavior: * 디자인 문서: [Horizontal Pod Autoscaling](https://git.k8s.io/community/contributors/design-proposals/autoscaling/horizontal-pod-autoscaler.md). * kubectl 오토스케일 커맨드: [kubectl autoscale](/docs/reference/generated/kubectl/kubectl-commands/#autoscale). * [Horizontal Pod Autoscaler](/ko/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/)의 사용 예제. - - diff --git a/content/ko/docs/tasks/tls/_index.md b/content/ko/docs/tasks/tls/_index.md index 8607aa28d29b6..291b4b3eb171e 100644 --- a/content/ko/docs/tasks/tls/_index.md +++ b/content/ko/docs/tasks/tls/_index.md @@ -1,5 +1,6 @@ --- title: "TLS" +description: TLS(Transport Layer Security)를 사용하여 클러스터 내 트래픽을 보호하는 방법을 이해한다. 
weight: 100 --- diff --git a/content/ko/docs/tasks/tools/_index.md b/content/ko/docs/tasks/tools/_index.md index 799bc028f6231..bcfcd12e4e14d 100755 --- a/content/ko/docs/tasks/tools/_index.md +++ b/content/ko/docs/tasks/tools/_index.md @@ -1,5 +1,6 @@ --- title: "도구 설치" +description: 컴퓨터에서 쿠버네티스 도구를 설정한다. weight: 10 --- diff --git a/content/ko/docs/tasks/tools/install-kubectl.md b/content/ko/docs/tasks/tools/install-kubectl.md index 4b498f27ef130..e4c8011614bd7 100644 --- a/content/ko/docs/tasks/tools/install-kubectl.md +++ b/content/ko/docs/tasks/tools/install-kubectl.md @@ -111,34 +111,34 @@ kubectl version --client 1. 최신 릴리스를 다운로드한다. - ``` - curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/darwin/amd64/kubectl" - ``` + ```bash + curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/darwin/amd64/kubectl" + ``` - 특정 버전을 다운로드하려면, `$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)` 명령 부분을 특정 버전으로 바꾼다. + 특정 버전을 다운로드하려면, `$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)` 명령 부분을 특정 버전으로 바꾼다. - 예를 들어, macOS에서 버전 {{< param "fullversion" >}}을 다운로드하려면, 다음을 입력한다. - ``` - curl -LO https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/darwin/amd64/kubectl + 예를 들어, macOS에서 버전 {{< param "fullversion" >}}을 다운로드하려면, 다음을 입력한다. + ```bash + curl -LO https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/darwin/amd64/kubectl ``` -2. kubectl 바이너리를 실행 가능하게 만든다. + kubectl 바이너리를 실행 가능하게 만든다. - ``` - chmod +x ./kubectl - ``` + ```bash + chmod +x ./kubectl + ``` 3. 바이너리를 PATH가 설정된 디렉터리로 옮긴다. - ``` - sudo mv ./kubectl /usr/local/bin/kubectl - ``` + ```bash + sudo mv ./kubectl /usr/local/bin/kubectl + ``` 4. 설치한 버전이 최신 버전인지 확인한다. - ``` - kubectl version --client - ``` + ```bash + kubectl version --client + ``` ### macOS에서 Homebrew를 사용하여 설치 @@ -146,21 +146,21 @@ macOS에서 [Homebrew](https://brew.sh/) 패키지 관리자를 사용하는 경 1. 설치 명령을 실행한다. - ``` - brew install kubectl - ``` + ```bash + brew install kubectl + ``` - 또는 + 또는 - ``` - brew install kubernetes-cli - ``` + ```bash + brew install kubernetes-cli + ``` 2. 설치한 버전이 최신 버전인지 확인한다. - ``` - kubectl version --client - ``` + ```bash + kubectl version --client + ``` ### macOS에서 Macports를 사용하여 설치 @@ -168,117 +168,123 @@ macOS에서 [Macports](https://macports.org/) 패키지 관리자를 사용하 1. 설치 명령을 실행한다. - ``` - sudo port selfupdate - sudo port install kubectl - ``` + ```bash + sudo port selfupdate + sudo port install kubectl + ``` 2. 설치한 버전이 최신 버전인지 확인한다. - ``` - kubectl version --client - ``` + ```bash + kubectl version --client + ``` -## Windows에 kubectl 설치 +## 윈도우에 kubectl 설치 -### Windows에서 curl을 사용하여 kubectl 바이너리 설치 +### 윈도우에서 curl을 사용하여 kubectl 바이너리 설치 1. [이 링크](https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe)에서 최신 릴리스 {{< param "fullversion" >}}을 다운로드한다. - 또는 `curl` 을 설치한 경우, 다음 명령을 사용한다. + 또는 `curl` 을 설치한 경우, 다음 명령을 사용한다. 
- ``` - curl -LO https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe - ``` + ```bash + curl -LO https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe + ``` - 최신의 안정 버전(예: 스크립팅을 위한)을 찾으려면, [https://storage.googleapis.com/kubernetes-release/release/stable.txt](https://storage.googleapis.com/kubernetes-release/release/stable.txt)를 참고한다. + 최신의 안정 버전(예: 스크립팅을 위한)을 찾으려면, [https://storage.googleapis.com/kubernetes-release/release/stable.txt](https://storage.googleapis.com/kubernetes-release/release/stable.txt)를 참고한다. 2. 바이너리를 PATH가 설정된 디렉터리에 추가한다. 3. `kubectl` 의 버전이 다운로드한 버전과 같은지 확인한다. - ``` - kubectl version --client - ``` + ```bash + kubectl version --client + ``` {{< note >}} -[Windows용 도커 데스크톱](https://docs.docker.com/docker-for-windows/#kubernetes)은 자체 버전의 `kubectl` 을 PATH에 추가한다. +[윈도우용 도커 데스크톱](https://docs.docker.com/docker-for-windows/#kubernetes)은 자체 버전의 `kubectl` 을 PATH에 추가한다. 도커 데스크톱을 이전에 설치한 경우, 도커 데스크톱 설치 프로그램에서 추가한 PATH 항목 앞에 PATH 항목을 배치하거나 도커 데스크톱의 `kubectl` 을 제거해야 할 수도 있다. {{< /note >}} ### PSGallery에서 Powershell로 설치 -Windows에서 [Powershell Gallery](https://www.powershellgallery.com/) 패키지 관리자를 사용하는 경우, Powershell로 kubectl을 설치하고 업데이트할 수 있다. +윈도우에서 [Powershell Gallery](https://www.powershellgallery.com/) 패키지 관리자를 사용하는 경우, Powershell로 kubectl을 설치하고 업데이트할 수 있다. 1. 설치 명령을 실행한다(`DownloadLocation` 을 지정해야 한다). - ``` - Install-Script -Name install-kubectl -Scope CurrentUser -Force - install-kubectl.ps1 [-DownloadLocation ] - ``` + ```powershell + Install-Script -Name install-kubectl -Scope CurrentUser -Force + install-kubectl.ps1 [-DownloadLocation ] + ``` -{{< note >}}`DownloadLocation` 을 지정하지 않으면, `kubectl` 은 사용자의 임시 디렉터리에 설치된다.{{< /note >}} + {{< note >}} + `DownloadLocation` 을 지정하지 않으면, `kubectl` 은 사용자의 임시 디렉터리에 설치된다. + {{< /note >}} 설치 프로그램은 `$HOME/.kube` 를 생성하고 구성 파일을 작성하도록 지시한다. 2. 설치한 버전이 최신 버전인지 확인한다. - ``` - kubectl version --client - ``` + ```powershell + kubectl version --client + ``` {{< note >}} 설치 업데이트는 1 단계에서 나열한 두 명령을 다시 실행하여 수행한다. {{< /note >}} -### Chocolatey 또는 Scoop을 사용하여 Windows에 설치 - -1. Windows에 kubectl을 설치하기 위해서 [Chocolatey](https://chocolatey.org) 패키지 관리자나 [Scoop](https://scoop.sh) 커맨드 라인 설치 프로그램을 사용할 수 있다. +### Chocolatey 또는 Scoop을 사용하여 윈도우에 설치 -{{< tabs name="kubectl_win_install" >}} -{{% tab name="choco" %}} +1. 윈도우에 kubectl을 설치하기 위해서 [Chocolatey](https://chocolatey.org) 패키지 관리자나 [Scoop](https://scoop.sh) 커맨드 라인 설치 프로그램을 사용할 수 있다. + {{< tabs name="kubectl_win_install" >}} + {{% tab name="choco" %}} + ```powershell choco install kubernetes-cli - -{{% /tab %}} -{{% tab name="scoop" %}} - + ``` + {{% /tab %}} + {{% tab name="scoop" %}} + ```powershell scoop install kubectl - -{{% /tab %}} -{{< /tabs >}} + ``` + {{% /tab %}} + {{< /tabs >}} 2. 설치한 버전이 최신 버전인지 확인한다. - ``` - kubectl version --client - ``` + ```powershell + kubectl version --client + ``` -3. 홈 디렉토리로 이동한다. +3. 홈 디렉터리로 이동한다. + + ```powershell + # cmd.exe를 사용한다면, 다음을 실행한다. cd %USERPROFILE% + cd ~ + ``` - ``` - cd %USERPROFILE% - ``` 4. `.kube` 디렉터리를 생성한다. - ``` - mkdir .kube - ``` + ```powershell + mkdir .kube + ``` 5. 금방 생성한 `.kube` 디렉터리로 이동한다. - ``` - cd .kube - ``` + ```powershell + cd .kube + ``` 6. 원격 쿠버네티스 클러스터를 사용하도록 kubectl을 구성한다. - ``` - New-Item config -type file - ``` + ```powershell + New-Item config -type file + ``` -{{< note >}}메모장과 같은 텍스트 편집기를 선택하여 구성 파일을 편집한다.{{< /note >}} +{{< note >}} +메모장과 같은 텍스트 편집기를 선택하여 구성 파일을 편집한다. 
+{{< /note >}} ## Google Cloud SDK의 일부로 다운로드 @@ -288,15 +294,15 @@ kubectl을 Google Cloud SDK의 일부로 설치할 수 있다. 2. `kubectl` 설치 명령을 실행한다. - ``` - gcloud components install kubectl - ``` + ```shell + gcloud components install kubectl + ``` 3. 설치한 버전이 최신 버전인지 확인한다. - ``` - kubectl version --client - ``` + ```shell + kubectl version --client + ``` ## kubectl 구성 확인 @@ -312,7 +318,7 @@ URL 응답이 표시되면, kubectl이 클러스터에 접근하도록 올바르 다음과 비슷한 메시지가 표시되면, kubectl이 올바르게 구성되지 않았거나 쿠버네티스 클러스터에 연결할 수 없다. -```shell +``` The connection to the server was refused - did you specify the right host or port? ``` @@ -350,7 +356,7 @@ bash-completion은 많은 패키지 관리자에 의해 제공된다([여기](ht 확인하려면, 셸을 다시 로드하고 `type _init_completion` 을 실행한다. 명령이 성공하면, 이미 설정된 상태이고, 그렇지 않으면 `~/.bashrc` 파일에 다음을 추가한다. -```shell +```bash source /usr/share/bash-completion/bash_completion ``` @@ -362,17 +368,17 @@ source /usr/share/bash-completion/bash_completion - `~/.bashrc` 파일에서 완성 스크립트를 소싱한다. - ```shell - echo 'source <(kubectl completion bash)' >>~/.bashrc - ``` + ```bash + echo 'source <(kubectl completion bash)' >>~/.bashrc + ``` - 완성 스크립트를 `/etc/bash_completion.d` 디렉터리에 추가한다. - ```shell - kubectl completion bash >/etc/bash_completion.d/kubectl - ``` + ```bash + kubectl completion bash >/etc/bash_completion.d/kubectl + ``` kubectl에 대한 앨리어스(alias)가 있는 경우, 해당 앨리어스로 작업하도록 셸 완성을 확장할 수 있다. -```shell +```bash echo 'alias k=kubectl' >>~/.bashrc echo 'complete -F __start_kubectl k' >>~/.bashrc ``` @@ -403,19 +409,19 @@ bash-completion에는 v1과 v2 두 가지 버전이 있다. v1은 Bash 3.2(macOS 여기의 지침에서는 Bash 4.1 이상을 사용한다고 가정한다. 다음을 실행하여 Bash 버전을 확인할 수 있다. -```shell +```bash echo $BASH_VERSION ``` 너무 오래된 버전인 경우, Homebrew를 사용하여 설치/업그레이드할 수 있다. -```shell +```bash brew install bash ``` 셸을 다시 로드하고 원하는 버전을 사용 중인지 확인한다. -```shell +```bash echo $BASH_VERSION $SHELL ``` @@ -429,13 +435,13 @@ Homebrew는 보통 `/usr/local/bin/bash` 에 설치한다. bash-completion v2가 이미 설치되어 있는지 `type_init_completion` 으로 확인할 수 있다. 그렇지 않은 경우, Homebrew로 설치할 수 있다. -```shell +```bash brew install bash-completion@2 ``` 이 명령의 출력에 명시된 바와 같이, `~/.bash_profile` 파일에 다음을 추가한다. -```shell +```bash export BASH_COMPLETION_COMPAT_DIR="/usr/local/etc/bash_completion.d" [[ -r "/usr/local/etc/profile.d/bash_completion.sh" ]] && . "/usr/local/etc/profile.d/bash_completion.sh" ``` @@ -448,29 +454,29 @@ export BASH_COMPLETION_COMPAT_DIR="/usr/local/etc/bash_completion.d" - 완성 스크립트를 `~/.bash_profile` 파일에서 소싱한다. - ```shell + ```bash echo 'source <(kubectl completion bash)' >>~/.bash_profile ``` - 완성 스크립트를 `/usr/local/etc/bash_completion.d` 디렉터리에 추가한다. - ```shell + ```bash kubectl completion bash >/usr/local/etc/bash_completion.d/kubectl ``` - kubectl에 대한 앨리어스가 있는 경우, 해당 앨리어스로 작업하기 위해 셸 완성을 확장할 수 있다. - ```shell + ```bash echo 'alias k=kubectl' >>~/.bash_profile echo 'complete -F __start_kubectl k' >>~/.bash_profile ``` - Homebrew로 kubectl을 설치한 경우([위](#macos에서-homebrew를-사용하여-설치)의 설명을 참고), kubectl 완성 스크립트는 이미 `/usr/local/etc/bash_completion.d/kubectl` 에 있어야 한다. 이 경우, 아무 것도 할 필요가 없다. - {{< note >}} - bash-completion v2의 Homebrew 설치는 `BASH_COMPLETION_COMPAT_DIR` 디렉터리의 모든 파일을 소싱하므로, 후자의 두 가지 방법이 적용된다. - {{< /note >}} + {{< note >}} + bash-completion v2의 Homebrew 설치는 `BASH_COMPLETION_COMPAT_DIR` 디렉터리의 모든 파일을 소싱하므로, 후자의 두 가지 방법이 적용된다. + {{< /note >}} 어쨌든, 셸을 다시 로드 한 후에, kubectl 완성이 작동해야 한다. {{% /tab %}} @@ -481,13 +487,13 @@ Zsh용 kubectl 완성 스크립트는 `kubectl completion zsh` 명령으로 생 모든 셸 세션에서 사용하려면, `~/.zshrc` 파일에 다음을 추가한다. -```shell +```zsh source <(kubectl completion zsh) ``` kubectl에 대한 앨리어스가 있는 경우, 해당 앨리어스로 작업하도록 셸 완성을 확장할 수 있다. 
-```shell +```zsh echo 'alias k=kubectl' >>~/.zshrc echo 'complete -F __start_kubectl k' >>~/.zshrc ``` @@ -496,16 +502,13 @@ echo 'complete -F __start_kubectl k' >>~/.zshrc `complete:13: command not found: compdef` 와 같은 오류가 발생하면, `~/.zshrc` 파일의 시작 부분에 다음을 추가한다. -```shell +```zsh autoload -Uz compinit compinit ``` {{% /tab %}} {{< /tabs >}} - - - ## {{% heading "whatsnext" %}} * [Minikube 설치](/ko/docs/tasks/tools/install-minikube/) @@ -513,4 +516,3 @@ compinit * [애플리케이션을 시작하고 노출하는 방법에 대해 배운다.](/docs/tasks/access-application-cluster/service-access-application-cluster/) * 직접 생성하지 않은 클러스터에 접근해야하는 경우, [클러스터 접근 공유 문서](/ko/docs/tasks/access-application-cluster/configure-access-multiple-clusters/)를 참고한다. * [kubectl 레퍼런스 문서](/docs/reference/kubectl/kubectl/) 읽기 - diff --git a/content/ko/docs/tasks/tools/install-minikube.md b/content/ko/docs/tasks/tools/install-minikube.md index dc43810c143f9..04c4ca64a6968 100644 --- a/content/ko/docs/tasks/tools/install-minikube.md +++ b/content/ko/docs/tasks/tools/install-minikube.md @@ -65,7 +65,7 @@ Hyper-V Requirements: A hypervisor has been detected. Features required for ### kubectl 설치 -kubectl이 설치되었는지 확인한다. kubectl은 [kubectl 설치하고 설정하기](/docs/tasks/tools/install-kubectl/#install-kubectl-on-linux)의 요령을 따라서 설치할 수 있다. +kubectl이 설치되었는지 확인한다. kubectl은 [kubectl 설치하고 설정하기](/ko/docs/tasks/tools/install-kubectl/#리눅스에-kubectl-설치)의 요령을 따라서 설치할 수 있다. ## 하이퍼바이저(hypervisor) 설치 @@ -76,7 +76,7 @@ kubectl이 설치되었는지 확인한다. kubectl은 [kubectl 설치하고 설 • [VirtualBox](https://www.virtualbox.org/wiki/Downloads) Minikube는 쿠버네티스 컴포넌트를 VM이 아닌 호스트에서도 동작하도록 `--driver=none` 옵션도 지원한다. -이 드라이버를 사용하려면 [도커](https://www.docker.com/products/docker-desktop) 와 Linux 환경이 필요하지만, 하이퍼바이저는 필요하지 않다. +이 드라이버를 사용하려면 [도커](https://www.docker.com/products/docker-desktop)와 리눅스 환경이 필요하지만, 하이퍼바이저는 필요하지 않다. 데비안(Debian) 또는 파생된 배포판에서 `none` 드라이버를 사용하는 경우, Minikube에서는 동작하지 않는 스냅 패키지 대신 도커용 `.deb` 패키지를 사용한다. @@ -119,7 +119,7 @@ sudo install minikube /usr/local/bin/ ### Homebrew를 이용해서 Minikube 설치하기 -또 다른 대안으로 Linux [Homebrew](https://docs.brew.sh/Homebrew-on-Linux)를 이용해서 Minikube를 설치할 수 있다. +또 다른 대안으로 리눅스 [Homebrew](https://docs.brew.sh/Homebrew-on-Linux)를 이용해서 Minikube를 설치할 수 있다. ```shell brew install minikube @@ -129,7 +129,7 @@ brew install minikube {{% tab name="맥OS" %}} ### kubectl 설치 -kubectl이 설치되었는지 확인한다. kubectl은 [kubectl 설치하고 설정하기](/docs/tasks/tools/install-kubectl/#install-kubectl-on-macos)의 요령을 따라서 설치할 수 있다. +kubectl이 설치되었는지 확인한다. kubectl은 [kubectl 설치하고 설정하기](/ko/docs/tasks/tools/install-kubectl/#macos에-kubectl-설치)의 요령을 따라서 설치할 수 있다. ### 하이퍼바이저(hypervisor) 설치 @@ -162,10 +162,10 @@ sudo mv minikube /usr/local/bin ``` {{% /tab %}} -{{% tab name="Windows" %}} +{{% tab name="윈도우" %}} ### kubectl 설치하기 -kubectl이 설치되었는지 확인한다. kubectl은 [kubectl 설치하고 설정하기](/docs/tasks/tools/install-kubectl/#install-kubectl-on-windows)의 요령을 따라서 설치할 수 있다. +kubectl이 설치되었는지 확인한다. kubectl은 [kubectl 설치하고 설정하기](/ko/docs/tasks/tools/install-kubectl/#windows에-kubectl-설치)의 요령을 따라서 설치할 수 있다. ### 하이퍼바이저(hypervisor) 설치하기 @@ -206,7 +206,7 @@ Minikube 설치를 마친 후, 현재 CLI 세션을 닫고 재시작한다. Mini {{< note >}} -`minikube start` 시 `--driver` 를 설정하려면, 아래에 `` 로 소문자로 언급된 곳에 설치된 하이퍼바이저의 이름을 입력한다. `--driver` 값의 전체 목록은 [VM driver 문서에서 지정하기](https://kubernetes.io/docs/setup/learning-environment/minikube/#specifying-the-vm-driver)에서 확인할 수 있다. +`minikube start` 시 `--driver` 를 설정하려면, 아래에 `` 로 소문자로 언급된 곳에 설치된 하이퍼바이저의 이름을 입력한다. `--driver` 값의 전체 목록은 [VM driver 지정하기 문서](/ko/docs/setup/learning-environment/minikube/#vm-드라이버-지정하기)에서 확인할 수 있다. 
{{< /note >}} diff --git a/content/ko/docs/tutorials/_index.md b/content/ko/docs/tutorials/_index.md index 4a18224f02275..a0dfb80ca19fe 100644 --- a/content/ko/docs/tutorials/_index.md +++ b/content/ko/docs/tutorials/_index.md @@ -1,6 +1,7 @@ --- title: 튜토리얼 main_menu: true +no_list: true weight: 60 content_type: concept --- @@ -14,8 +15,6 @@ content_type: concept 각 튜토리얼을 따라하기 전에, 나중에 참조할 수 있도록 [표준 용어집](/ko/docs/reference/glossary/) 페이지를 북마크하기를 권한다. - - ## 기초 @@ -64,13 +63,8 @@ content_type: concept * [소스 IP 주소 이용하기](/ko/docs/tutorials/services/source-ip/) - - ## {{% heading "whatsnext" %}} - 튜토리얼을 작성하고 싶다면, 튜토리얼 페이지 유형에 대한 정보가 있는 -[내용 페이지 유형](/docs/contribute/style/page-content-types/) +[콘텐츠 페이지 유형](/docs/contribute/style/page-content-types/) 페이지를 참조한다. - - diff --git a/content/ko/docs/tutorials/configuration/configure-redis-using-configmap.md b/content/ko/docs/tutorials/configuration/configure-redis-using-configmap.md index 7c14a811a57cd..bb28639e83e8c 100644 --- a/content/ko/docs/tutorials/configuration/configure-redis-using-configmap.md +++ b/content/ko/docs/tutorials/configuration/configure-redis-using-configmap.md @@ -15,7 +15,7 @@ content_type: tutorial * 다음을 포함하는 `kustomization.yaml` 파일을 생성한다. * 컨피그 맵 생성자 * 컨피그 맵을 사용하는 파드 리소스 -* `kubectl apply -k ./`를 실행하여 작업한 디렉토리를 적용한다. +* `kubectl apply -k ./`를 실행하여 작업한 디렉터리를 적용한다. * 구성이 잘 적용되었는지 확인한다. @@ -65,7 +65,7 @@ resources: EOF ``` -컨피그 맵과 파드 개체를 생성하도록 kustomization 디렉토리를 적용한다. +컨피그 맵과 파드 개체를 생성하도록 kustomization 디렉터리를 적용한다. ```shell kubectl apply -k . diff --git a/content/ko/docs/tutorials/hello-minikube.md b/content/ko/docs/tutorials/hello-minikube.md index 4549694159e94..9d71638252acb 100644 --- a/content/ko/docs/tutorials/hello-minikube.md +++ b/content/ko/docs/tutorials/hello-minikube.md @@ -118,7 +118,7 @@ Katacode는 무료로 브라우저에서 쿠버네티스 환경을 제공한다. ``` {{< note >}} - `kubectl` 명령어에 관해 자세히 알기 원하면 [kubectl 개요](/docs/user-guide/kubectl-overview/)을 살펴보자. + `kubectl` 명령어에 관해 자세히 알기 원하면 [kubectl 개요](/ko/docs/reference/kubectl/overview/)을 살펴보자. {{< /note >}} ## 서비스 만들기 diff --git a/content/ko/docs/tutorials/kubernetes-basics/expose/expose-intro.html b/content/ko/docs/tutorials/kubernetes-basics/expose/expose-intro.html index aed0258cf697e..6726fdd6e0d44 100644 --- a/content/ko/docs/tutorials/kubernetes-basics/expose/expose-intro.html +++ b/content/ko/docs/tutorials/kubernetes-basics/expose/expose-intro.html @@ -28,7 +28,7 @@

목표

쿠버네티스 서비스들에 대한 개요

-    쿠버네티스 파드들 은 언젠가는 죽게된다. 실제 파드들은 생명주기를 갖는다. 워커 노드가 죽으면, 노드 상에서 동작하는 파드들 또한 종료된다. 레플리카 셋은 여러분의 애플리케이션이 지속적으로 동작할 수 있도록 새로운 파드들의 생성을 통해 동적으로 클러스터를 미리 지정해 둔 상태로 되돌려 줄 수도 있다. 또 다른 예시로서, 3개의 복제본을 갖는 이미지 처리용 백엔드를 고려해 보자. 그 복제본들은 교체 가능한 상태이다. 그래서 프론트엔드 시스템은 하나의 파드가 소멸되어 재생성이 되더라도, 백엔드 복제본들에 의한 영향을 받아서는 안된다. 즉, 동일 노드 상의 파드들이라 할지라도, 쿠버네티스 클러스터 내 각 파드는 유일한 IP 주소를 가지며, 여러분의 애플리케이션들이 지속적으로 기능할 수 있도록 파드들 속에서 발생하는 변화에 대해 자동으로 조정해 줄 방법이 있어야 한다.
+    쿠버네티스 파드들 은 언젠가는 죽게된다. 실제 파드들은 생명주기를 갖는다. 워커 노드가 죽으면, 노드 상에서 동작하는 파드들 또한 종료된다. 레플리카셋(ReplicaSet)은 여러분의 애플리케이션이 지속적으로 동작할 수 있도록 새로운 파드들의 생성을 통해 동적으로 클러스터를 미리 지정해 둔 상태로 되돌려 줄 수도 있다. 또 다른 예시로서, 3개의 복제본을 갖는 이미지 처리용 백엔드를 고려해 보자. 그 복제본들은 교체 가능한 상태이다. 그래서 프론트엔드 시스템은 하나의 파드가 소멸되어 재생성이 되더라도, 백엔드 복제본들에 의한 영향을 받아서는 안된다. 즉, 동일 노드 상의 파드들이라 할지라도, 쿠버네티스 클러스터 내 각 파드는 유일한 IP 주소를 가지며, 여러분의 애플리케이션들이 지속적으로 기능할 수 있도록 파드들 속에서 발생하는 변화에 대해 자동으로 조정해 줄 방법이 있어야 한다.

쿠버네티스에서 서비스는 하나의 논리적인 파드 셋과 그 파드들에 접근할 수 있는 정책을 정의하는 추상적 개념이다. 서비스는 종속적인 파드들 사이를 느슨하게 결합되도록 해준다. 서비스는 모든 쿠버네티스 오브젝트들과 같이 YAML (보다 선호하는) 또는 JSON을 이용하여 정의된다. 서비스가 대상으로 하는 파드 셋은 보통 LabelSelector에 의해 결정된다 (여러분이 왜 스펙에 selector가 포함되지 않은 서비스를 필요로 하게 될 수도 있는지에 대해 아래에서 확인해 보자).
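To make the paragraph above concrete, this is roughly what a Service defined in YAML with a label selector looks like. The Service name, the `app: MyApp` label, and the ports are placeholders, not values taken from this tutorial.

```shell
# Illustration only: a minimal Service whose selector targets pods labeled app: MyApp.
# Name, label, and ports are placeholders, not part of this tutorial's manifests.
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  name: my-service
spec:
  selector:
    app: MyApp
  ports:
  - protocol: TCP
    port: 80
    targetPort: 9376
EOF
```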

@@ -80,7 +80,7 @@

서비스와 레이블

  • 태그들을 이용하는 객체들에 대한 분류


    diff --git a/content/ko/docs/tutorials/kubernetes-basics/scale/scale-intro.html b/content/ko/docs/tutorials/kubernetes-basics/scale/scale-intro.html index 24405ac955ca9..9790c1f31e1bc 100644 --- a/content/ko/docs/tutorials/kubernetes-basics/scale/scale-intro.html +++ b/content/ko/docs/tutorials/kubernetes-basics/scale/scale-intro.html @@ -44,7 +44,7 @@

    요약:

-    kubectl run 명령에 --replicas 파라미터를 사용해서 처음부터 복수의 인스턴스로 구동되는
+    kubectl create deployment 명령에 --replicas 파라미터를 사용해서 처음부터 복수의 인스턴스로 구동되는
     디플로이먼트를 만들 수도 있다
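A hedged example of the command the new wording points at; the Deployment name and image below are placeholders, not values used in this module, and the `--replicas` flag assumes a reasonably recent kubectl release.

```shell
# Illustration only: start a Deployment with several instances from the beginning.
# "kubernetes-bootcamp" and the image are assumed names for this sketch.
kubectl create deployment kubernetes-bootcamp \
  --image=gcr.io/google-samples/kubernetes-bootcamp:v1 \
  --replicas=3

# Confirm that the Deployment reports the requested number of replicas.
kubectl get deployment kubernetes-bootcamp
```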

    diff --git a/content/ko/docs/tutorials/services/source-ip.md b/content/ko/docs/tutorials/services/source-ip.md index 4917fa5042426..ae9e5abf0387b 100644 --- a/content/ko/docs/tutorials/services/source-ip.md +++ b/content/ko/docs/tutorials/services/source-ip.md @@ -226,7 +226,7 @@ client_address=10.240.0.3 다른 노드로 트래픽 전달하지 않는다. 이 방법은 원본 소스 IP 주소를 보존한다. 만약 로컬 엔드 포인트가 없다면, 그 노드로 보내진 패킷은 버려지므로 -패킷 처리 규칙에서 정확한 소스 IP 임을 신뢰할 수 있으므로, +패킷 처리 규칙에서 정확한 소스 IP 임을 신뢰할 수 있으므로, 패킷을 엔드포인트까지 전달할 수 있다. 다음과 같이 `service.spec.externalTrafficPolicy` 필드를 설정하자. @@ -249,7 +249,7 @@ for node in $NODES; do curl --connect-timeout 1 -s $node:$NODEPORT | grep -i cli client_address=104.132.1.79 ``` -엔드포인트 파드가 실행 중인 노드에서 *올바른* 클라이언트 IP 주소인 +엔드포인트 파드가 실행 중인 노드에서 *올바른* 클라이언트 IP 주소인 딱 한 종류의 응답만 수신한다. 어떻게 이렇게 되었는가: @@ -319,7 +319,7 @@ client_address=10.240.0.5 그러나 구글 클라우드 엔진/GCE 에서 실행 중이라면 동일한 `service.spec.externalTrafficPolicy` 필드를 `Local`로 설정하면 서비스 엔드포인트가 *없는* 노드는 고의로 헬스 체크에 실패하여 -강제로 로드밸런싱 트래픽을 받을 수 있는 노드 목록에서 +강제로 로드밸런싱 트래픽을 받을 수 있는 노드 목록에서 자신을 스스로 제거한다. 시각적으로: @@ -447,6 +447,4 @@ kubectl delete deployment source-ip-app ## {{% heading "whatsnext" %}} * [서비스를 통한 애플리케이션 연결하기](/ko/docs/concepts/services-networking/connect-applications-service/)에 더 자세히 본다. -* 어떻게 [외부 로드밸런서 생성](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/)하는지 본다. - - +* 어떻게 [외부 로드밸런서 생성](/docs/tasks/access-application-cluster/create-external-load-balancer/)하는지 본다. diff --git a/content/ko/docs/tutorials/stateful-application/basic-stateful-set.md b/content/ko/docs/tutorials/stateful-application/basic-stateful-set.md index d384d076556f1..03cfdf88c63d8 100644 --- a/content/ko/docs/tutorials/stateful-application/basic-stateful-set.md +++ b/content/ko/docs/tutorials/stateful-application/basic-stateful-set.md @@ -6,9 +6,9 @@ weight: 10 --- -이 튜토리얼은 스테이트풀셋([StatefulSets](/ko/docs/concepts/workloads/controllers/statefulset/))을 이용하여 -애플리케이션을 관리하는 방법을 소개한다. 어떻게 스테이트풀셋의 파드를 생성하고 삭제하며 -스케일링하고 업데이트하는지 시연한다. +이 튜토리얼은 {{< glossary_tooltip text="스테이트풀셋(StatefulSet)" term_id="statefulset" >}}을 이용하여 +애플리케이션을 관리하는 방법을 소개한다. +어떻게 스테이트풀셋의 파드를 생성하고, 삭제하며, 스케일링하고, 업데이트하는지 시연한다. ## {{% heading "prerequisites" %}} @@ -22,13 +22,14 @@ weight: 10 * [퍼시스턴트볼륨(PersistentVolumes)](/ko/docs/concepts/storage/persistent-volumes/) * [퍼시턴트볼륨 프로비저닝](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/persistent-volume-provisioning/) * [스테이트풀셋](/ko/docs/concepts/workloads/controllers/statefulset/) -* [kubectl CLI](/docs/user-guide/kubectl/) +* [kubectl](/docs/reference/kubectl/kubectl/) 커맨드 라인 도구 +{{< note >}} 이 튜토리얼은 클러스터가 퍼시스턴스볼륨을 동적으로 프로비저닝 하도록 설정되었다고 가정한다. 만약 클러스터가 이렇게 설정되어 있지 않다면, 튜토리얼 시작 전에 수동으로 2개의 1 GiB 볼륨을 프로비저닝해야 한다. - +{{< /note >}} ## {{% heading "objectives" %}} @@ -46,7 +47,6 @@ weight: 10 * 스테이트풀셋은 어떻게 스케일링하는지 * 스테이트풀셋의 파드는 어떻게 업데이트하는지 - ## 스테이트풀셋 생성하기 @@ -74,20 +74,24 @@ kubectl get pods -w -l app=nginx ```shell kubectl apply -f web.yaml +``` +``` service/nginx created statefulset.apps/web created ``` 상기 명령어는 [NGINX](https://www.nginx.com) 웹 서버를 -실행하는 2개의 파드를 생성한다. `nginx` 서비스와 -`web` 스테이트풀셋이 성공적으로 생성되었는지 알아보자. +실행하는 2개의 파드를 생성한다. `nginx` 서비스의 정보를 가져온다. ```shell kubectl get service nginx NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE nginx ClusterIP None 80/TCP 12s - +``` +그리고 `web` 스테이트풀셋 정보를 가져와서 모두 성공적으로 생성되었는지 확인한다. 
+```shell kubectl get statefulset web +``` NAME DESIRED CURRENT AGE web 2 1 20s ``` @@ -101,6 +105,8 @@ N개의 레플리카를 가진 스테이트풀셋은 배포 시에 ```shell kubectl get pods -w -l app=nginx +``` +``` NAME READY STATUS RESTARTS AGE web-0 0/1 Pending 0 0s web-0 0/1 Pending 0 0s @@ -112,8 +118,8 @@ web-1 0/1 ContainerCreating 0 0s web-1 1/1 Running 0 18s ``` -`web-1` 파드는 `web-0` 파드가 [Running과 Ready](/ko/docs/concepts/workloads/pods/pod-lifecycle/) 상태가 되기 전에 -시작하지 않음을 주의하자. +참고로 `web-1` 파드는 `web-0` 파드가 _Running_ ([파드의 단계](/ko/docs/concepts/workloads/pods/pod-lifecycle/#파드의-단계-phase) 참고) +및 _Ready_ ([파드의 조건](/docs/concepts/workloads/pods/pod-lifecycle/#파드의-조건-condition)에서 `type` 참고) 상태가 되기 전에 시작하지 않음을 주의하자. ## 스테이트풀셋 안에 파드 @@ -125,16 +131,17 @@ web-1 1/1 Running 0 18s ```shell kubectl get pods -l app=nginx +``` +``` NAME READY STATUS RESTARTS AGE web-0 1/1 Running 0 1m web-1 1/1 Running 0 1m - ``` [스테이트풀셋](/ko/docs/concepts/workloads/controllers/statefulset/) 개념에서 언급했듯 스테이트풀셋의 파드는 끈끈하고 고유한 정체성을 가진다. -이 정체성은 스테이트풀 컨트롤러에서 각 파드에 주어지는 -고유한 순번에 기인한다. 파드의 이름의 형식은 +이 정체성은 스테이트풀셋 {{< glossary_tooltip text="컨트롤러" term_id="controller" >}}에서 +각 파드에 주어지는 고유한 순번에 기인한다. 파드의 이름의 형식은 `<스테이트풀셋 이름>-<순번>` 이다. 앞서 `web` 스테이트풀셋은 2개의 레플리카를 가졌으므로 `web-0` 과 `web-1` 2개 파드를 생성한다. @@ -145,7 +152,9 @@ web-1 1/1 Running 0 1m [`kubectl exec`](/docs/reference/generated/kubectl/kubectl-commands/#exec)를 이용하자. ```shell -for i in 0 1; do kubectl exec web-$i -- sh -c 'hostname'; done +for i in 0 1; do kubectl exec "web-$i" -- sh -c 'hostname'; done +``` +``` web-0 web-1 ``` @@ -157,7 +166,14 @@ web-1 ```shell kubectl run -i --tty --image busybox:1.28 dns-test --restart=Never --rm +``` +위 명령으로 새로운 셸을 시작한다. 새 셸에서 다음을 실행한다. +```shell +# dns-test 컨테이너 셸에서 다음을 실행한다. nslookup web-0.nginx +``` +출력 결과는 다음과 비슷하다. +``` Server: 10.0.0.10 Address 1: 10.0.0.10 kube-dns.kube-system.svc.cluster.local @@ -172,6 +188,8 @@ Name: web-1.nginx Address 1: 10.244.2.6 ``` +(이제 `exit` 명령으로 컨테이너 셸에서 종료한다.) + 헤드리스 서비스의 CNAME은 SRV 레코드를 지칭한다 (Running과 Ready 상태의 각 파드마다 1개). SRV 레코드는 파드의 IP 주소를 포함한 A 레코드 엔트리를 지칭한다. @@ -196,6 +214,8 @@ pod "web-1" deleted ```shell kubectl get pod -w -l app=nginx +``` +``` NAME READY STATUS RESTARTS AGE web-0 0/1 ContainerCreating 0 0s NAME READY STATUS RESTARTS AGE @@ -207,15 +227,27 @@ web-1 1/1 Running 0 34s ``` 파드의 호스트네임과 클러스터 내부 DNS 엔트리를 보기 위해 -`kubectl exec`과 `kubectl run`을 이용하자. +`kubectl exec`과 `kubectl run`을 이용하자. 먼저, 파드의 호스트네임을 확인한다. ```shell for i in 0 1; do kubectl exec web-$i -- sh -c 'hostname'; done +``` +``` web-0 web-1 - +``` +그리고 다음을 실행한다. +``` kubectl run -i --tty --image busybox:1.28 dns-test --restart=Never --rm /bin/sh +``` +이 명령으로 새로운 셸이 시작된다. +새 셸에서 다음을 실행한다. +```shell +# dns-test 컨테이너 셸에서 이것을 실행한다. nslookup web-0.nginx +``` +출력 결과는 다음과 비슷하다. +``` Server: 10.0.0.10 Address 1: 10.0.0.10 kube-dns.kube-system.svc.cluster.local @@ -230,6 +262,8 @@ Name: web-1.nginx Address 1: 10.244.2.8 ``` +(이제 `exit` 명령으로 컨테이너 셸을 종료한다.) + 파드의 순번, 호스트네임, SRV 레코드와 A 레코드이름은 변경되지 않지만 파드의 IP 주소는 변경될 수 있다. 이는 튜토리얼에서 사용하는 클러스터나 다른 클러스터에도 동일하다. 따라서 다른 애플리케이션이 IP 주소로 @@ -255,12 +289,20 @@ Running과 Ready 상태의 모든 파드들을 ```shell kubectl get pvc -l app=nginx +``` +출력 결과는 다음과 비슷하다. +``` NAME STATUS VOLUME CAPACITY ACCESSMODES AGE www-web-0 Bound pvc-15c268c7-b507-11e6-932f-42010a800002 1Gi RWO 48s www-web-1 Bound pvc-15c79307-b507-11e6-932f-42010a800002 1Gi RWO 48s ``` -스테이트풀셋 컨트롤러는 2개의 [퍼시스턴트볼륨](/ko/docs/concepts/storage/persistent-volumes/)에 -묶인 2개의 퍼시스턴트볼륨클레임을 생성했다. 
본 튜토리얼에서 사용되는 클러스터는 퍼시스턴트볼륨을 동적으로 + +스테이트풀셋 컨트롤러는 2개의 +{{< glossary_tooltip text="퍼시스턴트볼륨" term_id="persistent-volume" >}}에 +묶인 2개의 +{{< glossary_tooltip text="퍼시스턴트볼륨클레임" term_id="persistent-volume-claim" >}}을 생성했다. + +본 튜토리얼에서 사용되는 클러스터는 퍼시스턴트볼륨을 동적으로 프로비저닝하도록 설정되었으므로 생성된 퍼시스턴트볼륨도 자동으로 묶인다. NGINX 웹서버는 기본 색인 파일로 @@ -272,23 +314,23 @@ NGINX 웹서버는 기본 색인 파일로 NGINX 웹서버가 해당 호스트네임을 제공하는지 확인해보자. ```shell -for i in 0 1; do kubectl exec web-$i -- sh -c 'echo $(hostname) > /usr/share/nginx/html/index.html'; done +for i in 0 1; do kubectl exec "web-$i" -- sh -c 'echo $(hostname) > /usr/share/nginx/html/index.html'; done -for i in 0 1; do kubectl exec -it web-$i -- curl localhost; done +for i in 0 1; do kubectl exec -it "web-$i" -- curl localhost; done +``` +``` web-0 web-1 ``` {{< note >}} -위에 curl 명령어로 403 Forbidden 아닌 응답을 보려면 -`volumeMounts`로 마운트된 디렉터리의 퍼미션을 수정해야 한다 +위에 curl 명령어로 **403 Forbidden** 아닌 응답을 보려면 +다음을 실행해서 `volumeMounts`로 마운트된 디렉터리의 퍼미션을 수정해야 한다 ([hostPath 볼륨을 사용할 때에 버그](https://github.com/kubernetes/kubernetes/issues/2630)로 인함). -```shell -for i in 0 1; do kubectl exec web-$i -- chmod 755 /usr/share/nginx/html; done -``` +`for i in 0 1; do kubectl exec web-$i -- chmod 755 /usr/share/nginx/html; done` -위에 curl 명령을 재시도하기 전에 +위에 `curl` 명령을 재시도하기 전에 위 명령을 실행해야 한다. {{< /note >}} 첫째 터미널에서 스테이트풀셋의 파드를 감시하자. @@ -301,6 +343,8 @@ kubectl get pod -w -l app=nginx ```shell kubectl delete pod -l app=nginx +``` +``` pod "web-0" deleted pod "web-1" deleted ``` @@ -309,6 +353,8 @@ pod "web-1" deleted ```shell kubectl get pod -w -l app=nginx +``` +``` NAME READY STATUS RESTARTS AGE web-0 0/1 ContainerCreating 0 0s NAME READY STATUS RESTARTS AGE @@ -322,7 +368,9 @@ web-1 1/1 Running 0 34s 웹서버에서 자신의 호스트네임을 계속 제공하는지 확인하자. ``` -for i in 0 1; do kubectl exec -it web-$i -- curl localhost; done +for i in 0 1; do kubectl exec -i -t "web-$i" -- curl http://localhost/; done +``` +``` web-0 web-1 ``` @@ -334,6 +382,7 @@ web-1 각각의 퍼시스턴트볼륨은 적절하게 마운트된다. ## 스테이트풀셋 스케일링 + 스테이트풀셋을 스케일링하는 것은 레플리카 개수를 늘리거나 줄이는 것을 의미한다. 이것은 `replicas` 필드를 갱신하여 이뤄진다. [`kubectl scale`](/docs/reference/generated/kubectl/kubectl-commands/#scale)이나 [`kubectl patch`](/docs/reference/generated/kubectl/kubectl-commands/#patch)을 @@ -352,6 +401,8 @@ kubectl get pods -w -l app=nginx ```shell kubectl scale sts web --replicas=5 +``` +``` statefulset.apps/web scaled ``` @@ -360,6 +411,8 @@ statefulset.apps/web scaled ```shell kubectl get pods -w -l app=nginx +``` +``` NAME READY STATUS RESTARTS AGE web-0 1/1 Running 0 2h web-1 1/1 Running 0 2h @@ -392,18 +445,22 @@ web-4 1/1 Running 0 19s kubectl get pods -w -l app=nginx ``` -다른 터미널에서 `kubectl patch`으로 스테이트풀셋을 뒤로 - 3개의 레플리카로 스케일링하자. +다른 터미널에서 `kubectl patch`으로 스테이트풀셋을 다시 +3개의 레플리카로 스케일링하자. ```shell kubectl patch sts web -p '{"spec":{"replicas":3}}' +``` +``` statefulset.apps/web patched ``` `web-4`와 `web-3`이 Terminating으로 전환되기까지 기다리자. 
-``` +```shell kubectl get pods -w -l app=nginx +``` +``` NAME READY STATUS RESTARTS AGE web-0 1/1 Running 0 3h web-1 1/1 Running 0 3h @@ -428,6 +485,8 @@ web-3 1/1 Terminating 0 42s ```shell kubectl get pvc -l app=nginx +``` +``` NAME STATUS VOLUME CAPACITY ACCESSMODES AGE www-web-0 Bound pvc-15c268c7-b507-11e6-932f-42010a800002 1Gi RWO 13h www-web-1 Bound pvc-15c79307-b507-11e6-932f-42010a800002 1Gi RWO 13h @@ -459,6 +518,8 @@ www-web-4 Bound pvc-e11bb5f8-b508-11e6-932f-42010a800002 1Gi RWO ```shell kubectl patch statefulset web -p '{"spec":{"updateStrategy":{"type":"RollingUpdate"}}}' +``` +``` statefulset.apps/web patched ``` @@ -467,13 +528,18 @@ statefulset.apps/web patched ```shell kubectl patch statefulset web --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"gcr.io/google_containers/nginx-slim:0.8"}]' +``` +``` statefulset.apps/web patched ``` 다른 터미널창에서 스테이트풀셋의 파드를 감시하자. ```shell -kubectl get po -l app=nginx -w +kubectl get pod -l app=nginx -w +``` +출력 결과는 다음과 비슷하다. +``` NAME READY STATUS RESTARTS AGE web-0 1/1 Running 0 7m web-1 1/1 Running 0 7m @@ -512,17 +578,20 @@ web-0 1/1 Running 0 10s 이 스테이트풀셋 컨트롤러는 각 파드를 종료시키고 다음 파드를 업데이트하기 전에 그것이 Running과 Ready 상태로 전환될 때까지 기다린다. 알아둘 것은 비록 스테이트풀셋 컨트롤러에서 이전 파드가 Running과 Ready 상태가 되기까지 -다음 파드를 업데이트하지 않아도 현재 버전으로 파드를 업데이트하다 실패하면 복원한다는 것이다. +다음 파드를 업데이트하지 않아도 현재 버전으로 파드를 업데이트하다 실패하면 +복원한다는 것이다. + 업데이트를 이미 받은 파드는 업데이트된 버전으로 복원되고 아직 업데이트를 받지 못한 파드는 -이전 버전으로 복원한다. -이런 식으로 컨트롤러는 간헐적인 오류가 발생해도 +이전 버전으로 복원한다. 이런 식으로 컨트롤러는 간헐적인 오류가 발생해도 애플리케이션을 계속 건강하게 유지하고 업데이트도 일관되게 유지하려 한다. 컨테이너 이미지를 살펴보기 위해 파드를 가져오자. ```shell -for p in 0 1 2; do kubectl get po web-$p --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'; echo; done +for p in 0 1 2; do kubectl get pod "web-$p" --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'; echo; done +``` +``` k8s.gcr.io/nginx-slim:0.8 k8s.gcr.io/nginx-slim:0.8 k8s.gcr.io/nginx-slim:0.8 @@ -531,10 +600,13 @@ k8s.gcr.io/nginx-slim:0.8 스테이트풀셋의 모든 파드가 지금은 이전 컨테이너 이미지를 실행 중이이다. -**팁** 롤링 업데이트 상황을 살펴보기 위해 `kubectl rollout status sts/` +{{< note >}} +스테이트풀셋의 롤링 업데이트 상황을 살펴보기 위해 `kubectl rollout status sts/` 명령어도 사용할 수 있다. +{{< /note >}} #### 단계적으로 업데이트 하기 {#staging-an-update} + `RollingUpdate` 업데이트 전략의 파라미터인 `partition`를 이용하여 스테이트풀셋의 단계적으로 업데이트할 수 있다. 단계적 업데이트는 스테이트풀셋의 모든 파드를 현재 버전으로 유지하면서 @@ -544,6 +616,8 @@ k8s.gcr.io/nginx-slim:0.8 ```shell kubectl patch statefulset web -p '{"spec":{"updateStrategy":{"type":"RollingUpdate","rollingUpdate":{"partition":3}}}}' +``` +``` statefulset.apps/web patched ``` @@ -551,20 +625,26 @@ statefulset.apps/web patched ```shell kubectl patch statefulset web --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"k8s.gcr.io/nginx-slim:0.7"}]' +``` +``` statefulset.apps/web patched ``` 스테이트풀셋의 파드를 삭제하자. ```shell -kubectl delete po web-2 +kubectl delete pod web-2 +``` +``` pod "web-2" deleted ``` 파드가 Running과 Ready 상태가 되기까지 기다리자. ```shell -kubectl get po -l app=nginx -w +kubectl get pod -l app=nginx -w +``` +``` NAME READY STATUS RESTARTS AGE web-0 1/1 Running 0 4m web-1 1/1 Running 0 4m @@ -572,12 +652,13 @@ web-2 0/1 ContainerCreating 0 11s web-2 1/1 Running 0 18s ``` -파드의 컨테이너를 가져오자. +파드의 컨테이너 이미지를 가져오자. 
```shell -kubectl get po web-2 --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}' +kubectl get pod web-2 --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}' +``` +``` k8s.gcr.io/nginx-slim:0.8 - ``` 비록 업데이트 전략이 `RollingUpdate`이지만 스테이트풀셋은 @@ -586,6 +667,7 @@ k8s.gcr.io/nginx-slim:0.8 `파티션`보다 작기 때문이다. #### 카나리(Canary) 롤링 아웃 + [위에서](#staging-an-update) 지정한 `partition`값을 차감시키면 변경사항을 테스트하기 위해 카나리 롤아웃을 할 수 있다. @@ -593,13 +675,17 @@ k8s.gcr.io/nginx-slim:0.8 ```shell kubectl patch statefulset web -p '{"spec":{"updateStrategy":{"type":"RollingUpdate","rollingUpdate":{"partition":2}}}}' +``` +``` statefulset.apps/web patched ``` `web-2` 파드가 Running과 Ready 상태가 되기까지 기다리자. ```shell -kubectl get po -l app=nginx -w +kubectl get pod -l app=nginx -w +``` +``` NAME READY STATUS RESTARTS AGE web-0 1/1 Running 0 4m web-1 1/1 Running 0 4m @@ -611,6 +697,8 @@ web-2 1/1 Running 0 18s ```shell kubectl get po web-2 --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}' +``` +``` k8s.gcr.io/nginx-slim:0.7 ``` @@ -622,14 +710,19 @@ k8s.gcr.io/nginx-slim:0.7 `web-1` 파드를 삭제하자. ```shell -kubectl delete po web-1 +kubectl delete pod web-1 +``` +``` pod "web-1" deleted ``` `web-1` 파드가 Running과 Ready 상태가 되기까지 기다리자. ```shell -kubectl get po -l app=nginx -w +kubectl get pod -l app=nginx -w +``` +출력 결과는 다음과 비슷하다. +``` NAME READY STATUS RESTARTS AGE web-0 1/1 Running 0 6m web-1 0/1 Terminating 0 6m @@ -643,12 +736,13 @@ web-1 0/1 ContainerCreating 0 0s web-1 1/1 Running 0 18s ``` -`web-1` 파드의 컨테이너를 가져오자. +`web-1` 파드의 컨테이너 이미지를 가져오자. ```shell -kubectl get po web-1 --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}' +kubectl get pod web-1 --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}' +``` +``` k8s.gcr.io/nginx-slim:0.8 - ``` `web-1` 는 원래 환경설정으로 복원되었는데 @@ -658,6 +752,7 @@ k8s.gcr.io/nginx-slim:0.8 종료되어 원래 환경설정으로 복원된다. #### 단계적 롤아웃 + [카나리 롤아웃](#카나리-canary-롤링-아웃)에서 했던 방법과 비슷하게 분할된 롤링 업데이트를 이용하여 단계적 롤아웃(e.g. 선형, 기하 또는 지수적 롤아웃)을 수행할 수 있다. 단계적 롤아웃을 수행하려면 @@ -668,13 +763,18 @@ partition은 현재 `2`이다. partition을 `0`으로 바꾸자. ```shell kubectl patch statefulset web -p '{"spec":{"updateStrategy":{"type":"RollingUpdate","rollingUpdate":{"partition":0}}}}' +``` +``` statefulset.apps/web patched ``` 스테이트풀셋의 모든 파드가 Running과 Ready 상태가 되기까지 기다리자. ```shell -kubectl get po -l app=nginx -w +kubectl get pod -l app=nginx -w +``` +출력 결과는 다음과 비슷하다. +``` NAME READY STATUS RESTARTS AGE web-0 1/1 Running 0 3m web-1 0/1 ContainerCreating 0 11s @@ -692,17 +792,19 @@ web-0 0/1 ContainerCreating 0 0s web-0 1/1 Running 0 3s ``` -파드의 컨테이너를 가져오자. +스테이트풀셋에 있는 파드의 컨테이너 이미지 상세 정보를 가져오자. ```shell -for p in 0 1 2; do kubectl get po web-$p --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'; echo; done +for p in 0 1 2; do kubectl get pod "web-$p" --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'; echo; done +``` +``` k8s.gcr.io/nginx-slim:0.7 k8s.gcr.io/nginx-slim:0.7 k8s.gcr.io/nginx-slim:0.7 ``` -`partition`을 `0`으로 이동하여 스테이트풀셋 컨트롤러에서 계속해서 +`partition`을 `0`으로 이동하여 스테이트풀셋에서 계속해서 업데이트 처리를 하도록 허용하였다. 
### 삭제 시 동작 @@ -733,6 +835,8 @@ kubectl get pods -w -l app=nginx ```shell kubectl delete statefulset web --cascade=false +``` +``` statefulset.apps "web" deleted ``` @@ -740,6 +844,8 @@ statefulset.apps "web" deleted ```shell kubectl get pods -l app=nginx +``` +``` NAME READY STATUS RESTARTS AGE web-0 1/1 Running 0 6m web-1 1/1 Running 0 7m @@ -751,6 +857,8 @@ web-2 1/1 Running 0 5m ```shell kubectl delete pod web-0 +``` +``` pod "web-0" deleted ``` @@ -758,6 +866,8 @@ pod "web-0" deleted ```shell kubectl get pods -l app=nginx +``` +``` NAME READY STATUS RESTARTS AGE web-1 1/1 Running 0 10m web-2 1/1 Running 0 7m @@ -777,17 +887,21 @@ kubectl get pods -w -l app=nginx ```shell kubectl apply -f web.yaml +``` +``` statefulset.apps/web created service/nginx unchanged ``` 이 에러는 무시하자. 이것은 다만 해당 서비스가 있더라도 -nginx 헤드리스 서비스를 생성하려고 했음을 뜻한다. +_nginx_ 헤드리스 서비스를 생성하려고 했음을 뜻한다. 첫째 터미널에서 실행 중인 `kubectl get` 명령어의 출력을 살펴보자. ```shell kubectl get pods -w -l app=nginx +``` +``` NAME READY STATUS RESTARTS AGE web-1 1/1 Running 0 16m web-2 1/1 Running 0 2m @@ -813,7 +927,9 @@ web-2 0/1 Terminating 0 3m 다른 관점으로 살펴보자. ```shell -for i in 0 1; do kubectl exec -it web-$i -- curl localhost; done +for i in 0 1; do kubectl exec -i -t "web-$i" -- curl http://localhost/; done +``` +``` web-0 web-1 ``` @@ -837,6 +953,8 @@ kubectl get pods -w -l app=nginx ```shell kubectl delete statefulset web +``` +``` statefulset.apps "web" deleted ``` 첫째 터미널에서 실행 중인 `kubectl get` 명령어의 출력을 살펴보고 @@ -844,6 +962,8 @@ statefulset.apps "web" deleted ```shell kubectl get pods -w -l app=nginx +``` +``` NAME READY STATUS RESTARTS AGE web-0 1/1 Running 0 11m web-1 1/1 Running 0 27m @@ -864,12 +984,17 @@ web-1 0/1 Terminating 0 29m 스테이트풀 컨트롤러는 이전 파드가 완전히 종료되기까지 기다린다. -스테이트풀셋과 그 파드를 종속적으로 삭제하는 중에 연관된 헤드리스 서비스를 -삭제하지 않음을 주의하자. +{{< note >}} +종속적 삭제는 파드와 함께 스테이트풀셋을 제거하지만, +스테이트풀셋과 관련된 헤드리스 서비스를 삭제하지 않는다. 꼭 `nginx` 서비스를 수동으로 삭제해라. +{{< /note >}} + ```shell kubectl delete service nginx +``` +``` service "nginx" deleted ``` @@ -877,6 +1002,8 @@ service "nginx" deleted ```shell kubectl apply -f web.yaml +``` +``` service/nginx created statefulset.apps/web created ``` @@ -885,22 +1012,30 @@ statefulset.apps/web created `index.html` 파일 내용을 검색하자. ```shell -for i in 0 1; do kubectl exec -it web-$i -- curl localhost; done +for i in 0 1; do kubectl exec -i -t "web-$i" -- curl http://localhost/; done +``` +``` web-0 web-1 ``` 스테이트풀셋과 그 내부의 모든 파드를 삭제했지만 퍼시스턴트볼륨이 마운트된 채로 -다시 생성되고 `web-0`과 `web-1`은 여전히 +다시 생성되고 `web-0`과 `web-1`은 계속 각 호스트네임을 제공한다. -최종적으로 `web` 스테이트풀셋과`nginx` 서비스를 삭제한다. +최종적으로 `web` 스테이트풀셋을 삭제한다. ```shell kubectl delete service nginx +``` +``` service "nginx" deleted - +``` +그리고 `nginx` 서비스를 삭제한다. +```shell kubectl delete statefulset web +``` +``` statefulset "web" deleted ``` @@ -934,13 +1069,15 @@ statefulset "web" deleted 터미널에서 스테이트풀셋의 파드를 감시하자. ```shell -kubectl get po -l app=nginx -w +kubectl get pod -l app=nginx -w ``` 다른 터미널에서 매니페스트 안에 스테이트풀셋과 서비스를 생성하자. ```shell kubectl apply -f web-parallel.yaml +``` +``` service/nginx created statefulset.apps/web created ``` @@ -948,7 +1085,9 @@ statefulset.apps/web created 첫째 터미널에서 실행했던 `kubectl get` 명령어의 출력을 살펴보자. ```shell -kubectl get po -l app=nginx -w +kubectl get pod -l app=nginx -w +``` +``` NAME READY STATUS RESTARTS AGE web-0 0/1 Pending 0 0s web-0 0/1 Pending 0 0s @@ -967,12 +1106,14 @@ web-1 1/1 Running 0 10s ```shell kubectl scale statefulset/web --replicas=4 +``` +``` statefulset.apps/web scaled ``` `kubectl get` 명령어를 실행 중인 터미널의 출력을 살펴보자. 
-```shell +``` web-3 0/1 Pending 0 0s web-3 0/1 Pending 0 0s web-3 0/1 Pending 0 7s @@ -982,18 +1123,24 @@ web-3 1/1 Running 0 26s ``` -스테이트풀 컨트롤러는 두 개의 새 파드를 시작하였다. +스테이트풀셋은 두 개의 새 파드를 시작하였다. 두 번째 것을 런칭하기 위해 먼저 런칭한 것이 Running과 Ready 상태가 될 때까지 기다리지 않는다. -이 터미널을 열어 놓고 다른 터미널에서 `web` 스테이트풀셋을 삭제하자. +## {{% heading "cleanup" %}} + +정리의 일환으로 `kubectl` 명령을 실행할 준비가 된 두 개의 터미널이 열려 +있어야 한다. ```shell kubectl delete sts web +# sts는 statefulset의 약자이다. ``` -다시 한번 다른 터미널에서 실행 중인 `kubectl get`명령의 출력을 확인해보자. - +`kubectl get` 명령으로 해당 파드가 삭제된 것을 확인할 수 있다. ```shell +kubectl get pod -l app=nginx -w +``` +``` web-3 1/1 Terminating 0 9m web-2 1/1 Terminating 0 9m web-3 1/1 Terminating 0 9m @@ -1019,7 +1166,7 @@ web-3 0/1 Terminating 0 9m web-3 0/1 Terminating 0 9m ``` -스테이트풀 컨트롤러는 모든 파드를 동시에 삭제한다. 파드를 삭제하기 전에 +삭제하는 동안, 스테이트풀셋은 모든 파드를 동시에 삭제한다. 해당 파드를 삭제하기 전에 그 파드의 순서상 후계자를 기다리지 않는다. `kubectl get` 명령어가 실행된 터미널을 닫고 @@ -1030,12 +1177,11 @@ kubectl delete svc nginx ``` -## {{% heading "cleanup" %}} - +{{< note >}} 이 튜토리얼에서 사용된 퍼시턴트볼륨을 위한 -퍼시스턴트 스토리지 미디어를 삭제해야 한다. -모든 스토리지를 반환하도록 환경, 스토리지 설정과 -프로비저닝 방법에 따른 단계를 따르자. - +퍼시스턴트 스토리지 미디어도 삭제해야 한다. +모든 스토리지를 반환하도록 환경, 스토리지 설정과 +프로비저닝 방법에 따른 단계를 따르자. +{{< /note >}} diff --git a/content/ko/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume.md b/content/ko/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume.md index 323a087408988..1e0aefd4a3601 100644 --- a/content/ko/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume.md +++ b/content/ko/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume.md @@ -190,8 +190,8 @@ kubectl apply -k ./ 응답은 아래와 비슷해야 한다. ``` - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - wordpress ClusterIP 10.0.0.89 80:32406/TCP 4m + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + wordpress LoadBalancer 10.0.0.89 80:32406/TCP 4m ``` {{< note >}} @@ -237,7 +237,7 @@ kubectl apply -k ./ * [인트로스펙션과 디버깅](/docs/tasks/debug-application-cluster/debug-application-introspection/)를 알아보자. -* [잡](/ko/docs/concepts/workloads/controllers/jobs-run-to-completion/)를 알아보자. +* [잡](/ko/docs/concepts/workloads/controllers/job/)를 알아보자. * [포트 포워딩](/ko/docs/tasks/access-application-cluster/port-forward-access-application-cluster/)를 알아보자. * 어떻게 [컨테이너에서 셸을 사용하는지](/docs/tasks/debug-application-cluster/get-shell-running-container/)를 알아보자. diff --git a/content/ko/docs/tutorials/stateful-application/zookeeper.md b/content/ko/docs/tutorials/stateful-application/zookeeper.md index bf4c2e179ccee..490f6ff18d584 100644 --- a/content/ko/docs/tutorials/stateful-application/zookeeper.md +++ b/content/ko/docs/tutorials/stateful-application/zookeeper.md @@ -43,7 +43,7 @@ weight: 40 - 어떻게 지속적해서 컨피그맵을 이용해서 앙상블을 설정하는가. - 어떻게 ZooKeeper 서버 디플로이먼트를 앙상블 안에서 퍼뜨리는가. - 어떻게 파드디스룹션버짓을 이용하여 계획된 점검 기간 동안 서비스 가용성을 보장하는가. - + @@ -132,7 +132,7 @@ zk-2 1/1 Running 0 40s for i in 0 1 2; do kubectl exec zk-$i -- hostname; done ``` -스테이트풀셋 컨트롤러는 각 순번 인덱스에 기초하여 각 파드에 고유한 호스트네임을 부여한다. 각 호스트네임은 `<스테이트풀셋 이름>-<순번 인덱스>` 형식을 취한다. `zk` 스테이트풀셋의 `replicas` 필드는 `3`으로 설정되었기 때문에, 그 스테이트풀셋 컨트롤러는 3개 파드의 호스트네임을 `zk-0`, `zk-1`, +스테이트풀셋 컨트롤러는 각 순번 인덱스에 기초하여 각 파드에 고유한 호스트네임을 부여한다. 각 호스트네임은 `<스테이트풀셋 이름>-<순번 인덱스>` 형식을 취한다. `zk` 스테이트풀셋의 `replicas` 필드는 `3`으로 설정되었기 때문에, 그 스테이트풀셋 컨트롤러는 3개 파드의 호스트네임을 `zk-0`, `zk-1`, `zk-2`로 정한다. ```shell @@ -183,9 +183,9 @@ ZooKeeper는 그것의 애플리케이션 환경설정을 `zoo.cfg` 파일에 kubectl exec zk-0 -- cat /opt/zookeeper/conf/zoo.cfg ``` -아래 파일의 `server.1`, `server.2`, `server.3` 속성에서 -`1`, `2`, `3`은 ZooKeeper 서버의 `myid` 파일에 구분자와 -연관된다. 
+아래 파일의 `server.1`, `server.2`, `server.3` 속성에서 +`1`, `2`, `3`은 ZooKeeper 서버의 `myid` 파일에 구분자와 +연관된다. 이들은 `zk` 스테이트풀셋의 파드의 FQDNS을 설정한다. ```shell @@ -302,7 +302,7 @@ ZooKeeper는 모든 항목을 내구성있는 WAL에 커밋하고 메모리 상 복제된 상태 머신을 이루는 합의 프로토콜에서 이용하는 일반적인 기법이다. -[`kubectl delete`](/docs/reference/generated/kubectl/kubectl-commands/#delete) 명령을 이용하여 +[`kubectl delete`](/docs/reference/generated/kubectl/kubectl-commands/#delete) 명령을 이용하여 `zk` 스테이트풀셋을 삭제하자. ```shell @@ -448,7 +448,7 @@ ZooKeeper의 서버 디렉터리에 마운트한다. [리더 선출 촉진](#리더-선출-촉진)과 [합의 달성](#합의-달성) 섹션에서 알렸듯이, ZooKeeper 앙상블에 서버는 리더 선출과 쿼럼을 구성하기 위한 일관된 설정이 필요하다. -또한 Zab 프로토콜의 일관된 설정도 +또한 Zab 프로토콜의 일관된 설정도 네트워크에 걸쳐 올바르게 동작하기 위해서 필요하다. 이 예시에서는 메니페스트에 구성을 직접 포함시켜서 일관된 구성을 달성한다. @@ -496,7 +496,7 @@ ZooKeeper는 [Log4j](http://logging.apache.org/log4j/2.x/)를 이용하며 kubectl exec zk-0 cat /usr/etc/zookeeper/log4j.properties ``` -아래 로깅 구성은 ZooKeeper가 모든 로그를 +아래 로깅 구성은 ZooKeeper가 모든 로그를 표준 출력 스트림으로 처리하게 한다. ```shell @@ -544,7 +544,7 @@ kubectl logs zk-0 --tail 20 쿠버네티스는 더 강력하지만 조금 복잡한 로그 통합을 [스택드라이버](/docs/tasks/debug-application-cluster/logging-stackdriver/)와 -[Elasticsearch와 Kibana](/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana/)를 지원한다. +[Elasticsearch와 Kibana](/ko/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana/)를 지원한다. 클러스터 수준의 로그 적재(ship)와 통합을 위해서는 로그 순환과 적재를 위해 [사이드카](https://kubernetes.io/blog/2015/06/the-distributed-system-toolkit-patterns) 컨테이너를 배포하는 것을 고려한다. @@ -564,7 +564,7 @@ securityContext: fsGroup: 1000 ``` -파드 컨테이너에서 UID 1000은 ZooKeeper 사용자이며, GID 1000은 +파드 컨테이너에서 UID 1000은 ZooKeeper 사용자이며, GID 1000은 ZooKeeper의 그룹에 해당한다. `zk-0` 파드에서 프로세스 정보를 얻어오자. @@ -866,7 +866,7 @@ kubernetes-node-2g2d kubectl get nodes ``` -[`kubectl cordon`](/docs/reference/generated/kubectl/kubectl-commands/#cordon)을 이용하여 +[`kubectl cordon`](/docs/reference/generated/kubectl/kubectl-commands/#cordon)을 이용하여 클러스터 내에 4개 노드를 제외하고 다른 모든 노드를 통제해보자. ```shell @@ -1093,7 +1093,3 @@ node "kubernetes-node-ixsl" uncordoned 퍼시스턴트 스토리지 미디어를 삭제하자. 귀하의 환경과 스토리지 구성과 프로비저닝 방법에서 필요한 절차를 따라서 모든 스토리지가 재확보되도록 하자. - - - - diff --git a/content/ko/docs/tutorials/stateless-application/expose-external-ip-address.md b/content/ko/docs/tutorials/stateless-application/expose-external-ip-address.md index 291cceba264bb..719c998366ba3 100644 --- a/content/ko/docs/tutorials/stateless-application/expose-external-ip-address.md +++ b/content/ko/docs/tutorials/stateless-application/expose-external-ip-address.md @@ -15,7 +15,7 @@ weight: 10 ## {{% heading "prerequisites" %}} - * [kubectl](/docs/tasks/tools/install-kubectl/)을 설치한다. + * [kubectl](/ko/docs/tasks/tools/install-kubectl/)을 설치한다. * Google Kubernetes Engine 또는 Amazon Web Services와 같은 클라우드 공급자를 사용하여 쿠버네티스 클러스터를 생성한다. @@ -52,10 +52,10 @@ kubectl apply -f https://k8s.io/examples/service/load-balancer-example.yaml 위의 명령어는 - [디플로이먼트](/ko/docs/concepts/workloads/controllers/deployment/) + [디플로이먼트(Deployment)](/ko/docs/concepts/workloads/controllers/deployment/) 오브젝트와 관련된 - [레플리카 셋](/ko/docs/concepts/workloads/controllers/replicaset/) - 오브젝트를 생성한다. 레플리카 셋은 다섯 개의 + [레플리카셋(ReplicaSet)](/ko/docs/concepts/workloads/controllers/replicaset/) + 오브젝트를 생성한다. 레플리카셋은 다섯 개의 [파드](/ko/docs/concepts/workloads/pods/pod/)가 있으며, 각 파드는 Hello World 애플리케이션을 실행한다. @@ -64,7 +64,7 @@ kubectl apply -f https://k8s.io/examples/service/load-balancer-example.yaml kubectl get deployments hello-world kubectl describe deployments hello-world -1. 레플리카 셋 오브젝트에 대한 정보를 확인한다. +1. 레플리카셋 오브젝트에 대한 정보를 확인한다. 
kubectl get replicasets kubectl describe replicasets @@ -84,7 +84,7 @@ kubectl apply -f https://k8s.io/examples/service/load-balancer-example.yaml {{< note >}} - `type=LoadBalancer` 서비스는 이 예시에서 다루지 않은 외부 클라우드 공급자가 지원하며, 자세한 내용은 [이 페이지](/ko/docs/concepts/services-networking/service/#loadbalancer를 참조한다. + `type=LoadBalancer` 서비스는 이 예시에서 다루지 않은 외부 클라우드 공급자가 지원하며, 자세한 내용은 [이 페이지](/ko/docs/concepts/services-networking/service/#loadbalancer)를 참조한다. {{< /note >}} @@ -160,7 +160,7 @@ kubectl apply -f https://k8s.io/examples/service/load-balancer-example.yaml kubectl delete services my-service -Hello World 애플리케이션을 실행 중인 디플로이먼트, 레플리카 셋, 파드를 삭제하려면, +Hello World 애플리케이션을 실행 중인 디플로이먼트, 레플리카셋, 파드를 삭제하려면, 아래의 명령어를 입력한다. kubectl delete deployment hello-world @@ -173,4 +173,3 @@ Hello World 애플리케이션을 실행 중인 디플로이먼트, 레플리카 [애플리케이션과 서비스 연결하기](/ko/docs/concepts/services-networking/connect-applications-service/)에 대해 더 배워 본다. - diff --git a/content/ko/docs/tutorials/stateless-application/guestbook-logs-metrics-with-elk.md b/content/ko/docs/tutorials/stateless-application/guestbook-logs-metrics-with-elk.md index cd8ff4e03f5b7..9d5cf7713bda7 100644 --- a/content/ko/docs/tutorials/stateless-application/guestbook-logs-metrics-with-elk.md +++ b/content/ko/docs/tutorials/stateless-application/guestbook-logs-metrics-with-elk.md @@ -402,6 +402,6 @@ kubectl scale --replicas=3 deployment/frontend ## {{% heading "whatsnext" %}} * [리소스 모니터링 도구](/ko/docs/tasks/debug-application-cluster/resource-usage-monitoring/)를 공부한다. -* [로깅 아키텍처](/docs/concepts/cluster-administration/logging/)를 더 읽어본다. +* [로깅 아키텍처](/ko/docs/concepts/cluster-administration/logging/)를 더 읽어본다. * [애플리케이션 검사 및 디버깅](/ko/docs/tasks/debug-application-cluster/)을 더 읽어본다. * [애플리케이션 문제 해결](/ko/docs/tasks/debug-application-cluster/resource-usage-monitoring/)을 더 읽어본다. diff --git a/content/ko/docs/tutorials/stateless-application/guestbook.md b/content/ko/docs/tutorials/stateless-application/guestbook.md index 422ed877e21cd..cce67800a6d68 100644 --- a/content/ko/docs/tutorials/stateless-application/guestbook.md +++ b/content/ko/docs/tutorials/stateless-application/guestbook.md @@ -47,7 +47,7 @@ card: {{< codenew file="application/guestbook/redis-master-deployment.yaml" >}} -1. 매니페스트 파일을 다운로드한 디렉토리에서 터미널 창을 시작한다. +1. 매니페스트 파일을 다운로드한 디렉터리에서 터미널 창을 시작한다. 1. `redis-master-deployment.yaml` 파일을 통해 Redis 마스터의 디플로이먼트에 적용한다. ```shell @@ -218,7 +218,7 @@ Google Compute Engine 또는 Google Kubernetes Engine과 같은 일부 클라우 1. 서비스의 목록을 질의하여 프론트엔드 서비스가 실행 중인지 확인한다. ```shell - kubectl get services + kubectl get services ``` 결과는 아래와 같은 형태로 나타난다. @@ -320,7 +320,7 @@ Google Compute Engine 또는 Google Kubernetes Engine과 같은 일부 클라우 redis-slave-2005841000-fpvqc 1/1 Running 0 1h redis-slave-2005841000-phfv9 1/1 Running 0 1h ``` - + ## {{% heading "cleanup" %}} @@ -346,7 +346,7 @@ Google Compute Engine 또는 Google Kubernetes Engine과 같은 일부 클라우 deployment.apps "frontend" deleted service "frontend" deleted ``` - + 1. 파드의 목록을 질의하여 실행 중인 파드가 없는지 확인한다. 
```shell @@ -367,5 +367,4 @@ Google Compute Engine 또는 Google Kubernetes Engine과 같은 일부 클라우 * [쿠버네티스 기초](/ko/docs/tutorials/kubernetes-basics/) 튜토리얼을 완료 * [MySQL과 Wordpress을 위한 퍼시스턴트 볼륨](/ko/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume/#visit-your-new-wordpress-blog)을 사용하여 블로그 생성하는데 쿠버네티스 이용하기 * [애플리케이션 접속](/ko/docs/concepts/services-networking/connect-applications-service/)에 대해 더 알아보기 -* [자원 관리](/docs/concepts/cluster-administration/manage-deployment/#using-labels-effectively)에 대해 더 알아보기 - +* [자원 관리](/ko/docs/concepts/cluster-administration/manage-deployment/#효과적인-레이블-사용)에 대해 더 알아보기 diff --git a/content/ko/examples/service/networking/nginx-policy.yaml b/content/ko/examples/service/networking/nginx-policy.yaml new file mode 100644 index 0000000000000..89ee9886925e7 --- /dev/null +++ b/content/ko/examples/service/networking/nginx-policy.yaml @@ -0,0 +1,13 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: access-nginx +spec: + podSelector: + matchLabels: + app: nginx + ingress: + - from: + - podSelector: + matchLabels: + access: "true" diff --git a/content/ko/partners/_index.html b/content/ko/partners/_index.html index 2ac7e6945c839..3bd6a375f2311 100644 --- a/content/ko/partners/_index.html +++ b/content/ko/partners/_index.html @@ -7,79 +7,90 @@ ---
쿠버네티스는 파트너와 협력하여 다양하게 보완하는 플랫폼을 지원하는 강력하고 활기찬 코드베이스를 만들어갑니다.

공인 쿠버네티스 서비스 공급자(Kubernetes Certified Service Providers, KCSP)
기업들이 쿠버네티스를 성공적으로 채택하도록 도와주는 풍부한 경험을 가진 노련한 서비스 공급자입니다.
KCSP에 관심이 있으신가요?

공인 쿠버네티스 배포, 호스트된 플랫폼 그리고 설치 프로그램
소프트웨어 적합성은 모든 벤더의 쿠버네티스 버전이 필요한 API를 지원하도록 보장합니다.
공인 쿠버네티스에 관심이 있으신가요?

쿠버네티스 교육 파트너(Kubernetes Training Partners, KTP)
클라우드 네이티브 기술 교육 경험이 풍부하고 노련한 교육 공급자입니다.
KTP에 관심이 있으신가요?