diff --git a/.go-version b/.go-version index 1b9335f13bc89..7bf9455f08c9b 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.20.4 \ No newline at end of file +1.20.5 diff --git a/CHANGELOG/CHANGELOG-1.27.md b/CHANGELOG/CHANGELOG-1.27.md index 72c7066501fae..4a473638f8e90 100644 --- a/CHANGELOG/CHANGELOG-1.27.md +++ b/CHANGELOG/CHANGELOG-1.27.md @@ -1,109 +1,107 @@ -- [v1.27.1](#v1271) - - [Downloads for v1.27.1](#downloads-for-v1271) +- [v1.27.2](#v1272) + - [Downloads for v1.27.2](#downloads-for-v1272) - [Source Code](#source-code) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node Binaries](#node-binaries) - [Container Images](#container-images) - - [Changelog since v1.27.0](#changelog-since-v1270) + - [Changelog since v1.27.1](#changelog-since-v1271) - [Changes by Kind](#changes-by-kind) + - [API Change](#api-change) + - [Feature](#feature) + - [Failing Test](#failing-test) - [Bug or Regression](#bug-or-regression) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake) - [Dependencies](#dependencies) - [Added](#added) - [Changed](#changed) - [Removed](#removed) -- [v1.27.0](#v1270) - - [Downloads for v1.27.0](#downloads-for-v1270) +- [v1.27.1](#v1271) + - [Downloads for v1.27.1](#downloads-for-v1271) - [Source Code](#source-code-1) - [Client Binaries](#client-binaries-1) - [Server Binaries](#server-binaries-1) - [Node Binaries](#node-binaries-1) - [Container Images](#container-images-1) - - [Changelog since v1.26.0](#changelog-since-v1260) - - [Known Issues](#known-issues) - - [The PreEnqueue extension point doesn't work for Pods going to activeQ through backoffQ](#the-preenqueue-extension-point-doesnt-work-for-pods-going-to-activeq-through-backoffq) - - [Urgent Upgrade Notes](#urgent-upgrade-notes) - - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade) + - [Changelog since v1.27.0](#changelog-since-v1270) - [Changes by Kind](#changes-by-kind-1) - - 
[Deprecation](#deprecation) - - [API Change](#api-change) - - [Feature](#feature) - - [Documentation](#documentation) - - [Failing Test](#failing-test) - [Bug or Regression](#bug-or-regression-1) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake) - [Dependencies](#dependencies-1) - [Added](#added-1) - [Changed](#changed-1) - [Removed](#removed-1) -- [v1.27.0-rc.1](#v1270-rc1) - - [Downloads for v1.27.0-rc.1](#downloads-for-v1270-rc1) +- [v1.27.0](#v1270) + - [Downloads for v1.27.0](#downloads-for-v1270) - [Source Code](#source-code-2) - [Client Binaries](#client-binaries-2) - [Server Binaries](#server-binaries-2) - [Node Binaries](#node-binaries-2) - [Container Images](#container-images-2) - - [Changelog since v1.27.0-rc.0](#changelog-since-v1270-rc0) + - [Changelog since v1.26.0](#changelog-since-v1260) + - [Known Issues](#known-issues) + - [The PreEnqueue extension point doesn't work for Pods going to activeQ through backoffQ](#the-preenqueue-extension-point-doesnt-work-for-pods-going-to-activeq-through-backoffq) + - [Urgent Upgrade Notes](#urgent-upgrade-notes) + - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade) - [Changes by Kind](#changes-by-kind-2) + - [Deprecation](#deprecation) + - [API Change](#api-change-1) - [Feature](#feature-1) + - [Documentation](#documentation) + - [Failing Test](#failing-test-1) - [Bug or Regression](#bug-or-regression-2) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-1) - [Dependencies](#dependencies-2) - [Added](#added-2) - [Changed](#changed-2) - [Removed](#removed-2) -- [v1.27.0-rc.0](#v1270-rc0) - - [Downloads for v1.27.0-rc.0](#downloads-for-v1270-rc0) +- [v1.27.0-rc.1](#v1270-rc1) + - [Downloads for v1.27.0-rc.1](#downloads-for-v1270-rc1) - [Source Code](#source-code-3) - [Client Binaries](#client-binaries-3) - [Server Binaries](#server-binaries-3) - [Node Binaries](#node-binaries-3) - [Container Images](#container-images-3) - - [Changelog since 
v1.27.0-beta.0](#changelog-since-v1270-beta0) + - [Changelog since v1.27.0-rc.0](#changelog-since-v1270-rc0) - [Changes by Kind](#changes-by-kind-3) - - [API Change](#api-change-1) - [Feature](#feature-2) - [Bug or Regression](#bug-or-regression-3) - [Dependencies](#dependencies-3) - [Added](#added-3) - [Changed](#changed-3) - [Removed](#removed-3) -- [v1.27.0-beta.0](#v1270-beta0) - - [Downloads for v1.27.0-beta.0](#downloads-for-v1270-beta0) +- [v1.27.0-rc.0](#v1270-rc0) + - [Downloads for v1.27.0-rc.0](#downloads-for-v1270-rc0) - [Source Code](#source-code-4) - [Client Binaries](#client-binaries-4) - [Server Binaries](#server-binaries-4) - [Node Binaries](#node-binaries-4) - [Container Images](#container-images-4) - - [Changelog since v1.27.0-alpha.3](#changelog-since-v1270-alpha3) - - [Urgent Upgrade Notes](#urgent-upgrade-notes-1) - - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-1) + - [Changelog since v1.27.0-beta.0](#changelog-since-v1270-beta0) - [Changes by Kind](#changes-by-kind-4) - - [Deprecation](#deprecation-1) - [API Change](#api-change-2) - [Feature](#feature-3) - - [Documentation](#documentation-1) - - [Failing Test](#failing-test-1) - [Bug or Regression](#bug-or-regression-4) - - [Other (Cleanup or Flake)](#other-cleanup-or-flake-1) - [Dependencies](#dependencies-4) - [Added](#added-4) - [Changed](#changed-4) - [Removed](#removed-4) -- [v1.27.0-alpha.3](#v1270-alpha3) - - [Downloads for v1.27.0-alpha.3](#downloads-for-v1270-alpha3) +- [v1.27.0-beta.0](#v1270-beta0) + - [Downloads for v1.27.0-beta.0](#downloads-for-v1270-beta0) - [Source Code](#source-code-5) - [Client Binaries](#client-binaries-5) - [Server Binaries](#server-binaries-5) - [Node Binaries](#node-binaries-5) - [Container Images](#container-images-5) - - [Changelog since v1.27.0-alpha.2](#changelog-since-v1270-alpha2) + - [Changelog since v1.27.0-alpha.3](#changelog-since-v1270-alpha3) + - [Urgent Upgrade 
Notes](#urgent-upgrade-notes-1) + - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade-1) - [Changes by Kind](#changes-by-kind-5) - - [Deprecation](#deprecation-2) + - [Deprecation](#deprecation-1) - [API Change](#api-change-3) - [Feature](#feature-4) - - [Documentation](#documentation-2) + - [Documentation](#documentation-1) - [Failing Test](#failing-test-2) - [Bug or Regression](#bug-or-regression-5) - [Other (Cleanup or Flake)](#other-cleanup-or-flake-2) @@ -111,46 +109,190 @@ - [Added](#added-5) - [Changed](#changed-5) - [Removed](#removed-5) -- [v1.27.0-alpha.2](#v1270-alpha2) - - [Downloads for v1.27.0-alpha.2](#downloads-for-v1270-alpha2) +- [v1.27.0-alpha.3](#v1270-alpha3) + - [Downloads for v1.27.0-alpha.3](#downloads-for-v1270-alpha3) - [Source Code](#source-code-6) - [Client Binaries](#client-binaries-6) - [Server Binaries](#server-binaries-6) - [Node Binaries](#node-binaries-6) - [Container Images](#container-images-6) - - [Changelog since v1.27.0-alpha.1](#changelog-since-v1270-alpha1) + - [Changelog since v1.27.0-alpha.2](#changelog-since-v1270-alpha2) - [Changes by Kind](#changes-by-kind-6) + - [Deprecation](#deprecation-2) - [API Change](#api-change-4) - [Feature](#feature-5) + - [Documentation](#documentation-2) + - [Failing Test](#failing-test-3) - [Bug or Regression](#bug-or-regression-6) - [Other (Cleanup or Flake)](#other-cleanup-or-flake-3) - [Dependencies](#dependencies-6) - [Added](#added-6) - [Changed](#changed-6) - [Removed](#removed-6) -- [v1.27.0-alpha.1](#v1270-alpha1) - - [Downloads for v1.27.0-alpha.1](#downloads-for-v1270-alpha1) +- [v1.27.0-alpha.2](#v1270-alpha2) + - [Downloads for v1.27.0-alpha.2](#downloads-for-v1270-alpha2) - [Source Code](#source-code-7) - [Client Binaries](#client-binaries-7) - [Server Binaries](#server-binaries-7) - [Node Binaries](#node-binaries-7) - [Container Images](#container-images-7) - - [Changelog since v1.26.0](#changelog-since-v1260-1) + - 
[Changelog since v1.27.0-alpha.1](#changelog-since-v1270-alpha1) - [Changes by Kind](#changes-by-kind-7) - - [Deprecation](#deprecation-3) - [API Change](#api-change-5) - [Feature](#feature-6) - - [Documentation](#documentation-3) - - [Failing Test](#failing-test-3) - [Bug or Regression](#bug-or-regression-7) - [Other (Cleanup or Flake)](#other-cleanup-or-flake-4) - [Dependencies](#dependencies-7) - [Added](#added-7) - [Changed](#changed-7) - [Removed](#removed-7) +- [v1.27.0-alpha.1](#v1270-alpha1) + - [Downloads for v1.27.0-alpha.1](#downloads-for-v1270-alpha1) + - [Source Code](#source-code-8) + - [Client Binaries](#client-binaries-8) + - [Server Binaries](#server-binaries-8) + - [Node Binaries](#node-binaries-8) + - [Container Images](#container-images-8) + - [Changelog since v1.26.0](#changelog-since-v1260-1) + - [Changes by Kind](#changes-by-kind-8) + - [Deprecation](#deprecation-3) + - [API Change](#api-change-6) + - [Feature](#feature-7) + - [Documentation](#documentation-3) + - [Failing Test](#failing-test-4) + - [Bug or Regression](#bug-or-regression-8) + - [Other (Cleanup or Flake)](#other-cleanup-or-flake-5) + - [Dependencies](#dependencies-8) + - [Added](#added-8) + - [Changed](#changed-8) + - [Removed](#removed-8) +# v1.27.2 + + +## Downloads for v1.27.2 + + + +### Source Code + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.27.2/kubernetes.tar.gz) | c46c9c1c4cdb0b1532630ce0e01295c7185f725e494d4fd190bae0540283c679b1c8b0a1ad1f0f5d320ddbf439e5fdd6f925700080cdf810158e1f41b8c5d9c9 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.27.2/kubernetes-src.tar.gz) | b9be38a5506071a362864661b369c71b0a02e66df0a77a2afc68040fa9634751a189e3c6c94771aee3e17b50a73228ad992a08f31cf4b322ebd7003e7676d381 + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.27.2/kubernetes-client-darwin-amd64.tar.gz) | 
7e4c0a207e505f6966999e0efb293c0e885d5975ad02f8e534b60ab1a94e0fdbcefd72c18fd536bb23e9356735098dd3fb85c6034e3ec07879f9511deb254f1b +[kubernetes-client-darwin-arm64.tar.gz](https://dl.k8s.io/v1.27.2/kubernetes-client-darwin-arm64.tar.gz) | 41a51f588a9c19d0377921b66c21d7b406244aca4fbd32c0d7dcdd9b1cf80712c8f26a1134c2ae1d57612025b943e87a7bb55ee01c22838c0deb2754cf4a43cd +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.27.2/kubernetes-client-linux-386.tar.gz) | 729310d48d34fd21805869f849492227d3a74d1feafa4969d2aa5e0336c85d51f379865eda7b20c92b2f5122094884de5947a715f2fc9b6cb32e8a4e79dcd16c +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.27.2/kubernetes-client-linux-amd64.tar.gz) | cc34cffb3ec65a1b29dc3998341c8317dd1bf34d45b230a2379b1676d4d9a600cb662cde7caa7c8253e4cf2320d40b9581f97c0a04ac81037643b4fd105c6103 +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.27.2/kubernetes-client-linux-arm.tar.gz) | b85927b9ff2f5871dac6814800c0fda43c4e69c27dd5c9d5cb3c73551c147ad2501675ffc5775cb70af4b643e88e784d034c3eec714de66c7ddf163a2ae4f500 +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.27.2/kubernetes-client-linux-arm64.tar.gz) | cdf09ad3150c702e84c22158e95f164cffcbfa5e06af65e33dafada0d0d00fd6c160f41eb72d7966b0659e49705f7197d92dc3dc7153cf907fcdf318071138bc +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.27.2/kubernetes-client-linux-ppc64le.tar.gz) | 7219c79d43cc57a0866c854183dbed2629866e4ce081b62eef6c3034094bf0d3143e9e2eb7cf819a2f49bd98566cf4ac56cc9f2989f4c49906a71e2df68767e4 +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.27.2/kubernetes-client-linux-s390x.tar.gz) | 9a5c0d13732ebf2d69f714ee953cc57f1c7ca2a27cb26333336a0da225414a96976f6a383cbe89aee80bcdd47a59dc17784acce377f656a6f959ed65f638a82d +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.27.2/kubernetes-client-windows-386.tar.gz) | 
7bad7610f5a000cf40f68451d4cf94395d43907271aa98132cb6a52eae541e25cce7f40b5dfb1b45c79da5bbf54ce49cfbcd04f819635e16d91e03c63b48b8f4 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.27.2/kubernetes-client-windows-amd64.tar.gz) | 3d1c4f023867e8289d19dba43d702d40f3a8d8583e2c436e447af43127da9b0e90b5ca4ac055c3256c92d8fcaaa3734f0a83039480b35a012aed86ecd377da59 +[kubernetes-client-windows-arm64.tar.gz](https://dl.k8s.io/v1.27.2/kubernetes-client-windows-arm64.tar.gz) | 0b08b36d4869b6b1de0314bb365ae45f85719297088f12b723a535a61f7b2c648969c12a4e1ecbd29a6deb804551815ed21c3b8ae9ed6813aa26d625723a273e + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.27.2/kubernetes-server-linux-amd64.tar.gz) | 53f1533aa8f493ebbdfb07ba59eaf971cf865c60d1ac9a5ad9f61e6d5f670e9d86e0dc70c6d3057953da2968d100de8d8bf50d5863ad2decb69c397aa6f185b9 +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.27.2/kubernetes-server-linux-arm64.tar.gz) | 66466de2b1b5ad7ce09fba95da00d3451ae13b28b89755a64cc4e18e1254c5dfed290df5f8509f312396679bcc90eec98eb84e333ea9bd206ecb8bf00eeeba71 +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.27.2/kubernetes-server-linux-ppc64le.tar.gz) | 283dd8c6391d62b1f11102ce3a252d78b1dd3268dd2b8c5f08276c9c764ced6f0f8e8056b5d302045c464efc063a81d815e6fc3f804b997770b40bc1b2a89f8c +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.27.2/kubernetes-server-linux-s390x.tar.gz) | ea30de775e794eb738a3c10c730c0e291ae1460fdaba984d4eb00bf52b552f192608e8213b382ac8161f1f13486ddf13b7f20e4f5838a1e38f08da9288c01a3c + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.27.2/kubernetes-node-linux-amd64.tar.gz) | 2a05c3ebcec8bce9ca2a7c835617f0b85dbf11a07d39c5b002b5389a45465807c437dd22d975c60e680ee34f3bc00e460d848927a8fa2c1543ec97fb66f50477 
+[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.27.2/kubernetes-node-linux-arm64.tar.gz) | 9d6c45fb54c01ac9a106d356d8c9ed1c6564f4b86bb1ba55ced628bbbf5c4fb1f78d68a15c3055680c6dff6c85a32726300d09b7a85bdcb7ba263a434b148826 +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.27.2/kubernetes-node-linux-ppc64le.tar.gz) | b029fe744619f9649e42c80ca2f0bb14ae72c934d4687100ee7f041cbcd72cc5567ae01acef9e2c7b5c579228b2af9a419bdbf2af64420754a5f6049cdd391bb +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.27.2/kubernetes-node-linux-s390x.tar.gz) | d3319d9a4a205cd1fa9da590407fe2be3b149c50e77422ff2b2f1e803c24e0d496fdc89d16c658fbef7a0bc59d1b0e295dfa5354ce3c3c5d9a6749e60e1580ee +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.27.2/kubernetes-node-windows-amd64.tar.gz) | a745fe1b46ec6c3bd27e72c2774a01fd53c27361de6ad7281c2adeaf88ab59ec725d60dc99a4de5f3579428ef4e923860a85143f14b51590ce43bfdee7a36a10 + +### Container Images + +All container images are available as manifest lists and support the described +architectures. It is also possible to pull a specific architecture directly by +adding the "-$ARCH" suffix to the container image name. 
+ +name | architectures +---- | ------------- +[registry.k8s.io/conformance:v1.27.2](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/conformance-s390x) +[registry.k8s.io/kube-apiserver:v1.27.2](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-apiserver-s390x) +[registry.k8s.io/kube-controller-manager:v1.27.2](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-controller-manager-s390x) +[registry.k8s.io/kube-proxy:v1.27.2](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-arm64), 
[ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-proxy-s390x) +[registry.k8s.io/kube-scheduler:v1.27.2](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler) | [amd64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-amd64), [arm64](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-arm64), [ppc64le](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-ppc64le), [s390x](https://console.cloud.google.com/gcr/images/k8s-artifacts-prod/us/kube-scheduler-s390x) + +## Changelog since v1.27.1 + +## Changes by Kind + +### API Change + +- Added error handling for seccomp localhost configurations that do not properly set a localhostProfile ([#117020](https://github.com/kubernetes/kubernetes/pull/117020), [@cji](https://github.com/cji)) [SIG API Machinery and Node] +- Fixed an issue where kubelet does not set case-insensitive headers for http probes. 
(#117182, @dddddai) ([#117324](https://github.com/kubernetes/kubernetes/pull/117324), [@dddddai](https://github.com/dddddai)) [SIG API Machinery, Apps and Node] +- Revised the comment about the feature-gate level for PodFailurePolicy from alpha to beta ([#117815](https://github.com/kubernetes/kubernetes/pull/117815), [@kerthcet](https://github.com/kerthcet)) [SIG Apps] + +### Feature + +- Kubernetes is now built with Go 1.20.4 ([#117773](https://github.com/kubernetes/kubernetes/pull/117773), [@xmudrii](https://github.com/xmudrii)) [SIG Release and Testing] + +### Failing Test + +- Allow Azure Disk e2es to use newer topology labels if available from nodes ([#117216](https://github.com/kubernetes/kubernetes/pull/117216), [@gnufied](https://github.com/gnufied)) [SIG Storage and Testing] + +### Bug or Regression + +- CVE-2023-27561 CVE-2023-25809 CVE-2023-28642: Bump fix runc v1.1.4 -> v1.1.5 ([#117242](https://github.com/kubernetes/kubernetes/pull/117242), [@haircommander](https://github.com/haircommander)) [SIG Node] +- During device plugin allocation, resources requested by the pod can only be allocated if the device plugin has registered itself to kubelet AND healthy devices are present on the node to be allocated. If these conditions are not satisfied, the pod would fail with `UnexpectedAdmissionError` error. ([#117719](https://github.com/kubernetes/kubernetes/pull/117719), [@swatisehgal](https://github.com/swatisehgal)) [SIG Node and Testing] +- Fallback from OpenAPI V3 to V2 when the OpenAPI V3 document is invalid or incomplete. ([#117980](https://github.com/kubernetes/kubernetes/pull/117980), [@seans3](https://github.com/seans3)) [SIG CLI] +- Fix bug where `listOfStrings.join()` in CEL expressions resulted in an unexpected internal error. ([#117596](https://github.com/kubernetes/kubernetes/pull/117596), [@jpbetz](https://github.com/jpbetz)) [SIG API Machinery] +- Fix incorrect calculation for ResourceQuota with PriorityClass as its scope. 
([#117825](https://github.com/kubernetes/kubernetes/pull/117825), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG API Machinery] +- Fix performance regression in scheduler caused by frequent metric lookup on critical code path. ([#117617](https://github.com/kubernetes/kubernetes/pull/117617), [@tosi3k](https://github.com/tosi3k)) [SIG Scheduling] +- Fix: the volume is not detached after the pod and PVC objects are deleted ([#117236](https://github.com/kubernetes/kubernetes/pull/117236), [@cvvz](https://github.com/cvvz)) [SIG Storage] +- Fixed a memory leak in the Kubernetes API server that occurs during APIService processing. ([#117310](https://github.com/kubernetes/kubernetes/pull/117310), [@enj](https://github.com/enj)) [SIG API Machinery] +- Fixes a race condition serving OpenAPI content ([#117708](https://github.com/kubernetes/kubernetes/pull/117708), [@Jefftree](https://github.com/Jefftree)) [SIG API Machinery, Architecture, Auth, CLI, Cloud Provider, Instrumentation and Node] +- Fixes a regression in kubectl and client-go discovery when configured with a server URL other than the root of a server. ([#117685](https://github.com/kubernetes/kubernetes/pull/117685), [@ardaguclu](https://github.com/ardaguclu)) [SIG API Machinery] +- Fixes bug where an incomplete OpenAPI V3 document can cause a nil-pointer crash. + Ensures fallback to OpenAPI V2 endpoint for errors retrieving OpenAPI V3 document. ([#117918](https://github.com/kubernetes/kubernetes/pull/117918), [@seans3](https://github.com/seans3)) [SIG CLI] +- Kubeadm: fix a bug where file copy(backup) could not be executed correctly on Windows platform during upgrade ([#117861](https://github.com/kubernetes/kubernetes/pull/117861), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Kubelet terminates pods correctly upon restart, fixing an issue where pods may have not been fully terminated if the kubelet was restarted during pod termination. 
([#117433](https://github.com/kubernetes/kubernetes/pull/117433), [@bobbypage](https://github.com/bobbypage)) [SIG Node and Testing] +- Number of errors reported to the metric `storage_operation_duration_seconds_count` for emptyDir decreased significantly because previously one error was reported for each projected volume created. ([#117022](https://github.com/kubernetes/kubernetes/pull/117022), [@mpatlasov](https://github.com/mpatlasov)) [SIG Storage] +- Resolves a spurious "Unknown discovery response content-type" error in client-go discovery requests by tolerating extra content-type parameters in API responses ([#117637](https://github.com/kubernetes/kubernetes/pull/117637), [@seans3](https://github.com/seans3)) [SIG API Machinery] +- Reverted NewVolumeManagerReconstruction and SELinuxMountReadWriteOncePod feature gates to disabled by default to resolve a regression of volume reconstruction on kubelet/node restart ([#117752](https://github.com/kubernetes/kubernetes/pull/117752), [@liggitt](https://github.com/liggitt)) [SIG Storage] +- Static pods were taking extra time to be restarted after being updated. Static pods that are waiting to restart were not correctly counted in `kubelet_working_pods`. ([#116995](https://github.com/kubernetes/kubernetes/pull/116995), [@smarterclayton](https://github.com/smarterclayton)) [SIG Node] +- [KCCM] service controller: change the cloud controller manager to make `providerID` a predicate when synchronizing nodes. This change allows load balancer integrations to ensure that the `providerID` is set when configuring + load balancers and targets. 
([#117450](https://github.com/kubernetes/kubernetes/pull/117450), [@alexanderConstantinescu](https://github.com/alexanderConstantinescu)) [SIG Cloud Provider and Network] + +### Other (Cleanup or Flake) + +- A v2-level info log will be added, which will output the details of the pod being preempted, including victim and preemptor ([#117214](https://github.com/kubernetes/kubernetes/pull/117214), [@HirazawaUi](https://github.com/HirazawaUi)) [SIG Scheduling] +- Structured logging of NamespacedName was inconsistent with klog.KObj. Now both use lower case field names and namespace is optional. ([#117238](https://github.com/kubernetes/kubernetes/pull/117238), [@pohly](https://github.com/pohly)) [SIG API Machinery, Architecture and Instrumentation] + +## Dependencies + +### Added +_Nothing has changed._ + +### Changed +- github.com/opencontainers/runc: [v1.1.4 → v1.1.6](https://github.com/opencontainers/runc/compare/v1.1.4...v1.1.6) +- k8s.io/kube-openapi: 15aac26 → 8b0f38b +- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.1.1 → v0.1.2 + +### Removed +_Nothing has changed._ + + + # v1.27.1 @@ -505,9 +647,9 @@ The cause PR is [reverted](https://github.com/kubernetes/kubernetes/pull/117194) - Graduated `matchLabelKeys` in `podTopologySpread` to Beta ([#116291](https://github.com/kubernetes/kubernetes/pull/116291), [@denkensk](https://github.com/denkensk)) - Graduated the `CSINodeExpandSecret` feature to Beta. This feature facilitates passing secrets to CSI driver as part of Node Expansion CSI operation. ([#115621](https://github.com/kubernetes/kubernetes/pull/115621), [@humblec](https://github.com/humblec)) - Graduated the `LegacyServiceAccountTokenTracking` feature gate to Beta. The usage of auto-generated secret-based service account token now produces warnings by default, and relevant Secrets are labeled with a last-used timestamp (label key `kubernetes.io/legacy-token-last-used`). 
([#114523](https://github.com/kubernetes/kubernetes/pull/114523), [@zshihang](https://github.com/zshihang)) [SIG API Machinery and Auth] -- HPA controller exposes the following metrics from the kube-controller-manager. - - `metric_computation_duration_seconds`: Number of metric computations. - - `metric_computation_total`: The time(seconds) that the HPA controller takes to calculate one metric. ([#116326](https://github.com/kubernetes/kubernetes/pull/116326), [@sanposhiho](https://github.com/sanposhiho)) [SIG Apps, Autoscaling and Instrumentation] +- HPA controller exposes the following metrics from the kube-controller-manager. + - `metric_computation_duration_seconds`: The time(seconds) that the HPA controller takes to calculate one metric. + - `metric_computation_total`: Number of metric computations. ([#116326](https://github.com/kubernetes/kubernetes/pull/116326), [@sanposhiho](https://github.com/sanposhiho)) [SIG Apps, Autoscaling and Instrumentation] - HPA controller starts to expose metrics from the kube-controller-manager.\n- `reconciliations_total`: Number of reconciliation of HPA controller. \n- `reconciliation_duration_seconds`: The time(seconds) that the HPA controller takes to reconcile once. 
([#116010](https://github.com/kubernetes/kubernetes/pull/116010), [@sanposhiho](https://github.com/sanposhiho)) - Kube-up now includes `CoreDNS` version `v1.9.3` ([#114279](https://github.com/kubernetes/kubernetes/pull/114279), [@pacoxu](https://github.com/pacoxu)) - Kubeadm: added the experimental (alpha) feature gate `EtcdLearnerMode` that allows etcd members to be joined as learner and only then promoted as voting members ([#113318](https://github.com/kubernetes/kubernetes/pull/113318), [@pacoxu](https://github.com/pacoxu)) diff --git a/build/build-image/cross/VERSION b/build/build-image/cross/VERSION index 18cfc5441432d..5451e5e9d372d 100644 --- a/build/build-image/cross/VERSION +++ b/build/build-image/cross/VERSION @@ -1 +1 @@ -v1.27.0-go1.20.4-bullseye.0 +v1.27.0-go1.20.5-bullseye.0 diff --git a/build/common.sh b/build/common.sh index 9c20381f4e2bc..e796c637217e3 100755 --- a/build/common.sh +++ b/build/common.sh @@ -96,7 +96,7 @@ readonly KUBE_CONTAINER_RSYNC_PORT=8730 # These are the default versions (image tags) for their respective base images. readonly __default_distroless_iptables_version=v0.2.3 -readonly __default_go_runner_version=v2.3.1-go1.20.4-bullseye.0 +readonly __default_go_runner_version=v2.3.1-go1.20.5-bullseye.0 readonly __default_setcap_version=bullseye-v1.4.2 # These are the base images for the Docker-wrapped binaries. 
diff --git a/build/dependencies.yaml b/build/dependencies.yaml index 01fc04dfafba7..06215426db8ff 100644 --- a/build/dependencies.yaml +++ b/build/dependencies.yaml @@ -95,7 +95,7 @@ dependencies: # Golang - name: "golang: upstream version" - version: 1.20.4 + version: 1.20.5 refPaths: - path: .go-version - path: build/build-image/cross/VERSION @@ -117,7 +117,7 @@ dependencies: match: minimum_go_version=go([0-9]+\.[0-9]+) - name: "registry.k8s.io/kube-cross: dependents" - version: v1.27.0-go1.20.4-bullseye.0 + version: v1.27.0-go1.20.5-bullseye.0 refPaths: - path: build/build-image/cross/VERSION @@ -147,7 +147,7 @@ dependencies: match: configs\[DistrolessIptables\] = Config{list\.BuildImageRegistry, "distroless-iptables", "v([0-9]+)\.([0-9]+)\.([0-9]+)"} - name: "registry.k8s.io/go-runner: dependents" - version: v2.3.1-go1.20.4-bullseye.0 + version: v2.3.1-go1.20.5-bullseye.0 refPaths: - path: build/common.sh match: __default_go_runner_version= diff --git a/cmd/kube-apiserver/app/aggregator.go b/cmd/kube-apiserver/app/aggregator.go index 397c151e66a4f..cf2347bfccc70 100644 --- a/cmd/kube-apiserver/app/aggregator.go +++ b/cmd/kube-apiserver/app/aggregator.go @@ -27,7 +27,6 @@ import ( "k8s.io/klog/v2" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextensionsinformers "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -117,7 +116,7 @@ func createAggregatorConfig( return aggregatorConfig, nil } -func createAggregatorServer(aggregatorConfig *aggregatorapiserver.Config, delegateAPIServer genericapiserver.DelegationTarget, apiExtensionInformers apiextensionsinformers.SharedInformerFactory) (*aggregatorapiserver.APIAggregator, error) { +func createAggregatorServer(aggregatorConfig *aggregatorapiserver.Config, delegateAPIServer genericapiserver.DelegationTarget, apiExtensionInformers apiextensionsinformers.SharedInformerFactory, 
crdAPIEnabled bool) (*aggregatorapiserver.APIAggregator, error) { aggregatorServer, err := aggregatorConfig.Complete().NewWithDelegate(delegateAPIServer) if err != nil { return nil, err @@ -147,8 +146,12 @@ func createAggregatorServer(aggregatorConfig *aggregatorapiserver.Config, delega // let the CRD controller process the initial set of CRDs before starting the autoregistration controller. // this prevents the autoregistration controller's initial sync from deleting APIServices for CRDs that still exist. // we only need to do this if CRDs are enabled on this server. We can't use discovery because we are the source for discovery. - if aggregatorConfig.GenericConfig.MergedResourceConfig.ResourceEnabled(apiextensionsv1.SchemeGroupVersion.WithResource("customresourcedefinitions")) { + if crdAPIEnabled { + klog.Infof("waiting for initial CRD sync...") crdRegistrationController.WaitForInitialSync() + klog.Infof("initial CRD sync complete...") + } else { + klog.Infof("CRD API not enabled, starting APIService registration without waiting for initial CRD sync") } autoRegistrationController.Run(5, context.StopCh) }() diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index dc10b88658bd3..28effc2fd042d 100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -39,6 +39,7 @@ import ( oteltrace "go.opentelemetry.io/otel/trace" corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" extensionsapiserver "k8s.io/apiextensions-apiserver/pkg/apiserver" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -232,6 +233,7 @@ func CreateServerChain(completedOptions completedServerRunOptions) (*aggregatora if err != nil { return nil, err } + crdAPIEnabled := apiExtensionsConfig.GenericConfig.MergedResourceConfig.ResourceEnabled(apiextensionsv1.SchemeGroupVersion.WithResource("customresourcedefinitions")) notFoundHandler := 
notfoundhandler.New(kubeAPIServerConfig.GenericConfig.Serializer, genericapifilters.NoMuxAndDiscoveryIncompleteKey) apiExtensionsServer, err := createAPIExtensionsServer(apiExtensionsConfig, genericapiserver.NewEmptyDelegateWithCustomHandler(notFoundHandler)) @@ -249,7 +251,7 @@ func CreateServerChain(completedOptions completedServerRunOptions) (*aggregatora if err != nil { return nil, err } - aggregatorServer, err := createAggregatorServer(aggregatorConfig, kubeAPIServer.GenericAPIServer, apiExtensionsServer.Informers) + aggregatorServer, err := createAggregatorServer(aggregatorConfig, kubeAPIServer.GenericAPIServer, apiExtensionsServer.Informers, crdAPIEnabled) if err != nil { // we don't need special handling for innerStopCh because the aggregator server doesn't create any go routines return nil, err diff --git a/cmd/kube-apiserver/app/testing/testserver.go b/cmd/kube-apiserver/app/testing/testserver.go index 56b23788d13ab..2fe17fb3e3455 100644 --- a/cmd/kube-apiserver/app/testing/testserver.go +++ b/cmd/kube-apiserver/app/testing/testserver.go @@ -62,6 +62,9 @@ type TearDownFunc func() // TestServerInstanceOptions Instance options the TestServer type TestServerInstanceOptions struct { + // SkipHealthzCheck returns without waiting for the server to become healthy. + // Useful for testing server configurations expected to prevent /healthz from completing. + SkipHealthzCheck bool // Enable cert-auth for the kube-apiserver EnableCertAuth bool // Wrap the storage version interface of the created server's generic server. 
@@ -262,40 +265,42 @@ func StartTestServer(t Logger, instanceOptions *TestServerInstanceOptions, custo } }(stopCh) - t.Logf("Waiting for /healthz to be ok...") - client, err := kubernetes.NewForConfig(server.GenericAPIServer.LoopbackClientConfig) if err != nil { return result, fmt.Errorf("failed to create a client: %v", err) } - // wait until healthz endpoint returns ok - err = wait.Poll(100*time.Millisecond, time.Minute, func() (bool, error) { - select { - case err := <-errCh: - return false, err - default: - } + if !instanceOptions.SkipHealthzCheck { + t.Logf("Waiting for /healthz to be ok...") - req := client.CoreV1().RESTClient().Get().AbsPath("/healthz") - // The storage version bootstrap test wraps the storage version post-start - // hook, so the hook won't become health when the server bootstraps - if instanceOptions.StorageVersionWrapFunc != nil { - // We hardcode the param instead of having a new instanceOptions field - // to avoid confusing users with more options. - storageVersionCheck := fmt.Sprintf("poststarthook/%s", apiserver.StorageVersionPostStartHookName) - req.Param("exclude", storageVersionCheck) - } - result := req.Do(context.TODO()) - status := 0 - result.StatusCode(&status) - if status == 200 { - return true, nil + // wait until healthz endpoint returns ok + err = wait.Poll(100*time.Millisecond, time.Minute, func() (bool, error) { + select { + case err := <-errCh: + return false, err + default: + } + + req := client.CoreV1().RESTClient().Get().AbsPath("/healthz") + // The storage version bootstrap test wraps the storage version post-start + // hook, so the hook won't become healthy when the server bootstraps + if instanceOptions.StorageVersionWrapFunc != nil { + // We hardcode the param instead of having a new instanceOptions field + // to avoid confusing users with more options. 
+ storageVersionCheck := fmt.Sprintf("poststarthook/%s", apiserver.StorageVersionPostStartHookName) + req.Param("exclude", storageVersionCheck) + } + result := req.Do(context.TODO()) + status := 0 + result.StatusCode(&status) + if status == 200 { + return true, nil + } + return false, nil + }) + if err != nil { + return result, fmt.Errorf("failed to wait for /healthz to return ok: %v", err) } - return false, nil - }) - if err != nil { - return result, fmt.Errorf("failed to wait for /healthz to return ok: %v", err) } // wait until default namespace is created diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go index 0f13dc72dc38a..0a838d0d61834 100644 --- a/cmd/kube-proxy/app/server.go +++ b/cmd/kube-proxy/app/server.go @@ -547,6 +547,7 @@ type ProxyServer struct { ConfigSyncPeriod time.Duration HealthzServer healthcheck.ProxierHealthUpdater localDetectorMode kubeproxyconfig.LocalMode + podCIDRs []string // only used for LocalModeNodeCIDR } // createClients creates a kube client and an event client from the given config and masterOverride. 
@@ -769,7 +770,7 @@ func (s *ProxyServer) Run() error { nodeConfig := config.NewNodeConfig(currentNodeInformerFactory.Core().V1().Nodes(), s.ConfigSyncPeriod) // https://issues.k8s.io/111321 if s.localDetectorMode == kubeproxyconfig.LocalModeNodeCIDR { - nodeConfig.RegisterEventHandler(&proxy.NodePodCIDRHandler{}) + nodeConfig.RegisterEventHandler(proxy.NewNodePodCIDRHandler(s.podCIDRs)) } nodeConfig.RegisterEventHandler(s.Proxier) diff --git a/cmd/kube-proxy/app/server_others.go b/cmd/kube-proxy/app/server_others.go index 56ad20f9567e2..e836292564eef 100644 --- a/cmd/kube-proxy/app/server_others.go +++ b/cmd/kube-proxy/app/server_others.go @@ -135,12 +135,14 @@ func newProxyServer( } var nodeInfo *v1.Node + podCIDRs := []string{} if detectLocalMode == proxyconfigapi.LocalModeNodeCIDR { klog.InfoS("Watching for node, awaiting podCIDR allocation", "hostname", hostname) nodeInfo, err = waitForPodCIDR(client, hostname) if err != nil { return nil, err } + podCIDRs = nodeInfo.Spec.PodCIDRs klog.InfoS("NodeInfo", "podCIDR", nodeInfo.Spec.PodCIDR, "podCIDRs", nodeInfo.Spec.PodCIDRs) } @@ -357,6 +359,7 @@ func newProxyServer( ConfigSyncPeriod: config.ConfigSyncPeriod.Duration, HealthzServer: healthzServer, localDetectorMode: detectLocalMode, + podCIDRs: podCIDRs, }, nil } diff --git a/cmd/kubeadm/app/constants/constants.go b/cmd/kubeadm/app/constants/constants.go index 60daa3fa04bc2..8dd7ae0b8499b 100644 --- a/cmd/kubeadm/app/constants/constants.go +++ b/cmd/kubeadm/app/constants/constants.go @@ -481,6 +481,7 @@ var ( 24: "3.5.7-0", 25: "3.5.7-0", 26: "3.5.7-0", + 27: "3.5.7-0", } // KubeadmCertsClusterRoleName sets the name for the ClusterRole that allows diff --git a/cmd/kubeadm/app/phases/etcd/local.go b/cmd/kubeadm/app/phases/etcd/local.go index b18fc2575ebed..daa8cc8b67a58 100644 --- a/cmd/kubeadm/app/phases/etcd/local.go +++ b/cmd/kubeadm/app/phases/etcd/local.go @@ -118,6 +118,10 @@ func RemoveStackedEtcdMemberFromCluster(client clientset.Interface, cfg *kubeadm 
klog.V(2).Infof("[etcd] get the member id from peer: %s", etcdPeerAddress) id, err := etcdClient.GetMemberID(etcdPeerAddress) if err != nil { + if errors.Is(etcdutil.ErrNoMemberIDForPeerURL, err) { + klog.V(5).Infof("[etcd] member was already removed, because no member id exists for peer %s", etcdPeerAddress) + return nil + } return err } diff --git a/cmd/kubeadm/app/util/etcd/etcd.go b/cmd/kubeadm/app/util/etcd/etcd.go index c2439536d0635..700ca0a9e9eff 100644 --- a/cmd/kubeadm/app/util/etcd/etcd.go +++ b/cmd/kubeadm/app/util/etcd/etcd.go @@ -53,6 +53,8 @@ var etcdBackoff = wait.Backoff{ Jitter: 0.1, } +var ErrNoMemberIDForPeerURL = errors.New("no member id found for peer URL") + // ClusterInterrogator is an interface to get etcd cluster related information type ClusterInterrogator interface { CheckClusterHealth() error @@ -66,27 +68,69 @@ type ClusterInterrogator interface { RemoveMember(id uint64) ([]Member, error) } +type etcdClient interface { + // Close shuts down the client's etcd connections. + Close() error + + // Endpoints lists the registered endpoints for the client. + Endpoints() []string + + // MemberList lists the current cluster membership. + MemberList(ctx context.Context) (*clientv3.MemberListResponse, error) + + // MemberAdd adds a new member into the cluster. + MemberAdd(ctx context.Context, peerAddrs []string) (*clientv3.MemberAddResponse, error) + + // MemberAddAsLearner adds a new learner member into the cluster. + MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*clientv3.MemberAddResponse, error) + + // MemberRemove removes an existing member from the cluster. + MemberRemove(ctx context.Context, id uint64) (*clientv3.MemberRemoveResponse, error) + + // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. + MemberPromote(ctx context.Context, id uint64) (*clientv3.MemberPromoteResponse, error) + + // Status gets the status of the endpoint. 
+ Status(ctx context.Context, endpoint string) (*clientv3.StatusResponse, error) + + // Sync synchronizes client's endpoints with the known endpoints from the etcd membership. + Sync(ctx context.Context) error +} + // Client provides connection parameters for an etcd cluster type Client struct { Endpoints []string - TLS *tls.Config + + newEtcdClient func(endpoints []string) (etcdClient, error) } // New creates a new EtcdCluster client func New(endpoints []string, ca, cert, key string) (*Client, error) { client := Client{Endpoints: endpoints} + var err error + var tlsConfig *tls.Config if ca != "" || cert != "" || key != "" { tlsInfo := transport.TLSInfo{ CertFile: cert, KeyFile: key, TrustedCAFile: ca, } - tlsConfig, err := tlsInfo.ClientConfig() + tlsConfig, err = tlsInfo.ClientConfig() if err != nil { return nil, err } - client.TLS = tlsConfig + } + + client.newEtcdClient = func(endpoints []string) (etcdClient, error) { + return clientv3.New(clientv3.Config{ + Endpoints: endpoints, + DialTimeout: etcdTimeout, + DialOptions: []grpc.DialOption{ + grpc.WithBlock(), // block until the underlying connection is up + }, + TLS: tlsConfig, + }) } return &client, nil @@ -192,24 +236,16 @@ func getRawEtcdEndpointsFromPodAnnotationWithoutRetry(client clientset.Interface // Sync synchronizes client's endpoints with the known endpoints from the etcd membership. 
func (c *Client) Sync() error { // Syncs the list of endpoints - var cli *clientv3.Client + var cli etcdClient var lastError error err := wait.ExponentialBackoff(etcdBackoff, func() (bool, error) { var err error - cli, err = clientv3.New(clientv3.Config{ - Endpoints: c.Endpoints, - DialTimeout: etcdTimeout, - DialOptions: []grpc.DialOption{ - grpc.WithBlock(), // block until the underlying connection is up - }, - TLS: c.TLS, - }) + cli, err = c.newEtcdClient(c.Endpoints) if err != nil { lastError = err return false, nil } defer cli.Close() - ctx, cancel := context.WithTimeout(context.Background(), etcdTimeout) err = cli.Sync(ctx) cancel() @@ -241,14 +277,7 @@ func (c *Client) listMembers() (*clientv3.MemberListResponse, error) { var lastError error var resp *clientv3.MemberListResponse err := wait.ExponentialBackoff(etcdBackoff, func() (bool, error) { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: c.Endpoints, - DialTimeout: etcdTimeout, - DialOptions: []grpc.DialOption{ - grpc.WithBlock(), // block until the underlying connection is up - }, - TLS: c.TLS, - }) + cli, err := c.newEtcdClient(c.Endpoints) if err != nil { lastError = err return false, nil @@ -283,7 +312,7 @@ func (c *Client) GetMemberID(peerURL string) (uint64, error) { return member.GetID(), nil } } - return 0, nil + return 0, ErrNoMemberIDForPeerURL } // ListMembers returns the member list. 
@@ -306,14 +335,7 @@ func (c *Client) RemoveMember(id uint64) ([]Member, error) { var lastError error var resp *clientv3.MemberRemoveResponse err := wait.ExponentialBackoff(etcdBackoff, func() (bool, error) { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: c.Endpoints, - DialTimeout: etcdTimeout, - DialOptions: []grpc.DialOption{ - grpc.WithBlock(), // block until the underlying connection is up - }, - TLS: c.TLS, - }) + cli, err := c.newEtcdClient(c.Endpoints) if err != nil { lastError = err return false, nil @@ -326,6 +348,10 @@ func (c *Client) RemoveMember(id uint64) ([]Member, error) { if err == nil { return true, nil } + if errors.Is(rpctypes.ErrMemberNotFound, err) { + klog.V(5).Infof("Member was already removed, because member %016x was not found", id) + return true, nil + } klog.V(5).Infof("Failed to remove etcd member: %v", err) lastError = err return false, nil @@ -365,14 +391,7 @@ func (c *Client) addMember(name string, peerAddrs string, isLearner bool) ([]Mem return nil, errors.Wrapf(err, "error parsing peer address %s", peerAddrs) } - cli, err := clientv3.New(clientv3.Config{ - Endpoints: c.Endpoints, - DialTimeout: etcdTimeout, - DialOptions: []grpc.DialOption{ - grpc.WithBlock(), // block until the underlying connection is up - }, - TLS: c.TLS, - }) + cli, err := c.newEtcdClient(c.Endpoints) if err != nil { return nil, err } @@ -482,14 +501,7 @@ func (c *Client) MemberPromote(learnerID uint64) error { } klog.V(1).Infof("[etcd] Promoting a learner as a voting member: %016x", learnerID) - cli, err := clientv3.New(clientv3.Config{ - Endpoints: c.Endpoints, - DialTimeout: etcdTimeout, - DialOptions: []grpc.DialOption{ - grpc.WithBlock(), // block until the underlying connection is up - }, - TLS: c.TLS, - }) + cli, err := c.newEtcdClient(c.Endpoints) if err != nil { return err } @@ -537,14 +549,7 @@ func (c *Client) getClusterStatus() (map[string]*clientv3.StatusResponse, error) var lastError error var resp *clientv3.StatusResponse err := 
wait.ExponentialBackoff(etcdBackoff, func() (bool, error) { - cli, err := clientv3.New(clientv3.Config{ - Endpoints: c.Endpoints, - DialTimeout: etcdTimeout, - DialOptions: []grpc.DialOption{ - grpc.WithBlock(), // block until the underlying connection is up - }, - TLS: c.TLS, - }) + cli, err := c.newEtcdClient(c.Endpoints) if err != nil { lastError = err return false, nil diff --git a/cmd/kubeadm/app/util/etcd/etcd_test.go b/cmd/kubeadm/app/util/etcd/etcd_test.go index d0aa67593b43e..ac05118c0797c 100644 --- a/cmd/kubeadm/app/util/etcd/etcd_test.go +++ b/cmd/kubeadm/app/util/etcd/etcd_test.go @@ -17,6 +17,7 @@ limitations under the License. package etcd import ( + "context" "fmt" "reflect" "strconv" @@ -24,6 +25,8 @@ import ( "github.com/pkg/errors" + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + clientv3 "go.etcd.io/etcd/client/v3" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" @@ -35,9 +38,64 @@ import ( testresources "k8s.io/kubernetes/cmd/kubeadm/test/resources" ) +var errNotImplemented = errors.New("not implemented") + +type fakeEtcdClient struct { + members []*pb.Member + endpoints []string +} + +// Close shuts down the client's etcd connections. +func (f *fakeEtcdClient) Close() error { + f.members = []*pb.Member{} + return nil +} + +// Endpoints lists the registered endpoints for the client. +func (f *fakeEtcdClient) Endpoints() []string { + return f.endpoints +} + +// MemberList lists the current cluster membership. +func (f *fakeEtcdClient) MemberList(_ context.Context) (*clientv3.MemberListResponse, error) { + return &clientv3.MemberListResponse{ + Members: f.members, + }, nil +} + +// MemberAdd adds a new member into the cluster. +func (f *fakeEtcdClient) MemberAdd(_ context.Context, peerAddrs []string) (*clientv3.MemberAddResponse, error) { + return nil, errNotImplemented +} + +// MemberAddAsLearner adds a new learner member into the cluster. 
+func (f *fakeEtcdClient) MemberAddAsLearner(_ context.Context, peerAddrs []string) (*clientv3.MemberAddResponse, error) { + return nil, errNotImplemented +} + +// MemberRemove removes an existing member from the cluster. +func (f *fakeEtcdClient) MemberRemove(_ context.Context, id uint64) (*clientv3.MemberRemoveResponse, error) { + return nil, errNotImplemented +} + +// MemberPromote promotes a member from raft learner (non-voting) to raft voting member. +func (f *fakeEtcdClient) MemberPromote(_ context.Context, id uint64) (*clientv3.MemberPromoteResponse, error) { + return nil, errNotImplemented +} + +// Status gets the status of the endpoint. +func (f *fakeEtcdClient) Status(_ context.Context, endpoint string) (*clientv3.StatusResponse, error) { + return nil, errNotImplemented +} + +// Sync synchronizes client's endpoints with the known endpoints from the etcd membership. +func (f *fakeEtcdClient) Sync(_ context.Context) error { + return errNotImplemented +} + func testGetURL(t *testing.T, getURLFunc func(*kubeadmapi.APIEndpoint) string, port int) { portStr := strconv.Itoa(port) - var tests = []struct { + tests := []struct { name string advertiseAddress string expectedURL string @@ -82,7 +140,7 @@ func TestGetPeerURL(t *testing.T) { func TestGetClientURLByIP(t *testing.T) { portStr := strconv.Itoa(constants.EtcdListenClientPort) - var tests = []struct { + tests := []struct { name string ip string expectedURL string @@ -118,7 +176,7 @@ func TestGetClientURLByIP(t *testing.T) { } func TestGetEtcdEndpointsWithBackoff(t *testing.T) { - var tests = []struct { + tests := []struct { name string pods []testresources.FakeStaticPod expectedEndpoints []string @@ -169,7 +227,7 @@ func TestGetEtcdEndpointsWithBackoff(t *testing.T) { } func TestGetRawEtcdEndpointsFromPodAnnotation(t *testing.T) { - var tests = []struct { + tests := []struct { name string pods []testresources.FakeStaticPod clientSetup func(*clientsetfake.Clientset) @@ -253,7 +311,7 @@ func 
TestGetRawEtcdEndpointsFromPodAnnotation(t *testing.T) { } func TestGetRawEtcdEndpointsFromPodAnnotationWithoutRetry(t *testing.T) { - var tests = []struct { + tests := []struct { name string pods []testresources.FakeStaticPod clientSetup func(*clientsetfake.Clientset) @@ -351,3 +409,88 @@ func TestGetRawEtcdEndpointsFromPodAnnotationWithoutRetry(t *testing.T) { }) } } + +func TestClient_GetMemberID(t *testing.T) { + type fields struct { + Endpoints []string + newEtcdClient func(endpoints []string) (etcdClient, error) + } + type args struct { + peerURL string + } + tests := []struct { + name string + fields fields + args args + want uint64 + wantErr error + }{ + { + name: "member ID found", + fields: fields{ + Endpoints: []string{}, + newEtcdClient: func(endpoints []string) (etcdClient, error) { + f := &fakeEtcdClient{ + members: []*pb.Member{ + { + ID: 1, + Name: "member1", + PeerURLs: []string{ + "https://member1:2380", + }, + }, + }, + } + return f, nil + }, + }, + args: args{ + peerURL: "https://member1:2380", + }, + wantErr: nil, + want: 1, + }, + { + name: "member ID not found", + fields: fields{ + Endpoints: []string{}, + newEtcdClient: func(endpoints []string) (etcdClient, error) { + f := &fakeEtcdClient{ + members: []*pb.Member{ + { + ID: 1, + Name: "member1", + PeerURLs: []string{ + "https://member1:2380", + }, + }, + }, + } + return f, nil + }, + }, + args: args{ + peerURL: "https://member2:2380", + }, + wantErr: ErrNoMemberIDForPeerURL, + want: 0, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &Client{ + Endpoints: tt.fields.Endpoints, + newEtcdClient: tt.fields.newEtcdClient, + } + + got, err := c.GetMemberID(tt.args.peerURL) + if !errors.Is(tt.wantErr, err) { + t.Errorf("Client.GetMemberID() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("Client.GetMemberID() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/cmd/kubeadm/app/util/staticpod/utils.go 
b/cmd/kubeadm/app/util/staticpod/utils.go index a5ff7cf4ccc09..0ed80c97e6b4d 100644 --- a/cmd/kubeadm/app/util/staticpod/utils.go +++ b/cmd/kubeadm/app/util/staticpod/utils.go @@ -18,7 +18,9 @@ package staticpod import ( "bytes" + "crypto/md5" "fmt" + "hash" "io" "math" "net/url" @@ -27,6 +29,7 @@ import ( "strings" "sync" + "github.com/davecgh/go-spew/spew" "github.com/pkg/errors" v1 "k8s.io/api/core/v1" @@ -350,16 +353,22 @@ func GetEtcdProbeEndpoint(cfg *kubeadmapi.Etcd, isIPv6 bool) (string, int, v1.UR // ManifestFilesAreEqual compares 2 files. It returns true if their contents are equal, false otherwise func ManifestFilesAreEqual(path1, path2 string) (bool, error) { - content1, err := os.ReadFile(path1) + pod1, err := ReadStaticPodFromDisk(path1) if err != nil { return false, err } - content2, err := os.ReadFile(path2) + pod2, err := ReadStaticPodFromDisk(path2) if err != nil { return false, err } - return bytes.Equal(content1, content2), nil + hasher := md5.New() + DeepHashObject(hasher, pod1) + hash1 := hasher.Sum(nil)[0:] + DeepHashObject(hasher, pod2) + hash2 := hasher.Sum(nil)[0:] + + return bytes.Equal(hash1, hash2), nil } // getProbeAddress returns a valid probe address. @@ -382,3 +391,18 @@ func GetUsersAndGroups() (*users.UsersAndGroups, error) { }) return usersAndGroups, err } + +// DeepHashObject writes specified object to hash using the spew library +// which follows pointers and prints actual values of the nested objects +// ensuring the hash does not change when a pointer changes. 
+// Copied from k8s.io/kubernetes/pkg/util/hash/hash.go#DeepHashObject +func DeepHashObject(hasher hash.Hash, objectToWrite interface{}) { + hasher.Reset() + printer := spew.ConfigState{ + Indent: " ", + SortKeys: true, + DisableMethods: true, + SpewKeys: true, + } + printer.Fprintf(hasher, "%#v", objectToWrite) +} diff --git a/cmd/kubeadm/app/util/staticpod/utils_test.go b/cmd/kubeadm/app/util/staticpod/utils_test.go index d04f7d3c5d902..8f7b2461cdc32 100644 --- a/cmd/kubeadm/app/util/staticpod/utils_test.go +++ b/cmd/kubeadm/app/util/staticpod/utils_test.go @@ -630,6 +630,35 @@ spec: - image: gcr.io/google_containers/etcd-amd64:3.1.11 status: {} ` + validPodWithDifferentFieldsOrder = ` +apiVersion: v1 +kind: Pod +metadata: + labels: + tier: control-plane + component: etcd + name: etcd + namespace: kube-system +spec: + containers: + - image: gcr.io/google_containers/etcd-amd64:3.1.11 +status: {} +` + validPod2 = ` +apiVersion: v1 +kind: Pod +metadata: + labels: + component: etcd + tier: control-plane + name: etcd + namespace: kube-system +spec: + containers: + - image: gcr.io/google_containers/etcd-amd64:3.1.12 +status: {} +` + invalidPod = `---{ broken yaml @@@` ) @@ -700,9 +729,15 @@ func TestManifestFilesAreEqual(t *testing.T) { expectedResult: true, expectErr: false, }, + { + description: "manifests are equal, ignore different fields order", + podYamls: []string{validPod, validPodWithDifferentFieldsOrder}, + expectedResult: true, + expectErr: false, + }, { description: "manifests are not equal", - podYamls: []string{validPod, validPod + "\n"}, + podYamls: []string{validPod, validPod2}, expectedResult: false, expectErr: false, }, diff --git a/openshift-hack/images/hyperkube/Dockerfile.rhel b/openshift-hack/images/hyperkube/Dockerfile.rhel index 85cee89c4de21..1abb8af2d9406 100644 --- a/openshift-hack/images/hyperkube/Dockerfile.rhel +++ b/openshift-hack/images/hyperkube/Dockerfile.rhel @@ -13,4 +13,4 @@ COPY --from=builder /tmp/build/* /usr/bin/ LABEL 
io.k8s.display-name="OpenShift Kubernetes Server Commands" \ io.k8s.description="OpenShift is a platform for developing, building, and deploying containerized applications." \ io.openshift.tags="openshift,hyperkube" \ - io.openshift.build.versions="kubernetes=1.27.2" + io.openshift.build.versions="kubernetes=1.27.3" diff --git a/pkg/kubelet/preemption/preemption.go b/pkg/kubelet/preemption/preemption.go index 5f0fb5e03c00b..e4d0cbd931b17 100644 --- a/pkg/kubelet/preemption/preemption.go +++ b/pkg/kubelet/preemption/preemption.go @@ -21,10 +21,13 @@ import ( "math" v1 "k8s.io/api/core/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/api/v1/resource" v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/kubelet/eviction" "k8s.io/kubernetes/pkg/kubelet/lifecycle" @@ -103,6 +106,14 @@ func (c *CriticalPodAdmissionHandler) evictPodsToFreeRequests(admitPod *v1.Pod, status.Phase = v1.PodFailed status.Reason = events.PreemptContainer status.Message = message + if utilfeature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) { + podutil.UpdatePodCondition(status, &v1.PodCondition{ + Type: v1.DisruptionTarget, + Status: v1.ConditionTrue, + Reason: v1.PodReasonTerminationByKubelet, + Message: "Pod was preempted by Kubelet to accommodate a critical pod.", + }) + } }) if err != nil { klog.ErrorS(err, "Failed to evict pod", "pod", klog.KObj(pod)) diff --git a/pkg/proxy/node.go b/pkg/proxy/node.go index f2cbf6b1f2d65..1845818945a26 100644 --- a/pkg/proxy/node.go +++ b/pkg/proxy/node.go @@ -33,6 +33,12 @@ type NodePodCIDRHandler struct { podCIDRs []string } +func NewNodePodCIDRHandler(podCIDRs []string) *NodePodCIDRHandler { + return &NodePodCIDRHandler{ + podCIDRs: podCIDRs, + } +} + var _ config.NodeHandler = &NodePodCIDRHandler{} // OnNodeAdd is a 
handler for Node creates. diff --git a/pkg/proxy/node_test.go b/pkg/proxy/node_test.go index ab20130b03366..2f6d7b54ad7bd 100644 --- a/pkg/proxy/node_test.go +++ b/pkg/proxy/node_test.go @@ -37,6 +37,11 @@ func TestNodePodCIDRHandlerAdd(t *testing.T) { name: "initialized correctly", newNodePodCIDRs: []string{"192.168.1.0/24", "fd00:1:2:3::/64"}, }, + { + name: "already initialized and same node", + oldNodePodCIDRs: []string{"10.0.0.0/24", "fd00:3:2:1::/64"}, + newNodePodCIDRs: []string{"10.0.0.0/24", "fd00:3:2:1::/64"}, + }, { name: "already initialized and different node", oldNodePodCIDRs: []string{"192.168.1.0/24", "fd00:1:2:3::/64"}, diff --git a/plugin/pkg/admission/imagepolicy/admission.go b/plugin/pkg/admission/imagepolicy/admission.go index 6fd7f0dfad753..f1f88fef3b005 100644 --- a/plugin/pkg/admission/imagepolicy/admission.go +++ b/plugin/pkg/admission/imagepolicy/admission.go @@ -46,6 +46,7 @@ import ( // PluginName indicates name of admission plugin. const PluginName = "ImagePolicyWebhook" +const ephemeralcontainers = "ephemeralcontainers" // AuditKeyPrefix is used as the prefix for all audit keys handled by this // pluggin. Some well known suffixes are listed below. @@ -132,8 +133,9 @@ func (a *Plugin) webhookError(pod *api.Pod, attributes admission.Attributes, err // Validate makes an admission decision based on the request attributes func (a *Plugin) Validate(ctx context.Context, attributes admission.Attributes, o admission.ObjectInterfaces) (err error) { - // Ignore all calls to subresources or resources other than pods. - if attributes.GetSubresource() != "" || attributes.GetResource().GroupResource() != api.Resource("pods") { + // Ignore all calls to subresources other than ephemeralcontainers or calls to resources other than pods. 
+ subresource := attributes.GetSubresource() + if (subresource != "" && subresource != ephemeralcontainers) || attributes.GetResource().GroupResource() != api.Resource("pods") { return nil } @@ -144,13 +146,21 @@ func (a *Plugin) Validate(ctx context.Context, attributes admission.Attributes, // Build list of ImageReviewContainerSpec var imageReviewContainerSpecs []v1alpha1.ImageReviewContainerSpec - containers := make([]api.Container, 0, len(pod.Spec.Containers)+len(pod.Spec.InitContainers)) - containers = append(containers, pod.Spec.Containers...) - containers = append(containers, pod.Spec.InitContainers...) - for _, c := range containers { - imageReviewContainerSpecs = append(imageReviewContainerSpecs, v1alpha1.ImageReviewContainerSpec{ - Image: c.Image, - }) + if subresource == "" { + containers := make([]api.Container, 0, len(pod.Spec.Containers)+len(pod.Spec.InitContainers)) + containers = append(containers, pod.Spec.Containers...) + containers = append(containers, pod.Spec.InitContainers...) 
+ for _, c := range containers { + imageReviewContainerSpecs = append(imageReviewContainerSpecs, v1alpha1.ImageReviewContainerSpec{ + Image: c.Image, + }) + } + } else if subresource == ephemeralcontainers { + for _, c := range pod.Spec.EphemeralContainers { + imageReviewContainerSpecs = append(imageReviewContainerSpecs, v1alpha1.ImageReviewContainerSpec{ + Image: c.Image, + }) + } } imageReview := v1alpha1.ImageReview{ Spec: v1alpha1.ImageReviewSpec{ diff --git a/plugin/pkg/admission/imagepolicy/admission_test.go b/plugin/pkg/admission/imagepolicy/admission_test.go index d1f81d51950c3..a9188462fb9bc 100644 --- a/plugin/pkg/admission/imagepolicy/admission_test.go +++ b/plugin/pkg/admission/imagepolicy/admission_test.go @@ -37,7 +37,6 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" "fmt" - "io/ioutil" "os" "path/filepath" "text/template" @@ -67,7 +66,7 @@ imagePolicy: ` func TestNewFromConfig(t *testing.T) { - dir, err := ioutil.TempDir("", "") + dir, err := os.MkdirTemp("", "") if err != nil { t.Fatal(err) } @@ -92,7 +91,7 @@ func TestNewFromConfig(t *testing.T) { {data.Key, clientKey}, } for _, file := range files { - if err := ioutil.WriteFile(file.name, file.data, 0400); err != nil { + if err := os.WriteFile(file.name, file.data, 0400); err != nil { t.Fatal(err) } } @@ -196,7 +195,7 @@ current-context: default // Use a closure so defer statements trigger between loop iterations. 
t.Run(tt.msg, func(t *testing.T) { err := func() error { - tempfile, err := ioutil.TempFile("", "") + tempfile, err := os.CreateTemp("", "") if err != nil { return err } @@ -211,7 +210,7 @@ current-context: default return fmt.Errorf("failed to execute test template: %v", err) } - tempconfigfile, err := ioutil.TempFile("", "") + tempconfigfile, err := os.CreateTemp("", "") if err != nil { return err } @@ -359,7 +358,7 @@ func (m *mockService) HTTPStatusCode() int { return m.statusCode } // newImagePolicyWebhook creates a temporary kubeconfig file from the provided arguments and attempts to load // a new newImagePolicyWebhook from it. func newImagePolicyWebhook(callbackURL string, clientCert, clientKey, ca []byte, cacheTime time.Duration, defaultAllow bool) (*Plugin, error) { - tempfile, err := ioutil.TempFile("", "") + tempfile, err := os.CreateTemp("", "") if err != nil { return nil, err } @@ -381,7 +380,7 @@ func newImagePolicyWebhook(callbackURL string, clientCert, clientKey, ca []byte, return nil, err } - tempconfigfile, err := ioutil.TempFile("", "") + tempconfigfile, err := os.CreateTemp("", "") if err != nil { return nil, err } @@ -595,17 +594,23 @@ func TestContainerCombinations(t *testing.T) { test string pod *api.Pod wantAllowed, wantErr bool + subresource string + operation admission.Operation }{ { test: "Single container allowed", pod: goodPod("good"), wantAllowed: true, + subresource: "", + operation: admission.Create, }, { test: "Single container denied", pod: goodPod("bad"), wantAllowed: false, wantErr: true, + subresource: "", + operation: admission.Create, }, { test: "One good container, one bad", @@ -627,6 +632,8 @@ func TestContainerCombinations(t *testing.T) { }, wantAllowed: false, wantErr: true, + subresource: "", + operation: admission.Create, }, { test: "Multiple good containers", @@ -648,6 +655,8 @@ func TestContainerCombinations(t *testing.T) { }, wantAllowed: true, wantErr: false, + subresource: "", + operation: admission.Create, }, { 
test: "Multiple bad containers", @@ -669,6 +678,8 @@ func TestContainerCombinations(t *testing.T) { }, wantAllowed: false, wantErr: true, + subresource: "", + operation: admission.Create, }, { test: "Good container, bad init container", @@ -692,6 +703,8 @@ func TestContainerCombinations(t *testing.T) { }, wantAllowed: false, wantErr: true, + subresource: "", + operation: admission.Create, }, { test: "Bad container, good init container", @@ -715,6 +728,8 @@ func TestContainerCombinations(t *testing.T) { }, wantAllowed: false, wantErr: true, + subresource: "", + operation: admission.Create, }, { test: "Good container, good init container", @@ -738,6 +753,123 @@ func TestContainerCombinations(t *testing.T) { }, wantAllowed: true, wantErr: false, + subresource: "", + operation: admission.Create, + }, + { + test: "Good container, good init container, bad ephemeral container when updating ephemeralcontainers subresource", + pod: &api.Pod{ + Spec: api.PodSpec{ + ServiceAccountName: "default", + SecurityContext: &api.PodSecurityContext{}, + Containers: []api.Container{ + { + Image: "good", + SecurityContext: &api.SecurityContext{}, + }, + }, + InitContainers: []api.Container{ + { + Image: "good", + SecurityContext: &api.SecurityContext{}, + }, + }, + EphemeralContainers: []api.EphemeralContainer{ + { + EphemeralContainerCommon: api.EphemeralContainerCommon{ + Image: "bad", + SecurityContext: &api.SecurityContext{}, + }, + }, + }, + }, + }, + wantAllowed: false, + wantErr: true, + subresource: "ephemeralcontainers", + operation: admission.Update, + }, + { + test: "Good container, good init container, bad ephemeral container when updating subresource=='' which sets initContainer and container only", + pod: &api.Pod{ + Spec: api.PodSpec{ + ServiceAccountName: "default", + SecurityContext: &api.PodSecurityContext{}, + Containers: []api.Container{ + { + Image: "good", + SecurityContext: &api.SecurityContext{}, + }, + }, + InitContainers: []api.Container{ + { + Image: "good", + 
SecurityContext: &api.SecurityContext{}, + }, + }, + EphemeralContainers: []api.EphemeralContainer{ + { + EphemeralContainerCommon: api.EphemeralContainerCommon{ + Image: "bad", + SecurityContext: &api.SecurityContext{}, + }, + }, + }, + }, + }, + wantAllowed: true, + wantErr: false, + subresource: "", + operation: admission.Update, + }, + + { + test: "Bad container, good ephemeral container when updating subresource=='ephemeralcontainers' which sets ephemeralcontainers only", + pod: &api.Pod{ + Spec: api.PodSpec{ + ServiceAccountName: "default", + SecurityContext: &api.PodSecurityContext{}, + Containers: []api.Container{ + { + Image: "bad", + SecurityContext: &api.SecurityContext{}, + }, + }, + EphemeralContainers: []api.EphemeralContainer{ + { + EphemeralContainerCommon: api.EphemeralContainerCommon{ + Image: "good", + SecurityContext: &api.SecurityContext{}, + }, + }, + }, + }, + }, + wantAllowed: true, + wantErr: false, + subresource: "ephemeralcontainers", + operation: admission.Update, + }, + { + test: "Good ephemeral container", + pod: &api.Pod{ + Spec: api.PodSpec{ + ServiceAccountName: "default", + SecurityContext: &api.PodSecurityContext{}, + EphemeralContainers: []api.EphemeralContainer{ + { + EphemeralContainerCommon: api.EphemeralContainerCommon{ + Image: "good", + SecurityContext: &api.SecurityContext{}, + }, + }, + }, + }, + }, + wantAllowed: true, + wantErr: false, + subresource: "ephemeralcontainers", + operation: admission.Update, }, } for _, tt := range tests { @@ -759,7 +891,7 @@ func TestContainerCombinations(t *testing.T) { return } - attr := admission.NewAttributesRecord(tt.pod, nil, api.Kind("Pod").WithVersion("version"), "namespace", "", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, &user.DefaultInfo{}) + attr := admission.NewAttributesRecord(tt.pod, nil, api.Kind("Pod").WithVersion("version"), "namespace", "", api.Resource("pods").WithVersion("version"), tt.subresource, tt.operation, 
&metav1.CreateOptions{}, false, &user.DefaultInfo{}) err = wh.Validate(context.TODO(), attr, nil) if tt.wantAllowed { diff --git a/plugin/pkg/admission/serviceaccount/admission.go b/plugin/pkg/admission/serviceaccount/admission.go index 0110f9e7bf2fd..7b2ee011f46cf 100644 --- a/plugin/pkg/admission/serviceaccount/admission.go +++ b/plugin/pkg/admission/serviceaccount/admission.go @@ -99,7 +99,7 @@ var _ = genericadmissioninitializer.WantsExternalKubeInformerFactory(&Plugin{}) // 5. If MountServiceAccountToken is true, it adds a VolumeMount with the pod's ServiceAccount's api token secret to containers func NewServiceAccount() *Plugin { return &Plugin{ - Handler: admission.NewHandler(admission.Create), + Handler: admission.NewHandler(admission.Create, admission.Update), // TODO: enable this once we've swept secret usage to account for adding secret references to service accounts LimitSecretReferences: false, // Auto mount service account API token secrets @@ -139,7 +139,10 @@ func (s *Plugin) Admit(ctx context.Context, a admission.Attributes, o admission. if shouldIgnore(a) { return nil } - + if a.GetOperation() != admission.Create { + // we only mutate pods during create requests + return nil + } pod := a.GetObject().(*api.Pod) // Don't modify the spec of mirror pods. @@ -156,7 +159,7 @@ func (s *Plugin) Admit(ctx context.Context, a admission.Attributes, o admission. 
serviceAccount, err := s.getServiceAccount(a.GetNamespace(), pod.Spec.ServiceAccountName) if err != nil { - return admission.NewForbidden(a, fmt.Errorf("error looking up service account %s/%s: %v", a.GetNamespace(), pod.Spec.ServiceAccountName, err)) + return admission.NewForbidden(a, fmt.Errorf("error looking up service account %s/%s: %w", a.GetNamespace(), pod.Spec.ServiceAccountName, err)) } if s.MountServiceAccountToken && shouldAutomount(serviceAccount, pod) { s.mountServiceAccountToken(serviceAccount, pod) @@ -179,6 +182,15 @@ func (s *Plugin) Validate(ctx context.Context, a admission.Attributes, o admissi pod := a.GetObject().(*api.Pod) + if a.GetOperation() == admission.Update && a.GetSubresource() == "ephemeralcontainers" { + return s.limitEphemeralContainerSecretReferences(pod, a) + } + + if a.GetOperation() != admission.Create { + // we only validate pod specs during create requests + return nil + } + // Mirror pods have restrictions on what they can reference if _, isMirrorPod := pod.Annotations[api.MirrorPodAnnotationKey]; isMirrorPod { if len(pod.Spec.ServiceAccountName) != 0 { @@ -204,6 +216,10 @@ func (s *Plugin) Validate(ctx context.Context, a admission.Attributes, o admissi return nil } + // Require container pods to have service accounts + if len(pod.Spec.ServiceAccountName) == 0 { + return admission.NewForbidden(a, fmt.Errorf("no service account specified for pod %s/%s", a.GetNamespace(), pod.Name)) + } // Ensure the referenced service account exists serviceAccount, err := s.getServiceAccount(a.GetNamespace(), pod.Spec.ServiceAccountName) if err != nil { @@ -220,10 +236,7 @@ func (s *Plugin) Validate(ctx context.Context, a admission.Attributes, o admissi } func shouldIgnore(a admission.Attributes) bool { - if a.GetResource().GroupResource() != api.Resource("pods") { - return true - } - if a.GetSubresource() != "" { + if a.GetResource().GroupResource() != api.Resource("pods") || (a.GetSubresource() != "" && a.GetSubresource() != 
"ephemeralcontainers") { return true } obj := a.GetObject() @@ -349,6 +362,36 @@ func (s *Plugin) limitSecretReferences(serviceAccount *corev1.ServiceAccount, po return nil } +func (s *Plugin) limitEphemeralContainerSecretReferences(pod *api.Pod, a admission.Attributes) error { + // Require ephemeral container pods to have service accounts + if len(pod.Spec.ServiceAccountName) == 0 { + return admission.NewForbidden(a, fmt.Errorf("no service account specified for pod %s/%s", a.GetNamespace(), pod.Name)) + } + // Ensure the referenced service account exists + serviceAccount, err := s.getServiceAccount(a.GetNamespace(), pod.Spec.ServiceAccountName) + if err != nil { + return admission.NewForbidden(a, fmt.Errorf("error looking up service account %s/%s: %w", a.GetNamespace(), pod.Spec.ServiceAccountName, err)) + } + if !s.enforceMountableSecrets(serviceAccount) { + return nil + } + // Ensure all secrets the ephemeral containers reference are allowed by the service account + mountableSecrets := sets.NewString() + for _, s := range serviceAccount.Secrets { + mountableSecrets.Insert(s.Name) + } + for _, container := range pod.Spec.EphemeralContainers { + for _, env := range container.Env { + if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil { + if !mountableSecrets.Has(env.ValueFrom.SecretKeyRef.Name) { + return fmt.Errorf("ephemeral container %s with envVar %s referencing secret.secretName=\"%s\" is not allowed because service account %s does not reference that secret", container.Name, env.Name, env.ValueFrom.SecretKeyRef.Name, serviceAccount.Name) + } + } + } + } + return nil +} + func (s *Plugin) mountServiceAccountToken(serviceAccount *corev1.ServiceAccount, pod *api.Pod) { // Find the volume and volume name for the ServiceAccountTokenSecret if it already exists tokenVolumeName := "" diff --git a/plugin/pkg/admission/serviceaccount/admission_test.go b/plugin/pkg/admission/serviceaccount/admission_test.go index 29470fae17ac8..ecc6b0cdf01ff 100644 --- 
a/plugin/pkg/admission/serviceaccount/admission_test.go +++ b/plugin/pkg/admission/serviceaccount/admission_test.go @@ -545,6 +545,34 @@ func TestAllowsReferencedSecret(t *testing.T) { if err := admissiontesting.WithReinvocationTesting(t, admit).Admit(context.TODO(), attrs, nil); err != nil { t.Errorf("Unexpected error: %v", err) } + + pod2 = &api.Pod{ + Spec: api.PodSpec{ + ServiceAccountName: DefaultServiceAccountName, + EphemeralContainers: []api.EphemeralContainer{ + { + EphemeralContainerCommon: api.EphemeralContainerCommon{ + Name: "container-2", + Env: []api.EnvVar{ + { + Name: "env-1", + ValueFrom: &api.EnvVarSource{ + SecretKeyRef: &api.SecretKeySelector{ + LocalObjectReference: api.LocalObjectReference{Name: "foo"}, + }, + }, + }, + }, + }, + }, + }, + }, + } + // validate enforces restrictions on secret mounts when operation==create and subresource=='' or operation==update and subresource==ephemeralcontainers" + attrs = admission.NewAttributesRecord(pod2, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "ephemeralcontainers", admission.Update, &metav1.UpdateOptions{}, false, nil) + if err := admit.Validate(context.TODO(), attrs, nil); err != nil { + t.Errorf("Unexpected error: %v", err) + } } func TestRejectsUnreferencedSecretVolumes(t *testing.T) { @@ -622,6 +650,66 @@ func TestRejectsUnreferencedSecretVolumes(t *testing.T) { if err := admissiontesting.WithReinvocationTesting(t, admit).Admit(context.TODO(), attrs, nil); err == nil || !strings.Contains(err.Error(), "with envVar") { t.Errorf("Unexpected error: %v", err) } + + pod2 = &api.Pod{ + Spec: api.PodSpec{ + ServiceAccountName: DefaultServiceAccountName, + InitContainers: []api.Container{ + { + Name: "container-1", + Env: []api.EnvVar{ + { + Name: "env-1", + ValueFrom: &api.EnvVarSource{ + SecretKeyRef: &api.SecretKeySelector{ + LocalObjectReference: api.LocalObjectReference{Name: "foo"}, + }, + }, + }, + }, + }, + }, + }, + } + attrs = 
admission.NewAttributesRecord(pod2, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Update, &metav1.UpdateOptions{}, false, nil) + if err := admissiontesting.WithReinvocationTesting(t, admit).Admit(context.TODO(), attrs, nil); err != nil { + t.Errorf("admit only enforces restrictions on secret mounts when operation==create. Unexpected error: %v", err) + } + attrs = admission.NewAttributesRecord(pod2, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil) + if err := admit.Validate(context.TODO(), attrs, nil); err == nil || !strings.Contains(err.Error(), "with envVar") { + t.Errorf("validate only enforces restrictions on secret mounts when operation==create and subresource==''. Unexpected error: %v", err) + } + + pod2 = &api.Pod{ + Spec: api.PodSpec{ + ServiceAccountName: DefaultServiceAccountName, + EphemeralContainers: []api.EphemeralContainer{ + { + EphemeralContainerCommon: api.EphemeralContainerCommon{ + Name: "container-2", + Env: []api.EnvVar{ + { + Name: "env-1", + ValueFrom: &api.EnvVarSource{ + SecretKeyRef: &api.SecretKeySelector{ + LocalObjectReference: api.LocalObjectReference{Name: "foo"}, + }, + }, + }, + }, + }, + }, + }, + }, + } + attrs = admission.NewAttributesRecord(pod2, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "", admission.Update, &metav1.UpdateOptions{}, false, nil) + if err := admissiontesting.WithReinvocationTesting(t, admit).Admit(context.TODO(), attrs, nil); err != nil { + t.Errorf("admit only enforces restrictions on secret mounts when operation==create and subresource==''. 
Unexpected error: %v", err) + } + attrs = admission.NewAttributesRecord(pod2, nil, api.Kind("Pod").WithVersion("version"), ns, "myname", api.Resource("pods").WithVersion("version"), "ephemeralcontainers", admission.Update, &metav1.UpdateOptions{}, false, nil) + if err := admit.Validate(context.TODO(), attrs, nil); err == nil || !strings.Contains(err.Error(), "with envVar") { + t.Errorf("validate enforces restrictions on secret mounts when operation==update and subresource==ephemeralcontainers. Unexpected error: %v", err) + } } func TestAllowUnreferencedSecretVolumesForPermissiveSAs(t *testing.T) { diff --git a/staging/publishing/rules.yaml b/staging/publishing/rules.yaml index 67449147d8c20..21aefded6f4cf 100644 --- a/staging/publishing/rules.yaml +++ b/staging/publishing/rules.yaml @@ -21,7 +21,7 @@ rules: branch: release-1.26 dir: staging/src/k8s.io/code-generator - name: release-1.27 - go: 1.20.4 + go: 1.20.5 source: branch: release-1.27 dir: staging/src/k8s.io/code-generator @@ -47,7 +47,7 @@ rules: branch: release-1.26 dir: staging/src/k8s.io/apimachinery - name: release-1.27 - go: 1.20.4 + go: 1.20.5 source: branch: release-1.27 dir: staging/src/k8s.io/apimachinery @@ -86,7 +86,7 @@ rules: branch: release-1.26 dir: staging/src/k8s.io/api - name: release-1.27 - go: 1.20.4 + go: 1.20.5 dependencies: - repository: apimachinery branch: release-1.27 @@ -152,7 +152,7 @@ rules: go build -mod=mod ./... go test -mod=mod ./... 
- name: release-1.27 - go: 1.20.4 + go: 1.20.5 dependencies: - repository: apimachinery branch: release-1.27 @@ -216,7 +216,7 @@ rules: branch: release-1.26 dir: staging/src/k8s.io/component-base - name: release-1.27 - go: 1.20.4 + go: 1.20.5 dependencies: - repository: apimachinery branch: release-1.27 @@ -278,7 +278,7 @@ rules: branch: release-1.26 dir: staging/src/k8s.io/component-helpers - name: release-1.27 - go: 1.20.4 + go: 1.20.5 dependencies: - repository: apimachinery branch: release-1.27 @@ -309,7 +309,7 @@ rules: branch: release-1.26 dir: staging/src/k8s.io/kms - name: release-1.27 - go: 1.20.4 + go: 1.20.5 dependencies: - repository: apimachinery branch: release-1.27 @@ -383,7 +383,7 @@ rules: branch: release-1.26 dir: staging/src/k8s.io/apiserver - name: release-1.27 - go: 1.20.4 + go: 1.20.5 dependencies: - repository: apimachinery branch: release-1.27 @@ -477,7 +477,7 @@ rules: branch: release-1.26 dir: staging/src/k8s.io/kube-aggregator - name: release-1.27 - go: 1.20.4 + go: 1.20.5 dependencies: - repository: apimachinery branch: release-1.27 @@ -594,7 +594,7 @@ rules: # assumes GO111MODULE=on go build -mod=mod . - name: release-1.27 - go: 1.20.4 + go: 1.20.5 dependencies: - repository: apimachinery branch: release-1.27 @@ -696,7 +696,7 @@ rules: # assumes GO111MODULE=on go build -mod=mod . 
- name: release-1.27 - go: 1.20.4 + go: 1.20.5 dependencies: - repository: apimachinery branch: release-1.27 @@ -800,7 +800,7 @@ rules: required-packages: - k8s.io/code-generator - name: release-1.27 - go: 1.20.4 + go: 1.20.5 dependencies: - repository: apimachinery branch: release-1.27 @@ -879,7 +879,7 @@ rules: branch: release-1.26 dir: staging/src/k8s.io/metrics - name: release-1.27 - go: 1.20.4 + go: 1.20.5 dependencies: - repository: apimachinery branch: release-1.27 @@ -943,7 +943,7 @@ rules: branch: release-1.26 dir: staging/src/k8s.io/cli-runtime - name: release-1.27 - go: 1.20.4 + go: 1.20.5 dependencies: - repository: api branch: release-1.27 @@ -1013,7 +1013,7 @@ rules: branch: release-1.26 dir: staging/src/k8s.io/sample-cli-plugin - name: release-1.27 - go: 1.20.4 + go: 1.20.5 dependencies: - repository: api branch: release-1.27 @@ -1084,7 +1084,7 @@ rules: branch: release-1.26 dir: staging/src/k8s.io/kube-proxy - name: release-1.27 - go: 1.20.4 + go: 1.20.5 dependencies: - repository: apimachinery branch: release-1.27 @@ -1156,7 +1156,7 @@ rules: branch: release-1.26 dir: staging/src/k8s.io/kubelet - name: release-1.27 - go: 1.20.4 + go: 1.20.5 dependencies: - repository: apimachinery branch: release-1.27 @@ -1228,7 +1228,7 @@ rules: branch: release-1.26 dir: staging/src/k8s.io/kube-scheduler - name: release-1.27 - go: 1.20.4 + go: 1.20.5 dependencies: - repository: apimachinery branch: release-1.27 @@ -1312,7 +1312,7 @@ rules: branch: release-1.26 dir: staging/src/k8s.io/controller-manager - name: release-1.27 - go: 1.20.4 + go: 1.20.5 dependencies: - repository: api branch: release-1.27 @@ -1416,7 +1416,7 @@ rules: branch: release-1.26 dir: staging/src/k8s.io/cloud-provider - name: release-1.27 - go: 1.20.4 + go: 1.20.5 dependencies: - repository: api branch: release-1.27 @@ -1532,7 +1532,7 @@ rules: branch: release-1.26 dir: staging/src/k8s.io/kube-controller-manager - name: release-1.27 - go: 1.20.4 + go: 1.20.5 dependencies: - repository: 
apimachinery branch: release-1.27 @@ -1598,7 +1598,7 @@ rules: branch: release-1.26 dir: staging/src/k8s.io/cluster-bootstrap - name: release-1.27 - go: 1.20.4 + go: 1.20.5 dependencies: - repository: apimachinery branch: release-1.27 @@ -1650,7 +1650,7 @@ rules: branch: release-1.26 dir: staging/src/k8s.io/csi-translation-lib - name: release-1.27 - go: 1.20.4 + go: 1.20.5 dependencies: - repository: api branch: release-1.27 @@ -1682,7 +1682,7 @@ rules: branch: release-1.26 dir: staging/src/k8s.io/mount-utils - name: release-1.27 - go: 1.20.4 + go: 1.20.5 source: branch: release-1.27 dir: staging/src/k8s.io/mount-utils @@ -1793,7 +1793,7 @@ rules: branch: release-1.26 dir: staging/src/k8s.io/legacy-cloud-providers - name: release-1.27 - go: 1.20.4 + go: 1.20.5 dependencies: - repository: api branch: release-1.27 @@ -1839,7 +1839,7 @@ rules: branch: release-1.26 dir: staging/src/k8s.io/cri-api - name: release-1.27 - go: 1.20.4 + go: 1.20.5 source: branch: release-1.27 dir: staging/src/k8s.io/cri-api @@ -1934,7 +1934,7 @@ rules: branch: release-1.26 dir: staging/src/k8s.io/kubectl - name: release-1.27 - go: 1.20.4 + go: 1.20.5 dependencies: - repository: api branch: release-1.27 @@ -2026,7 +2026,7 @@ rules: branch: release-1.26 dir: staging/src/k8s.io/pod-security-admission - name: release-1.27 - go: 1.20.4 + go: 1.20.5 dependencies: - repository: api branch: release-1.27 @@ -2078,7 +2078,7 @@ rules: branch: release-1.26 dir: staging/src/k8s.io/dynamic-resource-allocation - name: release-1.27 - go: 1.20.4 + go: 1.20.5 dependencies: - repository: apimachinery branch: release-1.27 @@ -2095,4 +2095,4 @@ rules: dir: staging/src/k8s.io/dynamic-resource-allocation recursive-delete-patterns: - '*/.gitattributes' -default-go-version: 1.20.4 +default-go-version: 1.20.5 diff --git a/staging/src/k8s.io/apiserver/pkg/util/webhook/webhook_test.go b/staging/src/k8s.io/apiserver/pkg/util/webhook/webhook_test.go index 0857cc36cb8a8..12652d247577a 100644 --- 
a/staging/src/k8s.io/apiserver/pkg/util/webhook/webhook_test.go +++ b/staging/src/k8s.io/apiserver/pkg/util/webhook/webhook_test.go @@ -47,7 +47,7 @@ import ( ) const ( - errBadCertificate = "Get .*: remote error: tls: bad certificate" + errBadCertificate = "Get .*: remote error: tls: (bad certificate|unknown certificate authority)" errNoConfiguration = "invalid configuration: no configuration has been provided" errMissingCertPath = "invalid configuration: unable to read %s %s for %s due to open %s: .*" errSignedByUnknownCA = "Get .*: x509: .*(unknown authority|not standards compliant|not trusted)" diff --git a/test/e2e/framework/get.go b/test/e2e/framework/get.go index ffa26d3078df5..1e1a83856d6c2 100644 --- a/test/e2e/framework/get.go +++ b/test/e2e/framework/get.go @@ -100,7 +100,10 @@ func ShouldRetry(err error) (retry bool, retryAfter time.Duration) { } // these errors indicate a transient error that should be retried. - if apierrors.IsTimeout(err) || apierrors.IsTooManyRequests(err) || errors.As(err, &transientError{}) { + if apierrors.IsTimeout(err) || + apierrors.IsTooManyRequests(err) || + apierrors.IsServiceUnavailable(err) || + errors.As(err, &transientError{}) { return true, 0 } diff --git a/test/e2e/storage/csi_mock/csi_storage_capacity.go b/test/e2e/storage/csi_mock/csi_storage_capacity.go index 52d79259a79d0..5a171b2519853 100644 --- a/test/e2e/storage/csi_mock/csi_storage_capacity.go +++ b/test/e2e/storage/csi_mock/csi_storage_capacity.go @@ -320,7 +320,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() { } for _, t := range tests { test := t - ginkgo.It(t.name, ginkgo.SpecTimeout(f.Timeouts.PodStart), func(ctx context.Context) { + ginkgo.It(t.name, ginkgo.NodeTimeout(f.Timeouts.PodStart), func(ctx context.Context) { scName := "mock-csi-storage-capacity-" + f.UniqueName m.init(ctx, testParameters{ registerDriver: true, diff --git a/test/e2e_node/critical_pod_test.go b/test/e2e_node/critical_pod_test.go index 
fa700fdb08413..088640117ac49 100644 --- a/test/e2e_node/critical_pod_test.go +++ b/test/e2e_node/critical_pod_test.go @@ -23,6 +23,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" kubeapi "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/scheduling" kubelettypes "k8s.io/kubernetes/pkg/kubelet/types" @@ -87,6 +88,49 @@ var _ = SIGDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod] } } }) + + ginkgo.It("should add DisruptionTarget condition to the preempted pod [NodeFeature:PodDisruptionConditions]", func(ctx context.Context) { + // because adminssion Priority enable, If the priority class is not found, the Pod is rejected. + node := getNodeName(ctx, f) + nonCriticalGuaranteed := getTestPod(false, guaranteedPodName, v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("100Mi"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("100Mi"), + }, + }, node) + + criticalPod := getTestPod(true, criticalPodName, v1.ResourceRequirements{ + // request the entire resource capacity of the node, so that + // admitting this pod requires the other pod to be preempted + Requests: getNodeCPUAndMemoryCapacity(ctx, f), + }, node) + criticalPod.Namespace = kubeapi.NamespaceSystem + + ginkgo.By(fmt.Sprintf("create the non-critical pod %q", klog.KObj(nonCriticalGuaranteed))) + e2epod.NewPodClient(f).CreateSync(ctx, nonCriticalGuaranteed) + + ginkgo.By(fmt.Sprintf("create the critical pod %q", klog.KObj(criticalPod))) + e2epod.PodClientNS(f, kubeapi.NamespaceSystem).Create(ctx, criticalPod) + + ginkgo.By(fmt.Sprintf("await for the critical pod %q to be ready", klog.KObj(criticalPod))) + err := e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, criticalPod.Name, kubeapi.NamespaceSystem) + 
framework.ExpectNoError(err, "Failed to await for the pod to be running: %q", klog.KObj(criticalPod)) + + // Check that non-critical pods other than the besteffort have been evicted + updatedPodList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{}) + framework.ExpectNoError(err) + for _, p := range updatedPodList.Items { + ginkgo.By(fmt.Sprintf("verify that the non-critical pod %q is preempted and has the DisruptionTarget condition", klog.KObj(&p))) + framework.ExpectEqual(p.Status.Phase, v1.PodSucceeded, fmt.Sprintf("pod: %v should be preempted with status: %#v", p.Name, p.Status)) + if condition := e2epod.FindPodConditionByType(&p.Status, v1.DisruptionTarget); condition == nil { + framework.Failf("pod %q should have the condition: %q, pod status: %v", klog.KObj(&p), v1.DisruptionTarget, p.Status) + } + } + }) ginkgo.AfterEach(func(ctx context.Context) { // Delete Pods e2epod.NewPodClient(f).DeleteSync(ctx, guaranteedPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout) diff --git a/test/images/Makefile b/test/images/Makefile index 0414c55784829..efaeac81ba206 100644 --- a/test/images/Makefile +++ b/test/images/Makefile @@ -16,7 +16,7 @@ REGISTRY ?= registry.k8s.io/e2e-test-images GOARM ?= 7 DOCKER_CERT_BASE_PATH ?= QEMUVERSION=v5.1.0-2 -GOLANG_VERSION=1.20.4 +GOLANG_VERSION=1.20.5 export ifndef WHAT diff --git a/test/integration/examples/apiserver_test.go b/test/integration/examples/apiserver_test.go index 0ce4944f65ebf..ac2ed7cfc165b 100644 --- a/test/integration/examples/apiserver_test.go +++ b/test/integration/examples/apiserver_test.go @@ -35,6 +35,7 @@ import ( "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/server/dynamiccertificates" @@ -48,6 +49,7 @@ import ( aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" 
"k8s.io/kubernetes/cmd/kube-apiserver/app" kastesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" + "k8s.io/kubernetes/test/integration" "k8s.io/kubernetes/test/integration/framework" wardlev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1" wardlev1beta1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1beta1" @@ -56,6 +58,177 @@ import ( netutils "k8s.io/utils/net" ) +func TestAPIServiceWaitOnStart(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) + t.Cleanup(cancel) + + stopCh := make(chan struct{}) + defer close(stopCh) + + etcdConfig := framework.SharedEtcd() + + etcd3Client, _, err := integration.GetEtcdClients(etcdConfig.Transport) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { etcd3Client.Close() }) + + // Pollute CRD path in etcd so CRD lists cannot succeed and the informer cannot sync + bogusCRDEtcdPath := path.Join("/", etcdConfig.Prefix, "apiextensions.k8s.io/customresourcedefinitions/bogus") + if _, err := etcd3Client.KV.Put(ctx, bogusCRDEtcdPath, `bogus data`); err != nil { + t.Fatal(err) + } + + // Populate a valid CRD and managed APIService in etcd + if _, err := etcd3Client.KV.Put( + ctx, + path.Join("/", etcdConfig.Prefix, "apiextensions.k8s.io/customresourcedefinitions/widgets.valid.example.com"), + `{ + "apiVersion":"apiextensions.k8s.io/v1beta1", + "kind":"CustomResourceDefinition", + "metadata":{ + "name":"widgets.valid.example.com", + "uid":"mycrd", + "creationTimestamp": "2022-06-08T23:46:32Z" + }, + "spec":{ + "scope": "Namespaced", + "group":"valid.example.com", + "version":"v1", + "names":{ + "kind": "Widget", + "listKind": "WidgetList", + "plural": "widgets", + "singular": "widget" + } + }, + "status": { + "acceptedNames": { + "kind": "Widget", + "listKind": "WidgetList", + "plural": "widgets", + "singular": "widget" + }, + "conditions": [ + { + "lastTransitionTime": "2023-05-18T15:03:57Z", + "message": "no conflicts found", + "reason": "NoConflicts", + "status": "True", + 
"type": "NamesAccepted" + }, + { + "lastTransitionTime": "2023-05-18T15:03:57Z", + "message": "the initial names have been accepted", + "reason": "InitialNamesAccepted", + "status": "True", + "type": "Established" + } + ], + "storedVersions": [ + "v1" + ] + } + }`); err != nil { + t.Fatal(err) + } + if _, err := etcd3Client.KV.Put( + ctx, + path.Join("/", etcdConfig.Prefix, "apiregistration.k8s.io/apiservices/v1.valid.example.com"), + `{ + "apiVersion":"apiregistration.k8s.io/v1", + "kind":"APIService", + "metadata": { + "name": "v1.valid.example.com", + "uid":"foo", + "creationTimestamp": "2022-06-08T23:46:32Z", + "labels":{"kube-aggregator.kubernetes.io/automanaged":"true"} + }, + "spec": { + "group": "valid.example.com", + "version": "v1", + "groupPriorityMinimum":100, + "versionPriority":10 + } + }`, + ); err != nil { + t.Fatal(err) + } + + // Populate a stale managed APIService in etcd + if _, err := etcd3Client.KV.Put( + ctx, + path.Join("/", etcdConfig.Prefix, "apiregistration.k8s.io/apiservices/v1.stale.example.com"), + `{ + "apiVersion":"apiregistration.k8s.io/v1", + "kind":"APIService", + "metadata": { + "name": "v1.stale.example.com", + "uid":"foo", + "creationTimestamp": "2022-06-08T23:46:32Z", + "labels":{"kube-aggregator.kubernetes.io/automanaged":"true"} + }, + "spec": { + "group": "stale.example.com", + "version": "v1", + "groupPriorityMinimum":100, + "versionPriority":10 + } + }`, + ); err != nil { + t.Fatal(err) + } + + // Start server + options := kastesting.NewDefaultTestServerOptions() + options.SkipHealthzCheck = true + testServer := kastesting.StartTestServerOrDie(t, options, nil, etcdConfig) + defer testServer.TearDownFn() + + kubeClientConfig := rest.CopyConfig(testServer.ClientConfig) + aggregatorClient := aggregatorclient.NewForConfigOrDie(kubeClientConfig) + + // ensure both APIService objects remain + for i := 0; i < 10; i++ { + if _, err := aggregatorClient.ApiregistrationV1().APIServices().Get(ctx, "v1.valid.example.com", 
metav1.GetOptions{}); err != nil { + t.Fatal(err) + } + if _, err := aggregatorClient.ApiregistrationV1().APIServices().Get(ctx, "v1.stale.example.com", metav1.GetOptions{}); err != nil { + t.Fatal(err) + } + time.Sleep(time.Second) + } + + // Clear the bogus CRD data so the informer can sync + if _, err := etcd3Client.KV.Delete(ctx, bogusCRDEtcdPath); err != nil { + t.Fatal(err) + } + t.Log("cleaned up bogus CRD data") + + // ensure the stale APIService object is cleaned up + if err := wait.Poll(time.Second, wait.ForeverTestTimeout, func() (bool, error) { + _, err := aggregatorClient.ApiregistrationV1().APIServices().Get(ctx, "v1.stale.example.com", metav1.GetOptions{}) + if err == nil { + t.Log("stale APIService still exists, waiting...") + return false, nil + } + if !apierrors.IsNotFound(err) { + return false, err + } + return true, nil + }); err != nil { + t.Fatal(err) + } + + // ensure the valid APIService object remains + for i := 0; i < 5; i++ { + time.Sleep(time.Second) + if _, err := aggregatorClient.ApiregistrationV1().APIServices().Get(ctx, "v1.valid.example.com", metav1.GetOptions{}); err != nil { + t.Fatal(err) + } + } +} + func TestAggregatedAPIServer(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) t.Cleanup(cancel)